
Please fill in the parts marked with "@":

clearvars
clc
addpath('../Generation')
addpath('../Basic_blocks')
addpath('../Algorithms')

% Loading scenarios
% ===========================
scenario=4;
[data_class, set_up] = scenarios_regression(scenario);

% Definition of the problem
%===================================
loss_lasso = @(N,U,x,y,lambda) (1/N*(U*x-y)'*(U*x-y)+lambda*norm(x,1));  % empirical lasso objective
subgrad_lasso = @(N,U,x,y,lambda) (2/N*U'*(U*x-y)+lambda*sign(x));       % a subgradient of the objective
grad_LS = @(N,U,x,y,lambda) (2/N*U'*(U*x-y));                            % gradient of the smooth LS part only
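As a quick sanity check of these definitions (purely illustrative: the sizes below are made up and the data is random, not the data in set_up), grad_LS should agree with a central finite difference of the smooth least-squares term:

% Finite-difference check of grad_LS (illustrative; assumes the
% definitions above are in the workspace; sizes are arbitrary)
N = 50; M = 10;
U = randn(N, M); y = randn(N, 1); x = randn(M, 1);
f = @(x) 1/N*(U*x-y)'*(U*x-y);           % smooth part of the loss
g = grad_LS(N, U, x, y, 0);              % lambda is unused by grad_LS
g_fd = zeros(M, 1);
for i = 1:M
    d = zeros(M, 1); d(i) = 1e-6;
    g_fd(i) = (f(x+d) - f(x-d))/2e-6;    % central difference
end
disp(norm(g - g_fd))                     % should be close to zero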

% Solution of the empirical risk using CVX
%=========================================
x_lasso_cvx=solver_cvx(set_up,@(N,A,x,y,lambda)loss_lasso(N,A,x,y,lambda));
loss_opt=loss_lasso(set_up.Niter_train,set_up.Utrain(:,1:set_up.M+1),x_lasso_cvx,set_up.ytrain(:,1),set_up.Lambda);
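solver_cvx is a course helper whose code is not shown. A minimal sketch of what it could look like with CVX follows; the body is an assumption, with the set_up fields used exactly as in the calls above. Note that CVX rejects the raw quadratic form (U*x-y)'*(U*x-y) in loss_lasso, so the sketch restates the objective with the CVX atom sum_square:

function x_opt = solver_cvx(set_up, loss)
% Illustrative sketch only: solve the lasso empirical risk with CVX.
% The loss handle is kept for interface compatibility; the objective
% is restated with CVX atoms because CVX rejects raw quadratic forms.
U = set_up.Utrain(:, 1:set_up.M+1);
y = set_up.ytrain(:, 1);
N = set_up.Niter_train;
M1 = set_up.M + 1;
cvx_begin quiet
    variable x(M1)
    minimize( 1/N*sum_square(U*x - y) + set_up.Lambda*norm(x, 1) )
cvx_end
x_opt = x;
end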

% Subgradient descent (fixed and decaying step sizes)
out_subgd = grad_FOM(set_up,@(N,A,x,y,lambda)subgrad_lasso(N,A,x,y,lambda));
out_subgd_decay = grad_FOM_decay(set_up,@(N,A,x,y,lambda)subgrad_lasso(N,A,x,y,lambda));
loss_subgrad=eval_loss(out_subgd,set_up,@(N,A,x,y,lambda)loss_lasso(N,A,x,y,lambda));
loss_subgrad_decay=eval_loss(out_subgd_decay,set_up,@(N,A,x,y,lambda)loss_lasso(N,A,x,y,lambda));
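grad_FOM and grad_FOM_decay are also helpers not shown here; from their use, they presumably run the subgradient method with a fixed and a decaying step size, storing one iterate per column. A minimal sketch of the fixed-step variant (the step size mu, the zero initialization and the output layout are all assumptions):

function out = grad_FOM(set_up, subgrad)
% Illustrative sketch: subgradient method with a fixed step size.
U = set_up.Utrain(:, 1:set_up.M+1);
y = set_up.ytrain(:, 1);
N = set_up.Niter_train;
x = zeros(set_up.M+1, 1);      % assumed initialization
out = zeros(set_up.M+1, N);    % one column per iterate
mu = 0.01;                     % fixed step size (assumed value)
for k = 1:N
    x = x - mu*subgrad(N, U, x, y, set_up.Lambda);
    out(:, k) = x;
end
end

grad_FOM_decay would differ only in the step, e.g. mu/k or mu/sqrt(k); a decaying step is what lets the subgradient method converge on the nondifferentiable l1 term, at the price of slower progress per iteration.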

out_ista=ista_lasso(set_up,@(N,A,x,y,lambda)grad_LS(N,A,x,y,lambda));
out_fista=fista_lasso(set_up,@(N,A,x,y,lambda)grad_LS(N,A,x,y,lambda));
loss_ista=eval_loss(out_ista,set_up,@(N,A,x,y,lambda)loss_lasso(N,A,x,y,lambda));
loss_fista=eval_loss(out_fista,set_up,@(N,A,x,y,lambda)loss_lasso(N,A,x,y,lambda));
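Unlike the subgradient routines, ista_lasso and fista_lasso receive only grad_LS, the gradient of the smooth part: the l1 term is handled exactly by its proximal operator, which for the lasso is soft-thresholding. A minimal ISTA sketch under the same assumptions about set_up, with the 1/L step taken from the Lipschitz constant of grad_LS:

function out = ista_lasso(set_up, grad)
% Illustrative sketch: proximal gradient (ISTA) for the lasso.
U = set_up.Utrain(:, 1:set_up.M+1);
y = set_up.ytrain(:, 1);
N = set_up.Niter_train;
L = 2/N*norm(U)^2;                           % Lipschitz constant of grad_LS
soft = @(z, t) sign(z).*max(abs(z) - t, 0);  % prox of t*norm(.,1)
x = zeros(set_up.M+1, 1);
out = zeros(set_up.M+1, N);
for k = 1:N
    x = soft(x - (1/L)*grad(N, U, x, y, set_up.Lambda), set_up.Lambda/L);
    out(:, k) = x;
end
end

fista_lasso would add Nesterov momentum, taking the gradient step at an extrapolated point x + ((t_prev-1)/t)*(x - x_prev), which improves the worst-case rate from O(1/k) to O(1/k^2).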

% FLEXA algorithm for lasso
out_flexa =flexa_lasso(set_up);
% FLEXA runs fewer iterations than Niter_train, so pad its trajectory
% with the last iterate to give all learning curves the same length
out_flexa2=[out_flexa(:,2:set_up.Number_iter_FLEXA), kron(out_flexa(:,set_up.Number_iter_FLEXA), ...
    ones(1,set_up.Niter_train-set_up.Number_iter_FLEXA+1))];
loss_flexa=eval_loss(out_flexa2,set_up,@(N,A,x,y,lambda)loss_lasso(N,A,x,y,lambda));
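flexa_lasso implements FLEXA (the parallel successive convex approximation scheme of Facchinei, Scutari et al.): every coordinate solves its own strongly convex scalar subproblem in parallel, which for the lasso again reduces to soft-thresholding, and the iterate then moves a diminishing step toward that best response. A rough sketch; the proximal weight tau, the step rule for gamma and the initialization are all assumptions:

function out = flexa_lasso(set_up)
% Illustrative sketch of FLEXA for the lasso.
U = set_up.Utrain(:, 1:set_up.M+1);
y = set_up.ytrain(:, 1);
N = set_up.Niter_train;
lambda = set_up.Lambda;
soft = @(z, t) sign(z).*max(abs(z) - t, 0);
c = 2/N*sum(U.^2, 1)' + 1e-3;   % per-coordinate curvature + tau (assumed)
gamma = 0.9;                    % initial step size (assumed)
x = zeros(set_up.M+1, 1);
out = zeros(set_up.M+1, set_up.Number_iter_FLEXA);
for k = 1:set_up.Number_iter_FLEXA
    g = 2/N*U'*(U*x - y);                % gradient of the smooth part
    xhat = soft(x - g./c, lambda./c);    % parallel best responses
    x = x + gamma*(xhat - x);            % move toward the best response
    gamma = gamma*(1 - 1e-2*gamma);      % assumed diminishing step rule
    out(:, k) = x;
end
end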

figure(1)
% Plot of learning curves: squared deviation of the running loss from
% the CVX optimum, in dB
plot(1:set_up.Niter_train,10*log10(sum((loss_subgrad-loss_opt*ones(1,set_up.Niter_train)).^2,1)),'b','LineWidth',3)
hold on
plot(1:set_up.Niter_train,10*log10(sum((loss_subgrad_decay-loss_opt*ones(1,set_up.Niter_train)).^2,1)),'r','LineWidth',3)
plot(1:set_up.Niter_train,10*log10(sum((loss_ista-loss_opt*ones(1,set_up.Niter_train)).^2,1)),'m','LineWidth',3)
plot(1:set_up.Niter_train,10*log10(sum((loss_fista-loss_opt*ones(1,set_up.Niter_train)).^2,1)),'c','LineWidth',3)
plot(1:set_up.Niter_train,10*log10(sum((loss_flexa-loss_opt*ones(1,set_up.Niter_train)).^2,1)),'k','LineWidth',3)
hold off
legend('Subgradient. Fixed','Subgradient. Decay','ISTA','FISTA','FLEXA'), grid on
xlabel('Iterations')
ylabel('MSE (dB)')
title('Lasso. Different implementations')

figure(2)
% Zoom in on the first iterations of the learning curves
show=30;
plot(1:show,10*log10(sum((loss_subgrad(1:show)-loss_opt*ones(1,show)).^2,1)),'b','LineWidth',3)
hold on
plot(1:show,10*log10(sum((loss_subgrad_decay(1:show)-loss_opt*ones(1,show)).^2,1)),'r','LineWidth',3)
plot(1:show-1,10*log10(sum((loss_ista(2:show)-loss_opt*ones(1,show-1)).^2,1)),'m','LineWidth',3)   % ISTA plotted from the 2nd iterate
plot(1:show-1,10*log10(sum((loss_fista(2:show)-loss_opt*ones(1,show-1)).^2,1)),'c','LineWidth',3)  % FISTA plotted from the 2nd iterate
plot(1:show,10*log10(sum((loss_flexa(1:show)-loss_opt*ones(1,show)).^2,1)),'k','LineWidth',3)
hold off
legend('Subgradient. Fixed','Subgradient. Decay','ISTA','FISTA','FLEXA'), grid on
xlabel('Iterations')
ylabel('MSE (dB)')
title('Lasso. Different implementations. Zoom')
