% TEST Sparse Regression: Main file for solving the sparse regression problem
% with an inexact augmented Lagrangian method.
%   minimize   1/2 * ||Ahat*w - b||^2
%   subject to w = z.^2
%              ||z||_2^2 <= tau
%
% Author: Mehmet Fatih Sahin
% Date  : 27.12.2018

clearvars
% close all
vec = @(x) reshape(x, numel(x), 1);
addpath(genpath('./functions/'));
addpath(genpath('./helpers/'));

%% Function settings
sparse_reg_load_data();
Ahat = [A, -A];

%% Initializations
% u = [w; z] with w = u(1:2*d) and z = u(2*d+1:4*d).
f_grad_u = @(u, beta_, yc) [Ahat'*(Ahat*u(1:2*d) - b); zeros(2*d,1)] ...
    + 1/(beta_)*[u(1:2*d) - u(2*d+1:4*d).^2; -2*(u(1:2*d) - u(2*d+1:4*d).^2).*u(2*d+1:4*d)] ...
    + [yc; -2*yc.*u(2*d+1:4*d)];
f_u = @(u) 1/2*norm(Ahat*u(1:2*d) - b, 'fro')^2;
proj = @(u) [u(1:2*d); u(2*d+1:4*d)*min(1, sqrt(tau)/norm(u(2*d+1:4*d)))];
get_feas = @(u) u(1:2*d) - u(2*d+1:4*d).^2;
gamma_rule = @(beta_, yc) 1e-2;  % rule for the step size; a line search could be added later

%% Define algorithmic parameters and rules
beta0 = 1e0;         % initial value for the penalty parameter
sigma0 = beta0*1e2;  % initial value for the dual step size
clearvars update_params
update_params = @(feas_cur, feas_old, k_t, iter, beta_old, sigma_old, sigma_vec, beta_vec) ...
    update_params_nonadaptive(feas_cur, feas_old, k_t, iter, beta_old, sigma_old, ...
    sigma_vec, beta_vec, sigma0, beta0);

params.averaging = 0;
params.iters = 1e4;              % number of iterations
params.verbose = 1e3;
params.Uinit = 1*rand(4*d, 1);   % in case a random initialization is used
params.ycinit = zeros(2*d, 1);
params.savehist = 1;
params.startfeasible = 0;        % whether to start from a feasible point
params.gamma = 2;
params.theta = 2;

sparse_reg_cvx()
% params.x_cvx = x_cvx;

%% Run the algorithm
[Udl, output] = linearized_augmented_lagrangian_ls(f_u, f_grad_u, proj, update_params, ...
    gamma_rule, get_feas, params);

%% Plot results
plot_results()
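
%% (Optional) Gradient sanity check
% The following block is not part of the original script; it is a minimal
% sketch of how f_grad_u above can be verified numerically. It assumes that
% f_grad_u is the gradient, with respect to u = [w; z], of the augmented
% Lagrangian
%   L_beta(w, z, y) = 1/2*||Ahat*w - b||^2 + y'*(w - z.^2)
%                     + 1/(2*beta)*||w - z.^2||^2,
% which is consistent with the three terms in its definition. The names
% augLag, utest, beta_test and ytest are introduced here for illustration only.
augLag = @(u, beta_, yc) 1/2*norm(Ahat*u(1:2*d) - b)^2 ...
    + yc'*(u(1:2*d) - u(2*d+1:4*d).^2) ...
    + 1/(2*beta_)*norm(u(1:2*d) - u(2*d+1:4*d).^2)^2;
utest = randn(4*d, 1); beta_test = 1; ytest = randn(2*d, 1);
g_analytic = f_grad_u(utest, beta_test, ytest);
g_numeric = zeros(4*d, 1);
h = 1e-6;                        % finite-difference step
for ii = 1:4*d
    e_ii = zeros(4*d, 1); e_ii(ii) = h;
    % central finite difference along coordinate ii
    g_numeric(ii) = (augLag(utest + e_ii, beta_test, ytest) ...
        - augLag(utest - e_ii, beta_test, ytest))/(2*h);
end
fprintf('Relative gradient error: %.2e\n', norm(g_analytic - g_numeric)/norm(g_analytic));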