From e16cbac1fe51ec243d04a7e2475336b9aebdb4cb Mon Sep 17 00:00:00 2001 From: Jiayu Zhou Date: Mon, 27 Feb 2017 15:07:47 -0500 Subject: [PATCH] Formatting issue and variable names. --- MALSAR/functions/dirty/Logistic_Dirty.m | 67 +++++++++++++++++++------ examples/example_Dirty.m | 2 +- examples/example_Dirty_Classify.m | 10 ++-- 3 files changed, 58 insertions(+), 21 deletions(-) diff --git a/MALSAR/functions/dirty/Logistic_Dirty.m b/MALSAR/functions/dirty/Logistic_Dirty.m index f2c1a1d..ad8b549 100644 --- a/MALSAR/functions/dirty/Logistic_Dirty.m +++ b/MALSAR/functions/dirty/Logistic_Dirty.m @@ -1,11 +1,48 @@ -function [W, C, P, Q, L, F] = Logistic_Dirty(X, Y, rho1, rho2, opts) -%W: model parameters -%C: constant parameters -%P: shared structure -%Q: non_shared structure -%L: objective value for every iteration -%F: loss value for every iteration +%% FUNCTION Logistic_Dirty +% Dirty Multi-Task Learning with Logistic Loss. +% +%% INPUT +% X: {n * d} * t - input matrix +% Y: {n * 1} * t - output matrix +% rho1: group sparsity regularization parameter +% rho2: elementwise sparsity regularization parameter +% +%% OUTPUT +% W: model: d * t +% C: constant parameters +% P: group sparsity structure (joint feature selection) +% Q: elementwise sparsity component +% funcVal: function (objective) value vector. +% lossVal: loss value for every iteration. +% +%% LICENSE +% This program is free software: you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation, either version 3 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +% GNU General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program. If not, see <http://www.gnu.org/licenses/>. 
+% +% Copyright (C) 2011 - 2012 Jiayu Zhou, Pinghua Gong and Jieping Ye +% +% You are suggested to first read the Manual. +% For any problem, please contact with Jiayu Zhou via jiayu.zhou@asu.edu +% +% Last modified on June 3, 2012. +% +%% RELATED PAPERS +% +% [1] Jalali, A. and Ravikumar, P. and Sanghavi, S. and Ruan, C. A dirty +% model for multi-task learning, NIPS 2010. +% +function [W, C, P, Q, funcVal, lossVal] = Logistic_Dirty(X, Y, rho1, rho2, opts) if nargin <4 error('\n Inputs: X, Y, rho1, should be specified!\n'); @@ -20,8 +57,8 @@ task_num = length (X); dimension = size(X{1}, 1); -L = []; -F = []; +funcVal = []; +lossVal = []; %initialize a starting point C0_prep = zeros(1, task_num); @@ -141,27 +178,27 @@ Qz = Qzp; Cz = Czp; - L = cat(1, L, Fzp + rho1*L1infnorm(Pzp) + rho2*L11norm(Qzp)); - F = cat(1, F, Fzp); + funcVal = cat(1, funcVal, Fzp + rho1*L1infnorm(Pzp) + rho2*L11norm(Qzp)); + lossVal = cat(1, lossVal, Fzp); % test stop condition. switch(opts.tFlag) case 0 if iter>=2 - if (abs( L(end) - L(end-1) ) <= opts.tol) + if (abs( funcVal(end) - funcVal(end-1) ) <= opts.tol) break; end end case 1 if iter>=2 - if (abs( L(end) - L(end-1) ) <=... - opts.tol* L(end-1)) + if (abs( funcVal(end) - funcVal(end-1) ) <=... 
+ opts.tol* funcVal(end-1)) break; end end case 2 - if ( L(end)<= opts.tol) + if ( funcVal(end)<= opts.tol) break; end case 3 diff --git a/examples/example_Dirty.m b/examples/example_Dirty.m index 71a5299..8f85cf7 100644 --- a/examples/example_Dirty.m +++ b/examples/example_Dirty.m @@ -62,7 +62,7 @@ rho_1 = 350;% rho1: group sparsity regularization parameter rho_2 = 10;% rho2: elementwise sparsity regularization parameter -[W funcVal P Q] = Least_Dirty(X, Y, rho_1, rho_2, opts); +[W, funcVal, P, Q] = Least_Dirty(X, Y, rho_1, rho_2, opts); diff --git a/examples/example_Dirty_Classify.m b/examples/example_Dirty_Classify.m index d771f85..f5fd06d 100644 --- a/examples/example_Dirty_Classify.m +++ b/examples/example_Dirty_Classify.m @@ -41,8 +41,8 @@ opts.maxIter = 60000; % lambda range -lambda1_range = [1:-0.01:0.01]; -lambda2_range = [2:-0.05:0.05]; +lambda1_range = 1:-0.01:0.01; +lambda2_range = 2:-0.05:0.05; %container for holding the results r_acc=cell(1,3); @@ -77,20 +77,20 @@ %inner cv fprintf('inner CV started\n') - [best_lambda1 best_lambda2 accuracy_mat] = CrossValidationDirty( Xtr, Ytr, ... + [best_lambda1, best_lambda2, accuracy_mat] = CrossValidationDirty( Xtr, Ytr, ... 'Logistic_Dirty', opts, lambda1_range,lambda2_range, in_cv_fold, ... 'eval_MTL_accuracy'); %train %warm start for one turn - [W C P Q L F] = Logistic_Dirty(Xtr, Ytr, best_lambda1, best_lambda2, opts); + [W, C, P, Q, funcVal, lossVal] = Logistic_Dirty(Xtr, Ytr, best_lambda1, best_lambda2, opts); opts2=opts; opts2.init=1; opts2.C0=C; opts2.P0=P; opts2.Q0=Q; opts2.tol = 10^-10; - [W2 C2 P2 Q2 L2 F2] = Logistic_Dirty(Xtr, Ytr, best_lambda1, best_lambda2, opts2); + [W2, C2, P2, Q2, funcVal2, lossVal2] = Logistic_Dirty(Xtr, Ytr, best_lambda1, best_lambda2, opts2); %test