% Main function of coupled dictionary learning (clustering-CCA variant)
% Input:
%   Alphah, Alphal   : initial sparse coefficients of the two domains
%   XH_t, XL_t       : image data pairs of the two domains
%   Dh, Dl           : initial dictionaries
%   Wh, Wl           : initial projection matrices
%   par              : parameter struct (rho, lambda1, lambda2, mu, nu, nIter, t0, epsilon, K, L)
%   label_h, label_l : class labels of the two domains
%   knn, eta, option : settings for the clustering-CCA update (see the option switch below)
%
% Output:
%   Alphah, Alphal   : output sparse coefficients of the two domains
%   XH_t, XL_t       : image data pairs (returned unchanged)
%   Dh, Dl           : output coupled dictionaries
%   Wh, Wl           : output projection matrices
%   Uh, Ul           : output projection matrices for Alpha
%   f                : objective value (currently always returned as 0)
%
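% Example (hypothetical settings, shown only to illustrate the expected par
% fields and the call signature; adjust to your data):
%   par = struct('rho',1,'lambda1',0.01,'lambda2',0.01,'mu',0.04,'nu',0.1, ...
%                'nIter',10,'t0',5,'epsilon',1e-6,'K',size(Dh,2),'L',20);
%   [Alphah, Alphal, XH_t, XL_t, Dh, Dl, Wh, Wl, Uh, Ul, f] = ...
%       coupled_DL_recoupled_CCCA_mod(Alphah, Alphal, XH_t, XL_t, Dh, Dl, ...
%       Wh, Wl, par, label_h, label_l, knn, eta, 2);
%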
function [Alphah, Alphal, XH_t, XL_t, Dh, Dl, Wh, Wl, Uh, Ul, f] = coupled_DL_recoupled_CCCA_mod(Alphah, Alphal, XH_t, XL_t, Dh, Dl, Wh, Wl, par, label_h, label_l, knn, eta, option)
% coupled_DL_recoupled(Alphah, Alphal, XH_t, XL_t, Dh, Dl, Wh, Wl, par);
%% parameter setting
[dimX, numX] = size(XH_t);
dimY = size(Alphah, 1);
numD = size(Dh, 2);
rho = par.rho;
lambda1 = par.lambda1;
lambda2 = par.lambda2;
mu = par.mu;
sqrtmu = sqrt(mu);
nu = par.nu;
nIter = par.nIter;
t0 = par.t0;
epsilon = par.epsilon;
param.lambda = lambda1; % l1 penalty weight for the mexLasso sparse coding
param.lambda2 = lambda2;
%param.mode = 1; % penalized formulation
param.approx=0;
param.K = par.K;
param.L = par.L;
f = 0;
%keyboard;
%% Initialize Ul and Uh
% Wl and Wh are initially identity matrices, so Ul and Uh start out as identities too
Ul = Wl;
Uh = Wh;
% Iteratively solve for D, Alpha, and U
for t = 1 : 10 % note: the iteration count is hard-coded here; par.nIter is not used
%% Updating Wl and Wh => updating Ul and Uh
% Find the transformation matrices using clustering-based CCA
set_kapa_cca;
% Select the clustering-CCA variant:
if option==1
[Wl,Wh,~] = cluster_cca_mod(full(Alphal),full(Alphah),label_l,label_h,kapa_cca,knn,eta);
elseif option==2
% GCDL 1
[Wl,Wh,~] = cluster_cca_mod2(full(Alphal),full(Alphah),label_l,label_h,kapa_cca,knn,eta,0);
elseif option==3
[Wl,Wh,~] = cluster_cca_mod2(full(Alphal),full(Alphah),label_l,label_h,kapa_cca,knn,eta,1);
elseif option==4
% GCDL 2
[Wl,Wh,~] = cluster_cca_mod3(full(Alphal),full(Alphah),label_l,label_h,kapa_cca,knn,eta,0);
elseif option==5
[Wl,Wh,~] = cluster_cca_mod3(full(Alphal),full(Alphah),label_l,label_h,kapa_cca,knn,eta,1);
end
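% The cluster-CCA routines may return complex-valued projections;
% only the real part is kept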
Wl = real(Wl);
Wh = real(Wh);
Ul = Wl.';
Uh = Wh.';
sub_id = unique(label_h);
nSub = length(sub_id);
Alphal_full = full(Alphal);
Alphah_full = full(Alphah);
Alphal_inclass = zeros(size(Alphal_full,1),nSub);
Alphah_inclass = Alphal_inclass;
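% Class-indicator target matrices (one row per class): entries default to
% -0.5 and are set to 0.8 below for samples of the corresponding class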
Xl_inclass = -0.5*ones(nSub,length(label_l));
Xh_inclass = -0.5*ones(nSub,length(label_h));
% Normalize each high-domain sparse code so that its projection Uh*alpha has unit l2 norm
for i = 1:length(label_h)
normVal = norm(Uh*Alphah_full(:,i));
Alphah_full(:,i) = Alphah_full(:,i)/normVal;
end
% Normalize each low-domain sparse code so that its projection Ul*alpha has unit l2 norm
for i = 1:length(label_l)
normVal = norm(Ul*Alphal_full(:,i));
Alphal_full(:,i) = Alphal_full(:,i)/normVal;
end
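% Per-class median sparse code and class-indicator entries for each domain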
for subNo = 1:nSub
currSubId = sub_id(subNo);
indexvect = find(label_l == currSubId);
Alphal_inclass(:,subNo) = median(Alphal_full(:,indexvect),2);
Xl_inclass(subNo,indexvect) = 0.8;
indexvect = find(label_h == currSubId);
Alphah_inclass(:,subNo) = median(Alphah_full(:,indexvect),2);
Xh_inclass(subNo,indexvect) = 0.8;
end
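% Cross-domain targets: for a high-domain code alpha, Ph*alpha gives the inner
% products of Uh*alpha with the projected low-domain class medians
% Ul*Alphal_inclass (Pl*alpha is the analogous low-domain quantity); these are
% matched against the class-indicator targets in the Lasso problems below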
Ph = (Uh'*Ul*Alphal_inclass)';
Pl = (Ul'*Uh*Alphah_inclass)';
%% Updating Alphal and Alphah
% Note: mu is overridden here and replaces the par.mu value set above
mu = 0.04;
sqrtmu = sqrt(mu);
% Xl_inclass plays the role of Kx and Pl the role of Px;
% Xh_inclass plays the role of Ky and Ph the role of Py.
% Kx and Px are formed slightly differently here: instead of Kx being
% (N1 x N2), it is built as (number of unique labels x N2), and Px is
% built accordingly from the other domain's class medians.
% Rather than using all of the sparse codes, we take the per-class
% mean/median using the supervised label information, so the code runs
% much faster.
% In the paper this is given as Px = Ay.'*Ty.'*Tx (where Tx and Ty
% correspond to Ul and Uh); here only a per-class subset of Ay is used,
% for faster computation.
% Using the whole matrix also works, but it is more time-consuming.
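% With the stacked data and dictionaries below, mexLasso (SPAMS) approximately
% solves, for each column x of XL_t with class-target column k of Xl_inclass
% (a sketch, assuming the default penalized formulation):
%   min_a 0.5*||x - Dl*a||_2^2 + 0.5*mu*||k - Pl*a||_2^2 + lambda*||a||_1
% and the analogous problem for the high domain with Dh and Ph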
param.lambda = 0.01; % note: this overrides lambda1 for the sparse coding step
Alphal = mexLasso([XL_t; sqrtmu * Xl_inclass], [Dl; sqrtmu * Pl],param);
param.lambda = 0.01;
Alphah = mexLasso([XH_t; sqrtmu * Xh_inclass], [Dh; sqrtmu * Ph],param);
dictSize = par.K;
%% Updating Dl and Dh
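% Block coordinate descent over the dictionary atoms: for each atom, form the
% residual with that atom's contribution added back, take the direction Y*ai',
% and renormalize the atom to unit l2 norm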
for i=1:dictSize
ai = Alphal(i,:);
Y = XL_t-Dl*Alphal+Dl(:,i)*ai;
di = Y*ai';
di = di./(norm(di,2) + eps);
Dl(:,i) = di;
end
for i=1:dictSize
ai = Alphah(i,:);
Y = XH_t-Dh*Alphah+Dh(:,i)*ai;
di = Y*ai';
di = di./(norm(di,2) + eps);
Dh(:,i) = di;
end
end
return;
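
% --- Illustrative sketch (not called above): the per-atom dictionary update
% used in the two loops, factored into a local function. A minimal sketch that
% assumes D (dim x K), Alpha (K x N) and X (dim x N) as in the main routine;
% the function name is hypothetical.
function D = update_dictionary_atoms(D, Alpha, X)
for i = 1:size(D, 2)
ai = Alpha(i, :);                  % codes associated with atom i
Y = X - D*Alpha + D(:, i)*ai;      % reconstruction residual with atom i added back
di = Y*ai';                        % least-squares direction for atom i
D(:, i) = di./(norm(di, 2) + eps); % renormalize the atom to unit l2 norm
end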