1 | %DLPC LP-classifier on dissimilarity (proximity) data |
---|
2 | % |
---|
3 | % [W1,W2,W3] = DLPC(D,BIAS,TYPE,PARAM) |
---|
4 | % |
---|
5 | % INPUT |
---|
6 | % D Dissimilarity (proximity) dataset |
---|
7 | % BIAS YES or NO (optional; default: 1 (YES)) |
---|
8 | % TYPE Type of a classifier |
---|
9 | % 'SIMPLE' - the most simple formulation; no sparse solution; PARAM = []; |
---|
10 | % 'STANDARD' - minimization of the training misclassification errors; |
---|
11 | % no sparse solution; PARAM = []; |
---|
12 | % 'C-SPARSE' - sparse solution; a formulation similar to the LP_1 SVM; |
---|
13 | % PARAM is a tradeoff parameter, similar as in the traditional |
---|
14 | % SVM; (optional; DEFAULT: 1). |
---|
15 | % 'MU-SPARSE' - sparse solution; a formulation similar to the LP_1 SVM, |
---|
16 | % based on the paper of Graepel, Herbrich, Smola etc |
---|
17 | % 'Classification on proximity data with LP-machines'. |
---|
18 | % PARAM is a tradeoff parameter, usually PARAM = 0.05 or 0.1. |
---|
19 | % It is an upper bound on the misclassified training objects.
---|
20 | % So, for well separable problems, PARAM = 0.01 or PARAM = 0.02. |
---|
21 | % (optional; DEFAULT: the LOO 1-NN error * 1.3). |
---|
22 | % PARAM Parameter connected to the TYPE, as above |
---|
23 | % |
---|
24 | % OUTPUT |
---|
25 | % W1 LP-Classifier in the complete dissimilarity space |
---|
26 | % W2 LP-Classifier in a reduced dissimilarity space |
---|
27 | % W3 Object selection mapping; the indices of support objects are in +W3. |
---|
28 | % |
---|
29 | % DEFAULTS |
---|
30 | % BIAS = 1 |
---|
31 | % TYPE = 'STANDARD' |
---|
32 | % PARAM = [] |
---|
33 | % |
---|
34 | % DESCRIPTION |
---|
35 | % Classification problem on a N x M dissimilarity dataset D with LP-machines. |
---|
36 | % D should be described by both label and feature lists. If D is a square, |
---|
37 | % symmetric matrix, then the feature list should be the same as the label list. |
---|
38 | % |
---|
39 | % Assume a 2-class problem. Let DLPC select J support objects. Then: |
---|
40 | % W1 is an M x 2 classifier in the original dissimilarity space, W2 is a J x 2
---|
41 | % classifier in the dissimilarity space defined by the J support objects |
---|
42 | % and W3 is an M x J feature selection such that W1 = W3 * W2.
---|
43 | % Note that the indices of the support objects can be retrieved by +W3. |
---|
44 | % |
---|
45 | % A linear classifier is built on D: |
---|
46 | % |
---|
47 | % f(D(x,*)) = diag(Y) * D(x,*) * W + W0, |
---|
48 | % |
---|
49 | % where Y are labels (+/- 1) and W are the weights. If BIAS is 1, then W0 is also |
---|
50 | % sought, otherwise it equals 0, hence the hyperplane is forced to go through the origin. |
---|
51 | % |
---|
52 | % For C-class problems, C classifiers are trained, one against all others. |
---|
53 | % In such a case, only W1 is returned and W3 is now NOT a feature selection,
---|
54 | % but directly the indices of the support objects. |
---|
55 | |
---|
56 | |
---|
57 | % |
---|
58 | % Let [n,k] = size(D). Assume a two-class problem. |
---|
59 | % Any multi-class problem is converted one-against-all to two-class problems. |
---|
60 | % Y are the labels (converted +/-1) |
---|
61 | % D_Y = diag(Y_r) * D * diag(Y_c), |
---|
62 | % where Y_r are the labels for rows of D and |
---|
63 | % Y_c are the labels for columns. |
---|
64 | % alpha is the sought solution. |
---|
65 | % |
---|
66 | % Internal - the optimization schema (A,b,f) refer to the constraints in |
---|
67 | % the standard Matlab LINPROG procedure. |
---|
68 | % |
---|
69 | % 'simple': |
---|
70 | % min 0^T * alpha(1:k) (= 0) |
---|
71 | % s.t. [D_Y Y] * [alpha(1:k) alpha_0)] >= 1 |
---|
72 | % |
---|
73 | % A = [D_Y Y] is n x (k+1) |
---|
74 | % b = 1(n,1) |
---|
75 | % f = 0(k+1,1) |
---|
76 | % |
---|
77 | % |
---|
78 | % 'standard': |
---|
79 | % min p^T * beta(1:n), p = 1/n_i for the class_i |
---|
80 | % s.t. [D_Y Y] * [alpha(1:k) alpha_0] + beta(1:n) >= 1, |
---|
81 | % beta >= 0 |
---|
82 | % |
---|
83 | % A = [D_Y Y eye(n)] |
---|
84 | % b = 1(n,1) |
---|
85 | % f = [0(k+1,1) p]^T |
---|
86 | % |
---|
87 | % |
---|
88 | % 'c-sparse': |
---|
89 | % min sum(alpha(1:k)) + sum (alpha2(1:k)) + C*sum(ksi) |
---|
90 | % s.t. [D_Y Y] * [alpha(1:k)-alpha2(1:k) alpha_0] + ksi(1:n) >= 1, |
---|
91 | % ksi >= 0 |
---|
92 | % alpha, alpha2 >= 0 |
---|
93 | % |
---|
94 | % A = [D_Y -D_Y Y eye(n,n)] |
---|
95 | % b = 1(n,1) |
---|
96 | % f = [0(k+1,1)]^T |
---|
97 | % |
---|
98 | % |
---|
99 | % 'mu-sparse': |
---|
100 | % min sum(ksi)/n - mu*g |
---|
101 | % s.t. [D_Y -D_Y Y] * [alpha(1:k)-alpha2(1:k) alpha_0] + ksi(1:n) >= g, |
---|
102 | % sum(alpha) + sum(alpha2) = 1 |
---|
103 | % alpha, alpha2 >= 0 |
---|
104 | % ksi >= 0 |
---|
105 | % |
---|
106 | % A = [D_Y -D_Y Y eye(n,n)] |
---|
107 | % b = 1(n,1) |
---|
108 | % f = [0(2k+1,1)] |
---|
109 | % |
---|
110 | |
---|
111 | |
---|
112 | % Elzbieta Pekalska, Robert P.W. Duin, e.pekalska@tudelft.nl |
---|
113 | % Faculty of Electrical Engineering, Mathematics and Computer Science, |
---|
114 | % Delft University of Technology, The Netherlands. |
---|
115 | |
---|
116 | |
---|
117 | |
---|
function [W1,W2,W3] = dlpc (d,is_w0,type,par,usematlab,prec)
% DLPC trains a linear-programming classifier on a dissimilarity dataset D;
% see the help text above for the meaning of D, IS_W0 (bias), TYPE and PAR.
% USEMATLAB    Force Matlab's LINPROG even when GLPKMEX is available
%              (optional; default: 0).
% PREC         Relative threshold below which a weight is treated as zero
%              when selecting support objects (optional; default: 1e-7).

if nargin < 6, prec = 1e-7; end
if nargin < 5, usematlab = 0; end
if nargin < 3 | isempty(type), type = 'standard'; end
if nargin < 4
  par = [];
end
if nargin < 2 | isempty(is_w0), is_w0 = 1; end
if nargin < 1 | isempty(d)
  % No data given: return an untrained mapping.
  W1 = mapping(mfilename,{is_w0,type,par,usematlab});
  W1 = setname(W1,'DLPC');
  W2 = [];
  W3 = [];
  return
end


if ~isdataset(d),
  error('The first parameter should be a dataset.')
end
if ~isnumeric(is_w0) | (is_w0 ~= 0 & is_w0 ~= 1),
  error('The second parameter should be 0 or 1.');
end

% Fill in the default trade-off parameter for the chosen formulation.
if isempty(par),
  switch upper(type)
    case 'MU-SPARSE',
      par = max(0.01,1.3*testkd(d,1,'loo'));  % upperbound error: 1.3 * loo 1-nn error
    case 'C-SPARSE',
      par = 1;
    case {'SIMPLE','STANDARD'},
      par = [];
    otherwise
      error('Wrong type.')
  end
end

lab     = getnlab(d);                 % numeric labels of the rows (objects)
lablist = getlablist(d);
featlab = getfeatlab(d);
[m,k,C] = getsize(d);                 % m objects, k representation objects, C classes

% The feature labels (columns = representation objects) must come from the
% same label set as the class labels.
[nl, fe, fl] = renumlab(lablist,featlab);
if max(fe) > size(lablist,1)
  error('Feature labels of the dataset do not match with class labels.')
end



z = (is_w0 > 0);      % 1 if a bias term w0 is sought, 0 otherwise

% This is the status of the optimization procedure.
% For GLPK, this is the exit code; see GLPKMEX for details.
% For Matlab LINPROG, if negative then no solution is found.

status = 1;


% If more than 2 classes, train one classifier per class, one-against-all.
if C > 2,

  W1 = [];
  W2 = [];
  W3 = [];
  N  = [];                            % collected support-object indices
  for i=1:C
    % Relabel: class i -> 1, all others -> 2, for both rows and columns.
    mlab = 2 - (lab == i);
    mfe  = 2 - (fe == i);
    dd = setlabels(d,mlab);
    dd = setfeatlab(dd,mfe);
    if ~isempty(d.prior)
      dd = setprior(dd,[d.prior(i),1-d.prior(i)]');
    end
    [v1,v2,v3]= dlpc(dd,is_w0,type,par,usematlab);
    j = +v3;                          % support objects of this sub-problem
    if isempty(v1),
      W1 = [];
      W2 = [];
      W3 = [];
      prwarning(1,'No solution found.');
      return;
    end
    W1 = [W1,setlabels(v1(:,1),lablist(i,:))];
    W2 = [W2;setlabels(v2(:,1),lablist(i,:))];
    N = [N j];
  end
  % Merge the per-class support sets; N3 maps each classifier's support
  % objects into the merged set N1.
  [N1,N2,N3] = unique(N);
  W3 = featsel(k,N1);
  W2 = featsel(length(N1),N3)*W2;
  return

else

  Y1 = 3 - 2 * lab;       % row labels mapped to +/-1
  Y  = 3 - 2 * fe;        % column (feature) labels mapped to +/-1

  alpha(1:k+1,1) = 0;     % solution weights; alpha(k+1) is the bias w0


  switch type
  case 'simple',
    % Feasibility LP: find any separating hyperplane (no slacks).
    f = zeros(k+z,1);
    b = -ones(m,1);
    if is_w0,
      A = -[(Y1*Y').* +d Y1];
    else
      A = -[(Y1*Y').* +d];
    end

    [al,fval,status] = linprog(f,A,b);
    alpha(1:k+z) = al;



  case 'standard',
    % Minimize the class-weighted sum of slacks; no sparseness enforced.
    % Each slack is weighted by 1/(class size) so both classes count equally.
    L = ones(k,1);
    I = find(Y==1);
    if ~isempty(I)
      L(I) = L(I)/length(I);
    end
    J = find(Y==-1);
    if ~isempty(J)
      L(J) = L(J)/length(J);
    end

    f  = [zeros(k+z,1); L];
    lb = [-Inf .*ones(k+z,1); zeros(k,1)];
    ub = Inf .* ones(2*k+z,1);
    b  = -ones(m,1);
    if is_w0,
      A = -[(Y1*Y').* +d Y1 eye(m,k)];
    else
      A = -[(Y1*Y').* +d eye(m,k)];
    end

    % BUG FIX: the output was previously assigned to the misspelled
    % variable 'ststus', so a failed LP was never detected here and the
    % Fisher fallback below could not trigger.
    [al,fval,status] = linprog(f,A,b,[],[],lb,ub);
    alpha(1:k+z) = al(1:k+z);



  case 'c-sparse',
    % Sparse LP_1 formulation: minimize ||alpha||_1 + par * sum(slacks),
    % with alpha split into two nonnegative parts alpha - alpha2.
    L  = ones(k,1);
    ub = Inf .* ones(3*k+z,1);
    lb = [zeros(2*k,1); -Inf.*ones(z,1); zeros(k,1)];
    b  = -ones(m,1);

    dd = (Y1*Y').* +d;
    if is_w0,
      f = [ones(2*k,1); 0; par*L];
      A = -[dd -dd Y1 eye(m,k)];
    else
      f = [ones(2*k,1); par*L];
      A = -[dd -dd eye(m,k)];
    end
    if (exist('glpkmex')>0) & (usematlab==0)
      smin    = 1;                             % solve minimum
      ctype   = char([ones(m,1)*abs('U')]);    % Sign of inequalities
      vartype = char(ones(3*k+z,1)*abs('C'));  % Continuous variables
%     lpsolver = 1;                            % Revised Simplex Method
      lpsolver = 2;                            % Interior Point Method
      params.msglev = 0;                       % no outputs
      params.itlim  = 400;                     % iteration limit
      [sss,hostname] = unix('hostname');
      hostname = hostname(1:end-1);
      if strcmp(hostname,'saturnus') | strcmp(hostname,'polaris') | strcmp(hostname,'neptunus')
        [al,fval,status] = glpkmex_redhat(smin,f,A,b,ctype,lb,ub,vartype,params,lpsolver);
      else
        [al,fval,status] = glpkmex(smin,f,A,b,ctype,lb,ub,vartype,params,lpsolver);
      end
    else
      [al,fval,status] = linprog (f,A,b,[],[],lb,ub);
    end
    alpha(1:k) = al(1:k) - al(k+1:2*k);        % recombine the split weights
    if is_w0,
      alpha(k+1) = al(2*k+1);
    end



  case 'mu-sparse',
    % LP-machine of Graepel et al.: maximize the margin g while bounding
    % the (normalized) slacks, with ||alpha||_1 fixed to 1 via Aeq.
    L = ones(k,1)/k;
    f = [zeros(2*k+z,1); L; -par];
    ub = Inf .* ones(3*k+1+z,1);
    lb = [zeros(2*k,1); -Inf.*ones(z,1); zeros(k+1,1)];
    Aeq = [ones(2*k,1); zeros(k+1+z,1)]';
    beq = 1;
    b = zeros(m,1);
    dd = (Y1*Y').* +d;

    if is_w0,
      A = -[dd -dd Y1 eye(m,k) -ones(m,1)];
    else
      A = -[dd -dd eye(m,k) -ones(m,1)];
    end

    if (exist('glpkmex')>0) & (usematlab==0)
      smin    = 1;                               % solve minimum
      ctype   = char([ones(m,1)*abs('U'); 'S']); % Sign of inequalities
      vartype = char(ones(3*k+1+z,1)*abs('C'));  % Continuous variables
%     lpsolver = 1;                              % Revised Simplex Method
      lpsolver = 2;                              % Interior Point Method
      params.msglev = 0;                         % no outputs, but doesn't seem to work
      params.itlim  = 400;                       % iteration limit
      [sss,hostname] = unix('hostname');
      hostname = hostname(1:end-1);
      if strcmp(hostname,'saturnus') | strcmp(hostname,'polaris') | strcmp(hostname,'neptunus')
        [al,fval,status] = glpkmex_redhat(smin,f,[A; Aeq],[b; beq],ctype,lb,ub,vartype,params,lpsolver);
      else
        [al,fval,status] = glpkmex(smin,f,[A; Aeq],[b; beq],ctype,lb,ub,vartype,params,lpsolver);
      end
    else
      [al,fval,status] = linprog(f,A,b,Aeq,beq,lb,ub);
    end

    alpha(1:k) = al(1:k) - al(k+1:2*k);          % recombine the split weights
    if is_w0,
      alpha(k+1) = al(2*k+1);
    end

  otherwise
    error ('Wrong type.');
  end
end

% Optimization failed: fall back to the Fisher classifier.
% status <= 0 covers LINPROG failures; the 181/150 codes are GLPK exit codes.
if (status <= 0) | (status > 181 | status == 150),
  prwarning(1,'Fisher classifier is trained.');
  W1 = fisherc(d);
  W2 = W1;
  W3 = featsel(k,[1:k]);
  return;
end


% Choose support objects: keep the weights that are not negligible
% relative to the total weight mass.
ss = sum(abs(alpha(1:k)));
J = find(abs(alpha(1:k)) > ss*prec);
if ~isempty(J),
  W3 = featsel(k,J);
  w = [Y; 1] .* alpha(1:k+1);
  W2 = affine(w(J),w(k+1),d(:,J),lablist,k,2);
  W2 = cnormc(W2,d(:,J));
  W1 = W3*W2;
  W1 = setname(W1,'DLPC');
  W2 = setname(W2,'DLPC');
else
  prwarning(1,'No support objects found. Fisher classifier is trained.');
  W1 = fisherc(d);
  W2 = W1;
  W3 = featsel(k,[1:k]);
  return;
end
return;
---|