%PE_EM Pseudo-Euclidean Linear Embedding
%
%   W = PE_EM(D,ALF,P)
%         OR
%   W = PE_EM(W,ALF)
%
% INPUT
%   D    NxN symmetric dissimilarity matrix (dataset)
%   W    Trained linear embedding into a pseudo-Euclidean space
%   ALF  Parameter determining the dimensionality and the mapping (optional, default: Inf)
%        (0,1)   - Fraction of the total (absolute value) preserved variance
%        Inf     - No dimensionality reduction, keeping all dimensions (it's VERY noisy)
%        'p'     - Projection into a Euclidean space based on positive eigenvalues only
%        'PARp'  - Projection into a Euclidean space based on the PAR fraction of
%                  positive eigenvalues; e.g. ALF = '0.9p'
%        'n'     - Projection into a Euclidean space based on negative eigenvalues only
%        'PARn'  - Projection into a (negative) Euclidean space based on the PAR fraction
%                  of negative eigenvalues; e.g. ALF = '0.7n'
%        'P1pP2n'- Projection into a Euclidean space based on the P1 positive eigenvalues
%                  and P2 negative eigenvalues; e.g. ALF = '0.7p0.1n', ALF = '7p2n'
%        1 .. N  - Number of dimensions in total
%        [P1 P2] - P1 dimensions or preserved fraction of variance in the positive subspace
%                  and P2 dimensions or preserved fraction of variance in the negative
%                  subspace; e.g. ALF = [5 10], ALF = [0.9 0.1]
%   P    Integer between 0 and N specifying which object is mapped at the origin;
%        0 stands for the mean (optional, default: 0)
%
% OUTPUT
%   W    Linear embedding into a pseudo-Euclidean space
%
% DEFAULT
%   P   = 0
%   ALF = INF
%
% DESCRIPTION
% Linear mapping W onto an M-dimensional pseudo-Euclidean (PE) subspace from a
% symmetric, square dissimilarity matrix D such that the dissimilarities are preserved.
% M is determined by ALF. E.g., the subspace is found such that at least a fraction
% ALF of the total variance is preserved for ALF in (0,1). The resulting configuration
% X is found by X = D*W. The signature of the obtained PE space (numbers of positive
% and negative directions) can be found by PE_SIG(W). The spectrum of the obtained
% space can be found by PE_SPEC(W).
%
% A trained mapping can be reduced further by: W = PE_EM(W,ALF)
% The signature of the obtained PE space can be found by PE_SIG(W).
% The spectrum of the obtained space can be found by PE_SPEC(W).
%
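% EXAMPLE
% A minimal usage sketch (assuming the standard PRTools helpers GENDATB and
% DISTM are available to generate data and compute square Euclidean distances):
%
%   a  = gendatb([50 50]);    % 2D dataset of two banana-shaped classes
%   d  = sqrt(distm(a));      % NxN symmetric dissimilarity matrix
%   w  = pe_em(d,0.95);       % embedding preserving 95% of the total variance
%   x  = d*w;                 % embedded configuration X = D*W
%   pe_sig(w)                 % signature: numbers of positive/negative directions
%   pe_spec(w)                % spectrum of the selected eigenvalues
%   w2 = pe_em(w,[5 2]);      % reduce to 5 positive and 2 negative dimensions
%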
% SEE ALSO
% MAPPINGS, DATASETS, AUGPSEM, PCA, PE_PCA, PE_SPEC, GETSIG, SETSIG
%
% LITERATURE
% 1. L. Goldfarb, A unified approach to pattern recognition, Pattern Recognition,
%    vol.17, 575-582, 1984.
% 2. E. Pekalska, P. Paclik, and R.P.W. Duin, A Generalized Kernel Approach to
%    Dissimilarity-based Classification, Journal of Machine Learning Research,
%    vol.2, no.2, 175-211, 2002.

% Copyright: Elzbieta Pekalska, ela.pekalska@googlemail.com
% Faculty EWI, Delft University of Technology and
% School of Computer Science, University of Manchester

% This routine is adapted from PSEM.
% Old PSEM mappings W may be converted by W = PE_EM(W)

function [W,sig,L,Q] = pe_em(d,alf,pzero,prec)

% PREC is the precision parameter used for the automatic
% selection (heuristic) of the number of dominant eigenvalues.
% This happens when SELEIGS is called with the parameter 'CUT'.

if nargin < 4, prec  = []; end
if nargin < 3, pzero = []; end
if nargin < 2, alf   = []; end

if nargin < 1 | isempty(d),
  W = mapping(mfilename,{alf,pzero,prec});
  W = setname(W,'PE embedding');
  return
end

if isempty(prec),  prec  = 1e-4; end
if isempty(pzero), pzero = 0;    end
if isempty(alf),   alf   = inf;  end

if (isdataset(d) | isa(d,'double'))
  if ismapping(alf)
    % APPLY THE MAPPING
    [m,n] = size(d);
    d = d.^2;

    Q  = getdata(alf,'evec');   % Eigenvectors
    me = getdata(alf,'mdis');   % Average squared original dissimilarities
                                % (or those to object p, if p > 0)
    p  = getdata(alf,'mean');   % p = 0 -> the mean of the embedded configuration
                                % lies at the origin; otherwise, object p does
    L  = getdata(alf,'eval');   % Eigenvalues

    % Project the new data depending on p
    % (whether the mean or another object lies at the origin)
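    % Both branches first form the matrix of inner products between the new
    % objects and the training configuration (with the chosen origin), and then
    % multiply by Q*diag(sqrt(abs(L))./L): this projects onto the selected
    % eigenvectors and rescales so that the result is expressed in the same
    % coordinates as the training configuration Q*diag(sqrt(abs(L))).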
    if p == 0,
      H = -repmat(1,n,n)/n;
      H(1:n+1:end) = H(1:n+1:end) + 1;   % H = eye(n) - ones(n,n)/n
      W = -0.5 * (d - me(ones(m,1),:)) * H * Q * diag(sqrt(abs(L))./L);
    else
      W = 0.5 * (d(:,p)*ones(1,n) + me(ones(m,1),:) - d) * Q * diag(sqrt(abs(L))./L);
    end

    % Store signature in the USER field
    if isdataset(W),
      W = setname(W,['Projected ' updname(W.name)]);
      W = setsig(W,getdata(alf,'sig'));
    end
    return
  end
end



% REDUCE A TRAINED MAPPING
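% A new ALF is applied to the spectrum already stored in the mapping: the
% eigenvectors and eigenvalues are simply re-selected by SELEIGS, so the
% original dissimilarity matrix is not needed again.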
if ismapping(d)
  data = getdata(d);
  if iscell(data)               % check for the old type of mapping
    ispsem(d);
    dat.evec = data{1};
    dat.mdis = data{2};
    dat.mean = data{3};
    dat.eval = data{4};
    dat.sig  = data{5};
    dat.prec = data{6};
    d = setmapping_file(d,mfilename);
    d = setdata(d,dat);
    if nargin < 2
      W = d;
      return                    % conversion only
    else
      data = dat;
    end
  end
  Q = data.evec;                % Eigenvectors
  L = data.eval;                % Eigenvalues
  m = size(Q,1);

  [ll,K] = sort(-abs(L));
  L = L(K);
  Q = Q(:,K);
  [J,sig] = seleigs(L,alf,getdata(d,'prec'));
  data.evec = Q(:,J);           % Eigenvectors
  data.eval = L(J);             % Eigenvalues

  W = mapping(mfilename,'trained',data,[],m,length(J));
  W = setname(W,'PE embedding');
  return
end



% TRAIN THE MAPPING
% Tolerance value used in comparisons
if mean(+d(:)) < 1,
  tol = 1e-12;
else
  tol = 1e-10;
end

[n,m] = size(d);
if ~issym(d,tol),
  prwarning(1,'Matrix should be symmetric. It is made symmetric by averaging.')
  d = 0.5*(d+d');
end

if pzero > n,
  error('Third parameter P should be an object index between 0 and N.');
end

d = (+d).^2;

if pzero == 0,
  % Project the data such that the mean lies at the origin
  H = -repmat(1/n,n,n);
  H(1:n+1:end) = H(1:n+1:end) + 1;   % H = eye(n) - ones(n,n)/n
  H = -0.5 * H * d * H;              % H is now the matrix of inner products
else
  % Project the data such that the object pzero lies at the origin
  H = 0.5 * (d(:,pzero)*ones(1,n) + ones(n,1)*d(:,pzero)' - d);
end
H = 0.5*(H+H');                      % Make sure H is symmetric
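
% For pzero == 0, H is the double-centered matrix -0.5*(eye(n)-ones(n,n)/n)*D.^2*(eye(n)-ones(n,n)/n)
% of classical scaling. Its eigendecomposition yields the embedded configuration
% Q*diag(sqrt(abs(L))); negative eigenvalues correspond to the 'negative'
% directions of the pseudo-Euclidean space.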

[Q,L] = preig(H);
Q = real(Q);
l = diag(real(L));
[lm,Z] = sort(-abs(l));
Q = Q(:,Z);
l = l(Z);                            % Eigenvalues are sorted by decreasing absolute value

[J,sig] = seleigs(l,alf,prec);       % J is the index of the selected eigenvalues
data.eval = l(J);                    % Eigenvalues
data.evec = Q(:,J);                  % Eigenvectors
data.mdis = mean(+d,2)';
data.mean = pzero;
data.sig  = sig;
data.prec = prec;

%A = Q * diag(sqrt(abs(L)));         % Data in a pseudo-Euclidean space
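% Applying the returned mapping to the training dissimilarities, X = D*W,
% reproduces this configuration restricted to the selected eigenvalues.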

% Determine the mapping depending on pzero
if pzero == 0,
  W = mapping(mfilename,'trained',data,[],m,sum(sig));
else
  data.mdis = +d(:,pzero)';
  W = mapping(mfilename,'trained',data,[],m,sum(sig));
end
W = setname(W,'PE embedding');
return