%TQDC Trade-off Quadratic Discriminant (Regularized Bayes Normal Classifier)
%
%   W = TQDC(A,ALF,R,S,DIM)
%
% INPUT
%   A     NxK dataset (N points in a K-dimensional space)
%   ALF   Trade-off parameter, ALF in [0,1] (optional; default: ALF = 0.1)
%   R,S   Additional regularization parameters, 0 <= R,S <= 1
%         (optional; default: no regularization, i.e. R,S = 0)
%   DIM   Dimension of subspace structure in covariance matrix (default: K)
%
% OUTPUT
%   W     Quadratic Bayes Normal Classifier mapping
%
% DESCRIPTION
% Computation of the quadratic classifier between the classes of the dataset
% A assuming normal densities. Each class covariance matrix Gi (i=1..C) is
% modeled as a convex combination between the original class covariance Gi and
% the diagonal matrix Gdiag retrieved from the overall weighted (by priors)
% covariance matrix. So,
%     Gi = (1-ALF)*Gi + ALF*Gdiag
% If ALF=0, then you will get QDC.
% If ALF=1, then you will get NMSC.
%
% R and S (0 <= R,S <= 1) are additional parameters used for regularizing the
% resulting covariance matrices by
%     Gi = (1-R-S)*Gi + R*diag(diag(Gi)) + S*mean(diag(Gi))*eye(size(Gi,1))
% This covariance matrix is then decomposed as Gi = W*W' + sigma^2 * eye(K),
% where W is a KxM matrix containing the M leading principal components.
%
% The use of soft labels is supported. The classification A*W is computed by
% NORMAL_MAP.
%
% DEFAULT
%   ALF = 0.1
%   R   = 0
%   S   = 0
%   DIM = K (data dimension)
%
% EXAMPLES
% PREX_MCPLOT, PREX_PLOTC.
%
% REFERENCES
% 1. R.O. Duda, P.E. Hart, and D.G. Stork, Pattern classification, 2nd
%    edition, John Wiley and Sons, New York, 2001.
% 2. A. Webb, Statistical Pattern Recognition, John Wiley & Sons,
%    New York, 2002.
%
% SEE ALSO
% MAPPINGS, DATASETS, NMC, NMSC, LDC, UDC, QDC, QUADRC, NORMAL_MAP

% Copyright: R.P.W. Duin, E. Pekalska, D.M.J. Tax and P. Paclik
% ela.pekalska@googlemail.com
% Faculty EWI, Delft University of Technology and
% School of Computer Science, University of Manchester

function w = tqdc(a,alf,r,s,dim)
% TQDC trains a trade-off (regularized) quadratic Bayes normal classifier.
% With no/empty dataset A it returns an untrained mapping; otherwise it
% estimates per-class normal densities and returns a trained NORMAL_MAP
% mapping. See the help header above for the parameter contract.

	% Fill in defaults for any missing inputs.
	if (nargin < 5)
		prwarning(4,'Subspace dimensionality DIM not provided, assuming K.');
		dim = [];
	end
	if (nargin < 4)
		prwarning(4,'Regularisation parameter S not given, assuming 0.');
		s = 0;
	end
	if (nargin < 3)
		prwarning(4,'Regularisation parameter R not given, assuming 0.');
		r = 0;
	end
	if (nargin < 2)
		prwarning(4,'Trade-off parameter ALF not given, assuming 0.1.');
		alf = 0.1;
	end

	% No input arguments: return an UNTRAINED mapping.
	if (nargin < 1) | (isempty(a))
		w = mapping(mfilename,{alf,r,s,dim});
		w = setname(w,'Trade-off Bayes-Normal-2');
		return
	end

	% TRAIN the classifier.
	islabtype(a,'crisp','soft');
	isvaldset(a,2,2);      % at least 2 objects per class and 2 classes

	[m,k,c] = getsize(a);

	% If the subspace dimensionality is not given, set it to the data dimensionality.
	if (isempty(dim)),
		dim = k;
	end

	if (dim < 1) | (dim > k)
		error ('Number of dimensions DIM should lie in the range [1,K].');
	end

	% Assert whether A has the right labtype.
	islabtype(a,'crisp','soft');

	% Get mean vectors and class covariance matrices.
	[U,G] = meancov(a);

	% Store means and priors for NORMAL_MAP.
	pars.mean  = +U;
	pars.prior = getprior(a);

	% In the NMSC limit: diagonal matrix built from the prior-weighted
	% average of the class covariance diagonals.
	Gtot = zeros(c,k);
	for j = 1:c
		Gtot(j,:) = diag(G(:,:,j))';
	end
	Gtot = diag(pars.prior*Gtot);

	% Calculate class covariance matrices.
	pars.cov = zeros(k,k,c);
	for j = 1:c
		F = G(:,:,j);
		F = (1-alf)*F + alf*Gtot;    % convex trade-off: ALF=0 -> QDC, ALF=1 -> NMSC

		% Regularize, if requested.
		if (s > 0) | (r > 0)
			F = (1-r-s)*F + r*diag(diag(F)) + s*mean(diag(F))*eye(size(F,1));
		end

		% If DIM < K, extract the first DIM principal components and estimate
		% the noise outside the subspace.
		if (dim < k)
			[eigvec,eigval] = preig(F);
			eigval = diag(eigval);
			[dummy,ind] = sort(-eigval);    % indices of eigenvalues, descending

			% Estimate sigma^2 as avg. eigenvalue outside subspace.
			sigma2 = mean(eigval(ind(dim+1:end)));

			% Rebuild the covariance from the DIM leading components plus
			% isotropic noise sigma^2 outside (and inside) the subspace.
			F = eigvec(:,ind(1:dim)) * diag(eigval(ind(1:dim))) * eigvec(:,ind(1:dim))' + sigma2 * eye(k);
		end
		pars.cov(:,:,j) = F;
	end

	w = mapping('normal_map','trained',pars,getlab(U),k,c);
	w = setname(w,'Trade-off Bayes-Normal-2');
	w = setcost(w,a);

return;
---|