Commit a16a89e24d0372bae4757c7f186f7bf5eb87e798

Authored by Josh Klontz
1 parent b52a58b6

maintenance

openbr/core/common.h
... ... @@ -118,22 +118,22 @@ T Max(const QList<T> &vals)
118 118 /*!
119 119 * \brief Returns the mean and standard deviation of a vector of values.
120 120 */
121   -template <typename T>
122   -void Mean(const QList<T> &vals, double *mean)
  121 +template <template<class> class V, typename T>
  122 +void Mean(const V<T> &vals, double *mean)
123 123 {
124 124 const int size = vals.size();
125 125  
126 126 // Compute Mean
127 127 double sum = 0;
128   - for (int i=0; i<size; i++) sum += vals[i];
  128 + foreach (int val, vals) sum += val;
129 129 *mean = (size == 0) ? 0 : sum / size;
130 130 }
131 131  
132 132 /*!
133 133 * \brief Returns the mean and standard deviation of a vector of values.
134 134 */
135   -template <typename T>
136   -void MeanStdDev(const QList<T> &vals, double *mean, double *stddev)
  135 +template <template<class> class V, typename T>
  136 +void MeanStdDev(const V<T> &vals, double *mean, double *stddev)
137 137 {
138 138 const int size = vals.size();
139 139  
... ... @@ -141,8 +141,8 @@ void MeanStdDev(const QList<T> &vals, double *mean, double *stddev)
141 141  
142 142 // Compute Standard Deviation
143 143 double variance = 0;
144   - for (int i=0; i<size; i++) {
145   - double delta = vals[i] - *mean;
  144 + foreach (T val, vals) {
  145 + const double delta = val - *mean;
146 146 variance += delta * delta;
147 147 }
148 148 *stddev = (size == 0) ? 0 : sqrt(variance/size);
... ... @@ -193,8 +193,8 @@ QList<T> CumSum(const QList<T> &vals)
193 193 /*!
194 194 * \brief Calculate DKE bandwidth parameter 'h'
195 195 */
196   -template <typename T>
197   -double KernelDensityBandwidth(const QList<T> &vals)
  196 +template <template<class> class V, typename T>
  197 +double KernelDensityBandwidth(const V<T> &vals)
198 198 {
199 199 double mean, stddev;
200 200 MeanStdDev(vals, &mean, &stddev);
... ... @@ -204,8 +204,8 @@ double KernelDensityBandwidth(const QList<T> &vals)
204 204 /*!
205 205 * \brief Compute kernel density at value x with bandwidth h.
206 206 */
207   -template <typename T>
208   -double KernelDensityEstimation(const QList<T> &vals, double x, double h)
  207 +template <template<class> class V, typename T>
  208 +double KernelDensityEstimation(const V<T> &vals, double x, double h)
209 209 {
210 210 double y = 0;
211 211 foreach (T val, vals)
... ... @@ -320,16 +320,15 @@ QList<T> RemoveOutliers(QList<T> vals)
320 320 /*!
321 321 * \brief Sorts and evenly downsamples a vector to size k.
322 322 */
323   -template <typename T>
324   -QList<T> Downsample(QList<T> vals, long k)
  323 +template <template<class> class V, typename T>
  324 +V<T> Downsample(V<T> vals, int k)
325 325 {
326   - // Use 'long' instead of 'int' so multiplication doesn't overflow
327   - qSort(vals);
328   - long size = (long)vals.size();
  326 + std::sort(vals.begin(), vals.end());
  327 + int size = vals.size();
329 328 if (size <= k) return vals;
330 329  
331   - QList<T> newVals; newVals.reserve(k);
332   - for (long i=0; i<k; i++) newVals.push_back(vals[i * (size-1) / (k-1)]);
  330 + V<T> newVals; newVals.reserve(k);
  331 + for (int i=0; i<k; i++) newVals.push_back(vals[long(i) * long(size-1) / long(k-1)]);
333 332 return newVals;
334 333 }
335 334  
... ...
openbr/plugins/algorithms.cpp
... ... @@ -42,7 +42,7 @@ class AlgorithmsInitializer : public Initializer
42 42 Globals->abbreviations.insert("OpenBR", "FaceRecognition");
43 43 Globals->abbreviations.insert("GenderEstimation", "GenderClassification");
44 44 Globals->abbreviations.insert("AgeEstimation", "AgeRegression");
45   - Globals->abbreviations.insert("FaceRecognitionHoG", "Open+Cvt(Gray)+Cascade(FrontalFace)+ASEFEyes+Affine(64,64,0.25,0.35)+Gradient+Bin(0,360,8,true)+Merge+Integral+IntegralSampler+RootNorm+ProductQuantization(2,true):ProductQuantization(true)");
  45 + Globals->abbreviations.insert("FaceRecognitionHoG", "Open+Cvt(Gray)+Cascade(FrontalFace)+ASEFEyes+Affine(64,64,0.25,0.35)+Gradient+Bin(0,360,8,true)+Merge+Integral+IntegralSampler+ProductQuantization(2,L1,true):ProductQuantization(true)");
46 46  
47 47 // Generic Image Processing
48 48 Globals->abbreviations.insert("SIFT", "Open+KeyPointDetector(SIFT)+KeyPointDescriptor(SIFT):KeyPointMatcher(BruteForce)");
... ...
openbr/plugins/distance.cpp
... ... @@ -37,6 +37,7 @@ class DistDistance : public Distance
37 37 Q_OBJECT
38 38 Q_ENUMS(Metric)
39 39 Q_PROPERTY(Metric metric READ get_metric WRITE set_metric RESET reset_metric STORED false)
  40 + Q_PROPERTY(bool negLogPlusOne READ get_negLogPlusOne WRITE set_negLogPlusOne RESET reset_negLogPlusOne STORED false)
40 41  
41 42 public:
42 43 /*!< */
... ... @@ -51,6 +52,7 @@ public:
51 52  
52 53 private:
53 54 BR_PROPERTY(Metric, metric, L2)
  55 + BR_PROPERTY(bool, negLogPlusOne, true)
54 56  
55 57 float compare(const Template &a, const Template &b) const
56 58 {
... ... @@ -61,8 +63,7 @@ private:
61 63 float result = std::numeric_limits<float>::max();
62 64 switch (metric) {
63 65 case Correlation:
64   - result = -compareHist(a, b, CV_COMP_CORREL);
65   - break;
  66 + return compareHist(a, b, CV_COMP_CORREL);
66 67 case ChiSquared:
67 68 result = compareHist(a, b, CV_COMP_CHISQR);
68 69 break;
... ... @@ -82,8 +83,7 @@ private:
82 83 result = norm(a, b, NORM_L2);
83 84 break;
84 85 case Cosine:
85   - result = cosine(a, b);
86   - break;
  86 + return cosine(a, b);
87 87 default:
88 88 qFatal("Invalid metric");
89 89 }
... ... @@ -91,7 +91,7 @@ private:
91 91 if (result != result)
92 92 qFatal("NaN result.");
93 93  
94   - return -log(result+1);
  94 + return negLogPlusOne ? -log(result+1) : result;
95 95 }
96 96  
97 97 static float cosine(const Mat &a, const Mat &b)
... ...
openbr/plugins/quantize.cpp
... ... @@ -18,6 +18,7 @@
18 18 #include <QtConcurrentRun>
19 19 #include <openbr/openbr_plugin.h>
20 20  
  21 +#include "openbr/core/common.h"
21 22 #include "openbr/core/opencvutils.h"
22 23  
23 24 using namespace cv;
... ... @@ -133,7 +134,7 @@ class ProductQuantizationDistance : public Distance
133 134 const uchar *bData = b[i].data;
134 135 const float *lut = (const float*)ProductQuantizationLUTs[i].data;
135 136 for (int j=0; j<elements; j++)
136   - distance += lut[i*256*256 + aData[j]*256+bData[j]];
  137 + distance += lut[j*256*256 + aData[j]*256+bData[j]];
137 138 }
138 139 if (!bayesian) distance = -log(distance+1);
139 140 return distance;
... ... @@ -151,8 +152,10 @@ class ProductQuantizationTransform : public Transform
151 152 {
152 153 Q_OBJECT
153 154 Q_PROPERTY(int n READ get_n WRITE set_n RESET reset_n STORED false)
  155 + Q_PROPERTY(br::Distance *distance READ get_distance WRITE set_distance RESET reset_distance STORED false)
154 156 Q_PROPERTY(bool bayesian READ get_bayesian WRITE set_bayesian RESET reset_bayesian STORED false)
155 157 BR_PROPERTY(int, n, 2)
  158 + BR_PROPERTY(br::Distance*, distance, Distance::make("L2", this))
156 159 BR_PROPERTY(bool, bayesian, false)
157 160  
158 161 int index;
... ... @@ -166,76 +169,82 @@ public:
166 169 }
167 170  
168 171 private:
169   - static double likelihoodRatio(const QPair<int,int> &totals, const QList<int> &targets, const QList<int> &queries)
  172 + void _train(const Mat &data, const QList<int> &labels, Mat *lut, Mat *center)
170 173 {
171   - int positives = 1, negatives = 1; // Equal priors
172   - foreach (int t, targets)
173   - foreach (int q, queries)
174   - if (t == q) positives++;
175   - else negatives++;
176   - return log((float(positives)/float(totals.first)) / (float(negatives)/float(totals.second)));
177   - }
  174 + Mat clusterLabels;
  175 + kmeans(data, 256, clusterLabels, TermCriteria(TermCriteria::MAX_ITER, 10, 0), 3, KMEANS_PP_CENTERS, *center);
178 176  
179   - void _train(const Mat &data, const QPair<int,int> &totals, Mat &lut, int i, const QList<int> &templateLabels)
180   - {
181   - Mat labels, center;
182   - kmeans(data.colRange(i*n,(i+1)*n), 256, labels, TermCriteria(TermCriteria::MAX_ITER, 10, 0), 3, KMEANS_PP_CENTERS, center);
183   - QList<int> clusterLabels = OpenCVUtils::matrixToVector<int>(labels);
  177 + for (int j=0; j<256; j++)
  178 + for (int k=0; k<256; k++)
  179 + lut->at<float>(0,j*256+k) = distance->compare(center->row(j), center->row(k));
  180 +
  181 + if (!bayesian) return;
  182 +
  183 + QList<int> indicies = OpenCVUtils::matrixToVector<int>(clusterLabels);
  184 + QVector<float> genuineScores; genuineScores.reserve(data.rows);
  185 + QVector<float> impostorScores; impostorScores.reserve(data.rows*data.rows/2);
  186 + for (int i=0; i<indicies.size(); i++)
  187 + for (int j=i+1; j<indicies.size(); j++) {
  188 + const float score = lut->at<float>(0, indicies[i]*256+indicies[j]);
  189 + if (labels[i] == labels[j]) genuineScores.append(score);
  190 + else impostorScores.append(score);
  191 + }
  192 + genuineScores = Common::Downsample(genuineScores, 256);
  193 + impostorScores = Common::Downsample(impostorScores, 256);
184 194  
185   - QHash< int, QList<int> > clusters; // QHash<clusterLabel, QList<templateLabel>>
186   - for (int j=0; j<clusterLabels.size(); j++)
187   - clusters[clusterLabels[j]].append(templateLabels[j]);
  195 + double hGenuine = Common::KernelDensityBandwidth(genuineScores);
  196 + double hImpostor = Common::KernelDensityBandwidth(impostorScores);
188 197  
189 198 for (int j=0; j<256; j++)
190 199 for (int k=0; k<256; k++)
191   - lut.at<float>(i,j*256+k) = bayesian ? likelihoodRatio(totals, clusters[j], clusters[k]) :
192   - norm(center.row(j), center.row(k), NORM_L2);
193   - centers[i] = center;
  200 + lut->at<float>(0,j*256+k) = log(Common::KernelDensityEstimation(genuineScores, lut->at<float>(0,j*256+k), hGenuine) /
  201 + Common::KernelDensityEstimation(impostorScores, lut->at<float>(0,j*256+k), hImpostor));
194 202 }
195 203  
196 204 void train(const TemplateList &src)
197 205 {
198 206 Mat data = OpenCVUtils::toMat(src.data());
199 207 if (data.cols % n != 0) qFatal("Expected dimensionality to be divisible by n.");
200   - const QList<int> templateLabels = src.labels<int>();
201   - int totalPositives = 0, totalNegatives = 0;
202   - for (int i=0; i<templateLabels.size(); i++)
203   - for (int j=0; j<templateLabels.size(); j++)
204   - if (templateLabels[i] == templateLabels[j]) totalPositives++;
205   - else totalNegatives++;
206   - QPair<int,int> totals(totalPositives, totalNegatives);
  208 + const QList<int> labels = src.labels<int>();
207 209  
208 210 Mat &lut = ProductQuantizationLUTs[index];
209 211 lut = Mat(data.cols/n, 256*256, CV_32FC1);
210 212  
211   - for (int i=0; i<lut.rows; i++)
  213 + QList<Mat> subdata, subluts;
  214 + for (int i=0; i<lut.rows; i++) {
212 215 centers.append(Mat());
  216 + subdata.append(data.colRange(i*n,(i+1)*n));
  217 + subluts.append(lut.row(i));
  218 + }
213 219  
214 220 QFutureSynchronizer<void> futures;
215 221 for (int i=0; i<lut.rows; i++) {
216   - if (Globals->parallelism) futures.addFuture(QtConcurrent::run(this, &ProductQuantizationTransform::_train, data, totals, lut, i, templateLabels));
217   - else _train (data, totals, lut, i, templateLabels);
  222 + if (Globals->parallelism) futures.addFuture(QtConcurrent::run(this, &ProductQuantizationTransform::_train, subdata[i], labels, &subluts[i], &centers[i]));
  223 + else _train (subdata[i], labels, &subluts[i], &centers[i]);
218 224 }
219 225 futures.waitForFinished();
220 226 }
221 227  
  228 + int getIndex(const Mat &m, const Mat &center) const
  229 + {
  230 + int bestIndex = 0;
  231 + double bestDistance = std::numeric_limits<double>::max();
  232 + for (int j=0; j<256; j++) {
  233 + double distance = norm(m, center.row(j), NORM_L2);
  234 + if (distance < bestDistance) {
  235 + bestDistance = distance;
  236 + bestIndex = j;
  237 + }
  238 + }
  239 + return bestIndex;
  240 + }
  241 +
222 242 void project(const Template &src, Template &dst) const
223 243 {
224 244 Mat m = src.m().reshape(1, 1);
225 245 dst = Mat(1, m.cols/n, CV_8UC1);
226   - for (int i=0; i<dst.m().cols; i++) {
227   - int bestIndex = 0;
228   - double bestDistance = std::numeric_limits<double>::max();
229   - Mat m_i = m.colRange(i*n, (i+1)*n);
230   - for (int j=0; j<256; j++) {
231   - double distance = norm(m_i, centers[index].row(j), NORM_L2);
232   - if (distance < bestDistance) {
233   - bestDistance = distance;
234   - bestIndex = j;
235   - }
236   - }
237   - dst.m().at<uchar>(0,i) = bestIndex;
238   - }
  246 + for (int i=0; i<dst.m().cols; i++)
  247 + dst.m().at<uchar>(0,i) = getIndex(m.colRange(i*n, (i+1)*n), centers[i]);
239 248 }
240 249  
241 250 void store(QDataStream &stream) const
... ...