Commit a28029c8aaca3409a327de87b6a73ce2895331e1

Authored by Scott Klum
2 parents f2e360a1 a0c58d41

Merge branch 'master' of https://github.com/biometrics/openbr

CHANGELOG.md
... ... @@ -6,6 +6,7 @@
6 6 * NEC3 refactored
7 7 * Updated transform API to add support for time-varying transforms per issue (#23)
8 8 * Refactored File class to improve point and rect storage (#22)
  9 +* Added algorithm to show face detection results (#25)
9 10  
10 11 0.2.0 - 2/23/13
11 12 ===============
... ...
app/br/CMakeLists.txt
... ... @@ -8,3 +8,4 @@ install(TARGETS br RUNTIME DESTINATION bin)
8 8  
9 9 add_test(NAME br_initialize WORKING_DIRECTORY ${CMAKE_BINARY_DIR} COMMAND br)
10 10 add_test(NAME br_objects WORKING_DIRECTORY ${CMAKE_BINARY_DIR} COMMAND br -objects)
  11 +add_test(NAME br_draw_face_detection WORKING_DIRECTORY ${CMAKE_BINARY_DIR} COMMAND br -algorithm DrawFaceDetection -enroll ../data/family.jpg)
... ...
app/br/br.cpp
... ... @@ -31,10 +31,20 @@
31 31 * \endcode
32 32 *
33 33 * \section cli_examples Examples
  34 + * - \ref cli_show_face_detection
34 35 * - \ref cli_age_estimation
35 36 * - \ref cli_face_recognition
36 37 * - \ref cli_face_recognition_evaluation
37 38 * - \ref cli_gender_estimation
  39 + * - \ref cli_show_face_detection
  40 + */
  41 +
  42 +/*!
  43 + * \ingroup cli
  44 + * \page cli_show_face_detection Show Face Detection
  45 + * \code
  46 + * $ br -algorithm ShowFaceDetection -enrollAll -enroll ../data/family.jpg # Press 'Enter' to cycle through the results
  47 + * \endcode
38 48 */
39 49  
40 50 static void help()
... ...
data deleted
1   -Subproject commit 3501de8f90e2ec366ea418c7d5d2ef8beb612e73
scripts/trainAgeRegression-PCSO.sh 0 โ†’ 100755
  1 +#!/bin/bash
  2 +if [ ! -f trainAgeRegression-PCSO.sh ]; then
  3 + echo "Run this script from the scripts folder!"
  4 + exit
  5 +fi
  6 +
  7 +#rm -f ../models/features/FaceClassificationRegistration
  8 +#rm -f ../models/features/FaceClassificationExtraction
  9 +rm -f ../models/algorithms/AgeRegression
  10 +
  11 +br -algorithm AgeRegression -path ../data/PCSO/Images -train "../data/PCSO/PCSO.db[query='SELECT File,Age,PersonID FROM PCSO WHERE Age >= 15 AND AGE <= 75', subset=0:200]" ../share/openbr/models/algorithms/AgeRegression
... ...
scripts/trainFaceRecognition-PCSO.sh 0 โ†’ 100755
  1 +#!/bin/bash
  2 +if [ ! -f trainFaceRecognition-PCSO.sh ]; then
  3 + echo "Run this script from the scripts folder!"
  4 + exit
  5 +fi
  6 +
  7 +#rm -f ../models/features/FaceRecognitionRegistration
  8 +#rm -f ../models/features/FaceRecognitionExtraction
  9 +#rm -f ../models/features/FaceRecognitionEmbedding
  10 +#rm -f ../models/features/FaceRecognitionQuantization
  11 +rm -f ../models/algorithms/FaceRecognition
  12 +
  13 +br -algorithm FaceRecognition -path ../data/PCSO/Images -train "../data/PCSO/PCSO.db[query='SELECT File,'S'||PersonID,PersonID FROM PCSO', subset=0:5:6000]" ../share/openbr/models/algorithms/FaceRecognition
... ...
scripts/trainGenderClassification-PCSO.sh 0 โ†’ 100755
  1 +#!/bin/bash
  2 +if [ ! -f trainGenderClassification-PCSO.sh ]; then
  3 + echo "Run this script from the scripts folder!"
  4 + exit
  5 +fi
  6 +
  7 +#rm -f ../models/features/FaceClassificationRegistration
  8 +#rm -f ../models/features/FaceClassificationExtraction
  9 +rm -f ../models/algorithms/GenderClassification
  10 +
  11 +br -algorithm GenderClassification -path ../data/PCSO/Images -train "../data/PCSO/PCSO.db[query='SELECT File,Gender,PersonID FROM PCSO', subset=0:8000]" ../share/openbr/models/algorithms/GenderClassification
... ...
sdk/core/bee.cpp
... ... @@ -199,6 +199,7 @@ void writeMatrix(const Mat &m, const QString &matrix, const QString &targetSigse
199 199  
200 200 char buff[4];
201 201 QFile file(matrix);
  202 + QtUtils::touchDir(file);
202 203 bool success = file.open(QFile::WriteOnly); if (!success) qFatal("Unable to open %s for writing.", qPrintable(matrix));
203 204 file.write("S2\n");
204 205 file.write(qPrintable(QFileInfo(targetSigset).fileName()));
... ...
sdk/openbr_plugin.cpp
... ... @@ -1108,7 +1108,7 @@ private:
1108 1108 QList<TemplateList> templatesList;
1109 1109 foreach (const Template &t, data) {
1110 1110 if ((templatesList.size() != t.size()) && !templatesList.isEmpty())
1111   - qWarning("Independent::train template %s of size %d differs from expected size %d.", qPrintable((QString)t.file), t.size(), templatesList.size());
  1111 + qWarning("Independent::train template %s of size %d differs from expected size %d.", qPrintable(t.file.name), t.size(), templatesList.size());
1112 1112 while (templatesList.size() < t.size())
1113 1113 templatesList.append(TemplateList());
1114 1114 for (int i=0; i<t.size(); i++)
... ... @@ -1134,11 +1134,13 @@ private:
1134 1134 void project(const Template &src, Template &dst) const
1135 1135 {
1136 1136 dst.file = src.file;
  1137 + QList<Mat> mats;
1137 1138 for (int i=0; i<src.size(); i++) {
1138   - Template m;
1139   - transforms[i%transforms.size()]->project(Template(src.file, src[i]), m);
1140   - dst.merge(m);
  1139 + transforms[i%transforms.size()]->project(Template(src.file, src[i]), dst);
  1140 + mats.append(dst);
  1141 + dst.clear();
1141 1142 }
  1143 + dst.append(mats);
1142 1144 }
1143 1145  
1144 1146 void store(QDataStream &stream) const
... ...
sdk/plugins/algorithms.cpp
... ... @@ -38,6 +38,8 @@ class AlgorithmsInitializer : public Initializer
38 38 Globals->abbreviations.insert("FaceQuality", "Open!Cascade(FrontalFace)+ASEFEyes+Affine(64,64,0.25,0.35)+ImageQuality+Cvt(Gray)+DFFS+Discard");
39 39 Globals->abbreviations.insert("MedianFace", "Open!Cascade(FrontalFace)+ASEFEyes+Affine(256,256,0.37,0.45)+Center(Median)");
40 40 Globals->abbreviations.insert("BlurredFaceDetection", "Open+LimitSize(1024)+SkinMask/(Cvt(Gray)+GradientMask)+And+Morph(Erode,16)+LargestConvexArea");
  41 + Globals->abbreviations.insert("DrawFaceDetection", "Open+Cascade(FrontalFace)!ASEFEyes+Draw");
  42 + Globals->abbreviations.insert("ShowFaceDetection", "DrawFaceDetection!Show");
41 43 Globals->abbreviations.insert("OpenBR", "FaceRecognition");
42 44 Globals->abbreviations.insert("GenderEstimation", "GenderClassification");
43 45 Globals->abbreviations.insert("AgeEstimation", "AgeRegression");
... ... @@ -48,6 +50,7 @@ class AlgorithmsInitializer : public Initializer
48 50 Globals->abbreviations.insert("SmallSIFT", "Open+LimitSize(512)+KeyPointDetector(SIFT)+KeyPointDescriptor(SIFT):KeyPointMatcher(BruteForce)");
49 51 Globals->abbreviations.insert("SmallSURF", "Open+LimitSize(512)+KeyPointDetector(SURF)+KeyPointDescriptor(SURF):KeyPointMatcher(BruteForce)");
50 52 Globals->abbreviations.insert("ColorHist", "Open+LimitSize(512)!EnsureChannels(3)+SplitChannels+Hist(256,0,8)+Cat+Normalize(L1):L2");
  53 + Globals->abbreviations.insert("IHH", "Open+SplitChannels/(Cvt(Gray)+Gradient+Bin(0,6.283,8,true))+Integral+Merge+IntegralSampler");
51 54  
52 55 // Hash
53 56 Globals->abbreviations.insert("FileName", "Name+Identity:Identical");
... ...
sdk/plugins/ct8.cpp
... ... @@ -236,7 +236,6 @@ struct CT8Context
236 236 FRsdk::SampleSet sampleSet;
237 237 sampleSet.push_back(sample);
238 238  
239   -
240 239 FRsdk::EnrolOpenCVFeedback * feedback_body = new FRsdk::EnrolOpenCVFeedback(m);
241 240 FRsdk::CountedPtr<FRsdk::Enrollment::FeedbackBody> feedback_ptr(feedback_body);
242 241  
... ... @@ -300,14 +299,14 @@ protected:
300 299 * \author Josh Klontz \cite jklontz
301 300 * \author Charles Otto \cite caotto
302 301 */
303   -struct CT8Detect : public UntrainableTransform
304   - , public CT8Context
  302 +class CT8DetectTransform : public UntrainableTransform
  303 + , public CT8Context
305 304 {
306   -public:
307 305 Q_OBJECT
308   -
309 306 Q_PROPERTY(float minRelEyeDistance READ get_minRelEyeDistance WRITE set_minRelEyeDistance RESET reset_minRelEyeDistance STORED false)
310 307 Q_PROPERTY(float maxRelEyeDistance READ get_maxRelEyeDistance WRITE set_maxRelEyeDistance RESET reset_maxRelEyeDistance STORED false)
  308 + BR_PROPERTY(float, minRelEyeDistance, 0.01f)
  309 + BR_PROPERTY(float, maxRelEyeDistance, 0.4f)
311 310  
312 311 // Perform face, then eye detection using the facevacs SDK
313 312 void project(const Template &src, Template &dst) const
... ... @@ -316,57 +315,39 @@ public:
316 315 // Build an FRsdk image from the input openCV mat
317 316 FRsdk::CountedPtr<FRsdk::ImageBody> i(new FRsdk::OpenCVImageBody(src));
318 317 FRsdk::Image img(i);
319   -
320 318 FRsdk::Face::LocationSet faceLocations = faceFinder->find(img, minRelEyeDistance, maxRelEyeDistance);
321   -
322   - // If the face finder doesn't find anything mark the output as a failure
323   - if (faceLocations.empty() ) {
324   - dst.file.setBool("FTE");
325   - return;
326   - }
327   -
328   - QList<QRectF> ROIs;
329   - QList<QPointF> landmarks;
330   - FRsdk::Face::LocationSet::const_iterator faceLocationSetIterator = faceLocations.begin();
331   - bool any_eyes = false;
332 319  
333 320 // Attempt to detect eyes in any face ROIs that were detected
  321 + QList<QRectF> rects;
  322 + QList<QPointF> points;
  323 + FRsdk::Face::LocationSet::const_iterator faceLocationSetIterator = faceLocations.begin();
334 324 while (faceLocationSetIterator != faceLocations.end()) {
335 325 FRsdk::Face::Location faceLocation = *faceLocationSetIterator; faceLocationSetIterator++;
336 326 FRsdk::Eyes::LocationSet currentEyesLocations = eyesFinder->find(img, faceLocation);
337 327  
338 328 if (currentEyesLocations.size() > 0) {
339   - any_eyes = true;
340   - ROIs.append(QRectF(faceLocation.pos.x(), faceLocation.pos.y(), faceLocation.width, faceLocation.width));
341   - landmarks.append(QPointF(currentEyesLocations.front().first.x(), currentEyesLocations.front().first.y()));
342   - landmarks.append(QPointF(currentEyesLocations.front().second.x(), currentEyesLocations.front().second.y()));
343   -
344   - dst += src;
  329 + rects.append(QRectF(faceLocation.pos.x(), faceLocation.pos.y(), faceLocation.width, faceLocation.width));
  330 + points.append(QPointF(currentEyesLocations.front().first.x(), currentEyesLocations.front().first.y()));
  331 + points.append(QPointF(currentEyesLocations.front().second.x(), currentEyesLocations.front().second.y()));
  332 + dst += src.m();
  333 + if (!Globals->enrollAll) break;
345 334 }
346   -
347   - if (any_eyes && !Globals->enrollAll && !dst.isEmpty()) break;
348 335 }
349 336  
350 337 // If eye detection failed, mark the output as a failure
351   - if (!any_eyes) {
352   - dst.file.setBool("FTE");
353   - return;
  338 + if (dst.isEmpty()) {
  339 + dst.file.set("FTE", true);
  340 + if (!Globals->enrollAll) dst += Mat();
354 341 }
355   -
356   - dst.file.setROIs(ROIs);
357   - dst.file.setLandmarks(landmarks);
  342 + dst.file.appendRects(rects);
  343 + dst.file.appendPoints(points);
358 344 } catch (std::exception &e) {
359 345 qFatal("CT8Enroll Exception: %s", e.what());
360 346 }
361   -
362   - if (!Globals->enrollAll && dst.isEmpty()) dst += Mat();
363   - }
364   -private:
365   - BR_PROPERTY(float, minRelEyeDistance, 0.01f)
366   - BR_PROPERTY(float, maxRelEyeDistance, 0.4f)
  347 + }
367 348 };
368 349  
369   -BR_REGISTER(Transform, CT8Detect)
  350 +BR_REGISTER(Transform, CT8DetectTransform)
370 351  
371 352 /*!
372 353 * \ingroup transforms
... ... @@ -374,10 +355,11 @@ BR_REGISTER(Transform, CT8Detect)
374 355 * \author Josh Klontz \cite jklontz
375 356 * \author Charles Otto \cite caotto
376 357 */
377   -struct CT8Enroll : public UntrainableTransform
378   - , public CT8Context
  358 +class CT8EnrollTransform : public UntrainableTransform
  359 + , public CT8Context
379 360 {
380 361 Q_OBJECT
  362 +
381 363 // enroll an image using the facevacs sdk. Generates a facevacs "fir" which
382 364 // is their face representation.
383 365 void project(const Template &src, Template &dst) const
... ... @@ -387,37 +369,31 @@ struct CT8Enroll : public UntrainableTransform
387 369 FRsdk::Image img(i);
388 370  
389 371 // If we already have eye locations, use them
390   - QList<QPointF> landmarks = src.file.landmarks();
391   - bool enroll_succeeded = false;
392   - if (landmarks.size() == 2) {
393   - enroll_succeeded = enroll(img, FRsdk::Eyes::Location(toPosition(landmarks[0]), toPosition(landmarks[1])), &(dst.m()));
  372 + QList<QPointF> points = src.file.points();
  373 + bool enrollSucceeded = false;
  374 + if (points.size() == 2) {
  375 + enrollSucceeded = enroll(img, FRsdk::Eyes::Location(toPosition(points[0]), toPosition(points[1])), &(dst.m()));
394 376  
395 377 // Transfer previously detected eye and face locations to the output dst.
396   - dst.file.insert("CT8_First_Eye_X", landmarks[0].x());
397   - dst.file.insert("CT8_First_Eye_Y", landmarks[0].y());
398   - dst.file.insert("CT8_Second_Eye_X", landmarks[1].x());
399   - dst.file.insert("CT8_Second_Eye_Y", landmarks[1].y());
400   -
401   - QList<QRectF> ROIs = src.file.ROIs();
402   - if (ROIs.size() == 1) {
403   - dst.file.insert("CT8_Face_X", ROIs.first().x());
404   - dst.file.insert("CT8_Face_Y", ROIs.first().y());
405   - dst.file.insert("CT8_Face_Width", ROIs.first().width());
406   - dst.file.insert("CT8_Face_Height", ROIs.first().height());
407   - }
  378 + dst.file.set("First_Eye", points[0]);
  379 + dst.file.set("Second_Eye", points[1]);
  380 +
  381 + QList<QRectF> rects = src.file.rects();
  382 + if (rects.size() == 1)
  383 + dst.file.set("Face", rects.first());
408 384 } else {
409 385 // If we don't have eye locations already, calling enroll here
410 386 // will cause facevacs to perform detection using default
411 387 // parameters (and we will not receive the detected locations
412 388 // as output).
413   - enroll_succeeded = enroll(img, &(dst.m()));
  389 + enrollSucceeded = enroll(img, &(dst.m()));
414 390 }
  391 +
415 392 // If enrollment failed, mark this image as a failure. This will
416 393 // typically only happen if we aren't using pre-detected eye
417 394 // locations
418   - if (!enroll_succeeded)
419   - {
420   - dst.file.setBool("FTE");
  395 + if (!enrollSucceeded) {
  396 + dst.file.set("FTE", true);
421 397 dst.m() = Mat();
422 398 }
423 399 } catch (std::exception &e) {
... ... @@ -426,7 +402,7 @@ struct CT8Enroll : public UntrainableTransform
426 402 }
427 403 };
428 404  
429   -BR_REGISTER(Transform, CT8Enroll)
  405 +BR_REGISTER(Transform, CT8EnrollTransform)
430 406  
431 407 /*!
432 408 * \ingroup distances
... ...
sdk/plugins/draw.cpp
... ... @@ -32,14 +32,14 @@ namespace br
32 32 class DrawTransform : public UntrainableTransform
33 33 {
34 34 Q_OBJECT
35   - Q_PROPERTY(bool verbose READ get_verbose WRITE set_verbose RESET reset_verbose STORED false)
36 35 Q_PROPERTY(bool named READ get_named WRITE set_named RESET reset_named STORED false)
37   - Q_PROPERTY(bool unnamed READ get_unnamed WRITE set_unnamed RESET reset_unnamed STORED false)
38   - Q_PROPERTY(bool ROI READ get_ROI WRITE set_ROI RESET reset_ROI STORED false)
39   - BR_PROPERTY(bool, verbose, false)
  36 + Q_PROPERTY(bool verbose READ get_verbose WRITE set_verbose RESET reset_verbose STORED false)
  37 + Q_PROPERTY(bool points READ get_points WRITE set_points RESET reset_points STORED false)
  38 + Q_PROPERTY(bool rects READ get_rects WRITE set_rects RESET reset_rects STORED false)
40 39 BR_PROPERTY(bool, named, true)
41   - BR_PROPERTY(bool, unnamed, true)
42   - BR_PROPERTY(bool, ROI, true)
  40 + BR_PROPERTY(bool, verbose, false)
  41 + BR_PROPERTY(bool, points, true)
  42 + BR_PROPERTY(bool, rects, true)
43 43  
44 44 void project(const Template &src, Template &dst) const
45 45 {
... ... @@ -47,26 +47,18 @@ class DrawTransform : public UntrainableTransform
47 47 const Scalar verboseColor(255, 255, 0);
48 48 dst = src.m().clone();
49 49  
50   - QList<Point2f> landmarks = OpenCVUtils::toPoints(src.file.points());
51   -
52   - if (unnamed) {
53   - foreach (const Point2f &landmark, landmarks)
54   - circle(dst, landmark, 3, color, -1);
55   - }
56   - if (named) {
57   - QList<Point2f> namedLandmarks = OpenCVUtils::toPoints(src.file.namedPoints());
58   - foreach (const Point2f &landmark, namedLandmarks)
59   - circle(dst, landmark, 3, color);
  50 + if (points) {
  51 + const QList<Point2f> pointsList = OpenCVUtils::toPoints(named ? src.file.namedPoints() : src.file.points());
  52 + for (int i=0; i<pointsList.size(); i++) {
  53 + const Point2f &point = pointsList[i];
  54 + circle(dst, point, 3, color);
  55 + if (verbose) putText(dst, QString::number(i).toStdString(), point, FONT_HERSHEY_SIMPLEX, 0.5, verboseColor, 1);
  56 + }
60 57 }
61   - if (ROI) {
62   - QList<Rect> ROIs = OpenCVUtils::toRects(src.file.rects());
63   - foreach (const Rect ROI, ROIs)
64   - rectangle(dst, ROI, color);
  58 + if (rects) {
  59 + foreach (const Rect &rect, OpenCVUtils::toRects(named ? src.file.namedRects() : src.file.rects()))
  60 + rectangle(dst, rect, color);
65 61 }
66   -
67   - if (verbose)
68   - for (int i=0; i<landmarks.size(); i++)
69   - putText(dst, QString::number(i).toStdString(), landmarks[i], FONT_HERSHEY_SIMPLEX, 0.5, verboseColor, 1);
70 62 }
71 63 };
72 64  
... ...
sdk/plugins/eyes.cpp
... ... @@ -182,11 +182,12 @@ private:
182 182 float second_eye_x = (right_rect.x + maxLoc.x)*gray.cols/width+roi.x;
183 183 float second_eye_y = (right_rect.y + maxLoc.y)*gray.rows/height+roi.y;
184 184  
185   - dst = src;
  185 + dst.m() = src.m();
186 186 dst.file.appendPoint(QPointF(first_eye_x, first_eye_y));
187 187 dst.file.appendPoint(QPointF(second_eye_x, second_eye_y));
188   - dst.file.set("ASEF_Right_Eye", QPointF(first_eye_x, first_eye_y));
189   - dst.file.set("ASEF_Left_Eye", QPointF(second_eye_x, second_eye_y));
  188 + dst.file.set("First_Eye", QPointF(first_eye_x, first_eye_y));
  189 + dst.file.set("Second_Eye", QPointF(second_eye_x, second_eye_y));
  190 + dst.file.set("Face", QRect(roi.x, roi.y, roi.width, roi.height));
190 191 }
191 192 };
192 193  
... ...
sdk/plugins/gallery.cpp
... ... @@ -345,6 +345,10 @@ class csvGallery : public Gallery
345 345 if (!samples.contains(key))
346 346 samples.insert(key, file.value(key));
347 347  
  348 + // Don't create columns in the CSV for these special fields
  349 + samples.remove("Points");
  350 + samples.remove("Rects");
  351 +
348 352 QStringList lines;
349 353 lines.reserve(files.size()+1);
350 354  
... ...
sdk/plugins/hist.cpp
... ... @@ -164,91 +164,6 @@ class IntegralHistTransform : public UntrainableTransform
164 164  
165 165 BR_REGISTER(Transform, IntegralHistTransform)
166 166  
167   -/*!
168   - * \ingroup transforms
169   - * \brief Detects regions of low variance
170   - * \author Josh Klontz \cite jklontz
171   - */
172   -class VarianceChangeDetectorTransform : public UntrainableTransform
173   -{
174   - Q_OBJECT
175   - Q_PROPERTY(int bins READ get_bins WRITE set_bins RESET reset_bins STORED false)
176   - Q_PROPERTY(int radius READ get_radius WRITE set_radius RESET reset_radius STORED false)
177   - BR_PROPERTY(int, bins, 256)
178   - BR_PROPERTY(int, radius, 16)
179   -
180   - float stddev(const Mat &integral, int i, int j, int scale, int *buffer) const
181   - {
182   - const float count = scale*scale*radius*radius;
183   -
184   - float mean = 0;
185   - for (int k=0; k<bins; k++) {
186   - buffer[k] = integral.at<qint32>(i+scale,(j+scale)*bins+k)
187   - - integral.at<qint32>(i+scale, j *bins+k)
188   - - integral.at<qint32>(i ,(j+scale)*bins+k)
189   - + integral.at<qint32>(i , j *bins+k);
190   - mean += k*buffer[k];
191   - }
192   - mean /= count;
193   -
194   - float variance = 0;
195   - for (int k=0; k<bins; k++)
196   - variance += buffer[k] * (k-mean) * (k-mean);
197   -
198   - return sqrt(variance/count);
199   - }
200   -
201   - void project(const Template &src, Template &dst) const
202   - {
203   - const Mat &m = src.m();
204   - if (m.type() != CV_32SC1) qFatal("VarianceChangeDetector requires CV_32SC1 images from IntegralHist");
205   -
206   - int *buffer = new int[bins];
207   -
208   - float bestRatio = -std::numeric_limits<float>::max();
209   - QRectF bestRect;
210   -
211   - const int rows = m.rows;
212   - const int cols = m.cols/bins;
213   - const int maxSize = min(m.rows, m.cols/bins);
214   - int scale = 2;
215   - while (scale < maxSize) {
216   - const int step = std::max(1, scale/6);
217   - for (int i=0; i+scale < rows; i+=step) {
218   - for (int j=0; j+scale < cols; j+=step) {
219   - float internalStdDev = stddev(m, i, j, scale, buffer);
220   - float externalStdDev = std::numeric_limits<float>::max();
221   - externalStdDev = std::min(externalStdDev, ((i-2*scale >= 0) && (j-2*scale >= 0)) ? stddev(m, i-2*scale, j-2*scale, scale, buffer) : 0);
222   - externalStdDev = std::min(externalStdDev, (i-2*scale >= 0) ? stddev(m, i-2*scale, j , scale, buffer) : 0);
223   - externalStdDev = std::min(externalStdDev, ((i-2*scale >= 0) && (j+3*scale < cols)) ? stddev(m, i-2*scale, j+2*scale, scale, buffer) : 0);
224   - externalStdDev = std::min(externalStdDev, (j+3*scale < cols) ? stddev(m, i , j+2*scale, scale, buffer) : 0);
225   - externalStdDev = std::min(externalStdDev, ((i+3*scale < rows) && (j+3*scale < cols)) ? stddev(m, i+2*scale, j+2*scale, scale, buffer) : 0);
226   - externalStdDev = std::min(externalStdDev, (i+3*scale < rows) ? stddev(m, i+2*scale, j , scale, buffer) : 0);
227   - externalStdDev = std::min(externalStdDev, ((i+3*scale < rows) && (j-2*scale >= 0)) ? stddev(m, i+2*scale, j-2*scale, scale, buffer) : 0);
228   - externalStdDev = std::min(externalStdDev, (j-2*scale >= 0) ? stddev(m, i , j-2*scale, scale, buffer) : 0);
229   -
230   - float ratio;
231   - if (externalStdDev == 0) ratio = 0;
232   - else if (internalStdDev == 0) ratio = std::numeric_limits<float>::max() * (float(scale)/float(maxSize));
233   - else ratio = scale*scale * pow(externalStdDev,2) / pow(internalStdDev, 2);
234   -
235   - if (ratio > bestRatio) {
236   - bestRatio = ratio;
237   - bestRect = QRect(j*radius, i*radius, scale*radius, scale*radius);
238   - }
239   - }
240   - }
241   - scale = std::max(scale+1, int(scale*1.25));
242   - }
243   -
244   - delete[] buffer;
245   - dst.file.appendRect(bestRect);
246   - dst.file.setLabel(bestRatio);
247   - }
248   -};
249   -
250   -BR_REGISTER(Transform, VarianceChangeDetectorTransform)
251   -
252 167 } // namespace br
253 168  
254 169 #include "hist.moc"
... ...
sdk/plugins/integral.cpp
1 1 #include <opencv2/imgproc/imgproc.hpp>
  2 +#include <Eigen/Core>
2 3 #include <openbr_plugin.h>
3 4  
  5 +using namespace cv;
  6 +
4 7 namespace br
5 8 {
6 9  
... ... @@ -15,7 +18,7 @@ class IntegralTransform : public UntrainableTransform
15 18  
16 19 void project(const Template &src, Template &dst) const
17 20 {
18   - cv::integral(src, dst);
  21 + integral(src, dst);
19 22 }
20 23 };
21 24  
... ... @@ -23,6 +26,71 @@ BR_REGISTER(Transform, IntegralTransform)
23 26  
24 27 /*!
25 28 * \ingroup transforms
  29 + * \brief Sliding window object recognition from a multi-channel integral image.
  30 + * \author Josh Klontz \cite jklontz
  31 + */
  32 +class IntegralSampler : public UntrainableTransform
  33 +{
  34 + Q_OBJECT
  35 + Q_PROPERTY(int scales READ get_scales WRITE set_scales RESET reset_scales STORED false)
  36 + Q_PROPERTY(float scaleFactor READ get_scaleFactor WRITE set_scaleFactor RESET reset_scaleFactor STORED false)
  37 + Q_PROPERTY(float stepFactor READ get_stepFactor WRITE set_stepFactor RESET reset_stepFactor STORED false)
  38 + Q_PROPERTY(int minSize READ get_minSize WRITE set_minSize RESET reset_minSize STORED false)
  39 + BR_PROPERTY(int, scales, 1)
  40 + BR_PROPERTY(float, scaleFactor, 1.25)
  41 + BR_PROPERTY(float, stepFactor, 0.25)
  42 + BR_PROPERTY(int, minSize, 8)
  43 +
  44 + void project(const Template &src, Template &dst) const
  45 + {
  46 + typedef Eigen::Map< const Eigen::Matrix<qint32,Eigen::Dynamic,1> > InputDescriptor;
  47 + typedef Eigen::Map< Eigen::Matrix<qint32,Eigen::Dynamic,1> > OutputDescriptor;
  48 + const Mat &m = src.m();
  49 + if (m.depth() != CV_32S) qFatal("Expected CV_32S matrix depth.");
  50 + const int channels = m.channels();
  51 + const int rowStep = channels * m.cols;
  52 +
  53 + int descriptors = 0;
  54 + int currentSize = min(m.rows, m.cols)-1;
  55 + for (int scale=0; scale<scales; scale++) {
  56 + descriptors += ceil((m.rows-currentSize)*stepFactor/currentSize) *
  57 + ceil((m.cols-currentSize)*stepFactor/currentSize);
  58 + currentSize /= scaleFactor;
  59 + if (currentSize < minSize)
  60 + break;
  61 + }
  62 + Mat n(descriptors, channels, CV_32SC1);
  63 +
  64 + const qint32 *dataIn = (qint32*)m.data;
  65 + qint32 *dataOut = (qint32*)n.data;
  66 + currentSize = min(m.rows, m.cols)-1;
  67 + int index = 0;
  68 + for (int scale=0; scale<scales; scale++) {
  69 + const int currentStep = currentSize * stepFactor;
  70 + for (int i=currentSize; i<m.rows; i+=currentStep) {
  71 + for (int j=currentSize; j<m.cols; j+=currentStep) {
  72 + InputDescriptor a(dataIn+((i-currentSize)*rowStep+(j-currentSize)*channels), channels, 1);
  73 + InputDescriptor b(dataIn+((i-currentSize)*rowStep+ j *channels), channels, 1);
  74 + InputDescriptor c(dataIn+( i *rowStep+(j-currentSize)*channels), channels, 1);
  75 + InputDescriptor d(dataIn+( i *rowStep+ j *channels), channels, 1);
  76 + OutputDescriptor y(dataOut+(index*channels), channels, 1);
  77 + y = d-b-c+a;
  78 + index++;
  79 + }
  80 + }
  81 + currentSize /= scaleFactor;
  82 + if (currentSize < minSize)
  83 + break;
  84 + }
  85 +
  86 + dst.m() = n;
  87 + }
  88 +};
  89 +
  90 +BR_REGISTER(Transform, IntegralSampler)
  91 +
  92 +/*!
  93 + * \ingroup transforms
26 94 * \brief Computes magnitude and/or angle of image.
27 95 * \author Josh Klontz \cite jklontz
28 96 */
... ... @@ -41,10 +109,10 @@ private:
41 109 void project(const Template &src, Template &dst) const
42 110 {
43 111 if (src.m().type() != CV_8UC1) qFatal("Requires CV_8UC1 input.");
44   - cv::Mat dx, dy, magnitude, angle;
45   - cv::Sobel(src, dx, CV_32F, 1, 0);
46   - cv::Sobel(src, dy, CV_32F, 0, 1);
47   - cv::cartToPolar(dx, dy, magnitude, angle, true);
  112 + Mat dx, dy, magnitude, angle;
  113 + Sobel(src, dx, CV_32F, 1, 0);
  114 + Sobel(src, dy, CV_32F, 0, 1);
  115 + cartToPolar(dx, dy, magnitude, angle, true);
48 116 if ((channel == Magnitude) || (channel == MagnitudeAndAngle))
49 117 dst.append(magnitude);
50 118 if ((channel == Angle) || (channel == MagnitudeAndAngle))
... ...
sdk/plugins/nec3.cpp
... ... @@ -136,13 +136,13 @@ class NEC3Enroll : public UntrainableTransform
136 136 NeoFacePro::CFaceFeature::FreeSerializeData(data);
137 137 }
138 138  
139   - if (src.file.getBool("ForceEnrollment") && !dst.isEmpty()) break;
  139 + if (src.file.get<bool>("ForceEnrollment", false) && !dst.isEmpty()) break;
140 140 }
141   - dst.file.appendLandmarks(landmarks);
  141 + dst.file.appendPoints(landmarks);
142 142  
143 143 contexts.release(context);
144 144  
145   - if (!src.file.getBool("enrollAll") && dst.isEmpty()) dst += cv::Mat();
  145 + if (!src.file.get<bool>("enrollAll", false) && dst.isEmpty()) dst += cv::Mat();
146 146 }
147 147 };
148 148  
... ...
sdk/plugins/quality.cpp
... ... @@ -79,7 +79,7 @@ struct KDE
79 79 double mean, stddev;
80 80 QList<float> bins;
81 81  
82   - KDE() : min(0), max(1) {}
  82 + KDE() : min(0), max(1), mean(0), stddev(1) {}
83 83 KDE(const QList<float> &scores)
84 84 {
85 85 Common::MinMax(scores, &min, &max);
... ...
sdk/plugins/regions.cpp
... ... @@ -111,6 +111,26 @@ BR_REGISTER(Transform, CatTransform)
111 111  
112 112 /*!
113 113 * \ingroup transforms
  114 + * \brief Wraps OpenCV merge
  115 + * \author Josh Klontz \cite jklontz
  116 + */
  117 +class MergeTransform : public UntrainableMetaTransform
  118 +{
  119 + Q_OBJECT
  120 +
  121 + void project(const Template &src, Template &dst) const
  122 + {
  123 + std::vector<Mat> mv;
  124 + foreach (const Mat &m, src)
  125 + mv.push_back(m);
  126 + merge(mv, dst);
  127 + }
  128 +};
  129 +
  130 +BR_REGISTER(Transform, MergeTransform)
  131 +
  132 +/*!
  133 + * \ingroup transforms
114 134 * \brief Duplicates the template data.
115 135 * \author Josh Klontz \cite jklontz
116 136 */
... ...
sdk/plugins/youtube.cpp
  1 +#include <QProcess>
1 2 #include <openbr_plugin.h>
2 3  
3 4 #include "core/common.h"
... ... @@ -16,16 +17,6 @@ class YouTubeFacesDBTransform : public UntrainableMetaTransform
16 17 Q_PROPERTY(QString algorithm READ get_algorithm WRITE set_algorithm RESET reset_algorithm STORED false)
17 18 BR_PROPERTY(QString, algorithm, "")
18 19  
19   - QSharedPointer<Transform> transform;
20   - QSharedPointer<Distance> distance;
21   -
22   - void init()
23   - {
24   - if (algorithm.isEmpty()) return;
25   - transform = Transform::fromAlgorithm(algorithm);
26   - distance = Distance::fromAlgorithm(algorithm);
27   - }
28   -
29 20 void project(const TemplateList &src, TemplateList &dst) const
30 21 {
31 22 Transform::project(src.mid(1) /* First template is the header in 'splits.txt' */, dst);
... ... @@ -33,22 +24,19 @@ class YouTubeFacesDBTransform : public UntrainableMetaTransform
33 24  
34 25 void project(const Template &src, Template &dst) const
35 26 {
  27 + static QMutex mutex;
36 28 const QStringList words = src.file.name.split(", ");
37   - dst.file.name = words[0] + "_" + words[1] + "_" + words[4] + ".mtx";
38   -
39   - TemplateList queryTemplates = TemplateList::fromGallery(File(words[2]).resolved());
40   - sort(queryTemplates);
41   - queryTemplates >> *transform;
42   -
43   - TemplateList targetTemplates = TemplateList::fromGallery(File(words[3]).resolved());
44   - sort(targetTemplates);
45   - targetTemplates >> *transform;
46   -
47   - QScopedPointer<MatrixOutput> memoryOutput(MatrixOutput::make(targetTemplates.files(), queryTemplates.files()));
48   - distance->compare(targetTemplates, queryTemplates, memoryOutput.data());
49   -
50   - dst.clear();
51   - dst.m() = memoryOutput.data()->data;
  29 + const QString matrix = "YTF-"+algorithm+"/"+words[0] + "_" + words[1] + "_" + words[4] + ".mtx";
  30 + const QStringList arguments = QStringList() << "-algorithm" << algorithm
  31 + << "-parallelism" << QString::number(Globals->parallelism)
  32 + << "-path" << Globals->path
  33 + << "-compare" << File(words[2]).resolved() << File(words[3]).resolved() << matrix;
  34 + mutex.lock();
  35 + int result = QProcess::execute(QCoreApplication::applicationFilePath(), arguments);
  36 + mutex.unlock();
  37 + if (result != 0)
  38 + qWarning("Process for computing %s returned %d.", qPrintable(matrix), result);
  39 + dst = Template();
52 40 }
53 41  
54 42 static void sort(TemplateList &templates)
... ...