Commit 94c3bcaa0cff287df4b227ad4997a2bc96c6efdc
1 parent: 76290596
Commit message: some algorithm development
Showing 5 changed files with 60 additions and 17 deletions
openbr/core/common.h
| @@ -119,14 +119,14 @@ T Max(const QList<T> &vals) | @@ -119,14 +119,14 @@ T Max(const QList<T> &vals) | ||
| 119 | * \brief Returns the mean and standard deviation of a vector of values. | 119 | * \brief Returns the mean and standard deviation of a vector of values. |
| 120 | */ | 120 | */ |
| 121 | template <template<class> class V, typename T> | 121 | template <template<class> class V, typename T> |
| 122 | -void Mean(const V<T> &vals, double *mean) | 122 | +double Mean(const V<T> &vals) |
| 123 | { | 123 | { |
| 124 | const int size = vals.size(); | 124 | const int size = vals.size(); |
| 125 | 125 | ||
| 126 | // Compute Mean | 126 | // Compute Mean |
| 127 | double sum = 0; | 127 | double sum = 0; |
| 128 | foreach (int val, vals) sum += val; | 128 | foreach (int val, vals) sum += val; |
| 129 | - *mean = (size == 0) ? 0 : sum / size; | 129 | + return (size == 0) ? 0 : sum / size; |
| 130 | } | 130 | } |
| 131 | 131 | ||
| 132 | /*! | 132 | /*! |
| @@ -137,7 +137,7 @@ void MeanStdDev(const V<T> &vals, double *mean, double *stddev) | @@ -137,7 +137,7 @@ void MeanStdDev(const V<T> &vals, double *mean, double *stddev) | ||
| 137 | { | 137 | { |
| 138 | const int size = vals.size(); | 138 | const int size = vals.size(); |
| 139 | 139 | ||
| 140 | - Mean(vals, mean); | 140 | + *mean = Mean(vals); |
| 141 | 141 | ||
| 142 | // Compute Standard Deviation | 142 | // Compute Standard Deviation |
| 143 | double variance = 0; | 143 | double variance = 0; |
openbr/plugins/filter.cpp
| @@ -235,7 +235,7 @@ class PowTransform : public UntrainableTransform | @@ -235,7 +235,7 @@ class PowTransform : public UntrainableTransform | ||
| 235 | 235 | ||
| 236 | void project(const Template &src, Template &dst) const | 236 | void project(const Template &src, Template &dst) const |
| 237 | { | 237 | { |
| 238 | - pow(src, power, dst); | 238 | + pow(preserveSign ? abs(src) : src.m(), power, dst); |
| 239 | if (preserveSign) subtract(Scalar::all(0), dst, dst, src.m() < 0); | 239 | if (preserveSign) subtract(Scalar::all(0), dst, dst, src.m() < 0); |
| 240 | } | 240 | } |
| 241 | }; | 241 | }; |
openbr/plugins/integral.cpp
| @@ -38,15 +38,19 @@ class IntegralSamplerTransform : public UntrainableTransform | @@ -38,15 +38,19 @@ class IntegralSamplerTransform : public UntrainableTransform | ||
| 38 | Q_PROPERTY(float scaleFactor READ get_scaleFactor WRITE set_scaleFactor RESET reset_scaleFactor STORED false) | 38 | Q_PROPERTY(float scaleFactor READ get_scaleFactor WRITE set_scaleFactor RESET reset_scaleFactor STORED false) |
| 39 | Q_PROPERTY(float stepFactor READ get_stepFactor WRITE set_stepFactor RESET reset_stepFactor STORED false) | 39 | Q_PROPERTY(float stepFactor READ get_stepFactor WRITE set_stepFactor RESET reset_stepFactor STORED false) |
| 40 | Q_PROPERTY(int minSize READ get_minSize WRITE set_minSize RESET reset_minSize STORED false) | 40 | Q_PROPERTY(int minSize READ get_minSize WRITE set_minSize RESET reset_minSize STORED false) |
| 41 | + Q_PROPERTY(bool secondOrder READ get_secondOrder WRITE set_secondOrder RESET reset_secondOrder STORED false) | ||
| 41 | BR_PROPERTY(int, scales, 6) | 42 | BR_PROPERTY(int, scales, 6) |
| 42 | BR_PROPERTY(float, scaleFactor, 2) | 43 | BR_PROPERTY(float, scaleFactor, 2) |
| 43 | BR_PROPERTY(float, stepFactor, 0.75) | 44 | BR_PROPERTY(float, stepFactor, 0.75) |
| 44 | - BR_PROPERTY(int, minSize, 6) | 45 | + BR_PROPERTY(int, minSize, 8) |
| 46 | + BR_PROPERTY(bool, secondOrder, false) | ||
| 45 | 47 | ||
| 46 | void project(const Template &src, Template &dst) const | 48 | void project(const Template &src, Template &dst) const |
| 47 | { | 49 | { |
| 48 | typedef Eigen::Map< const Eigen::Matrix<qint32,Eigen::Dynamic,1> > InputDescriptor; | 50 | typedef Eigen::Map< const Eigen::Matrix<qint32,Eigen::Dynamic,1> > InputDescriptor; |
| 51 | + typedef Eigen::Map< const Eigen::Matrix<float,Eigen::Dynamic,1> > SecondOrderInputDescriptor; | ||
| 49 | typedef Eigen::Map< Eigen::Matrix<float,Eigen::Dynamic,1> > OutputDescriptor; | 52 | typedef Eigen::Map< Eigen::Matrix<float,Eigen::Dynamic,1> > OutputDescriptor; |
| 53 | + | ||
| 50 | const Mat &m = src.m(); | 54 | const Mat &m = src.m(); |
| 51 | if (m.depth() != CV_32S) qFatal("Expected CV_32S matrix depth."); | 55 | if (m.depth() != CV_32S) qFatal("Expected CV_32S matrix depth."); |
| 52 | const int channels = m.channels(); | 56 | const int channels = m.channels(); |
| @@ -56,8 +60,10 @@ class IntegralSamplerTransform : public UntrainableTransform | @@ -56,8 +60,10 @@ class IntegralSamplerTransform : public UntrainableTransform | ||
| 56 | float idealSize = min(m.rows, m.cols)-1; | 60 | float idealSize = min(m.rows, m.cols)-1; |
| 57 | for (int scale=0; scale<scales; scale++) { | 61 | for (int scale=0; scale<scales; scale++) { |
| 58 | const int currentSize(idealSize); | 62 | const int currentSize(idealSize); |
| 59 | - descriptors += (1+(m.rows-currentSize-1)/int(idealSize*stepFactor)) * | ||
| 60 | - (1+(m.cols-currentSize-1)/int(idealSize*stepFactor)); | 63 | + const int numDown = 1+(m.rows-currentSize-1)/int(idealSize*stepFactor); |
| 64 | + const int numAcross = 1+(m.cols-currentSize-1)/int(idealSize*stepFactor); | ||
| 65 | + descriptors += numDown*numAcross; | ||
| 66 | + if (secondOrder) descriptors += numDown*(numAcross-1) + (numDown-1)*numAcross; | ||
| 61 | idealSize /= scaleFactor; | 67 | idealSize /= scaleFactor; |
| 62 | if (idealSize < minSize) break; | 68 | if (idealSize < minSize) break; |
| 63 | } | 69 | } |
| @@ -81,6 +87,26 @@ class IntegralSamplerTransform : public UntrainableTransform | @@ -81,6 +87,26 @@ class IntegralSamplerTransform : public UntrainableTransform | ||
| 81 | index++; | 87 | index++; |
| 82 | } | 88 | } |
| 83 | } | 89 | } |
| 90 | + if (secondOrder) { | ||
| 91 | + const int numDown = 1+(m.rows-currentSize-1)/currentStep; | ||
| 92 | + const int numAcross = 1+(m.cols-currentSize-1)/currentStep; | ||
| 93 | + const float *dataIn = n.ptr<float>(index - numDown*numAcross); | ||
| 94 | + for (int i=0; i<numDown; i++) { | ||
| 95 | + for (int j=0; j<numAcross; j++) { | ||
| 96 | + SecondOrderInputDescriptor a(dataIn + (i*numAcross+j)*channels, channels, 1); | ||
| 97 | + if (j < numAcross-1) { | ||
| 98 | + OutputDescriptor y(dataOut+(index*channels), channels, 1); | ||
| 99 | + y = a - SecondOrderInputDescriptor(dataIn + (i*numAcross+j+1)*channels, channels, 1); | ||
| 100 | + index++; | ||
| 101 | + } | ||
| 102 | + if (i < numDown-1) { | ||
| 103 | + OutputDescriptor y(dataOut+(index*channels), channels, 1); | ||
| 104 | + y = a - SecondOrderInputDescriptor(dataIn + ((i+1)*numAcross+j)*channels, channels, 1); | ||
| 105 | + index++; | ||
| 106 | + } | ||
| 107 | + } | ||
| 108 | + } | ||
| 109 | + } | ||
| 84 | idealSize /= scaleFactor; | 110 | idealSize /= scaleFactor; |
| 85 | if (idealSize < minSize) break; | 111 | if (idealSize < minSize) break; |
| 86 | } | 112 | } |
openbr/plugins/quality.cpp
| @@ -32,8 +32,7 @@ class ImpostorUniquenessMeasureTransform : public Transform | @@ -32,8 +32,7 @@ class ImpostorUniquenessMeasureTransform : public Transform | ||
| 32 | QList<float> scores = distance->compare(subset, probe); | 32 | QList<float> scores = distance->compare(subset, probe); |
| 33 | float min, max; | 33 | float min, max; |
| 34 | Common::MinMax(scores, &min, &max); | 34 | Common::MinMax(scores, &min, &max); |
| 35 | - double mean; | ||
| 36 | - Common::Mean(scores, &mean); | 35 | + double mean = Common::Mean(scores); |
| 37 | return (max-mean)/(max-min); | 36 | return (max-mean)/(max-min); |
| 38 | } | 37 | } |
| 39 | 38 |
openbr/plugins/quantize.cpp
| @@ -169,6 +169,19 @@ public: | @@ -169,6 +169,19 @@ public: | ||
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | private: | 171 | private: |
| 172 | + static void getScores(const QList<int> &indicies, const QList<int> &labels, const Mat &lut, QVector<float> &genuineScores, QVector<float> &impostorScores) | ||
| 173 | + { | ||
| 174 | + genuineScores.clear(); impostorScores.clear(); | ||
| 175 | + genuineScores.reserve(indicies.size()); | ||
| 176 | + impostorScores.reserve(indicies.size()*indicies.size()/2); | ||
| 177 | + for (int i=0; i<indicies.size(); i++) | ||
| 178 | + for (int j=i+1; j<indicies.size(); j++) { | ||
| 179 | + const float score = lut.at<float>(0, indicies[i]*256+indicies[j]); | ||
| 180 | + if (labels[i] == labels[j]) genuineScores.append(score); | ||
| 181 | + else impostorScores.append(score); | ||
| 182 | + } | ||
| 183 | + } | ||
| 184 | + | ||
| 172 | void _train(const Mat &data, const QList<int> &labels, Mat *lut, Mat *center) | 185 | void _train(const Mat &data, const QList<int> &labels, Mat *lut, Mat *center) |
| 173 | { | 186 | { |
| 174 | Mat clusterLabels; | 187 | Mat clusterLabels; |
| @@ -181,14 +194,17 @@ private: | @@ -181,14 +194,17 @@ private: | ||
| 181 | if (!bayesian) return; | 194 | if (!bayesian) return; |
| 182 | 195 | ||
| 183 | QList<int> indicies = OpenCVUtils::matrixToVector<int>(clusterLabels); | 196 | QList<int> indicies = OpenCVUtils::matrixToVector<int>(clusterLabels); |
| 184 | - QVector<float> genuineScores; genuineScores.reserve(data.rows); | ||
| 185 | - QVector<float> impostorScores; impostorScores.reserve(data.rows*data.rows/2); | ||
| 186 | - for (int i=0; i<indicies.size(); i++) | ||
| 187 | - for (int j=i+1; j<indicies.size(); j++) { | ||
| 188 | - const float score = lut->at<float>(0, indicies[i]*256+indicies[j]); | ||
| 189 | - if (labels[i] == labels[j]) genuineScores.append(score); | ||
| 190 | - else impostorScores.append(score); | ||
| 191 | - } | 197 | + QVector<float> genuineScores, impostorScores; |
| 198 | + | ||
| 199 | + // RBF Kernel | ||
| 200 | +// getScores(indicies, labels, *lut, genuineScores, impostorScores); | ||
| 201 | +// float sigma = 1.0 / Common::Mean(impostorScores); | ||
| 202 | +// for (int j=0; j<256; j++) | ||
| 203 | +// for (int k=0; k<256; k++) | ||
| 204 | +// lut->at<float>(0,j*256+k) = exp(-lut->at<float>(0,j*256+k)/(2*pow(sigma, 2.f))); | ||
| 205 | + | ||
| 206 | + // Bayesian PDF | ||
| 207 | + getScores(indicies, labels, *lut, genuineScores, impostorScores); | ||
| 192 | genuineScores = Common::Downsample(genuineScores, 256); | 208 | genuineScores = Common::Downsample(genuineScores, 256); |
| 193 | impostorScores = Common::Downsample(impostorScores, 256); | 209 | impostorScores = Common::Downsample(impostorScores, 256); |
| 194 | 210 | ||
| @@ -199,6 +215,8 @@ private: | @@ -199,6 +215,8 @@ private: | ||
| 199 | for (int k=0; k<256; k++) | 215 | for (int k=0; k<256; k++) |
| 200 | lut->at<float>(0,j*256+k) = log(Common::KernelDensityEstimation(genuineScores, lut->at<float>(0,j*256+k), hGenuine) / | 216 | lut->at<float>(0,j*256+k) = log(Common::KernelDensityEstimation(genuineScores, lut->at<float>(0,j*256+k), hGenuine) / |
| 201 | Common::KernelDensityEstimation(impostorScores, lut->at<float>(0,j*256+k), hImpostor)); | 217 | Common::KernelDensityEstimation(impostorScores, lut->at<float>(0,j*256+k), hImpostor)); |
| 218 | +// lut->at<float>(0,j*256+k) = std::max(0.0, log(Common::KernelDensityEstimation(genuineScores, lut->at<float>(0,j*256+k), hGenuine) / | ||
| 219 | +// Common::KernelDensityEstimation(impostorScores, lut->at<float>(0,j*256+k), hImpostor))); | ||
| 202 | } | 220 | } |
| 203 | 221 | ||
| 204 | void train(const TemplateList &src) | 222 | void train(const TemplateList &src) |