Commit b5c16f94c53f1249ca7d22b36a081fff4e86277e

Authored by Jordan Cheney
1 parent dff3e093

CaffeTransform is TimeVarying on GPU and not otherwise

openbr/plugins/classification/caffe.cpp
@@ -16,11 +16,46 @@ using namespace cv;
16 namespace br 16 namespace br
17 { 17 {
18 18
  19 +// Net doesn't expose a default constructor which is expected by the default resource allocator.
  20 +// To get around that we make this custom stub class which has a default constructor that passes
  21 +// empty values to the Net constructor.
  22 +class CaffeNet : public Net<float>
  23 +{
  24 +public:
  25 + CaffeNet() : Net<float>("", caffe::TEST) {}
  26 + CaffeNet(const QString &model, caffe::Phase phase) : Net<float>(model.toStdString(), phase) {}
  27 +};
  28 +
  29 +class CaffeResourceMaker : public ResourceMaker<CaffeNet>
  30 +{
  31 + QString model;
  32 + QString weights;
  33 + int gpuDevice;
  34 +
  35 +public:
  36 + CaffeResourceMaker(const QString &model, const QString &weights, int gpuDevice) : model(model), weights(weights), gpuDevice(gpuDevice) {}
  37 +
  38 +private:
  39 + CaffeNet *make() const
  40 + {
  41 + if (gpuDevice >= 0) {
  42 + Caffe::SetDevice(gpuDevice);
  43 + Caffe::set_mode(Caffe::GPU);
  44 + } else {
  45 + Caffe::set_mode(Caffe::CPU);
  46 + }
  47 +
  48 + CaffeNet *net = new CaffeNet(model, caffe::TEST);
  49 + net->CopyTrainedLayersFrom(weights.toStdString());
  50 + return net;
  51 + }
  52 +};
  53 +
19 /*! 54 /*!
20 * \brief A transform that wraps the Caffe deep learning library. This transform expects the input to a given Caffe model to be a MemoryDataLayer. 55 * \brief A transform that wraps the Caffe deep learning library. This transform expects the input to a given Caffe model to be a MemoryDataLayer.
21 * The output of the Caffe network is treated as a feature vector and is stored in dst. Batch processing is possible. For a given batch size set in 56 * The output of the Caffe network is treated as a feature vector and is stored in dst. Batch processing is possible. For a given batch size set in
22 * the memory data layer, src is expected to have an equal number of mats. Dst will always have the same size (number of mats) as src and the ordering 57 * the memory data layer, src is expected to have an equal number of mats. Dst will always have the same size (number of mats) as src and the ordering
23 - * will be preserved, so dst[1] is the output of src[1] after it passes through the 58 + * will be preserved, so dst[1] is the output of src[1] after it passes through the neural net.
24 * \author Jordan Cheney \cite jcheney 59 * \author Jordan Cheney \cite jcheney
25 * \br_property QString model path to prototxt model file 60 * \br_property QString model path to prototxt model file
26 * \br_property QString weights path to caffemodel file 61 * \br_property QString weights path to caffemodel file
@@ -39,23 +74,22 @@ class CaffeFVTransform : public UntrainableTransform
39 BR_PROPERTY(QString, weights, "") 74 BR_PROPERTY(QString, weights, "")
40 BR_PROPERTY(int, gpuDevice, -1) 75 BR_PROPERTY(int, gpuDevice, -1)
41 76
42 - QSharedPointer<Net<float> > net; 77 + Resource<CaffeNet> caffeResource;
43 78
44 void init() 79 void init()
45 { 80 {
46 - if (gpuDevice >= 0) {  
47 - Caffe::SetDevice(gpuDevice);  
48 - Caffe::set_mode(Caffe::GPU);  
49 - } else {  
50 - Caffe::set_mode(Caffe::CPU);  
51 - } 81 + caffeResource.setResourceMaker(new CaffeResourceMaker(model, weights, gpuDevice));
  82 + }
52 83
53 - net.reset(new Net<float>(model.toStdString(), caffe::TEST));  
54 - net->CopyTrainedLayersFrom(weights.toStdString()); 84 + bool timeVarying() const
  85 + {
  86 + return gpuDevice < 0 ? false : true;
55 } 87 }
56 88
57 void project(const Template &src, Template &dst) const 89 void project(const Template &src, Template &dst) const
58 { 90 {
  91 + CaffeNet *net = caffeResource.acquire();
  92 +
59 MemoryDataLayer<float> *data_layer = static_cast<MemoryDataLayer<float> *>(net->layers()[0].get()); 93 MemoryDataLayer<float> *data_layer = static_cast<MemoryDataLayer<float> *>(net->layers()[0].get());
60 94
61 if (src.size() != data_layer->batch_size()) 95 if (src.size() != data_layer->batch_size())
@@ -70,6 +104,8 @@ class CaffeFVTransform : public UntrainableTransform
70 int dim_features = output->count() / data_layer->batch_size(); 104 int dim_features = output->count() / data_layer->batch_size();
71 for (int n = 0; n < data_layer->batch_size(); n++) 105 for (int n = 0; n < data_layer->batch_size(); n++)
72 dst += Mat(1, dim_features, CV_32FC1, output->mutable_cpu_data() + output->offset(n)); 106 dst += Mat(1, dim_features, CV_32FC1, output->mutable_cpu_data() + output->offset(n));
  107 +
  108 + caffeResource.release(net);
73 } 109 }
74 }; 110 };
75 111