Merge pull request #422 from sguada/threshold_layer
Threshold layer to binarize features
Added GPU code and tested
sguada committed May 27, 2014
2 parents ad5c15e + 5a4c9bc commit 3e0c42d
Showing 5 changed files with 261 additions and 1 deletion.
31 changes: 31 additions & 0 deletions include/caffe/neuron_layers.hpp
@@ -202,6 +202,37 @@ class TanHLayer : public NeuronLayer<Dtype> {
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
};

/* ThresholdLayer
  Outputs 1 if the input value is strictly above the threshold, 0 otherwise.
  The default threshold is 0, so positive values become 1 and values that are
  negative or 0 become 0.
     y = 1 if x > threshold
     y = 0 if x <= threshold
  y' is not differentiable, so Backward is not implemented.
*/
template <typename Dtype>
class ThresholdLayer : public NeuronLayer<Dtype> {
public:
explicit ThresholdLayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);

protected:
virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
NOT_IMPLEMENTED;
}

Dtype threshold_;
};

} // namespace caffe

#endif // CAFFE_NEURON_LAYERS_HPP_
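
For illustration only (not part of this commit): a minimal standalone sketch of the element-wise rule the layer computes, using the default threshold of 0; the sample values and expected outputs below are made up. An input exactly equal to the threshold maps to 0, since the comparison is strict.

// Illustrative sketch, not from the commit: the element-wise threshold rule
// with the default threshold of 0; sample values are made up.
#include <cassert>

int main() {
  const float threshold = 0.0f;  // matches [default = 0] in ThresholdParameter
  const float in[4]       = {-1.5f, 0.0f, 0.2f, 3.0f};
  const float expected[4] = { 0.0f, 0.0f, 1.0f, 1.0f};  // x == threshold -> 0
  for (int i = 0; i < 4; ++i) {
    const float out = (in[i] > threshold) ? 1.0f : 0.0f;  // y = 1 if x > t, else 0
    assert(out == expected[i]);
  }
  return 0;
}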
32 changes: 32 additions & 0 deletions src/caffe/layers/threshold_layer.cpp
@@ -0,0 +1,32 @@
// Copyright 2014 BVLC and contributors.

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"


namespace caffe {

template <typename Dtype>
void ThresholdLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
NeuronLayer<Dtype>::SetUp(bottom, top);
threshold_ = this->layer_param_.threshold_param().threshold();
}

template <typename Dtype>
Dtype ThresholdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0);
}
return Dtype(0);
}

INSTANTIATE_CLASS(ThresholdLayer);

} // namespace caffe
39 changes: 39 additions & 0 deletions src/caffe/layers/threshold_layer.cu
@@ -0,0 +1,39 @@
// Copyright 2014 BVLC and contributors.

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

using std::max;

namespace caffe {

template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > threshold ? 1 : 0;
}
}

template <typename Dtype>
Dtype ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, threshold_, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;

return Dtype(0);
}


INSTANTIATE_CLASS(ThresholdLayer);


} // namespace caffe
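
The kernel body relies on Caffe's CUDA_KERNEL_LOOP macro, which generates a grid-stride loop over the n elements. As a rough, self-contained sketch (not part of the commit, with the loop written out by hand rather than via the macro so it compiles without Caffe's headers), the same thresholding can be run on the GPU like this; the sample values and launch configuration are arbitrary.

// Illustrative sketch, not from the commit: the thresholding kernel with the
// grid-stride loop that CUDA_KERNEL_LOOP is understood to generate.
#include <cassert>
#include <cuda_runtime.h>

__global__ void ThresholdForwardSketch(const int n, const float threshold,
    const float* in, float* out) {
  // Each thread starts at its global index and strides by the grid size,
  // so any n is covered regardless of the launch configuration.
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n;
       index += blockDim.x * gridDim.x) {
    out[index] = in[index] > threshold ? 1.0f : 0.0f;
  }
}

int main() {
  const int n = 4;
  const float h_in[n] = {-1.5f, 0.0f, 0.2f, 3.0f};
  float h_out[n] = {0.0f, 0.0f, 0.0f, 0.0f};
  float* d_in = NULL;
  float* d_out = NULL;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
  ThresholdForwardSketch<<<1, 128>>>(n, 0.0f, d_in, d_out);  // default threshold
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  assert(h_out[0] == 0.0f && h_out[1] == 0.0f && h_out[2] == 1.0f && h_out[3] == 1.0f);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}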
10 changes: 9 additions & 1 deletion src/caffe/proto/caffe.proto
@@ -127,7 +127,7 @@ message LayerParameter {
// line above the enum. Update the next available ID when you add a new
// LayerType.
//
// LayerType next available ID: 31 (last added: ARGMAX)
// LayerType next available ID: 32 (last added: THRESHOLD)
enum LayerType {
// "NONE" layer type is 0th enum element so that we don't cause confusion
// by defaulting to an existent LayerType (instead, should usually error if
@@ -163,6 +163,7 @@ message LayerParameter {
SPLIT = 22;
TANH = 23;
WINDOW_DATA = 24;
THRESHOLD = 31;
}
optional LayerType type = 5; // the layer type from the enum above

@@ -175,6 +176,7 @@ message LayerParameter {
repeated float weight_decay = 8;

// Parameters for particular layer types.
// Parameters next available ID: 26 (last added: ThresholdParameter)
optional ArgMaxParameter argmax_param = 23;
optional ConcatParameter concat_param = 9;
optional ConvolutionParameter convolution_param = 10;
@@ -191,6 +193,7 @@ optional PoolingParameter pooling_param = 19;
optional PoolingParameter pooling_param = 19;
optional PowerParameter power_param = 21;
optional WindowDataParameter window_data_param = 20;
optional ThresholdParameter threshold_param = 25;

// DEPRECATED: The layer parameters specified as a V0LayerParameter.
// This should never be used by any code except to upgrade to the new
@@ -261,6 +264,11 @@ message EltwiseParameter {
repeated float coeff = 2; // blob-wise coefficient for SUM operation
}

// Message that stores parameters used by ThresholdLayer
message ThresholdParameter {
optional float threshold = 1 [default = 0]; // output is 1 for inputs strictly greater than this value, 0 otherwise
}

// Message that stores parameters used by HDF5DataLayer
message HDF5DataParameter {
// Specify the data source.
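
As a usage sketch (not part of this commit): with the new THRESHOLD enum value and the ThresholdParameter message above, a net definition of this era could declare the layer in prototxt roughly as follows; the layer name and blob names here are invented.

layers {
  name: "binarize"        # hypothetical layer name
  type: THRESHOLD
  bottom: "fc7"           # hypothetical input blob
  top: "fc7_binarized"    # hypothetical output blob
  threshold_param {
    threshold: 0.5        # inputs > 0.5 become 1, all others 0
  }
}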
150 changes: 150 additions & 0 deletions src/caffe/test/test_threshold_layer.cpp
@@ -0,0 +1,150 @@
// Copyright 2014 BVLC and contributors.

#include <vector>

#include "cuda_runtime.h"
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/test/test_gradient_check_util.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;

template <typename Dtype>
class ThresholdLayerTest : public ::testing::Test {
protected:
ThresholdLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
blob_top_(new Blob<Dtype>()) {
Caffe::set_random_seed(1701);
// fill the values
FillerParameter filler_param;
GaussianFiller<Dtype> filler(filler_param);
filler.Fill(this->blob_bottom_);
blob_bottom_vec_.push_back(blob_bottom_);
blob_top_vec_.push_back(blob_top_);
}
virtual ~ThresholdLayerTest() { delete blob_bottom_; delete blob_top_; }
Blob<Dtype>* const blob_bottom_;
Blob<Dtype>* const blob_top_;
vector<Blob<Dtype>*> blob_bottom_vec_;
vector<Blob<Dtype>*> blob_top_vec_;
};

typedef ::testing::Types<float, double> Dtypes;
TYPED_TEST_CASE(ThresholdLayerTest, Dtypes);


TYPED_TEST(ThresholdLayerTest, TestSetup) {
LayerParameter layer_param;
ThresholdLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_->height());
EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_->width());
}

TYPED_TEST(ThresholdLayerTest, TestCPU) {
LayerParameter layer_param;
Caffe::set_mode(Caffe::CPU);
ThresholdLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
const TypeParam threshold_ = layer_param.threshold_param().threshold();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_GE(top_data[i], 0.);
EXPECT_LE(top_data[i], 1.);
if (top_data[i] == 0) {
EXPECT_LE(bottom_data[i], threshold_);
}
if (top_data[i] == 1) {
EXPECT_GT(bottom_data[i], threshold_);
}
}
}

TYPED_TEST(ThresholdLayerTest, TestCPU2) {
LayerParameter layer_param;
Caffe::set_mode(Caffe::CPU);
ThresholdParameter* threshold_param =
layer_param.mutable_threshold_param();
threshold_param->set_threshold(0.5);
ThresholdLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
const TypeParam threshold_ = layer_param.threshold_param().threshold();
EXPECT_FLOAT_EQ(threshold_, 0.5);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_GE(top_data[i], 0.);
EXPECT_LE(top_data[i], 1.);
if (top_data[i] == 0) {
EXPECT_LE(bottom_data[i], threshold_);
}
if (top_data[i] == 1) {
EXPECT_GT(bottom_data[i], threshold_);
}
}
}

TYPED_TEST(ThresholdLayerTest, TestGPU) {
LayerParameter layer_param;
Caffe::set_mode(Caffe::GPU);
ThresholdLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
const TypeParam threshold_ = layer_param.threshold_param().threshold();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_GE(top_data[i], 0.);
EXPECT_LE(top_data[i], 1.);
if (top_data[i] == 0) {
EXPECT_LE(bottom_data[i], threshold_);
}
if (top_data[i] == 1) {
EXPECT_GT(bottom_data[i], threshold_);
}
}
}

TYPED_TEST(ThresholdLayerTest, TestGPU2) {
LayerParameter layer_param;
Caffe::set_mode(Caffe::GPU);
ThresholdParameter* threshold_param =
layer_param.mutable_threshold_param();
threshold_param->set_threshold(0.5);
ThresholdLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
const TypeParam threshold_ = layer_param.threshold_param().threshold();
EXPECT_FLOAT_EQ(threshold_, 0.5);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_GE(top_data[i], 0.);
EXPECT_LE(top_data[i], 1.);
if (top_data[i] == 0) {
EXPECT_LE(bottom_data[i], threshold_);
}
if (top_data[i] == 1) {
EXPECT_GT(bottom_data[i], threshold_);
}
}
}

} // namespace caffe
