
Commit

EmbedBackward with no loops -- use caffe_gpu_atomic_add instead
jeffdonahue committed Aug 8, 2015
1 parent 86aad5c commit 69b0e8c
Showing 1 changed file with 15 additions and 10 deletions.
25 changes: 15 additions & 10 deletions src/caffe/layers/embed_layer.cu
@@ -5,6 +5,7 @@
 #include "caffe/common_layers.hpp"
 #include "caffe/filler.hpp"
 #include "caffe/layer.hpp"
+#include "caffe/util/gpu_util.cuh"
 #include "caffe/util/math_functions.hpp"
 
 namespace caffe {
@@ -22,18 +23,21 @@ __global__ void EmbedForward(const int nthreads, const Dtype* bottom_data,
   }
 }
 
+template <typename Dtype>
+__global__ void EmbedBackward(const int nthreads, const Dtype* bottom_data,
+    const Dtype* top_diff, const int M, const int N, const int K,
+    Dtype* weight_diff);
+
 template <typename Dtype>
 __global__ void EmbedBackward(const int nthreads, const Dtype* bottom_data,
     const Dtype* top_diff, const int M, const int N, const int K,
     Dtype* weight_diff) {
-  CUDA_KERNEL_LOOP(weight_index, nthreads) {
-    const int index = weight_index / N;
-    const int output_index = weight_index % N;
-    for (int n = 0; n < M; ++n) {
-      if (static_cast<int>(bottom_data[n]) == index) {
-        weight_diff[weight_index] += top_diff[n * N + output_index];
-      }
-    }
+  CUDA_KERNEL_LOOP(top_index, nthreads) {
+    const int n = top_index / N;
+    const int d = top_index % N;
+    const int index = static_cast<int>(bottom_data[n]);
+    const int weight_index = index * N + d;
+    caffe_gpu_atomic_add(top_diff[top_index], weight_diff + weight_index);
   }
 }

@@ -59,13 +63,14 @@ void EmbedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
   CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input.";
   if (this->param_propagate_down_[0]) {
+    const int top_count = top[0]->count();
     const int count = this->blobs_[0]->count();
     const Dtype* top_diff = top[0]->gpu_diff();
     const Dtype* bottom_data = bottom[0]->gpu_data();
     Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
     EmbedBackward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
-        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
-        count, bottom_data, top_diff, M_, N_, K_, weight_diff);
+        <<<CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS>>>(
+        top_count, bottom_data, top_diff, M_, N_, K_, weight_diff);
   }
   if (bias_term_ && this->param_propagate_down_[1]) {
     const Dtype* top_diff = top[0]->gpu_diff();

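For readers skimming the diff: the old EmbedBackward assigned one thread per weight element and looped over all M inputs inside each thread, accumulating only the rows that matched. The new kernel assigns one thread per element of the top gradient instead: it recovers the batch index n = top_index / N and the embedding dimension d = top_index % N, reads the selected row from bottom_data[n], and accumulates top_diff[top_index] into weight_diff[index * N + d] with caffe_gpu_atomic_add, so threads whose inputs pick the same row cannot race. Below is a minimal, self-contained sketch of the same scatter-add pattern; the kernel name, the toy sizes, and the use of plain float atomicAdd (rather than Caffe's caffe_gpu_atomic_add helper) are illustrative assumptions, not code from this commit.

// Minimal sketch (hypothetical, not Caffe code): scatter-add embedding backward.
// Each thread owns one element of top_diff and atomically adds it into the
// gradient row selected by the corresponding input index.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void embed_backward_sketch(const int nthreads,
    const float* bottom_data, const float* top_diff, const int N,
    float* weight_diff) {
  for (int top_index = blockIdx.x * blockDim.x + threadIdx.x;
       top_index < nthreads; top_index += blockDim.x * gridDim.x) {
    const int n = top_index / N;                          // which input in the batch
    const int d = top_index % N;                          // which embedding dimension
    const int index = static_cast<int>(bottom_data[n]);   // selected weight row
    atomicAdd(weight_diff + index * N + d, top_diff[top_index]);
  }
}

int main() {
  const int M = 4, N = 3, K = 5;  // batch size, embedding dim, vocabulary size
  std::vector<float> bottom = {2.0f, 0.0f, 2.0f, 4.0f};  // input indices, stored as float
  std::vector<float> top_diff(M * N, 1.0f);      // toy upstream gradient of ones
  std::vector<float> weight_diff(K * N, 0.0f);   // gradient w.r.t. the embedding table

  float *d_bottom, *d_top, *d_weight;
  cudaMalloc(&d_bottom, bottom.size() * sizeof(float));
  cudaMalloc(&d_top, top_diff.size() * sizeof(float));
  cudaMalloc(&d_weight, weight_diff.size() * sizeof(float));
  cudaMemcpy(d_bottom, bottom.data(), bottom.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_top, top_diff.data(), top_diff.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_weight, weight_diff.data(), weight_diff.size() * sizeof(float), cudaMemcpyHostToDevice);

  const int threads = 256;
  const int blocks = (M * N + threads - 1) / threads;
  embed_backward_sketch<<<blocks, threads>>>(M * N, d_bottom, d_top, N, d_weight);
  cudaMemcpy(weight_diff.data(), d_weight, weight_diff.size() * sizeof(float), cudaMemcpyDeviceToHost);

  // Row 2 was selected twice, so its gradient row should read 2.0; rows 0 and 4
  // should read 1.0; rows 1 and 3 stay 0.0.
  for (int k = 0; k < K; ++k) {
    printf("row %d:", k);
    for (int d = 0; d < N; ++d) printf(" %.1f", weight_diff[k * N + d]);
    printf("\n");
  }
  cudaFree(d_bottom);
  cudaFree(d_top);
  cudaFree(d_weight);
  return 0;
}

The trade-off behind the commit: atomic floating-point accumulation makes the summation order nondeterministic, so the computed weight gradient can vary slightly from run to run, while the old per-weight loop was deterministic but serialized all M inputs inside every thread.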
