adding forgotten fixed layer files.
cheeyos committed Jan 10, 2015
1 parent a4d2f3c commit 3258f77
Showing 4 changed files with 606 additions and 0 deletions.
79 changes: 79 additions & 0 deletions src/caffe/layers/dropout_fixed_layer.cpp
@@ -0,0 +1,79 @@
// TODO (sergeyk): effect should not be dependent on phase. wasted memcpy.

#include <vector>

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void DropoutFixedLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  threshold_ = this->layer_param_.dropout_param().dropout_ratio();
  DCHECK(threshold_ > 0.);
  DCHECK(threshold_ < 1.);
  scale_ = 1. / (1. - threshold_);
  uint_thres_ = static_cast<unsigned int>(UINT_MAX * threshold_);
}

template <typename Dtype>
void DropoutFixedLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  NeuronLayer<Dtype>::Reshape(bottom, top);
  // Set up the cache for random number generation
  rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
}

template <typename Dtype>
void DropoutFixedLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = (*top)[0]->mutable_cpu_data();
  unsigned int* mask = rand_vec_.mutable_cpu_data();
  const int count = bottom[0]->count();
  if (Caffe::phase() == Caffe::TRAIN) {
    // Create random numbers
    caffe_rng_bernoulli(count, 1. - threshold_, mask);
    for (int i = 0; i < count; ++i) {
      top_data[i] = bottom_data[i] * mask[i] * scale_;
    }
  } else {
    // At test time, scale the activations by (1 - dropout_ratio).
    caffe_copy(bottom[0]->count(), bottom_data, top_data);
    caffe_scal(count, Dtype(1.0 - threshold_), top_data);
  }
}

template <typename Dtype>
void DropoutFixedLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
    if (Caffe::phase() == Caffe::TRAIN) {
      const unsigned int* mask = rand_vec_.cpu_data();
      const int count = (*bottom)[0]->count();
      for (int i = 0; i < count; ++i) {
        bottom_diff[i] = top_diff[i] * mask[i] * scale_;
      }
    } else {
      caffe_copy(top[0]->count(), top_diff, bottom_diff);
    }
  }
}


#ifdef CPU_ONLY
STUB_GPU(DropoutFixedLayer);
#endif

INSTANTIATE_CLASS(DropoutFixedLayer);


} // namespace caffe
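Note: the class declaration itself is not among the hunks shown here. For context, below is a minimal sketch of what the .cpp/.cu implementations imply, modeled on Caffe's stock DropoutLayer declaration of the same era; the member and method names come from the code above, while the exact contents of the header actually added in this commit are an assumption.

// Hypothetical declaration inferred from the implementation above; not the
// verbatim header from this commit. Would live alongside the other layer
// declarations (namespace caffe) in vision_layers.hpp.
template <typename Dtype>
class DropoutFixedLayer : public NeuronLayer<Dtype> {
 public:
  explicit DropoutFixedLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);

  // Bernoulli mask cache and the parameters set up in LayerSetUp:
  // threshold_ is the dropout ratio, scale_ = 1 / (1 - threshold_), and
  // uint_thres_ = UINT_MAX * threshold_ (used by the GPU kernels).
  Blob<unsigned int> rand_vec_;
  Dtype threshold_;
  Dtype scale_;
  unsigned int uint_thres_;
};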
78 changes: 78 additions & 0 deletions src/caffe/layers/dropout_fixed_layer.cu
@@ -0,0 +1,78 @@
#include <algorithm>
#include <limits>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {


template <typename Dtype>
__global__ void DropoutFixedForward(const int n, const Dtype* in,
    const unsigned int* mask, const unsigned int threshold, const float scale,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] * (mask[index] > threshold) * scale;
  }
}

template <typename Dtype>
void DropoutFixedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = (*top)[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  if (Caffe::phase() == Caffe::TRAIN) {
    unsigned int* mask =
        static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
    caffe_gpu_rng_uniform(count, mask);
    // set thresholds
    // NOLINT_NEXT_LINE(whitespace/operators)
    DropoutFixedForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, mask, uint_thres_, scale_, top_data);
    CUDA_POST_KERNEL_CHECK;
  } else {
    caffe_copy(count, bottom_data, top_data);
    caffe_gpu_scal(count, Dtype(1.0 - threshold_), top_data);
  }
}

template <typename Dtype>
__global__ void DropoutFixedBackward(const int n, const Dtype* in_diff,
    const unsigned int* mask, const unsigned int threshold, const float scale,
    Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
  }
}

template <typename Dtype>
void DropoutFixedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
    if (Caffe::phase() == Caffe::TRAIN) {
      const unsigned int* mask =
          static_cast<const unsigned int*>(rand_vec_.gpu_data());
      const int count = (*bottom)[0]->count();
      // NOLINT_NEXT_LINE(whitespace/operators)
      DropoutFixedBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
          CAFFE_CUDA_NUM_THREADS>>>(
          count, top_diff, mask, uint_thres_, scale_, bottom_diff);
      CUDA_POST_KERNEL_CHECK;
    } else {
      caffe_copy(top[0]->count(), top_diff, bottom_diff);
    }
  }
}

INSTANTIATE_CLASS(DropoutFixedLayer);


} // namespace caffe
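The two forward paths apply different scalings: the TRAIN branch multiplies each kept activation by scale_ = 1 / (1 - dropout_ratio), so the expected output equals the input, while the TEST branch multiplies every activation by (1 - dropout_ratio). A standalone sketch (not part of this commit; the 0.5 ratio and the Monte Carlo check are only for illustration) that verifies those two numbers:

// Standalone illustration of the scaling applied by the two phases of
// DropoutFixedLayer; the dropout ratio of 0.5 is an arbitrary choice.
#include <cstdio>
#include <random>

int main() {
  const double ratio = 0.5;                  // dropout_param().dropout_ratio()
  const double scale = 1.0 / (1.0 - ratio);  // scale_ in the TRAIN branch
  const double x = 1.0;                      // a single input activation

  std::mt19937 gen(0);
  std::bernoulli_distribution keep(1.0 - ratio);  // stand-in for caffe_rng_bernoulli

  // TRAIN path: out = in * mask * scale_, averaged over many draws.
  const int trials = 1000000;
  double sum = 0.0;
  for (int i = 0; i < trials; ++i) {
    sum += x * (keep(gen) ? 1.0 : 0.0) * scale;
  }
  const double train_expectation = sum / trials;

  // TEST path: out = in * (1 - dropout_ratio).
  const double test_output = x * (1.0 - ratio);

  printf("E[train output] ~= %.3f, test output = %.3f\n",
         train_expectation, test_output);  // ~1.000 vs. 0.500
  return 0;
}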