add CropLayer for cropping one blob to another using coordinates from parameters
longjon authored and BlGene committed Jan 26, 2016
1 parent 97f4536 commit f680f23
Showing 4 changed files with 196 additions and 1 deletion.
49 changes: 49 additions & 0 deletions include/caffe/layers/crop_layer.hpp
@@ -0,0 +1,49 @@
#ifndef CAFFE_CROP_LAYER_HPP_
#define CAFFE_CROP_LAYER_HPP_

#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Takes a Blob and crops it along the height and width dimensions,
 *        outputting a cropped Blob.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */

template <typename Dtype>
class CropLayer : public Layer<Dtype> {
public:
explicit CropLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);

virtual inline const char* type() const { return "Crop"; }
virtual inline int ExactNumBottomBlobs() const { return 2; }
virtual inline int ExactNumTopBlobs() const { return 1; }

protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

int crop_h_, crop_w_;
};

} // namespace caffe

#endif // CAFFE_CROP_LAYER_HPP_
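
For orientation, a minimal prototxt sketch of how this layer might be wired up; the layer and blob names are hypothetical, and crop_param refers to the CropParameter message added to caffe.proto below. The first bottom is the blob to crop; the second supplies the target height and width.

layer {
  name: "crop"          # hypothetical layer name
  type: "Crop"
  bottom: "big_blob"    # blob to be cropped (hypothetical name)
  bottom: "reference"   # blob whose height/width the top will match (hypothetical name)
  top: "cropped"
  crop_param {
    offset_height: 19   # rows to skip at the top of big_blob
    offset_width: 19    # columns to skip at the left of big_blob
  }
}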
78 changes: 78 additions & 0 deletions src/caffe/layers/crop_layer.cpp
@@ -0,0 +1,78 @@
#include <algorithm>
#include <map>
#include <set>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/layers/crop_layer.hpp"
#include "caffe/net.hpp"


namespace caffe {

template <typename Dtype>
void CropLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const CropParameter& param = this->layer_param_.crop_param();
CHECK_EQ(bottom.size(), 2) << "Wrong number of bottom blobs.";
CHECK_EQ(bottom[0]->num_axes(), 4) << "Only works with 4D blobs.";
CHECK_EQ(bottom[1]->num_axes(), 4) << "Only works with 4D blobs.";
crop_h_ = param.offset_height();
crop_w_ = param.offset_width();
}

template <typename Dtype>
void CropLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Check that the source blob, minus the offsets, is at least as large as
// the destination blob; e.g., a 10x10 bottom with offsets (1, 1) can yield
// at most a 9x9 top.
CHECK_GE(bottom[0]->height() - crop_h_, bottom[1]->height())
    << "invalid crop offset: height";
CHECK_GE(bottom[0]->width() - crop_w_, bottom[1]->width())
    << "invalid crop offset: width";
top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), bottom[1]->height(),
bottom[1]->width());
}

template <typename Dtype>
void CropLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_cpu_data();
for (int n = 0; n < top[0]->num(); ++n) {
for (int c = 0; c < top[0]->channels(); ++c) {
for (int h = 0; h < top[0]->height(); ++h) {
caffe_copy(top[0]->width(),
bottom_data + bottom[0]->offset(n, c, crop_h_ + h, crop_w_),
top_data + top[0]->offset(n, c, h));
}
}
}
}

template <typename Dtype>
void CropLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
if (propagate_down[0]) {
caffe_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
for (int n = 0; n < top[0]->num(); ++n) {
for (int c = 0; c < top[0]->channels(); ++c) {
for (int h = 0; h < top[0]->height(); ++h) {
caffe_copy(top[0]->width(),
top_diff + top[0]->offset(n, c, h),
bottom_diff + bottom[0]->offset(n, c, crop_h_ + h, crop_w_));
}
}
}
}
}

#ifdef CPU_ONLY
STUB_GPU(CropLayer);
#endif

INSTANTIATE_CLASS(CropLayer);
REGISTER_LAYER_CLASS(Crop);

} // namespace caffe
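
The copy loops above rely on Caffe's row-major NCHW layout. As a minimal sketch of the index arithmetic, Blob::offset for a 4-D blob reduces to:

// Flat index of element (n, c, h, w) in an N x C x H x W blob,
// matching what Blob::offset computes for 4-D blobs.
inline int blob_offset(int n, int c, int h, int w, int C, int H, int W) {
  return ((n * C + c) * H + h) * W + w;
}

Because w varies fastest, each (n, c, h) row is contiguous in memory, which is why Forward_cpu can hand caffe_copy an entire row of width() elements at a time.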
60 changes: 60 additions & 0 deletions src/caffe/layers/crop_layer.cu
@@ -0,0 +1,60 @@
#include <vector>

#include "caffe/layers/crop_layer.hpp"

namespace caffe {

// Copy (one line per thread) from one array to another, with arbitrary
// strides in the last two dimensions.
template <typename Dtype>
__global__ void copy_kernel(const int n, const int height, const int width,
const int src_outer_stride, const int src_inner_stride,
const int dest_outer_stride, const int dest_inner_stride,
const Dtype* src, Dtype* dest) {
CUDA_KERNEL_LOOP(index, n) {
int src_start = index / height * src_outer_stride
+ index % height * src_inner_stride;
int dest_start = index / height * dest_outer_stride
+ index % height * dest_inner_stride;
for (int i = 0; i < width; ++i) {
dest[dest_start + i] = src[src_start + i];
}
}
}

template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int lines = top[0]->count() / top[0]->width();

// NOLINT_NEXT_LINE(whitespace/operators)
copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>(
lines, top[0]->height(), top[0]->width(),
bottom[0]->height() * bottom[0]->width(), bottom[0]->width(),
top[0]->height() * top[0]->width(), top[0]->width(),
bottom_data + bottom[0]->offset(0, 0, crop_h_, crop_w_), top_data);
}

template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int lines = top[0]->count() / top[0]->width();

if (propagate_down[0]) {
caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>(
lines, top[0]->height(), top[0]->width(),
top[0]->height() * top[0]->width(), top[0]->width(),
bottom[0]->height() * bottom[0]->width(), bottom[0]->width(),
top_diff, bottom_diff + bottom[0]->offset(0, 0, crop_h_, crop_w_));
}
}

INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);

} // namespace caffe
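
To make the kernel's stride arguments concrete, here is a worked example under assumed shapes (the numbers are illustrative, not from this commit): bottom 1x3x10x10, top 1x3x8x8, crop_h_ = crop_w_ = 1.

// Forward launch under the assumed shapes above:
//   lines             = 1 * 3 * 8 = 24   (one thread per output row)
//   src_outer_stride  = 10 * 10 = 100    (bottom channel-plane stride)
//   src_inner_stride  = 10               (bottom row stride)
//   dest_outer_stride = 8 * 8 = 64       (top channel-plane stride)
//   dest_inner_stride = 8                (top row stride)
// Thread 9 handles plane 9 / 8 = 1 and row 9 % 8 = 1, so it copies 8
// elements starting at src + 1 * 100 + 1 * 10 (on top of the base offset
// bottom[0]->offset(0, 0, crop_h_, crop_w_)) into dest + 1 * 64 + 1 * 8.

The backward launch simply swaps the source and destination strides, scattering top_diff rows back into the zero-filled bottom_diff.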
10 changes: 9 additions & 1 deletion src/caffe/proto/caffe.proto
@@ -306,7 +306,7 @@ message ParamSpec {
// NOTE
// Update the next available ID when you add a new LayerParameter field.
//
// LayerParameter next available layer-specific ID: 141 (last added: elu_param)
// LayerParameter next available layer-specific ID: 142 (last added: crop_param)
message LayerParameter {
optional string name = 1; // the layer name
optional string type = 2; // the layer type
@@ -359,6 +359,7 @@ message LayerParameter {
optional ConcatParameter concat_param = 104;
optional ContrastiveLossParameter contrastive_loss_param = 105;
optional ConvolutionParameter convolution_param = 106;
optional CropParameter crop_param = 141;
optional DataParameter data_param = 107;
optional DropoutParameter dropout_param = 108;
optional DummyDataParameter dummy_data_param = 109;
@@ -563,6 +564,13 @@ message ConvolutionParameter {
optional bool force_nd_im2col = 17 [default = false];
}

message CropParameter {
// Assumes standard dimensions: (N, C, H, W).
// This could possibly be extended to use "optional BlobShape offsets".
optional uint32 offset_height = 1 [default = 0];
optional uint32 offset_width = 2 [default = 0];
}

message DataParameter {
enum DB {
LEVELDB = 0;
