Commit

Cleanup batch norm layer, include global stats computation
cdoersch committed Oct 21, 2015
1 parent 2f05b03 commit b7cad30
Showing 6 changed files with 555 additions and 604 deletions.
135 changes: 94 additions & 41 deletions examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt
@@ -12,7 +12,7 @@ layer {
}
data_param {
source: "examples/cifar10/cifar10_train_lmdb"
batch_size: 111
batch_size: 100
backend: LMDB
}
}
@@ -75,26 +75,43 @@ layer {
type: "BatchNorm"
bottom: "pool1"
top: "bn1"
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
batch_norm_param {
use_global_stats: false
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TRAIN
}
}
layer {
name: "bn1"
type: "BatchNorm"
bottom: "pool1"
top: "bn1"
batch_norm_param {
use_global_stats: true
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TEST
}
}

layer {
name: "Sigmoid1"
type: "Sigmoid"
@@ -129,29 +146,47 @@ layer {
}


layer {
name: "bn2"
type: "BatchNorm"
bottom: "conv2"
top: "bn2"
batch_norm_param {
use_global_stats: false
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TRAIN
}
}

layer {
name: "bn2"
type: "BatchNorm"
bottom: "conv2"
top: "bn2"
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
batch_norm_param {
use_global_stats: true
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TEST
}
}
layer {
@@ -204,23 +239,41 @@ layer {
type: "BatchNorm"
bottom: "conv3"
top: "bn3"
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
batch_norm_param {
use_global_stats: false
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TRAIN
}
}
layer {
name: "bn3"
type: "BatchNorm"
bottom: "conv3"
top: "bn3"
batch_norm_param {
use_global_stats: true
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TEST
}
}
layer {
(remaining lines of this file's diff collapsed)
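For reference, the pattern this diff applies to every BatchNorm layer in the net is a TRAIN/TEST pair: both copies share a name (so the test-phase copy reuses the statistics accumulated during training), the TRAIN copy normalizes with minibatch statistics (use_global_stats: false) while the TEST copy uses the stored running averages (use_global_stats: true), and all three statistic blobs get lr_mult: 0 because the layer maintains them itself during the forward pass and the solver must not also update them. A minimal sketch, reusing the bn1/pool1 names from the file above:

layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "pool1"
  top: "bn1"
  batch_norm_param { use_global_stats: false }  # normalize with minibatch statistics
  param { lr_mult: 0 }  # mean
  param { lr_mult: 0 }  # variance
  param { lr_mult: 0 }  # moving-average factor
  include { phase: TRAIN }
}
layer {
  name: "bn1"  # same name as the TRAIN copy so the accumulated statistics are shared
  type: "BatchNorm"
  bottom: "pool1"
  top: "bn1"
  batch_norm_param { use_global_stats: true }  # normalize with the stored running averages
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  include { phase: TEST }
}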
64 changes: 41 additions & 23 deletions include/caffe/common_layers.hpp
@@ -79,21 +79,46 @@ class ArgMaxLayer : public Layer<Dtype> {
};

/**
* @brief Batch Normalization per-channel with scale & shift linear transform.
*
*/
* @brief Normalizes the input to have 0-mean and/or unit (1) variance across
* the batch.
*
* This layer computes Batch Normalization as described in [1]. For
* each channel in the data (i.e. axis 1), it subtracts the mean and divides
* by the variance, where both statistics are computed across both spatial
* dimensions and across the different examples in the batch.
*
* By default, during training the layer accumulates global mean/variance
* statistics via a running average, which are then used at test time to
* produce deterministic outputs for each input. You can manually
* toggle whether the layer is accumulating or using the statistics via the
* use_global_stats option. IMPORTANT: for this feature to work, you MUST
* set the learning rate to zero for all three parameter blobs, i.e.,
* param {lr_mult: 0} three times in the layer definition.
*
* Note that the original paper also included a per-channel learned bias and
* scaling factor. It is possible (though a bit cumbersome) to implement
* this in Caffe using a single-channel DummyDataLayer filled with zeros,
* followed by a Convolution layer whose output has the same size as the
* BatchNorm output. This produces a channel-specific value that can be
* added to or multiplied with the BatchNorm layer's output (a rough
* prototxt sketch of this wiring follows this file's diff).
*
* [1] S. Ioffe and C. Szegedy, "Batch Normalization: Accelerating Deep Network
* Training by Reducing Internal Covariate Shift." arXiv preprint
* arXiv:1502.03167 (2015).
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
template <typename Dtype>
class BatchNormLayer : public Layer<Dtype> {
public:
explicit BatchNormLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);

virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);

virtual inline const char* type() const { return "BN"; }
virtual inline const char* type() const { return "BatchNorm"; }
virtual inline int ExactNumBottomBlobs() const { return 1; }
virtual inline int ExactNumTopBlobs() const { return 1; }

@@ -105,26 +130,19 @@ class BatchNormLayer : public Layer<Dtype> {
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

// spatial mean & variance
Blob<Dtype> spatial_mean_, spatial_variance_;
// batch mean & variance
Blob<Dtype> batch_mean_, batch_variance_;
// buffer blob
Blob<Dtype> buffer_blob_;
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

Blob<Dtype> x_norm_;
// x_sum_multiplier is used to carry out sum using BLAS
Blob<Dtype> spatial_sum_multiplier_, batch_sum_multiplier_;
Blob<Dtype> mean_, variance_, temp_, x_norm_;
bool use_global_stats_;
Dtype moving_average_fraction_;
int channels_;
Dtype eps_;

// dimension
int N_;
int C_;
int H_;
int W_;
// eps
Dtype var_eps_;
// extra temporary variables used to carry out sums/broadcasting
// using BLAS
Blob<Dtype> batch_sum_multiplier_;
Blob<Dtype> num_by_chans_;
Blob<Dtype> spatial_sum_multiplier_;
};

/**
(remaining lines of this file's diff collapsed)
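The class documentation above notes that the per-channel learned scale and bias from [1] can be emulated with a zero-filled single-channel DummyData layer followed by a Convolution layer. A rough prototxt sketch of that wiring, assuming the BatchNorm output is named "bn1" and has 32 channels, 16x16 spatial size, and batch size 100 (all layer names and dimensions here are illustrative, not part of this commit):

layer {
  name: "bn_scale_seed"
  type: "DummyData"
  top: "bn_scale_seed"
  dummy_data_param {
    # single-channel blob; the default filler is constant 0, so this is all zeros
    shape { dim: 100 dim: 1 dim: 16 dim: 16 }
  }
}
layer {
  name: "bn_scale"
  type: "Convolution"
  bottom: "bn_scale_seed"
  top: "bn_scale"
  convolution_param {
    num_output: 32   # one learned value per channel of the BatchNorm output
    kernel_size: 1
    bias_term: true
    bias_filler { type: "constant" value: 1 }
  }
}
layer {
  name: "bn1_scaled"
  type: "Eltwise"
  bottom: "bn1"
  bottom: "bn_scale"
  top: "bn1_scaled"
  eltwise_param { operation: PROD }  # use SUM here for a learned per-channel shift instead
}

Because the DummyData input is all zeros, the Convolution weights never contribute; only the per-channel bias reaches the Eltwise layer, giving one learned value per channel that is constant over space, i.e. the per-channel scale (or, with SUM, the shift) described in the paper.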
(diffs for the remaining 4 changed files are not shown)
