diff --git a/include/caffe/layers/softmax_loss_ohem_layer.hpp b/include/caffe/layers/softmax_loss_ohem_layer.hpp
index 5ad9ee9c..305c05c4 100644
--- a/include/caffe/layers/softmax_loss_ohem_layer.hpp
+++ b/include/caffe/layers/softmax_loss_ohem_layer.hpp
@@ -60,7 +60,7 @@ class SoftmaxWithLossOHEMLayer : public LossLayer<Dtype> {
   virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
       const vector<Blob<Dtype>*>& top);
 
-  virtual inline const char* type() const { return "SoftmaxWithLoss"; }
+  virtual inline const char* type() const { return "SoftmaxWithLossOHEM"; }
   virtual inline int ExactNumTopBlobs() const { return -1; }
   virtual inline int MinTopBlobs() const { return 1; }
   virtual inline int MaxTopBlobs() const { return 3; }
diff --git a/src/caffe/layers/batch_norm_layer.cpp b/src/caffe/layers/batch_norm_layer.cpp
index bc19c53b..a69d8f99 100644
--- a/src/caffe/layers/batch_norm_layer.cpp
+++ b/src/caffe/layers/batch_norm_layer.cpp
@@ -48,9 +48,7 @@ void BatchNormLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
   mean_.Reshape(sz);
   variance_.Reshape(sz);
   temp_.ReshapeLike(*bottom[0]);
-  if (use_global_stats_) {
-    x_norm_.ReshapeLike(*bottom[0]);
-  }
+  x_norm_.ReshapeLike(*bottom[0]);
 
   sz[0]=bottom[0]->shape(0);
   batch_sum_multiplier_.Reshape(sz);
diff --git a/src/caffe/layers/smooth_L1_loss_ohem_layer.cu b/src/caffe/layers/smooth_L1_loss_ohem_layer.cu
index 669a4f8e..515c340b 100644
--- a/src/caffe/layers/smooth_L1_loss_ohem_layer.cu
+++ b/src/caffe/layers/smooth_L1_loss_ohem_layer.cu
@@ -75,7 +75,7 @@ namespace caffe {
 
   // Output per-instance loss
   if (top.size() >= 2) {
-    kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(bottom[0]->count()),
+    kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(top[1]->count()),
       CAFFE_CUDA_NUM_THREADS >> > (outer_num_, bottom[0]->channels(), inner_num_,
       errors_.gpu_data(), top[1]->mutable_gpu_data());
   }
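
Why the type() fix matters: Caffe's layer factory creates layers from the `type` string in the prototxt, and that string should round-trip through Layer::type() so that type checks and log messages name the layer correctly; the OHEM variant was reporting itself as the plain "SoftmaxWithLoss". A minimal sketch of the matching registration side, assuming this fork registers the layer with the stock Caffe macros (the registration site is not part of this patch):

    #include "caffe/layers/softmax_loss_ohem_layer.hpp"

    namespace caffe {
    INSTANTIATE_CLASS(SoftmaxWithLossOHEMLayer);
    // Registers a creator under the key "SoftmaxWithLossOHEM", so prototxts
    // use type: "SoftmaxWithLossOHEM"; with the fix, type() reports the same
    // string instead of "SoftmaxWithLoss".
    REGISTER_LAYER_CLASS(SoftmaxWithLossOHEM);
    }  // namespace caffe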
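
Why the unconditional x_norm_ reshape is needed: in the stock BVLC implementation, Forward caches the normalized activations in x_norm_ regardless of mode, and Backward reads them back, so the blob must always match bottom[0]'s shape; reshaping it only under use_global_stats_ leaves a mis-sized or never-allocated blob on the other path. The relevant stock lines, shown for context (not part of this patch):

    // End of BatchNormLayer<Dtype>::Forward_cpu: the cache is written
    // unconditionally, whatever use_global_stats_ is.
    caffe_copy(x_norm_.count(), top_data, x_norm_.mutable_cpu_data());

    // Start of BatchNormLayer<Dtype>::Backward_cpu: the cache is read back.
    const Dtype* top_data = x_norm_.cpu_data();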
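
Why top[1]->count() is the natural grid size for the launch above (the CAFFE_GET_BLOCKS arguments on the changed lines were garbled in extraction and are reconstructed on this reasoning): kernel_channel_sum emits one summed loss per (n, spatial) position, i.e. outer_num_ * inner_num_ == top[1]->count() outputs, whereas bottom[0]->count() is channels() times larger, so a grid derived from it launches threads that the loop bound immediately retires. A sketch of the kernel shape, assuming it matches the stock channel-sum helper used by the softmax loss layers:

    template <typename Dtype>
    __global__ void kernel_channel_sum(const int num, const int channels,
        const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
      // One output element per (n, s) pair; the grid-stride loop bound is
      // num * spatial_dim, which equals top[1]->count() at the call site.
      CUDA_KERNEL_LOOP(index, num * spatial_dim) {
        const int n = index / spatial_dim;
        const int s = index % spatial_dim;
        Dtype sum = 0;
        for (int c = 0; c < channels; ++c) {
          sum += data[(n * channels + c) * spatial_dim + s];
        }
        channel_sum[index] = sum;
      }
    }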