Mirror of https://github.com/microsoft/caffe.git

Fix code format issues

This commit is contained in:
Parent: 3121fe2d6a
Commit: d5f5bd5d7f
@@ -322,7 +322,7 @@ class Layer {
   * @brief set phase
   * enable train and test with one network, for saving memory
   */
-  virtual inline void set_phase(Phase phase){
+  virtual inline void set_phase(Phase phase) {
    phase_ = phase;
  }
 
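The set_phase hook above, together with Net::SetPhase added further down in this commit, lets one instantiated network be flipped between TRAIN and TEST instead of keeping two nets in memory. A minimal usage sketch, assuming the usual Net<float> constructor that takes a prototxt path and a phase (the model path below is only a placeholder):

    #include "caffe/caffe.hpp"

    // Minimal sketch: reuse a single network for both phases.
    void TrainThenTest() {
      caffe::Net<float> net("models/example/train_test.prototxt", caffe::TRAIN);
      // ... run training iterations on `net` ...
      // Switch every layer (and the net itself) to TEST; no second
      // copy of the parameters is allocated.
      net.SetPhase(caffe::TEST);
      // ... run evaluation passes ...
    }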
@@ -19,7 +19,7 @@ namespace caffe {
  */
 template <typename Dtype>
 class BoxAnnotatorOHEMLayer :public Layer<Dtype>{
-public:
+ public:
  explicit BoxAnnotatorOHEMLayer(const LayerParameter& param)
    : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,

@@ -32,7 +32,7 @@ namespace caffe {
  virtual inline int ExactNumBottomBlobs() const { return 4; }
  virtual inline int ExactNumTopBlobs() const { return 2; }
 
-protected:
+ protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,

@@ -54,4 +54,4 @@ namespace caffe {
 
 }  // namespace caffe
 
-#endif //CAFFE_BOX_ANNOTATOR_OHEM_LAYER_HPP_
+#endif  // CAFFE_BOX_ANNOTATOR_OHEM_LAYER_HPP_
@@ -33,7 +33,7 @@ namespace caffe {
 
 template <typename Dtype>
 class PSROIPoolingLayer : public Layer<Dtype> {
-public:
+ public:
  explicit PSROIPoolingLayer(const LayerParameter& param)
    : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,

@@ -48,7 +48,7 @@ public:
  virtual inline int MinTopBlobs() const { return 1; }
  virtual inline int MaxTopBlobs() const { return 1; }
 
-protected:
+ protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
@@ -18,58 +18,59 @@ namespace caffe {
  * R-FCN
  * Written by Yi Li
  */
-  template <typename Dtype>
-  class SmoothL1LossOHEMLayer : public LossLayer<Dtype> {
-  public:
-    explicit SmoothL1LossOHEMLayer(const LayerParameter& param)
-      : LossLayer<Dtype>(param), diff_() {}
-    virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
-      const vector<Blob<Dtype>*>& top);
-    virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
-      const vector<Blob<Dtype>*>& top);
+template <typename Dtype>
+class SmoothL1LossOHEMLayer : public LossLayer<Dtype> {
+ public:
+  explicit SmoothL1LossOHEMLayer(const LayerParameter& param)
+      : LossLayer<Dtype>(param), diff_() {}
+  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
 
-    virtual inline const char* type() const { return "SmoothL1LossOHEM"; }
+  virtual inline const char* type() const { return "SmoothL1LossOHEM"; }
 
-    virtual inline int ExactNumBottomBlobs() const { return -1; }
-    virtual inline int MinBottomBlobs() const { return 2; }
-    virtual inline int MaxBottomBlobs() const { return 3; }
-    virtual inline int ExactNumTopBlobs() const { return -1; }
-    virtual inline int MinTopBlobs() const { return 1; }
-    virtual inline int MaxTopBlobs() const { return 2; }
+  virtual inline int ExactNumBottomBlobs() const { return -1; }
+  virtual inline int MinBottomBlobs() const { return 2; }
+  virtual inline int MaxBottomBlobs() const { return 3; }
+  virtual inline int ExactNumTopBlobs() const { return -1; }
+  virtual inline int MinTopBlobs() const { return 1; }
+  virtual inline int MaxTopBlobs() const { return 2; }
 
-    /**
-    * Unlike most loss layers, in the SmoothL1LossOHEMLayer we can backpropagate
-    * to both inputs -- override to return true and always allow force_backward.
-    */
-    virtual inline bool AllowForceBackward(const int bottom_index) const {
-      return true;
-    }
+  /**
+   * Unlike most loss layers, in the SmoothL1LossOHEMLayer we can backpropagate
+   * to both inputs -- override to return true and always allow force_backward.
+   */
+  virtual inline bool AllowForceBackward(const int bottom_index) const {
+    return true;
+  }
 
-  protected:
-    virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      const vector<Blob<Dtype>*>& top);
-    virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-      const vector<Blob<Dtype>*>& top);
+ protected:
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
 
-    virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
-    virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
 
-    /// Read the normalization mode parameter and compute the normalizer based
-    /// on the blob size.
-    virtual Dtype get_normalizer(
-      LossParameter_NormalizationMode normalization_mode, Dtype pre_fixed_normalizer);
+  /// Read the normalization mode parameter and compute the normalizer based
+  /// on the blob size.
+  virtual Dtype get_normalizer(
+      LossParameter_NormalizationMode normalization_mode,
+      Dtype pre_fixed_normalizer);
 
-    Blob<Dtype> diff_;
-    Blob<Dtype> errors_;
-    bool has_weights_;
+  Blob<Dtype> diff_;
+  Blob<Dtype> errors_;
+  bool has_weights_;
 
-    int outer_num_, inner_num_;
+  int outer_num_, inner_num_;
 
-    /// How to normalize the output loss.
-    LossParameter_NormalizationMode normalization_;
-  };
+  /// How to normalize the output loss.
+  LossParameter_NormalizationMode normalization_;
+};
 
 }  // namespace caffe
 
@@ -154,7 +154,7 @@ class Net {
  inline const vector<vector<Blob<Dtype>*> >& top_vecs() const {
    return top_vecs_;
  }
-
+
  inline const vector<vector<int> >& bottom_id_vecs() const {
    return bottom_id_vecs_;
  }
@@ -13,8 +13,8 @@
 #include <string>
 #include <vector>
 
-#include "mex.h"
 #include "gpu/mxGPUArray.h"
+#include "mex.h"
 
 #include "caffe/caffe.hpp"
@@ -67,14 +67,14 @@ static void mx_mat_to_blob(const mxArray* mx_mat, Blob<float>* blob,
 
  const float* mat_mem_ptr = NULL;
  mxGPUArray const *mx_mat_gpu;
- if (mxIsGPUArray(mx_mat)){
+ if (mxIsGPUArray(mx_mat)) {
    mxInitGPU();
    mx_mat_gpu = mxGPUCreateFromMxArray(mx_mat);
-   mat_mem_ptr = reinterpret_cast<const float*>(mxGPUGetDataReadOnly(mx_mat_gpu));
+   mat_mem_ptr = reinterpret_cast<const float*>(
+       mxGPUGetDataReadOnly(mx_mat_gpu));
    mxCHECK(blob->count() == mxGPUGetNumberOfElements(mx_mat_gpu),
      "number of elements in target blob doesn't match that in input mxArray");
- }
- else{
+ } else {
    mxCHECK(blob->count() == mxGetNumberOfElements(mx_mat),
      "number of elements in target blob doesn't match that in input mxArray");
    mat_mem_ptr = reinterpret_cast<const float*>(mxGetData(mx_mat));

@@ -94,7 +94,7 @@ static void mx_mat_to_blob(const mxArray* mx_mat, Blob<float>* blob,
  }
  caffe_copy(blob->count(), mat_mem_ptr, blob_mem_ptr);
 
- if (mxIsGPUArray(mx_mat)){
+ if (mxIsGPUArray(mx_mat)) {
    mxGPUDestroyGPUArray(mx_mat_gpu);
  }
 }
@@ -142,9 +142,10 @@ static mxArray* int_vec_to_mx_vec(const vector<int>& int_vec) {
 
 
 // Convert vector<vector<int> > to matlab cell of (row vector)s
-static mxArray* int_vec_vec_to_mx_cell_vec(const vector<vector<int> >& int_vec_vec) {
+static mxArray* int_vec_vec_to_mx_cell_vec(
+    const vector<vector<int> >& int_vec_vec) {
  mxArray* mx_cell_vec = mxCreateCellMatrix(int_vec_vec.size(), 1);
- for (int i = 0; i < int_vec_vec.size(); i++){
+ for (int i = 0; i < int_vec_vec.size(); i++) {
    mxSetCell(mx_cell_vec, i, int_vec_to_mx_vec(int_vec_vec[i]));
  }
  return mx_cell_vec;
@@ -322,11 +323,9 @@ static void net_set_phase(MEX_ARGS) {
  Phase phase;
  if (strcmp(phase_name, "train") == 0) {
    phase = TRAIN;
- }
- else if (strcmp(phase_name, "test") == 0) {
+ } else if (strcmp(phase_name, "test") == 0) {
    phase = TEST;
- }
- else {
+ } else {
    mxERROR("Unknown phase");
  }
  net->SetPhase(phase);
@@ -339,8 +338,15 @@ static void net_get_attr(MEX_ARGS) {
      "Usage: caffe_('net_get_attr', hNet)");
  Net<float>* net = handle_to_ptr<Net<float> >(prhs[0]);
  const int net_attr_num = 8;
- const char* net_attrs[net_attr_num] = { "hLayer_layers", "hBlob_blobs",
-   "input_blob_indices", "output_blob_indices", "layer_names", "blob_names", "bottom_id_vecs", "top_id_vecs" };
+ const char* net_attrs[net_attr_num] = {
+     "hLayer_layers",
+     "hBlob_blobs",
+     "input_blob_indices",
+     "output_blob_indices",
+     "layer_names",
+     "blob_names",
+     "bottom_id_vecs",
+     "top_id_vecs" };
  mxArray* mx_net_attr = mxCreateStructMatrix(1, 1, net_attr_num,
      net_attrs);
  mxSetField(mx_net_attr, 0, "hLayer_layers",
@@ -481,7 +487,8 @@ static void blob_get_data(MEX_ARGS) {
 
 // Usage: caffe_('blob_set_data', hBlob, new_data)
 static void blob_set_data(MEX_ARGS) {
- mxCHECK(nrhs == 2 && mxIsStruct(prhs[0]) && (mxIsSingle(prhs[1]) || mxIsGPUArray(prhs[1])),
+ mxCHECK(nrhs == 2 && mxIsStruct(prhs[0]) &&
+     (mxIsSingle(prhs[1]) || mxIsGPUArray(prhs[1])),
      "Usage: caffe_('blob_set_data', hBlob, new_data)");
  Blob<float>* blob = handle_to_ptr<Blob<float> >(prhs[0]);
  mx_mat_to_blob(prhs[1], blob, DATA);
@@ -493,10 +500,9 @@ static void blob_copy_data(MEX_ARGS) {
      "Usage: caffe_('blob_copy_data', hBlob_to, hBlob_from)");
  Blob<float>* blob_to = handle_to_ptr<Blob<float> >(prhs[0]);
  Blob<float>* blob_from = handle_to_ptr<Blob<float> >(prhs[1]);
- //mxCHECK(blob_from->count() == blob_to->count(),
- //  "number of elements in target blob doesn't match that in source blob");
-
- blob_to->CopyFrom(*blob_from, false, true);
+ // mxCHECK(blob_from->count() == blob_to->count(),
+ //     "number of elements in target blob doesn't match that in source blob");
+ blob_to->CopyFrom(*blob_from, false, true);
 }
 
 // Usage: caffe_('blob_get_diff', hBlob)
@@ -561,22 +567,20 @@ static void set_random_seed(MEX_ARGS) {
  Caffe::set_random_seed(random_seed);
 }
 
-static void glog_failure_handler(){
+static void glog_failure_handler() {
  static bool is_glog_failure = false;
- if (!is_glog_failure)
- {
+ if (!is_glog_failure) {
    is_glog_failure = true;
    ::google::FlushLogFiles(0);
    mexErrMsgTxt("glog check error, please check log and clear mex");
  }
 }
 
-static void protobuf_log_handler(::google::protobuf::LogLevel level, const char* filename, int line,
-  const std::string& message)
-{
- const int max_err_length = 512;
- char err_message[max_err_length];
- snprintf(err_message, max_err_length, "Protobuf : %s . at %s Line %d",
+static void protobuf_log_handler(::google::protobuf::LogLevel level,
+    const char* filename, int line, const std::string& message) {
+ const int kMaxErrLength = 512;
+ char err_message[kMaxErrLength];
+ snprintf(err_message, kMaxErrLength, "Protobuf : %s . at %s Line %d",
    message.c_str(), filename, line);
  LOG(INFO) << err_message;
  ::google::FlushLogFiles(0);
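These two handlers only take effect once they are installed; in matcaffe that normally happens when the MEX entry point initializes. A hedged sketch of that registration, assuming the glog and protobuf headers already used in this file (the helper name below is illustrative, not the file's actual init function):

    #include <glog/logging.h>
    #include <google/protobuf/stubs/common.h>

    // Hypothetical init helper: route protobuf log messages and fatal glog
    // failures through the handlers defined above, so a CHECK failure aborts
    // the MEX call instead of killing the whole MATLAB process.
    static void init_log_handlers() {
      ::google::InitGoogleLogging("caffe_mex");
      ::google::InstallFailureFunction(&glog_failure_handler);
      ::google::protobuf::SetLogHandler(&protobuf_log_handler);
    }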
@@ -101,9 +101,9 @@ void BatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
  } else {
    if (use_global_stats_) {
      top_diff = top[0]->gpu_diff();
-   }
-   else {
-     caffe_copy(x_norm_.count(), top[0]->gpu_diff(), x_norm_.mutable_gpu_diff());
+   } else {
+     caffe_copy(x_norm_.count(), top[0]->gpu_diff(),
+         x_norm_.mutable_gpu_diff());
      top_diff = x_norm_.gpu_diff();
    }
  }
@@ -1,7 +1,5 @@
 // ------------------------------------------------------------------
 // R-FCN
 // Copyright (c) 2016 Microsoft
 // Licensed under The MIT License [see r-fcn/LICENSE for details]
 // Written by Yi Li
 // ------------------------------------------------------------------
 

@@ -9,6 +7,7 @@
 
 #include <string>
 #include <utility>
 #include <vector>
 
 #include "caffe/blob.hpp"
 #include "caffe/common.hpp"

@@ -24,9 +23,11 @@ using std::ceil;
 namespace caffe {
 
 template <typename Dtype>
-void BoxAnnotatorOHEMLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+void BoxAnnotatorOHEMLayer<Dtype>::LayerSetUp(
+    const vector<Blob<Dtype>*>& bottom,
     const vector<Blob<Dtype>*>& top) {
-  BoxAnnotatorOHEMParameter box_anno_param = this->layer_param_.box_annotator_ohem_param();
+  BoxAnnotatorOHEMParameter box_anno_param =
+      this->layer_param_.box_annotator_ohem_param();
   roi_per_img_ = box_anno_param.roi_per_img();
   CHECK_GT(roi_per_img_, 0);
   ignore_label_ = box_anno_param.ignore_label();
@@ -40,7 +41,7 @@ namespace caffe {
   height_ = bottom[0]->height();
   width_ = bottom[0]->width();
   spatial_dim_ = height_*width_;
-
+
   CHECK_EQ(bottom[1]->num(), num_);
   CHECK_EQ(bottom[1]->channels(), 1);
   CHECK_EQ(bottom[1]->height(), height_);
@@ -63,14 +64,15 @@ namespace caffe {
 }
 
 template <typename Dtype>
-void BoxAnnotatorOHEMLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-    const vector<Blob<Dtype>*>& top) {
+void BoxAnnotatorOHEMLayer<Dtype>::Forward_cpu(
+    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
   NOT_IMPLEMENTED;
 }
 
 template <typename Dtype>
-void BoxAnnotatorOHEMLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
-    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
+void BoxAnnotatorOHEMLayer<Dtype>::Backward_cpu(
+    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
   NOT_IMPLEMENTED;
 }
 
@@ -1,7 +1,5 @@
 // ------------------------------------------------------------------
 // R-FCN
 // Copyright (c) 2016 Microsoft
 // Licensed under The MIT License [see r-fcn/LICENSE for details]
 // Written by Yi Li
 // ------------------------------------------------------------------
 

@@ -14,9 +12,10 @@
 using std::max;
 using std::min;
 
-namespace caffe {
+namespace caffe {
 template <typename Dtype>
-void BoxAnnotatorOHEMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+void BoxAnnotatorOHEMLayer<Dtype>::Forward_gpu(
+    const vector<Blob<Dtype>*>& bottom,
     const vector<Blob<Dtype>*>& top) {
   const Dtype* bottom_rois = bottom[0]->cpu_data();
   const Dtype* bottom_loss = bottom[1]->cpu_data();
@@ -30,9 +29,9 @@ namespace caffe {
   int num_rois_ = bottom[1]->count();
 
   int num_imgs = -1;
-  for (int n = 0; n < num_rois_; n++){
-    for (int s = 0; s < spatial_dim_; s++){
-      num_imgs = bottom_rois[0]>num_imgs ? bottom_rois[0] : num_imgs;
+  for (int n = 0; n < num_rois_; n++) {
+    for (int s = 0; s < spatial_dim_; s++) {
+      num_imgs = bottom_rois[0] > num_imgs ? bottom_rois[0] : num_imgs;
       bottom_rois++;
     }
     bottom_rois += (5-1)*spatial_dim_;
@@ -44,33 +43,37 @@ namespace caffe {
 
   // Find rois with max loss
   vector<int> sorted_idx(num_rois_);
-  for (int i = 0; i < num_rois_; i++){
+  for (int i = 0; i < num_rois_; i++) {
     sorted_idx[i] = i;
   }
   std::sort(sorted_idx.begin(), sorted_idx.end(),
-    [bottom_loss](int i1, int i2){return bottom_loss[i1] > bottom_loss[i2]; });
+    [bottom_loss](int i1, int i2) {
+      return bottom_loss[i1] > bottom_loss[i2];
+    });
 
   // Generate output labels for scoring and loss_weights for bbox regression
   vector<int> number_left(num_imgs, roi_per_img_);
-  for (int i = 0; i < num_rois_; i++){
+  for (int i = 0; i < num_rois_; i++) {
     int index = sorted_idx[i];
     int s = index % (width_*height_);
     int n = index / (width_*height_);
     int batch_ind = bottom_rois[n*5*spatial_dim_+s];
-    if (number_left[batch_ind]>0){
+    if (number_left[batch_ind] > 0) {
       number_left[batch_ind]--;
       top_labels[index] = bottom_labels[index];
-      for (int j = 0; j < bbox_channels_; j++){
+      for (int j = 0; j < bbox_channels_; j++) {
         int bbox_index = (n*bbox_channels_+j)*spatial_dim_+s;
-        top_bbox_loss_weights[bbox_index]=bottom_bbox_loss_weights[bbox_index];
+        top_bbox_loss_weights[bbox_index] =
+          bottom_bbox_loss_weights[bbox_index];
       }
     }
   }
 }
 
 template <typename Dtype>
-void BoxAnnotatorOHEMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
-  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
+void BoxAnnotatorOHEMLayer<Dtype>::Backward_gpu(
+    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
   return;
 }
 
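The loop above is the core of online hard example mining: ROIs are sorted by their per-ROI loss and only the hardest roi_per_img ROIs of each image keep their labels and bbox loss weights. A standalone sketch of that selection logic (simplified to one loss value per ROI; function and variable names are illustrative, not the layer's actual interface):

    #include <algorithm>
    #include <vector>

    // Return the indices of the `per_image` highest-loss ROIs for each image.
    // `batch_index[i]` is the image an ROI belongs to, `loss[i]` its loss.
    std::vector<int> SelectHardRois(const std::vector<int>& batch_index,
                                    const std::vector<float>& loss,
                                    int num_images, int per_image) {
      std::vector<int> order(loss.size());
      for (int i = 0; i < static_cast<int>(order.size()); ++i) order[i] = i;
      // Hardest (largest loss) first, as in the layer's std::sort call.
      std::sort(order.begin(), order.end(),
                [&loss](int a, int b) { return loss[a] > loss[b]; });
      std::vector<int> left(num_images, per_image);
      std::vector<int> kept;
      for (int idx : order) {
        if (left[batch_index[idx]] > 0) {   // quota for this image not used up
          --left[batch_index[idx]];
          kept.push_back(idx);              // this ROI contributes to the loss
        }
      }
      return kept;
    }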
@@ -1,7 +1,5 @@
 // ------------------------------------------------------------------
 // R-FCN
 // Copyright (c) 2016 Microsoft
 // Licensed under The MIT License [see r-fcn/LICENSE for details]
 // Written by Yi Li
 // ------------------------------------------------------------------
 

@@ -21,8 +19,9 @@ using std::ceil;
 namespace caffe {
 template <typename Dtype>
 void PSROIPoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
-  const vector<Blob<Dtype>*>& top){
-  PSROIPoolingParameter psroi_pooling_param = this->layer_param_.psroi_pooling_param();
+  const vector<Blob<Dtype>*>& top) {
+  PSROIPoolingParameter psroi_pooling_param =
+      this->layer_param_.psroi_pooling_param();
   spatial_scale_ = psroi_pooling_param.spatial_scale();
   LOG(INFO) << "Spatial scale: " << spatial_scale_;
 
@@ -45,19 +44,21 @@ namespace caffe {
     << "input channel number does not match layer parameters";
   height_ = bottom[0]->height();
   width_ = bottom[0]->width();
-  top[0]->Reshape(bottom[1]->num(), output_dim_, pooled_height_, pooled_width_);
-  mapping_channel_.Reshape(bottom[1]->num(), output_dim_, pooled_height_, pooled_width_);
+  top[0]->Reshape(
+    bottom[1]->num(), output_dim_, pooled_height_, pooled_width_);
+  mapping_channel_.Reshape(
+    bottom[1]->num(), output_dim_, pooled_height_, pooled_width_);
 }
 
 template <typename Dtype>
 void PSROIPoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-  const vector<Blob<Dtype>*>& top){
+  const vector<Blob<Dtype>*>& top) {
   NOT_IMPLEMENTED;
 }
 
 template <typename Dtype>
 void PSROIPoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
-  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
+  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
   NOT_IMPLEMENTED;
 }
 #ifdef CPU_ONLY

@@ -67,4 +68,4 @@ namespace caffe {
 INSTANTIATE_CLASS(PSROIPoolingLayer);
 REGISTER_LAYER_CLASS(PSROIPooling);
 
-}
+}  // namespace caffe
@@ -1,11 +1,11 @@
-// ------------------------------------------------------------------
+// --------------------------------------------------------
 // R-FCN
 // Copyright (c) 2016 Microsoft
 // Licensed under The MIT License [see r-fcn/LICENSE for details]
-// Written by Yi Li
-// ------------------------------------------------------------------
+// Written by Yi Li, 2016.
+// --------------------------------------------------------
 
 #include <algorithm>
 #include <cfloat>
 #include <vector>
 
 #include "caffe/layers/psroi_pooling_layer.hpp"
 #include "caffe/util/gpu_util.cuh"
@@ -38,16 +38,20 @@ namespace caffe {
     // [start, end) interval for spatial sampling
     bottom_rois += n * 5;
     int roi_batch_ind = bottom_rois[0];
-    Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
-    Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
-    Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
-    Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
+    Dtype roi_start_w =
+      static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
+    Dtype roi_start_h =
+      static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
+    Dtype roi_end_w =
+      static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
+    Dtype roi_end_h =
+      static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
 
     // Force too small ROIs to be 1x1
-    Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
+    Dtype roi_width = max(roi_end_w - roi_start_w, 0.1);  // avoid 0
     Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
 
-    // Compute w and h at bottom
+    // Compute w and h at bottom
     Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
     Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
 
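The arithmetic above maps an ROI given in image coordinates onto the feature map (scaling by spatial_scale) and splits it into a pooled_height x pooled_width grid of bins. A small host-side sketch of that geometry for one bin, mirroring the kernel's rounding, the 0.1 minimum size, and the clipping that follows a few lines further down (plain C++ with illustrative names, not the kernel itself):

    #include <algorithm>
    #include <cmath>

    // Compute the [hstart, hend) x [wstart, wend) window on the feature map
    // that bin (ph, pw) of an ROI covers. `roi` is {x1, y1, x2, y2} in image
    // coordinates; the batch index is handled by the caller.
    void RoiBinWindow(const float roi[4], float spatial_scale,
                      int pooled_h, int pooled_w, int ph, int pw,
                      int height, int width,
                      int* hstart, int* hend, int* wstart, int* wend) {
      float roi_start_w = std::round(roi[0]) * spatial_scale;
      float roi_start_h = std::round(roi[1]) * spatial_scale;
      float roi_end_w = (std::round(roi[2]) + 1.f) * spatial_scale;
      float roi_end_h = (std::round(roi[3]) + 1.f) * spatial_scale;
      // Force too small ROIs to be 1x1 (same 0.1 floor as the kernel).
      float roi_width = std::max(roi_end_w - roi_start_w, 0.1f);
      float roi_height = std::max(roi_end_h - roi_start_h, 0.1f);
      float bin_size_h = roi_height / pooled_h;
      float bin_size_w = roi_width / pooled_w;
      *hstart = static_cast<int>(std::floor(ph * bin_size_h + roi_start_h));
      *wstart = static_cast<int>(std::floor(pw * bin_size_w + roi_start_w));
      *hend = static_cast<int>(std::ceil((ph + 1) * bin_size_h + roi_start_h));
      *wend = static_cast<int>(std::ceil((pw + 1) * bin_size_w + roi_start_w));
      // Clip to the feature map, as the kernel does below.
      *hstart = std::min(std::max(*hstart, 0), height);
      *hend = std::min(std::max(*hend, 0), height);
      *wstart = std::min(std::max(*wstart, 0), width);
      *wend = std::min(std::max(*wend, 0), width);
    }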
@@ -62,7 +66,7 @@ namespace caffe {
     // Add roi offsets and clip to input boundaries
     hstart = min(max(hstart, 0), height);
     hend = min(max(hend, 0), height);
-    wstart = min(max(wstart, 0),width);
+    wstart = min(max(wstart, 0), width);
     wend = min(max(wend, 0), width);
     bool is_empty = (hend <= hstart) || (wend <= wstart);
 

@@ -72,8 +76,8 @@ namespace caffe {
 
     bottom_data += (roi_batch_ind * channels + c) * height * width;
     Dtype out_sum = 0;
-    for (int h = hstart; h < hend; ++h){
-      for (int w = wstart; w < wend; ++w){
+    for (int h = hstart; h < hend; ++h) {
+      for (int w = wstart; w < wend; ++w) {
         int bottom_index = h*width + w;
         out_sum += bottom_data[bottom_index];
       }

@@ -96,9 +100,11 @@ namespace caffe {
   caffe_gpu_set(count, Dtype(0), top_data);
   caffe_gpu_set(count, -1, mapping_channel_ptr);
   // NOLINT_NEXT_LINE(whitespace/operators)
-  PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
-    count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_,
-    pooled_width_, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr);
+  PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count),
+    CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_,
+    channels_, height_, width_, pooled_height_,
+    pooled_width_, bottom_rois, output_dim_, group_size_,
+    top_data, mapping_channel_ptr);
   CUDA_POST_KERNEL_CHECK;
 }
 

@@ -112,7 +118,7 @@ namespace caffe {
     const int channels,
     const int height, const int width,
     const int pooled_height, const int pooled_width,
-    const int output_dim,
+    const int output_dim,
     Dtype* bottom_diff,
     const Dtype* bottom_rois) {
   CUDA_KERNEL_LOOP(index, nthreads) {

@@ -124,16 +130,20 @@ namespace caffe {
     // [start, end) interval for spatial sampling
     bottom_rois += n * 5;
     int roi_batch_ind = bottom_rois[0];
-    Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
-    Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
-    Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
-    Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
+    Dtype roi_start_w =
+      static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
+    Dtype roi_start_h =
+      static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
+    Dtype roi_end_w =
+      static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
+    Dtype roi_end_h =
+      static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
 
     // Force too small ROIs to be 1x1
-    Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
+    Dtype roi_width = max(roi_end_w - roi_start_w, 0.1);  // avoid 0
     Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
 
-    // Compute w and h at bottom
+    // Compute w and h at bottom
     Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
     Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
 

@@ -154,11 +164,12 @@ namespace caffe {
 
     // Compute c at bottom
     int c = mapping_channel[index];
-    Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
+    Dtype* offset_bottom_diff = bottom_diff +
+      (roi_batch_ind * channels + c) * height * width;
     Dtype bin_area = (hend - hstart)*(wend - wstart);
     Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area;
-    for (int h = hstart; h < hend; ++h){
-      for (int w = wstart; w < wend; ++w){
+    for (int h = hstart; h < hend; ++h) {
+      for (int w = wstart; w < wend; ++w) {
         int bottom_index = h*width + w;
         caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
       }

@@ -182,10 +193,11 @@ namespace caffe {
   caffe_gpu_set(bottom_count, Dtype(0), bottom_diff);
   const int count = top[0]->count();
   // NOLINT_NEXT_LINE(whitespace/operators)
-  PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
-    count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_,
-    channels_, height_, width_, pooled_height_, pooled_width_, output_dim_,
-    bottom_diff, bottom_rois);
+  PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count),
+    CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr,
+    top[0]->num(), spatial_scale_, channels_, height_, width_,
+    pooled_height_, pooled_width_, output_dim_, bottom_diff,
+    bottom_rois);
   CUDA_POST_KERNEL_CHECK;
 }
 
@@ -3,6 +3,7 @@
 // Written by Yi Li, 2016.
 // --------------------------------------------------------
 
+#include <algorithm>
 #include <string>
 #include <utility>
 #include <vector>
@@ -22,8 +23,7 @@ void SmoothL1LossOHEMLayer<Dtype>::LayerSetUp(
     normalization_ = this->layer_param_.loss_param().normalize() ?
                      LossParameter_NormalizationMode_VALID :
                      LossParameter_NormalizationMode_BATCH_SIZE;
-  }
-  else {
+  } else {
     normalization_ = this->layer_param_.loss_param().normalization();
   }
 }
@@ -50,14 +50,16 @@ void SmoothL1LossOHEMLayer<Dtype>::Reshape(
     bottom[0]->height(), bottom[0]->width());
 
   // top[2] stores per-instance loss, which takes the shape of N*1*H*W
-  if (top.size()>=2) {
-    top[1]->Reshape(bottom[0]->num(), 1, bottom[0]->height(), bottom[0]->width());
+  if (top.size() >= 2) {
+    top[1]->Reshape(
+      bottom[0]->num(), 1, bottom[0]->height(), bottom[0]->width());
   }
 }
 
 template <typename Dtype>
 Dtype SmoothL1LossOHEMLayer<Dtype>::get_normalizer(
-    LossParameter_NormalizationMode normalization_mode, Dtype pre_fixed_normalizer) {
+    LossParameter_NormalizationMode normalization_mode,
+    Dtype pre_fixed_normalizer) {
   Dtype normalizer;
   switch (normalization_mode) {
     case LossParameter_NormalizationMode_FULL:
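get_normalizer turns the summed per-element loss into the value reported in top[0]: the sum is divided by a normalizer chosen from the loss_param() normalization mode (for example the full blob size for FULL, the batch size for BATCH_SIZE, or the pre_fixed_normalizer value this fork adds). A hedged host-side sketch of that idea, using simplified enum and variable names rather than the generated protobuf types:

    #include <algorithm>

    enum class Normalization { FULL, BATCH_SIZE, PRE_FIXED, NONE };

    // Illustrative only: pick the divisor for the summed loss. The real
    // layer reads LossParameter_NormalizationMode from the prototxt.
    float Normalizer(Normalization mode, int outer_num, int inner_num,
                     float pre_fixed_normalizer) {
      float n = 1.0f;
      switch (mode) {
        case Normalization::FULL:       n = float(outer_num) * inner_num; break;
        case Normalization::BATCH_SIZE: n = float(outer_num); break;
        case Normalization::PRE_FIXED:  n = pre_fixed_normalizer; break;
        case Normalization::NONE:       n = 1.0f; break;
      }
      // Guard against dividing by zero on empty blobs, as Caffe's loss
      // layers do with std::max.
      return std::max(1.0f, n);
    }

    // Reported loss = sum of per-element losses / Normalizer(...).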
@@ -85,14 +87,15 @@ Dtype SmoothL1LossOHEMLayer<Dtype>::get_normalizer(
 }
 
 template <typename Dtype>
-void SmoothL1LossOHEMLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-    const vector<Blob<Dtype>*>& top) {
+void SmoothL1LossOHEMLayer<Dtype>::Forward_cpu(
+    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
   NOT_IMPLEMENTED;
 }
 
 template <typename Dtype>
-void SmoothL1LossOHEMLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
-    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
+void SmoothL1LossOHEMLayer<Dtype>::Backward_cpu(
+    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
   NOT_IMPLEMENTED;
 }
 
@@ -13,21 +13,20 @@
 #include "caffe/util/math_functions.hpp"
 
 namespace caffe {
-  template <typename Dtype>
-  __global__ void SmoothL1ForwardGPU(const int n, const Dtype* in, Dtype* out) {
-    // f(x) = 0.5 * x^2    if |x| < 1
-    //        |x| - 0.5    otherwise
-    CUDA_KERNEL_LOOP(index, n) {
-      Dtype val = in[index];
-      Dtype abs_val = abs(val);
-      if (abs_val < 1) {
-        out[index] = 0.5 * val * val;
-      }
-      else {
-        out[index] = abs_val - 0.5;
-      }
-    }
-  }
+template <typename Dtype>
+__global__ void SmoothL1ForwardGPU(const int n, const Dtype* in, Dtype* out) {
+  // f(x) = 0.5 * x^2    if |x| < 1
+  //        |x| - 0.5    otherwise
+  CUDA_KERNEL_LOOP(index, n) {
+    Dtype val = in[index];
+    Dtype abs_val = abs(val);
+    if (abs_val < 1) {
+      out[index] = 0.5 * val * val;
+    } else {
+      out[index] = abs_val - 0.5;
+    }
+  }
+}
 
 template <typename Dtype>
 __global__ void kernel_channel_sum(const int num, const int channels,
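For reference, the function this kernel implements is the smooth L1 (Huber-style) penalty used for bbox regression; the backward kernel later in this file applies its derivative. A plain C++ restatement of the two formulas from the comments above (host-side, for illustration only):

    #include <cmath>

    // f(x)  = 0.5 * x^2        if |x| < 1
    //       = |x| - 0.5        otherwise
    inline float SmoothL1(float x) {
      float ax = std::fabs(x);
      return ax < 1.f ? 0.5f * x * x : ax - 0.5f;
    }

    // f'(x) = x                if |x| < 1
    //       = sign(x)          otherwise
    inline float SmoothL1Grad(float x) {
      float ax = std::fabs(x);
      return ax < 1.f ? x : (x > 0.f ? 1.f : (x < 0.f ? -1.f : 0.f));
    }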
@@ -43,85 +42,90 @@ namespace caffe {
   }
 }
 
-  template <typename Dtype>
-  void SmoothL1LossOHEMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-    const vector<Blob<Dtype>*>& top) {
-    int count = bottom[0]->count();
-    caffe_gpu_sub(
-      count,
-      bottom[0]->gpu_data(),
-      bottom[1]->gpu_data(),
-      diff_.mutable_gpu_data());    // d := b0 - b1
-    if (has_weights_) {
-      caffe_gpu_mul(
-        count,
-        bottom[2]->gpu_data(),
-        diff_.gpu_data(),
-        diff_.mutable_gpu_data());  // d := w * (b0 - b1)
-    }
-    SmoothL1ForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
-      count, diff_.gpu_data(), errors_.mutable_gpu_data());
-    CUDA_POST_KERNEL_CHECK;
-
-
-    Dtype loss;
-    caffe_gpu_asum(count, errors_.gpu_data(), &loss);
-    int spatial_dim = diff_.height() * diff_.width();
-
-    Dtype pre_fixed_normalizer = this->layer_param_.loss_param().pre_fixed_normalizer();
-    top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
-      pre_fixed_normalizer);
-
-    // Output per-instance loss
-    if (top.size() >= 2) {
-      kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS >> >
-        (outer_num_, bottom[0]->channels(), inner_num_, errors_.gpu_data(),
-        top[1]->mutable_gpu_data());
-    }
-  }
-
-  template <typename Dtype>
-  __global__ void SmoothL1BackwardGPU(const int n, const Dtype* in, Dtype* out) {
-    // f'(x) = x         if |x| < 1
-    //       = sign(x)   otherwise
-    CUDA_KERNEL_LOOP(index, n) {
-      Dtype val = in[index];
-      Dtype abs_val = abs(val);
-      if (abs_val < 1) {
-        out[index] = val;
-      }
-      else {
-        out[index] = (Dtype(0) < val) - (val < Dtype(0));
-      }
-    }
-  }
-
-  template <typename Dtype>
-  void SmoothL1LossOHEMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
-    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
-    int count = diff_.count();
-    SmoothL1BackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
-      count, diff_.gpu_data(), diff_.mutable_gpu_data());
-    CUDA_POST_KERNEL_CHECK;
-    for (int i = 0; i < 2; ++i) {
-      if (propagate_down[i]) {
-        const Dtype sign = (i == 0) ? 1 : -1;
-        int spatial_dim = diff_.height() * diff_.width();
-
-        Dtype pre_fixed_normalizer = this->layer_param_.loss_param().pre_fixed_normalizer();
-        Dtype normalizer = get_normalizer(normalization_, pre_fixed_normalizer);
-        Dtype alpha = sign * top[0]->cpu_diff()[0] / normalizer;
-
-        caffe_gpu_axpby(
-          bottom[i]->count(),              // count
-          alpha,                           // alpha
-          diff_.gpu_data(),                // x
-          Dtype(0),                        // beta
-          bottom[i]->mutable_gpu_diff());  // y
-      }
-    }
-  }
-
-  INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossOHEMLayer);
+template <typename Dtype>
+void SmoothL1LossOHEMLayer<Dtype>::Forward_gpu(
+    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
+  int count = bottom[0]->count();
+  caffe_gpu_sub(
+    count,
+    bottom[0]->gpu_data(),
+    bottom[1]->gpu_data(),
+    diff_.mutable_gpu_data());    // d := b0 - b1
+  if (has_weights_) {
+    caffe_gpu_mul(
+      count,
+      bottom[2]->gpu_data(),
+      diff_.gpu_data(),
+      diff_.mutable_gpu_data());  // d := w * (b0 - b1)
+  }
+  SmoothL1ForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count),
+    CAFFE_CUDA_NUM_THREADS >> >(count, diff_.gpu_data(),
+    errors_.mutable_gpu_data());
+  CUDA_POST_KERNEL_CHECK;
+
+  Dtype loss;
+  caffe_gpu_asum(count, errors_.gpu_data(), &loss);
+  int spatial_dim = diff_.height() * diff_.width();
+
+  Dtype pre_fixed_normalizer =
+    this->layer_param_.loss_param().pre_fixed_normalizer();
+  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
+    pre_fixed_normalizer);
+
+  // Output per-instance loss
+  if (top.size() >= 2) {
+    kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(top[0]->count()),
+      CAFFE_CUDA_NUM_THREADS >> > (outer_num_, bottom[0]->channels(),
+      inner_num_, errors_.gpu_data(), top[1]->mutable_gpu_data());
+  }
+}
+
+template <typename Dtype>
+__global__ void SmoothL1BackwardGPU(
+    const int n, const Dtype* in, Dtype* out) {
+  // f'(x) = x         if |x| < 1
+  //       = sign(x)   otherwise
+  CUDA_KERNEL_LOOP(index, n) {
+    Dtype val = in[index];
+    Dtype abs_val = abs(val);
+    if (abs_val < 1) {
+      out[index] = val;
+    } else {
+      out[index] = (Dtype(0) < val) - (val < Dtype(0));
+    }
+  }
+}
+
+template <typename Dtype>
+void SmoothL1LossOHEMLayer<Dtype>::Backward_gpu(
+    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
+  int count = diff_.count();
+  SmoothL1BackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count),
+    CAFFE_CUDA_NUM_THREADS >> >(count, diff_.gpu_data(),
+    diff_.mutable_gpu_data());
+  CUDA_POST_KERNEL_CHECK;
+  for (int i = 0; i < 2; ++i) {
+    if (propagate_down[i]) {
+      const Dtype sign = (i == 0) ? 1 : -1;
+      int spatial_dim = diff_.height() * diff_.width();
+
+      Dtype pre_fixed_normalizer =
+        this->layer_param_.loss_param().pre_fixed_normalizer();
+      Dtype normalizer = get_normalizer(normalization_, pre_fixed_normalizer);
+      Dtype alpha = sign * top[0]->cpu_diff()[0] / normalizer;
+
+      caffe_gpu_axpby(
+        bottom[i]->count(),              // count
+        alpha,                           // alpha
+        diff_.gpu_data(),                // x
+        Dtype(0),                        // beta
+        bottom[i]->mutable_gpu_diff());  // y
+    }
+  }
+}
+
+INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossOHEMLayer);
 
 }  // namespace caffe
 
@@ -11,9 +11,9 @@ template <typename Dtype>
 void SoftmaxWithLossOHEMLayer<Dtype>::LayerSetUp(
     const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
   LossLayer<Dtype>::LayerSetUp(bottom, top);
-  LayerParameter softmax_param(this->layer_param_);
-  // Fix a bug which occurs with more than one output
-  softmax_param.clear_loss_weight();
+  LayerParameter softmax_param(this->layer_param_);
+  // Fix a bug which occurs with more than one output
+  softmax_param.clear_loss_weight();
   softmax_param.set_type("Softmax");
   softmax_layer_ = LayerRegistry<Dtype>::CreateLayer(softmax_param);
   softmax_bottom_vec_.clear();
@@ -56,10 +56,10 @@ void SoftmaxWithLossOHEMLayer<Dtype>::Reshape(
     top[1]->ReshapeLike(*bottom[0]);
   }
 
-  // top[2] stores per-instance loss, which takes the shape of N*1*H*W
+  // top[2] stores per-instance loss, which takes the shape of N*1*H*W
   if (top.size() >= 3) {
-    top[2]->ReshapeLike(*bottom[1]);
-  }
+    top[2]->ReshapeLike(*bottom[1]);
+  }
 }
 
 template <typename Dtype>
@@ -95,13 +95,14 @@ Dtype SoftmaxWithLossOHEMLayer<Dtype>::get_normalizer(
 template <typename Dtype>
 void SoftmaxWithLossOHEMLayer<Dtype>::Forward_cpu(
     const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
-  NOT_IMPLEMENTED;
+  NOT_IMPLEMENTED;
 }
 
 template <typename Dtype>
-void SoftmaxWithLossOHEMLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
-    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
-  NOT_IMPLEMENTED;
+void SoftmaxWithLossOHEMLayer<Dtype>::Backward_cpu(
+    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
+  NOT_IMPLEMENTED;
 }
 
 #ifdef CPU_ONLY
@@ -62,12 +62,13 @@ void SoftmaxWithLossOHEMLayer<Dtype>::Forward_gpu(
     top[1]->ShareData(prob_);
   }
   if (top.size() >= 3) {
-    // Output per-instance loss
-    caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data, top[2]->mutable_gpu_data());
+    // Output per-instance loss
+    caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data,
+      top[2]->mutable_gpu_data());
   }
 
-  // Fix a bug, which happens when propagate_down[0] = false in backward
-  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
+  // Fix a bug, which happens when propagate_down[0] = false in backward
+  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
 }
 
 template <typename Dtype>
@@ -95,9 +96,9 @@ __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
 }
 
 template <typename Dtype>
-void SoftmaxWithLossOHEMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
-    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
-
+void SoftmaxWithLossOHEMLayer<Dtype>::Backward_gpu(
+    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
   if (propagate_down[1]) {
     LOG(FATAL) << this->type()
                << " Layer cannot backpropagate to label inputs.";
@@ -284,9 +284,9 @@ void Net<Dtype>::Init(const NetParameter& in_param) {
 }
 
 template <typename Dtype>
-void Net<Dtype>::SetPhase(Phase phase){
-  // set all layers
-  for (int i = 0; i < layers_.size(); ++i){
+void Net<Dtype>::SetPhase(Phase phase) {
+  // set all layers
+  for (int i = 0; i < layers_.size(); ++i) {
     layers_[i]->set_phase(phase);
   }
   // set net phase