diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp
index 1d06dc45..bbb220eb 100644
--- a/include/caffe/net.hpp
+++ b/include/caffe/net.hpp
@@ -89,7 +89,7 @@ class Net {
    * @brief For an already initialized net, implicitly copies (i.e., using no
    *        additional memory) the pre-trained layers from another Net.
    */
-  void ShareTrainedLayersWith(Net* other);
+  void ShareTrainedLayersWith(const Net* other);
   // For an already initialized net, CopyTrainedLayersFrom() copies the already
   // trained layers from another net parameter instance.
   /**
@@ -99,51 +99,73 @@ class Net {
   void CopyTrainedLayersFrom(const NetParameter& param);
   void CopyTrainedLayersFrom(const string trained_filename);
   /// @brief Writes the net to a proto.
-  void ToProto(NetParameter* param, bool write_diff = false);
+  void ToProto(NetParameter* param, bool write_diff = false) const;
   /// @brief returns the network name.
-  inline const string& name() { return name_; }
+  inline const string& name() const { return name_; }
   /// @brief returns the layer names
-  inline const vector<string>& layer_names() { return layer_names_; }
+  inline const vector<string>& layer_names() const { return layer_names_; }
   /// @brief returns the blob names
-  inline const vector<string>& blob_names() { return blob_names_; }
+  inline const vector<string>& blob_names() const { return blob_names_; }
   /// @brief returns the blobs
-  inline const vector<shared_ptr<Blob<Dtype> > >& blobs() { return blobs_; }
+  inline const vector<shared_ptr<Blob<Dtype> > >& blobs() const {
+    return blobs_;
+  }
   /// @brief returns the layers
-  inline const vector<shared_ptr<Layer<Dtype> > >& layers() { return layers_; }
+  inline const vector<shared_ptr<Layer<Dtype> > >& layers() const {
+    return layers_;
+  }
   /**
    * @brief returns the bottom vecs for each layer -- usually you won't
    *        need this unless you do per-layer checks such as gradients.
    */
-  inline vector<vector<Blob<Dtype>*> >& bottom_vecs() { return bottom_vecs_; }
+  inline const vector<vector<Blob<Dtype>*> >& bottom_vecs() const {
+    return bottom_vecs_;
+  }
   /**
    * @brief returns the top vecs for each layer -- usually you won't
    *        need this unless you do per-layer checks such as gradients.
    */
-  inline vector<vector<Blob<Dtype>*> >& top_vecs() { return top_vecs_; }
-  inline vector<vector<bool> >& bottom_need_backward() {
+  inline const vector<vector<Blob<Dtype>*> >& top_vecs() const {
+    return top_vecs_;
+  }
+  inline const vector<vector<bool> >& bottom_need_backward() const {
     return bottom_need_backward_;
   }
-  inline vector<Dtype>& blob_loss_weights() {
+  inline const vector<Dtype>& blob_loss_weights() const {
     return blob_loss_weights_;
   }
   /// @brief returns the parameters
-  inline vector<shared_ptr<Blob<Dtype> > >& params() { return params_; }
+  inline const vector<shared_ptr<Blob<Dtype> > >& params() const {
+    return params_;
+  }
   /// @brief returns the parameter learning rate multipliers
-  inline vector<float>& params_lr() { return params_lr_; }
-  inline vector<float>& params_weight_decay() { return params_weight_decay_; }
-  const map<string, int>& param_names_index() { return param_names_index_; }
+  inline const vector<float>& params_lr() const { return params_lr_; }
+  inline const vector<float>& params_weight_decay() const {
+    return params_weight_decay_;
+  }
+  const map<string, int>& param_names_index() const {
+    return param_names_index_;
+  }
   /// @brief Input and output blob numbers
-  inline int num_inputs() { return net_input_blobs_.size(); }
-  inline int num_outputs() { return net_output_blobs_.size(); }
-  inline vector<Blob<Dtype>*>& input_blobs() { return net_input_blobs_; }
-  inline vector<Blob<Dtype>*>& output_blobs() { return net_output_blobs_; }
-  inline vector<int>& input_blob_indices() { return net_input_blob_indices_; }
-  inline vector<int>& output_blob_indices() { return net_output_blob_indices_; }
-  bool has_blob(const string& blob_name);
-  const shared_ptr<Blob<Dtype> > blob_by_name(const string& blob_name);
-  bool has_layer(const string& layer_name);
-  const shared_ptr<Layer<Dtype> > layer_by_name(const string& layer_name);
+  inline int num_inputs() const { return net_input_blobs_.size(); }
+  inline int num_outputs() const { return net_output_blobs_.size(); }
+  inline const vector<Blob<Dtype>*>& input_blobs() const {
+    return net_input_blobs_;
+  }
+  inline const vector<Blob<Dtype>*>& output_blobs() const {
+    return net_output_blobs_;
+  }
+  inline const vector<int>& input_blob_indices() const {
+    return net_input_blob_indices_;
+  }
+  inline const vector<int>& output_blob_indices() const {
+    return net_output_blob_indices_;
+  }
+  bool has_blob(const string& blob_name) const;
+  const shared_ptr<Blob<Dtype> > blob_by_name(const string& blob_name) const;
+  bool has_layer(const string& layer_name) const;
+  const shared_ptr<Layer<Dtype> > layer_by_name(const string& layer_name) const;
 
   void set_debug_info(const bool value) { debug_info_ = value; }
 
  protected:
diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp
index e4492cfd..47fc8446 100644
--- a/src/caffe/net.cpp
+++ b/src/caffe/net.cpp
@@ -636,7 +636,7 @@ void Net<Dtype>::UpdateDebugInfo(const int param_id) {
 }
 
 template <typename Dtype>
-void Net<Dtype>::ShareTrainedLayersWith(Net* other) {
+void Net<Dtype>::ShareTrainedLayersWith(const Net* other) {
   int num_source_layers = other->layers().size();
   for (int i = 0; i < num_source_layers; ++i) {
     Layer<Dtype>* source_layer = other->layers()[i].get();
@@ -726,7 +726,7 @@ void Net<Dtype>::CopyTrainedLayersFrom(const string trained_filename) {
 }
 
 template <typename Dtype>
-void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) {
+void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) const {
   param->Clear();
   param->set_name(name_);
   // Add bottom and top
@@ -785,16 +785,16 @@ void Net<Dtype>::Update() {
 }
 
 template <typename Dtype>
-bool Net<Dtype>::has_blob(const string& blob_name) {
+bool Net<Dtype>::has_blob(const string& blob_name) const {
   return blob_names_index_.find(blob_name) != blob_names_index_.end();
 }
 
 template <typename Dtype>
 const shared_ptr<Blob<Dtype> > Net<Dtype>::blob_by_name(
-    const string& blob_name) {
+    const string& blob_name) const {
   shared_ptr<Blob<Dtype> > blob_ptr;
   if (has_blob(blob_name)) {
-    blob_ptr = blobs_[blob_names_index_[blob_name]];
+    blob_ptr = blobs_[blob_names_index_.find(blob_name)->second];
   } else {
     blob_ptr.reset((Blob<Dtype>*)(NULL));
     LOG(WARNING) << "Unknown blob name " << blob_name;
@@ -803,16 +803,16 @@ const shared_ptr<Blob<Dtype> > Net<Dtype>::blob_by_name(
 }
 
 template <typename Dtype>
-bool Net<Dtype>::has_layer(const string& layer_name) {
+bool Net<Dtype>::has_layer(const string& layer_name) const {
   return layer_names_index_.find(layer_name) != layer_names_index_.end();
 }
 
 template <typename Dtype>
 const shared_ptr<Layer<Dtype> > Net<Dtype>::layer_by_name(
-    const string& layer_name) {
+    const string& layer_name) const {
   shared_ptr<Layer<Dtype> > layer_ptr;
   if (has_layer(layer_name)) {
-    layer_ptr = layers_[layer_names_index_[layer_name]];
+    layer_ptr = layers_[layer_names_index_.find(layer_name)->second];
   } else {
     layer_ptr.reset((Layer<Dtype>*)(NULL));
     LOG(WARNING) << "Unknown layer name " << layer_name;
diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp
index ab9c00ee..3fa0e2d1 100644
--- a/src/caffe/solver.cpp
+++ b/src/caffe/solver.cpp
@@ -418,7 +418,7 @@ Dtype SGDSolver<Dtype>::GetLearningRate() {
 template <typename Dtype>
 void SGDSolver<Dtype>::PreSolve() {
   // Initialize the history
-  vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
+  const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
   history_.clear();
   update_.clear();
   temp_.clear();
@@ -439,9 +439,10 @@
 
 template <typename Dtype>
 void SGDSolver<Dtype>::ComputeUpdateValue() {
-  vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
-  vector<float>& net_params_lr = this->net_->params_lr();
-  vector<float>& net_params_weight_decay = this->net_->params_weight_decay();
+  const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
+  const vector<float>& net_params_lr = this->net_->params_lr();
+  const vector<float>& net_params_weight_decay =
+      this->net_->params_weight_decay();
   // get the learning rate
   Dtype rate = GetLearningRate();
   if (this->param_.display() && this->iter_ % this->param_.display() == 0) {
@@ -552,9 +553,10 @@ void SGDSolver<Dtype>::RestoreSolverState(const SolverState& state) {
 
 template <typename Dtype>
 void NesterovSolver<Dtype>::ComputeUpdateValue() {
-  vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
-  vector<float>& net_params_lr = this->net_->params_lr();
-  vector<float>& net_params_weight_decay = this->net_->params_weight_decay();
+  const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
+  const vector<float>& net_params_lr = this->net_->params_lr();
+  const vector<float>& net_params_weight_decay =
+      this->net_->params_weight_decay();
   // get the learning rate
   Dtype rate = this->GetLearningRate();
   if (this->param_.display() && this->iter_ % this->param_.display() == 0) {
@@ -667,9 +669,10 @@ void NesterovSolver<Dtype>::ComputeUpdateValue() {
 
 template <typename Dtype>
 void AdaGradSolver<Dtype>::ComputeUpdateValue() {
-  vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
-  vector<float>& net_params_lr = this->net_->params_lr();
-  vector<float>& net_params_weight_decay = this->net_->params_weight_decay();
+  const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
+  const vector<float>& net_params_lr = this->net_->params_lr();
+  const vector<float>& net_params_weight_decay =
+      this->net_->params_weight_decay();
   // get the learning rate
   Dtype rate = this->GetLearningRate();
   Dtype delta = this->param_.delta();
diff --git a/tools/caffe.cpp b/tools/caffe.cpp
index 9f9d975d..ad54bc3b 100644
--- a/tools/caffe.cpp
+++ b/tools/caffe.cpp
@@ -220,8 +220,8 @@ int time() {
   caffe_net.Backward();
 
   const vector<shared_ptr<Layer<float> > >& layers = caffe_net.layers();
-  vector<vector<Blob<float>*> >& bottom_vecs = caffe_net.bottom_vecs();
-  vector<vector<Blob<float>*> >& top_vecs = caffe_net.top_vecs();
+  const vector<vector<Blob<float>*> >& bottom_vecs = caffe_net.bottom_vecs();
+  const vector<vector<Blob<float>*> >& top_vecs = caffe_net.top_vecs();
   const vector<vector<bool> >& bottom_need_backward =
       caffe_net.bottom_need_backward();
   LOG(INFO) << "*** Benchmark begins ***";