Caffe source code: the Layer class

Date: 2022-01-04 04:12:01

This post walks through the files that define Caffe's layer base class, include/caffe/layer.hpp and src/caffe/layer.cpp. Layer is the base class of every layer.

Overview: layer.hpp defines the Layer base class. Concrete layers such as the loss layers, data layers, and vision layers all derive from it and override its virtual functions. The annotated source below records my understanding of the base class.
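To make the inheritance concrete before diving into the source, here is a minimal sketch of such a subclass. It is not from Caffe itself: the class name MyScaleLayer and its scale_ member are hypothetical, and the layer simply multiplies its input by a constant. It overrides the virtual functions that the rest of this post walks through (type(), the blob-count hints, Reshape, Forward_cpu, Backward_cpu).

// A minimal, hypothetical layer built on the Layer base class (sketch only).
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
class MyScaleLayer : public Layer<Dtype> {
 public:
  explicit MyScaleLayer(const LayerParameter& param)
      : Layer<Dtype>(param), scale_(Dtype(2)) {}

  virtual inline const char* type() const { return "MyScale"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

  // The output has the same shape as the input.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    top[0]->ReshapeLike(*bottom[0]);
  }

 protected:
  // top = scale_ * bottom
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    caffe_cpu_scale(bottom[0]->count(), scale_,
                    bottom[0]->cpu_data(), top[0]->mutable_cpu_data());
  }

  // bottom_diff = scale_ * top_diff (chain rule for y = scale_ * x)
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    if (propagate_down[0]) {
      caffe_cpu_scale(top[0]->count(), scale_,
                      top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff());
    }
  }

  Dtype scale_;
};

}  // namespace caffe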

layer.hpp

#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/math_functions.hpp"

/**
Forward declare boost::thread instead of including boost/thread.hpp
to avoid a boost/NVCC issues (#1009, #1010) on OSX.
*/

namespace boost { class mutex; }

namespace caffe {

/**
* @brief An interface for the units of computation which can be composed into a
* Net.
*
* Layer%s must implement a Forward function, in which they take their input
* (bottom) Blob%s (if any) and compute their output Blob%s (if any).
* They may also implement a Backward function, in which they compute the error
* gradients with respect to their input Blob%s, given the error gradients with
* their output Blob%s.
*/

template <typename Dtype>
class Layer {
public:
/**
* You should not implement your own constructor. Any set up code should go
* to SetUp(), where the dimensions of the bottom blobs are provided to the
* layer.
*/

explicit Layer(const LayerParameter& param) /* the layer's configuration, parsed from the network prototxt */
: layer_param_(param), is_shared_(false) {
// Set phase and copy blobs (if there are any).
phase_ = param.phase(); /* record whether this layer runs in the TRAIN or TEST phase */
if (layer_param_.blobs_size() > 0) {
blobs_.resize(layer_param_.blobs_size());
for (int i = 0; i < layer_param_.blobs_size(); ++i) {
blobs_[i].reset(new Blob<Dtype>());
blobs_[i]->FromProto(layer_param_.blobs(i)); /* blobs_ is a vector of smart pointers to Blob; copy every blob stored in the protobuf into blobs_ */
}
}
}
virtual ~Layer() {} /* virtual destructor */

/**
* @brief Implements common layer setup functionality.
*
* @param bottom the preshaped input blobs
* @param top
* the allocated but unshaped output blobs, to be shaped by Reshape
*
* Checks that the number of bottom and top blobs is correct.
* Calls LayerSetUp to do special layer setup for individual layer types,
* followed by Reshape to set up sizes of top blobs and internal buffers.
* Sets up the loss weight multiplier blobs for any non-zero loss weights.
* This method may not be overridden.
*/

void SetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
InitMutex();
CheckBlobCounts(bottom, top); /* check that the numbers of bottom and top blobs are valid */
// layer-specific setup for each concrete layer
LayerSetUp(bottom, top);
// reshape the top blobs (and internal buffers) to match the shapes of the bottom blobs
Reshape(bottom, top);
// set up the loss weight multipliers for any top blobs that contribute to the loss
SetLossWeights(top);
}

/**
* @brief Does layer-specific setup: your layer should implement this function
* as well as Reshape.
*
* @param bottom
* the preshaped input blobs, whose data fields store the input data for
* this layer
* @param top
* the allocated but unshaped output blobs
*
* This method should do one-time layer specific setup. This includes reading
* and processing relevant parameters from the <code>layer_param_</code>.
* Setting up the shapes of top blobs and internal buffers should be done in
* <code>Reshape</code>, which will be called before the forward pass to
* adjust the top blob sizes.
*/

/* layer-specific setup: a virtual function that each concrete layer overrides */
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {}

/**
* @brief Whether a layer should be shared by multiple nets during data
* parallelism. By default, all layers except for data layers should
* not be shared. data layers should be shared to ensure each worker
* solver access data sequentially during data parallelism.
*/

/* return whether this layer may be shared across nets during data parallelism; by default only data layers are shared */
virtual inline bool ShareInParallel() const { return false; }

/** @brief Return whether this layer is actually shared by other nets.
* If ShareInParallel() is true and using more than one GPU and the
* net has TRAIN phase, then this function is expected to return true.
*/

/* Provided ShareInParallel() allows sharing, this returns is_shared_ to
indicate whether this layer is actually shared by multiple nets. */

inline bool IsShared() const { return is_shared_; }

/** @brief Set whether this layer is actually shared by other nets
* If ShareInParallel() is true and using more than one GPU and the
* net has TRAIN phase, then is_shared should be set true.
*/

/* Provided ShareInParallel() allows sharing, this sets is_shared_;
it is the counterpart of IsShared() above. */

inline void SetShared(bool is_shared) {
CHECK(ShareInParallel() || !is_shared)
<< type() << "Layer does not support sharing.";
is_shared_ = is_shared;
}

/**
* @brief Adjust the shapes of top blobs and internal buffers to accommodate
* the shapes of the bottom blobs.
*
* @param bottom the input blobs, with the requested input shapes
* @param top the top blobs, which should be reshaped as needed
*
* This method should reshape top blobs as needed according to the shapes
* of the bottom (input) blobs, as well as reshaping any internal buffers
* and making any other necessary adjustments so that the layer can
* accommodate the bottom blobs.
*/

// reshape the top blobs (and any internal buffers) to accommodate the shapes of the bottom blobs
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) = 0;

/**
* @brief Given the bottom blobs, compute the top blobs and the loss.
*
* @param bottom
* the input blobs, whose data fields store the input data for this layer
* @param top
* the preshaped output blobs, whose data fields will store this layer's
* outputs
* \return The total loss from the layer.
*
* The Forward wrapper calls the relevant device wrapper function
* (Forward_cpu or Forward_gpu) to compute the top blob values given the
* bottom blobs. If the layer has any non-zero loss_weights, the wrapper
* then computes and returns the loss.
*
* Your layer should implement Forward_cpu and (optionally) Forward_gpu.
*/

/* forward pass wrapper: compute the top blobs from the bottom blobs and return the loss */
inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);

/**
* @brief Given the top blob error gradients, compute the bottom blob error
* gradients.
*
* @param top
* the output blobs, whose diff fields store the gradient of the error
* with respect to themselves
* @param propagate_down
* a vector with equal length to bottom, with each index indicating
* whether to propagate the error gradients down to the bottom blob at
* the corresponding index
* @param bottom
* the input blobs, whose diff fields will store the gradient of the error
* with respect to themselves after Backward is run
*
* The Backward wrapper calls the relevant device wrapper function
* (Backward_cpu or Backward_gpu) to compute the bottom blob diffs given the
* top blob diffs.
*
* Your layer should implement Backward_cpu and (optionally) Backward_gpu.
*/

/* backward pass wrapper: given the error gradients of the top blobs, compute the error gradients of the bottom blobs */
inline void Backward(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom);

/**
* @brief Returns the vector of learnable parameter blobs.
*/

vector<shared_ptr<Blob<Dtype> > >& blobs() {
return blobs_; /* the learnable parameters, stored as a vector of blobs */
}

/**
* @brief Returns the layer parameter.
*/

/* return the configuration parameters passed to this layer */
const LayerParameter& layer_param() const { return layer_param_; }

/**
* @brief Writes the layer parameter to a protocol buffer
*/

/* write the layer parameters to a protocol buffer */
virtual void ToProto(LayerParameter* param, bool write_diff = false);

/**
* @brief Returns the scalar loss associated with a top blob at a given index.
*/

/* return the loss associated with the top blob at the given index */
inline Dtype loss(const int top_index) const {
return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
}

/**
* @brief Sets the loss associated with a top blob at a given index.
*/

/* set the loss associated with the top blob at the given index */
inline void set_loss(const int top_index, const Dtype value) {
if (loss_.size() <= top_index) {
loss_.resize(top_index + 1, Dtype(0));
}
loss_[top_index] = value;
}

/**
* @brief Returns the layer type.
*/

/* return the layer type; overridden by each concrete layer */
virtual inline const char* type() const { return ""; }

/**
* @brief Returns the exact number of bottom blobs required by the layer,
* or -1 if no exact number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some exact number of bottom blobs.
*/

/* the exact number of bottom blobs required, or -1 if unconstrained */
virtual inline int ExactNumBottomBlobs() const { return -1; }
/**
* @brief Returns the minimum number of bottom blobs required by the layer,
* or -1 if no minimum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some minimum number of bottom blobs.
*/

/* the minimum number of bottom blobs the layer requires */
virtual inline int MinBottomBlobs() const { return -1; }
/**
* @brief Returns the maximum number of bottom blobs required by the layer,
* or -1 if no maximum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some maximum number of bottom blobs.
*/

/* the maximum number of bottom blobs the layer accepts */
virtual inline int MaxBottomBlobs() const { return -1; }
/**
* @brief Returns the exact number of top blobs required by the layer,
* or -1 if no exact number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some exact number of top blobs.
*/

/* the exact number of top blobs required, or -1 if unconstrained */
virtual inline int ExactNumTopBlobs() const { return -1; }
/**
* @brief Returns the minimum number of top blobs required by the layer,
* or -1 if no minimum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some minimum number of top blobs.
*/

/* the minimum number of top blobs the layer requires */
virtual inline int MinTopBlobs() const { return -1; }
/**
* @brief Returns the maximum number of top blobs required by the layer,
* or -1 if no maximum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some maximum number of top blobs.
*/

/* the maximum number of top blobs the layer accepts */
virtual inline int MaxTopBlobs() const { return -1; }
/**
* @brief Returns true if the layer requires an equal number of bottom and
* top blobs.
*
* This method should be overridden to return true if your layer expects an
* equal number of bottom and top blobs.
*/

/* whether the layer requires equal numbers of bottom and top blobs */
virtual inline bool EqualNumBottomTopBlobs() const { return false; }

/**
* @brief Return whether "anonymous" top blobs are created automatically
* by the layer.
*
* If this method returns true, Net::Init will create enough "anonymous" top
* blobs to fulfill the requirement specified by ExactNumTopBlobs() or
* MinTopBlobs().
*/

/* If this returns true, Net::Init automatically creates enough anonymous
top blobs to satisfy ExactNumTopBlobs() or MinTopBlobs(). */

virtual inline bool AutoTopBlobs() const { return false; }

/**
* @brief Return whether to allow force_backward for a given bottom blob
* index.
*
* If AllowForceBackward(i) == false, we will ignore the force_backward
* setting and backpropagate to blob i only if it needs gradient information
* (as is done when force_backward == false).
*/

/* whether force_backward is honored for the given bottom blob index; some bottom blobs never need gradient information */
virtual inline bool AllowForceBackward(const int bottom_index) const {
return true;
}

/**
* @brief Specifies whether the layer should compute gradients w.r.t. a
* parameter at a particular index given by param_id.
*
* You can safely ignore false values and always compute gradients
* for all parameters, but possibly with wasteful computation.
*/

/* return whether this layer should compute gradients w.r.t. the parameter blob at param_id */
inline bool param_propagate_down(const int param_id) {
return (param_propagate_down_.size() > param_id) ?
param_propagate_down_[param_id] : false;
}
/**
* @brief Sets whether the layer should compute gradients w.r.t. a
* parameter at a particular index given by param_id.
*/

/* set whether this layer should compute gradients w.r.t. the parameter blob at param_id */
inline void set_param_propagate_down(const int param_id, const bool value) {
if (param_propagate_down_.size() <= param_id) {
param_propagate_down_.resize(param_id + 1, true);
}
param_propagate_down_[param_id] = value;
}


protected:
/** The protobuf that stores the layer parameters */
LayerParameter layer_param_; // the configured layer parameters
/** The phase: TRAIN or TEST */
Phase phase_; // whether we are in the TRAIN or TEST phase
/** The vector that stores the learnable parameters as a set of blobs. */
vector<shared_ptr<Blob<Dtype> > > blobs_; // a vector of shared_ptr<Blob>; the learnable parameters are stored as a set of blobs
/** Vector indicating whether to compute the diff of each param blob. */
vector<bool> param_propagate_down_; // whether to compute the diff of each parameter blob

/** The vector that indicates whether each top blob has a non-zero weight in
* the objective function. */

vector<Dtype> loss_; /* which top blobs carry a non-zero weight in the objective */

/** @brief Using the CPU device, compute the layer output. */
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) = 0; /* CPU implementation of the forward pass */
/**
* @brief Using the GPU device, compute the layer output.
* Fall back to Forward_cpu() if unavailable.
*/

virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// LOG(WARNING) << "Using CPU code as backup.";
return Forward_cpu(bottom, top); /* GPU forward pass; falls back to the CPU implementation */
}

/**
* @brief Using the CPU device, compute the gradients for any parameters and
* for the bottom blobs if propagate_down is true.
*/

virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) = 0; /* CPU implementation of the backward pass */
/**
* @brief Using the GPU device, compute the gradients for any parameters and
* for the bottom blobs if propagate_down is true.
* Fall back to Backward_cpu() if unavailable.
*/

virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
// LOG(WARNING) << "Using CPU code as backup.";
Backward_cpu(top, propagate_down, bottom); /* GPU backward pass; falls back to the CPU implementation */
}

/**
* Called by the parent Layer's SetUp to check that the number of bottom
* and top Blobs provided as input match the expected numbers specified by
* the {ExactNum,Min,Max}{Bottom,Top}Blobs() functions.
*/

/* check that the numbers of bottom and top blobs are correct */
virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (ExactNumBottomBlobs() >= 0) {
CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
<< type() << " Layer takes " << ExactNumBottomBlobs()
<< " bottom blob(s) as input.";
}
if (MinBottomBlobs() >= 0) {
CHECK_LE(MinBottomBlobs(), bottom.size())
<< type() << " Layer takes at least " << MinBottomBlobs()
<< " bottom blob(s) as input.";
}
if (MaxBottomBlobs() >= 0) {
CHECK_GE(MaxBottomBlobs(), bottom.size())
<< type() << " Layer takes at most " << MaxBottomBlobs()
<< " bottom blob(s) as input.";
}
if (ExactNumTopBlobs() >= 0) {
CHECK_EQ(ExactNumTopBlobs(), top.size())
<< type() << " Layer produces " << ExactNumTopBlobs()
<< " top blob(s) as output.";
}
if (MinTopBlobs() >= 0) {
CHECK_LE(MinTopBlobs(), top.size())
<< type() << " Layer produces at least " << MinTopBlobs()
<< " top blob(s) as output.";
}
if (MaxTopBlobs() >= 0) {
CHECK_GE(MaxTopBlobs(), top.size())
<< type() << " Layer produces at most " << MaxTopBlobs()
<< " top blob(s) as output.";
}
if (EqualNumBottomTopBlobs()) {
CHECK_EQ(bottom.size(), top.size())
<< type() << " Layer produces one top blob as output for each "
<< "bottom blob input.";
}
}

/**
* Called by SetUp to initialize the weights associated with any top blobs in
* the loss function. Store non-zero loss weights in the diff blob.
*/

/* initialize the loss weights of the top blobs */
inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
const int num_loss_weights = layer_param_.loss_weight_size();
if (num_loss_weights) {
CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
"unspecified or specified once per top blob.";
for (int top_id = 0; top_id < top.size(); ++top_id) {
const Dtype loss_weight = layer_param_.loss_weight(top_id);
if (loss_weight == Dtype(0)) { continue; }
this->set_loss(top_id, loss_weight);
const int count = top[top_id]->count();
Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
caffe_set(count, loss_weight, loss_multiplier);
}
}
}

private:
/** Whether this layer is actually shared by other nets*/
bool is_shared_; /* whether this layer is actually shared by other nets */

/** The mutex for sequential forward if this layer is shared */
shared_ptr<boost::mutex> forward_mutex_;

/* helpers for thread-safe use when the layer is shared */
/** Initialize forward_mutex_ */
void InitMutex();
/** Lock forward_mutex_ if this layer is shared */
void Lock();
/** Unlock forward_mutex_ if this layer is shared */
void Unlock();

DISABLE_COPY_AND_ASSIGN(Layer);
}; // class Layer

// Forward and backward wrappers. You should implement the cpu and
// gpu specific implementations instead, and should not change these
// functions.
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Lock during forward to ensure sequential forward
Lock(); /* lock to guarantee a sequential forward pass when the layer is shared */
Dtype loss = 0;
Reshape(bottom, top);
switch (Caffe::mode()) { /* dispatch according to the run mode, CPU or GPU */
case Caffe::CPU:
Forward_cpu(bottom, top); /* in CPU mode, call Forward_cpu to compute the top blobs; each layer implements it differently */
for (int top_id = 0; top_id < top.size(); ++top_id) {
if (!this->loss(top_id)) { continue; }
const int count = top[top_id]->count();
const Dtype* data = top[top_id]->cpu_data();
const Dtype* loss_weights = top[top_id]->cpu_diff();
loss += caffe_cpu_dot(count, data, loss_weights); /* accumulate the weighted loss over the top blobs */
}
break;
case Caffe::GPU:
Forward_gpu(bottom, top); /* in GPU mode, call Forward_gpu to compute the top blobs; each layer implements it differently */
#ifndef CPU_ONLY
for (int top_id = 0; top_id < top.size(); ++top_id) {
if (!this->loss(top_id)) { continue; }
const int count = top[top_id]->count();
const Dtype* data = top[top_id]->gpu_data();
const Dtype* loss_weights = top[top_id]->gpu_diff();
Dtype blob_loss = 0;
caffe_gpu_dot(count, data, loss_weights, &blob_loss); /* accumulate the weighted loss over the top blobs */
loss += blob_loss;
}
#endif
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
Unlock();
return loss;
}

/* backward pass: dispatch to the CPU or GPU implementation depending on the mode */
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
switch (Caffe::mode()) {
case Caffe::CPU:
Backward_cpu(top, propagate_down, bottom);
break;
case Caffe::GPU:
Backward_gpu(top, propagate_down, bottom);
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
}

// Serialize LayerParameter to protocol buffer
/* serialize the layer parameters and blobs to a protocol buffer */
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
param->Clear();
param->CopyFrom(layer_param_);
param->clear_blobs();
for (int i = 0; i < blobs_.size(); ++i) {
blobs_[i]->ToProto(param->add_blobs(), write_diff);
}
}

} // namespace caffe

#endif // CAFFE_LAYER_H_
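To show how the SetUp/Forward/Backward wrappers above are meant to be driven, here is a hedged usage sketch. It is not part of layer.hpp (in practice Net does all of this internally) and it reuses the hypothetical MyScaleLayer from the sketch at the top of this post; the blob shape and values are arbitrary.

#include <vector>
#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"
// plus the header that defines the hypothetical MyScaleLayer

using namespace caffe;

void RunOneIteration() {
  Blob<float> bottom_blob(1, 3, 4, 4);               // N x C x H x W input
  Blob<float> top_blob;                              // shaped by Reshape() inside SetUp()
  std::vector<Blob<float>*> bottom(1, &bottom_blob);
  std::vector<Blob<float>*> top(1, &top_blob);

  LayerParameter param;
  param.set_type("MyScale");
  MyScaleLayer<float> layer(param);

  // CheckBlobCounts + LayerSetUp + Reshape + SetLossWeights, exactly once.
  layer.SetUp(bottom, top);

  // Every iteration: Forward dispatches to Forward_cpu/Forward_gpu and returns
  // the loss; Backward dispatches to Backward_cpu/Backward_gpu.
  float loss = layer.Forward(bottom, top);
  std::vector<bool> propagate_down(1, true);
  layer.Backward(top, propagate_down, bottom);
  (void) loss;
}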

layer.cpp

#include <boost/thread.hpp>
#include "caffe/layer.hpp"

namespace caffe {

/* implementations of the mutex helpers */
template <typename Dtype>
void Layer<Dtype>::InitMutex() {
forward_mutex_.reset(new boost::mutex());
}

template <typename Dtype>
void Layer<Dtype>::Lock() {
if (IsShared()) {
forward_mutex_->lock();
}
}

template <typename Dtype>
void Layer<Dtype>::Unlock() {
if (IsShared()) {
forward_mutex_->unlock();
}
}

INSTANTIATE_CLASS(Layer);

} // namespace caffe
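INSTANTIATE_CLASS(Layer) is a macro from common.hpp that explicitly instantiates the template for float and double. A concrete layer's .cpp usually pairs the same macro with the registration macro from layer_factory.hpp so that a Net can create the layer from its type string. A hedged sketch, again using the hypothetical MyScaleLayer:

// In the concrete layer's .cpp file (sketch only; MyScaleLayer is hypothetical).
#include "caffe/common.hpp"
#include "caffe/layer_factory.hpp"
// plus the header that defines MyScaleLayer

namespace caffe {

INSTANTIATE_CLASS(MyScaleLayer);   // emits MyScaleLayer<float> and MyScaleLayer<double>
REGISTER_LAYER_CLASS(MyScale);     // lets LayerRegistry create it when the type string is "MyScale"

}  // namespace caffe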

Thanks to:
http://www.cnblogs.com/louyihang-loves-baiyan/p/5152653.html
http://blog.csdn.net/mounty_fsc/article/details/51092906
http://blog.csdn.net/u011104550/article/details/51249387
http://blog.163.com/yuyang_tech/blog/static/2160500832015713105052452/