

C++ Net::Forward Method Code Examples

This article collects and summarizes typical usage examples of the C++ Net::Forward method. If you have been wondering how Net::Forward is used in practice, what it does, or what calling it looks like, the hand-picked code examples below may help. You can also explore further usage examples of the Net class to which the method belongs.
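
Before the examples, here is a minimal sketch of the most common calling pattern, assuming the standard BVLC Caffe C++ API; the file names deploy.prototxt and model.caffemodel are placeholders, not taken from the examples below. The idea is to construct a Net, load trained weights, fill the input blob, then call Forward and read the output blobs.

#include <caffe/caffe.hpp>
#include <algorithm>
#include <vector>

int main() {
  caffe::Caffe::set_mode(caffe::Caffe::CPU);

  // Placeholder model definition and trained weights.
  caffe::Net<float> net("deploy.prototxt", caffe::TEST);
  net.CopyTrainedLayersFrom("model.caffemodel");

  // Fill the (assumed single) input blob with data before running inference.
  caffe::Blob<float>* input = net.input_blobs()[0];
  float* input_data = input->mutable_cpu_data();
  std::fill(input_data, input_data + input->count(), 0.0f);  // dummy input

  // Forward optionally reports the loss and returns the network's output blobs.
  float loss = 0.0f;
  const std::vector<caffe::Blob<float>*>& output = net.Forward(&loss);

  const float* scores = output[0]->cpu_data();  // read back the first output blob
  return scores != nullptr ? 0 : 1;
}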


The sections below present 4 code examples of the Net::Forward method, sorted by popularity by default.

Example 1: LOG

template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientNet(
    const Net<Dtype>& net, const vector<Blob<Dtype>*>& input) {
  const vector<shared_ptr<Layer<Dtype> > >& layers = net.layers();
  vector<vector<Blob<Dtype>*> >& bottom_vecs = net.bottom_vecs();
  vector<vector<Blob<Dtype>*> >& top_vecs = net.top_vecs();
  for (int_tp i = 0; i < layers.size(); ++i) {
    // Run a forward pass through the whole net, then exhaustively check
    // the gradient of layer i against its bottom and top blobs.
    net.Forward(input);
    LOG(ERROR) << "Checking gradient for " << layers[i]->layer_param().name();
    CheckGradientExhaustive(*(layers[i].get()), bottom_vecs[i], top_vecs[i]);
  }
}
Developer: ifp-uiuc, Project: caffe, Lines: 11, Source: test_gradient_check_util.hpp
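
For reference, a test might drive this helper roughly as follows. This is a hedged sketch, not code from the caffe test suite; it assumes a small net with no data or loss layers (as the checker requires), a placeholder prototxt file, and the usual GradientChecker(stepsize, threshold) constructor.

// Hypothetical test fragment: check the gradients of every layer in a small net.
Net<float> net("tiny_net.prototxt", caffe::TRAIN);     // placeholder prototxt
vector<Blob<float>*> input_blobs = net.input_blobs();  // copy of the input blob pointers
GradientChecker<float> checker(1e-2, 1e-3);            // stepsize, threshold
checker.CheckGradientNet(net, input_blobs);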

Example 2: mexFunction

void mexFunction(int nLhs, mxArray* pLhs[], int nRhs, const mxArray* pRhs[]) {

  mexAssert(NARGIN_MIN <= nRhs && nRhs <= NARGIN_MAX, "Number of input arguments is wrong!");
  mexAssert(nLhs == NARGOUT, "Number of output arguments is wrong!" );  
  mexAssert(mexIsCell(IN_L), "Layers must be the cell array");
  mexAssert(mexGetNumel(IN_L) == 2, "Layers array must contain 2 cells");
  mexAssert(mexIsCell(IN_W), "Weights must be the cell array");
  mexAssert(mexGetNumel(IN_W) == 2, "Weights array must contain 2 cells");
  
  Net net;
  mxArray *mx_weights;
  net.InitLayers(mexGetCell(IN_L, 1));
  net.InitWeights(mexGetCell(IN_W, 1), mx_weights);  
  net.InitParams(IN_P);
  net.ReadLabels(IN_Y);

  const mxArray *mx_imweights = mexGetCell(IN_W, 0);  
  size_t train_num = net.labels_.size1();
  mexAssert(train_num == mexGetNumel(mx_imweights),
    "Weights and labels number must coincide");
  bool is_multicoords = false;
  if (mexIsCell(IN_X)) {    
    mexAssert(train_num == mexGetNumel(IN_X),
    "Coordinates and labels number must coincide");
    is_multicoords = true;
  }
  Params params_ = net.params_;
  size_t numbatches = (size_t) ceil((ftype) train_num/params_.batchsize_);  
  Mat trainerror_(params_.numepochs_, numbatches);
  Mat trainerror2_(params_.numepochs_, numbatches);
  trainerror2_.assign(0);
  
  std::vector<Net> imnets;
  imnets.resize(params_.batchsize_);
  for (size_t i = 0; i < params_.batchsize_; ++i) {
    imnets[i].InitLayers(mexGetCell(IN_L, 0));        
    if (!is_multicoords) {
      imnets[i].ReadData(IN_X);    
    } else {
      imnets[i].ReadData(mexGetCell(IN_X, i)); // just to get pixels_num
    }
  }
  size_t pixels_num = imnets[0].data_.size1();
  Layer *firstlayer = net.layers_[0];
  size_t dimens_num = firstlayer->outputmaps_;
  mexAssert(imnets[0].layers_.back()->length_ == dimens_num,
            "Final layer length must coincide with the number of outputmaps");   
  mexAssert(pixels_num == firstlayer->mapsize_[0] * firstlayer->mapsize_[1],
            "Pixels number must coincide with the first layer elements number");   
  
  std::vector<size_t> pred_size(2);
  pred_size[0] = 1; pred_size[1] = pixels_num * dimens_num;    
  Mat images_mat, labels_batch, pred_batch, pred_pixels;      
  std::vector< std::vector<Mat> > images, images_der;
  for (size_t epoch = 0; epoch < params_.numepochs_; ++epoch) {    
    std::vector<size_t> randind(train_num);
    for (size_t i = 0; i < train_num; ++i) {
      randind[i] = i;
    }
    if (params_.shuffle_) {
      std::random_shuffle(randind.begin(), randind.end());
    }
    std::vector<size_t>::const_iterator iter = randind.begin();
    for (size_t batch = 0; batch < numbatches; ++batch) {
      size_t batchsize = std::min(params_.batchsize_, (size_t)(randind.end() - iter));
      std::vector<size_t> batch_ind = std::vector<size_t>(iter, iter + batchsize);
      iter = iter + batchsize;      
      labels_batch = SubMat(net.labels_, batch_ind, 1);
      net.UpdateWeights(epoch, false);
      images_mat.resize(batchsize, pred_size[1]);
      InitMaps(images_mat, pred_size, images);        
      // first pass
      for (size_t m = 0; m < batchsize; ++m) {        
        imnets[m].InitWeights(mexGetCell(mx_imweights, batch_ind[m]));
        if (is_multicoords) {
          imnets[m].ReadData(mexGetCell(IN_X, batch_ind[m]));
        }
        imnets[m].InitActiv(imnets[m].data_);                
        imnets[m].Forward(pred_pixels, 1);        
        images[m][0].copy(Trans(pred_pixels).reshape(pred_size[0], pred_size[1]));
      }
      net.InitActiv(images_mat);  
      net.Forward(pred_batch, 1);
      /*
      for (int i = 0; i < 5; ++i) {
        mexPrintMsg("pred_batch1", pred_batch(0, i)); 
      }*/
      // second pass
      net.InitDeriv(labels_batch, trainerror_(epoch, batch));
      net.Backward();
      net.CalcWeights();
      InitMaps(firstlayer->deriv_mat_, pred_size, images_der);
      
      for (size_t m = 0; m < batchsize; ++m) {
        imnets[m].layers_.back()->deriv_mat_ = Trans(images_der[m][0].reshape(dimens_num, pixels_num));        
        imnets[m].Backward();        
      }
      
      // third pass      
      ftype loss2 = 0, curloss = 0, invind = 0; 
//......... some of the code here has been omitted .........
Developer: ankit-maverick, Project: ConvNet, Lines: 101, Source: cnntrain_inv_mex.cpp

Example 3: caffe_net_Forward

EXPORT void caffe_net_Forward(void *netAnon, float &loss)
{
	// Recover the Caffe Net from the opaque handle and run one forward pass;
	// the resulting loss is written to the caller-supplied output parameter.
	Net<float> *net = (Net<float> *)netAnon;
	net->Forward(&loss);
}
Developer: robertpi, Project: caffe, Lines: 5, Source: netwrapper.cpp
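
A wrapper like this is only usable together with functions that create and release the opaque handle. Purely as an illustration (the names caffe_net_create and caffe_net_free are hypothetical and not taken from the project), such companions could look like the following, under the same assumptions as the snippet above (the EXPORT macro and Caffe headers are in scope):

EXPORT void *caffe_net_create(const char *param_file, const char *trained_file)
{
	// Build the net from its prototxt definition and load the trained weights.
	Net<float> *net = new Net<float>(param_file, caffe::TEST);
	net->CopyTrainedLayersFrom(trained_file);
	return net;  // handed back to the caller as an opaque pointer
}

EXPORT void caffe_net_free(void *netAnon)
{
	delete (Net<float> *)netAnon;
}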

Example 4: net_forward

// Usage: caffe_('net_forward', hNet)
static void net_forward(MEX_ARGS) {
  mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]),
      "Usage: caffe_('net_forward', hNet)");
  Net* net = handle_to_ptr<Net>(prhs[0]);
  net->Forward();
}
Developer: Caffe-MPI, Project: Caffe-MPI.github.io, Lines: 7, Source: caffe_.cpp


Note: The Net::Forward method examples in this article were compiled by 纯净天空 (vimsky) from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce this article without permission.