

C++ NeuralNetwork::computeCost Method Code Examples

This article collects typical usage examples of the C++ method NeuralNetwork::computeCost. If you are wondering how NeuralNetwork::computeCost is used in practice, or are looking for concrete examples of calling it, the selected code example below should help. You can also browse further usage examples for the NeuralNetwork class that the method belongs to.


One code example of the NeuralNetwork::computeCost method is shown below.
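For context, here is a minimal sketch of the interface that Example 1 appears to rely on. The actual NeuralNetwork class declaration is not shown on this page, so every signature below is an assumption inferred from the calls made in the example (the layer-count constructor, computeCost, forwardPropagateActivations, backprop, and sparseLearningPass); the real class in the source project may differ.

#include <Eigen/Dense>
using Eigen::MatrixXd;

// Hypothetical interface, inferred only from the calls made in Example 1.
class NeuralNetwork {
public:
  // numLayers entries of unitCounts give the width of each layer
  // (input, hidden, output for the autoencoder below).
  NeuralNetwork(int numLayers, const int* unitCounts);

  // Evaluate (and, judging by the example, print) the cost of mapping
  // `input` to `target`; for an autoencoder both arguments are the same.
  double computeCost(const MatrixXd& input, const MatrixXd& target);

  // Forward pass: compute every unit's activation for `input`.
  void forwardPropagateActivations(const MatrixXd& input);

  // One stochastic gradient descent step via backpropagation toward `target`.
  void backprop(const MatrixXd& target);

  // Sparsity-related parameter updates; `verbose` toggles extra logging.
  void sparseLearningPass(bool verbose);
};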

Example 1: main

#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <Eigen/Dense>

using namespace std;
using Eigen::MatrixXd;

// Note: alpha, beta, numIterations, oFilename, WINDOW_PIXEL_SIZE, the
// NeuralNetwork class, and generateTrainingExample() are defined in the
// parts of simple-autoencoder.cpp omitted from this excerpt.

int main(int argc, char* argv[])
{
  cout << "Reading in the input data" << endl;
  /**************  Read in images *****************/
  int imgSize = 512;
  int numImages = 10;
  string filename = "olsh.dat";

  vector<MatrixXd> images;
     
  ifstream indata; 
  double num; // variable for input value
  
  indata.open(filename.c_str());
  if(!indata) { 
    cerr << "Error: file could not be opened" << endl;
    return 1;
   }
  
  for(int i = 0; i < numImages; ++i) {
    MatrixXd m(imgSize,imgSize);
    for(int r = 0; r < imgSize; ++r) { 
      for(int c = 0; c < imgSize; ++c) {
        // Read the next value; stop with an error if the file runs out of data.
        if (!(indata >> num)) {
          cerr << "Error: ran out of input values at (" << r << "," << c << ")" << endl;
          return 1;
        }
        m(r,c) = num;
      }
    }
    images.push_back(m);
  }
  indata.close();
  
  cout << "Input data loaded" << endl;


  /*************** YOUR CODE HERE ************/
  /* parse the command line options */
  oFilename = NULL;
  if (argc > 1) {
    sscanf(argv[1], "%lf", &alpha);
  }
  if (argc > 2) {
    sscanf(argv[2], "%d", &numIterations);
  }
  if (argc > 3) {
    sscanf(argv[3], "%lf", &beta);
  }
  if (argc > 4) {
    oFilename = argv[4];
    cout << "Outputting to file " << oFilename << endl;
  }
  else {
    oFilename = (char *)malloc(256);
    sprintf(oFilename, "bases/alpha%fnumIterations%d.dat", alpha, numIterations);
  }
  /* For each training example: 

     (i) Run a forward pass on our network on input x, to compute all
     units' activations;

     (ii) Perform one step of stochastic gradient descent using
     backpropagation;
     
     (iii) Perform the updates given in Equations (8-9).
  */
  int unitCounts[] = {WINDOW_PIXEL_SIZE * WINDOW_PIXEL_SIZE, 
		      30, 
		      WINDOW_PIXEL_SIZE * WINDOW_PIXEL_SIZE};
  NeuralNetwork nn = NeuralNetwork(3, unitCounts);
  
  MatrixXd trainingImage;
  cout << "Performing " << numIterations << " iterations with alpha = " << alpha << endl;
  for (int iteration=0; iteration < numIterations; iteration++) {
    generateTrainingExample(images, trainingImage);

    if (iteration % 1000 == 0 || iteration < 11) {
      cout << iteration << " " << flush;
      cout << endl << "   ";
      nn.computeCost(trainingImage, trainingImage);
      
    }
    // (i) run a feedforward pass on our network
    nn.forwardPropagateActivations(trainingImage);
    
    // (ii) perform one step of stochastic gradient descent
    nn.backprop(trainingImage);

    // (iii) perform the updates given in Equations (8-9).
    nn.sparseLearningPass(iteration % 1000 == 0 || iteration < 11);

    if (iteration % 1000 == 0 || iteration < 11) {
      cout << "   ";
      nn.computeCost(trainingImage, trainingImage);
    }

  }
//......... the rest of this code has been omitted .........
Developer ID: Pygmalion6636, Project: deep-learning, Lines of code: 101, Source file: simple-autoencoder.cpp
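The page does not show the body of computeCost itself. As a rough illustration of what a sparse-autoencoder cost function of this shape typically computes, here is a hedged, self-contained sketch: squared reconstruction error plus an L2 weight-decay term and a KL-divergence sparsity penalty. The function name, parameters (sparsityTarget, beta, lambda), and structure are hypothetical and are not taken from the project above.

#include <cmath>
#include <vector>
#include <Eigen/Dense>
using Eigen::MatrixXd;
using Eigen::VectorXd;

// Hypothetical sparse-autoencoder cost: squared reconstruction error,
// L2 weight decay, and a KL-divergence sparsity penalty on the mean
// hidden activations. Names and structure are illustrative only.
double computeAutoencoderCost(const VectorXd& input,
                              const VectorXd& reconstruction,
                              const VectorXd& meanHiddenActivation,
                              const std::vector<MatrixXd>& weights,
                              double sparsityTarget, // rho
                              double beta,           // sparsity penalty weight
                              double lambda)         // weight-decay strength
{
  // Squared reconstruction error.
  double cost = 0.5 * (reconstruction - input).squaredNorm();

  // L2 weight decay over all weight matrices.
  for (const MatrixXd& W : weights)
    cost += 0.5 * lambda * W.squaredNorm();

  // KL-divergence penalty pushing each hidden unit's mean activation
  // toward the sparsity target rho.
  for (int j = 0; j < meanHiddenActivation.size(); ++j) {
    double rhoHat = meanHiddenActivation(j);
    cost += beta * (sparsityTarget * std::log(sparsityTarget / rhoHat)
                    + (1.0 - sparsityTarget)
                      * std::log((1.0 - sparsityTarget) / (1.0 - rhoHat)));
  }
  return cost;
}

In Example 1, nn.computeCost(trainingImage, trainingImage) passes the same matrix as both input and target because the autoencoder is trained to reproduce its input; a cost of roughly this form would be evaluated for that pair and printed to monitor training progress.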


Note: the NeuralNetwork::computeCost examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; refer to each project's license before distributing or using the code. Do not republish without permission.