当前位置: 首页>>代码示例>>C++>>正文


C++ TrainingSet::getPriors方法代码示例

本文整理汇总了C++中TrainingSet::getPriors方法的典型用法代码示例。如果您正苦于以下问题:C++ TrainingSet::getPriors方法的具体用法?C++ TrainingSet::getPriors怎么用?C++ TrainingSet::getPriors使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类TrainingSet的用法示例。


在下文中一共展示了TrainingSet::getPriors方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。

示例1:


//......... part of the code is omitted here (truncated excerpt) .........
// NOTE(review): this is the tail end of an OpenCL tree-trainer
// initialization routine (cl_tree_trainer_impl_init.hpp). The enclosing
// function's signature and the first part of its body are not visible in
// this excerpt, so names such as parLearntNodes, learnBuffsSize, nClasses,
// perNodeHistogramSize, maxFrontierSize and the tmpFeat*Bounds arrays are
// assumed to be defined above — confirm against the full file.
				 CL_MEM_READ_ONLY,
				 parLearntNodes*perNodeHistogramSize*sizeof(cl_uint));
  // Device output buffers written by the learning kernel: per-node best
  // feature index, best threshold index, and the corresponding entropy.
  m_clBestFeaturesBuff = cl::Buffer(m_clContext,
				    CL_MEM_WRITE_ONLY,
				    learnBuffsSize*sizeof(cl_uint));
  m_clBestThresholdsBuff = cl::Buffer(m_clContext,
				      CL_MEM_WRITE_ONLY,
				      learnBuffsSize*sizeof(cl_uint));
  m_clBestEntropiesBuff = cl::Buffer(m_clContext,
				     CL_MEM_WRITE_ONLY,
				     learnBuffsSize*sizeof(cl_float));
  // Read-only device buffer holding, for each node being learnt in
  // parallel, the total number of training samples per class.
  m_clPerClassTotSamplesBuff = cl::Buffer(m_clContext,
					  CL_MEM_READ_ONLY,
					  parLearntNodes*nClasses*sizeof(cl_uint));
				    
  // Set kernels arguments that does not change between calls:
  // - prediction
  // Hack: use the labels image as mask, i.e. assume pixels with non-zero label
  //       are marked as foreground pixels
  // NOTE(review): args 0, 1, 3, 4, 10 and 11 are commented out or absent
  // here — presumably set per-call elsewhere; verify before relying on it.
  //m_clPredictKern.setArg(0, m_clTsImg);
  //m_clPredictKern.setArg(1, m_clTsLabelsImg);
  m_clPredictKern.setArg(2, nChannels);
  m_clPredictKern.setArg(5, m_clTreeLeftChildBuff);
  m_clPredictKern.setArg(6, m_clTreeFeaturesBuff);
  m_clPredictKern.setArg(7, FeatDim);
  m_clPredictKern.setArg(8, m_clTreeThrsBuff);
  m_clPredictKern.setArg(9, m_clTreePosteriorsBuff);
  //m_clPredictKern.setArg(10, m_clTsNodesIDImg);
  //m_clPredictKern.setArg(11, m_clPredictImg);
  // Local (work-group) scratch memory: one FeatDim-long feature vector per
  // work item of a WG_WIDTH x WG_HEIGHT work group.
  m_clPredictKern.setArg(12, cl::Local(sizeof(FeatType)*WG_WIDTH*WG_HEIGHT*FeatDim));

  // - per-image histogram update
  // NOTE(review): as above, the commented-out / skipped argument indices
  // (0, 2, 3, 4, 5, 6, 7, 14, 16, 17) are presumably bound per call.
  //m_clPerImgHistKern.setArg(0, m_clTsImg);
  m_clPerImgHistKern.setArg(1, nChannels);
  //m_clPerImgHistKern.setArg(4, m_clTsLabelsImg);
  //m_clPerImgHistKern.setArg(5, m_clTsNodesIDImg);
  //m_clPerImgHistKern.setArg(6, m_clTsSamplesBuff);
  m_clPerImgHistKern.setArg(8, FeatDim);
  // Feature sampling bounds and threshold sampling parameters.
  m_clPerImgHistKern.setArg(9, m_clFeatLowBoundsBuff);
  m_clPerImgHistKern.setArg(10, m_clFeatUpBoundsBuff);
  m_clPerImgHistKern.setArg(11, params.nThresholds);
  m_clPerImgHistKern.setArg(12, params.thrLowBound);
  m_clPerImgHistKern.setArg(13, params.thrUpBound);
  //m_clPerImgHistKern.setArg(14, m_clPerImgHistBuff);
  m_clPerImgHistKern.setArg(15, tree.getID());
  m_clPerImgHistKern.setArg(18, m_clTreeLeftChildBuff);
  m_clPerImgHistKern.setArg(19, m_clTreePosteriorsBuff);
  // Two local-memory scratch areas; the meaning of the fixed factor 8 in
  // the first one is not visible from this excerpt — TODO confirm.
  m_clPerImgHistKern.setArg(20, cl::Local(sizeof(FeatType)*8));
  m_clPerImgHistKern.setArg(21, cl::Local(sizeof(FeatType)*WG_WIDTH*WG_HEIGHT*FeatDim));

  // - node's best feature/threshold learning
  // All arguments are static for this kernel: histogram and per-class
  // totals in, best feature/threshold/entropy out.
  m_clLearnBestFeatKern.setArg(0, m_clHistogramBuff);
  m_clLearnBestFeatKern.setArg(1, m_clPerClassTotSamplesBuff);
  m_clLearnBestFeatKern.setArg(2, params.nFeatures);
  m_clLearnBestFeatKern.setArg(3, params.nThresholds);
  m_clLearnBestFeatKern.setArg(4, nClasses);
  m_clLearnBestFeatKern.setArg(5, perThreadFeatThrPairs);
  m_clLearnBestFeatKern.setArg(6, m_clBestFeaturesBuff);
  m_clLearnBestFeatKern.setArg(7, m_clBestThresholdsBuff);
  m_clLearnBestFeatKern.setArg(8, m_clBestEntropiesBuff);


  // Init corresponding host buffers
  /** \todo use mapping/unmapping to avoid device/host copy */
  /** \todo consider std::vector (or unique_ptr<T[]>) instead of raw new[]
      so cleanup is automatic; ownership/delete site is not visible here */
  //m_tsNodesIDImg = new int[m_maxTsImgWidth*m_maxTsImgHeight*GLOBAL_HISTOGRAM_FIFO_SIZE];
  //m_perImgHist = new unsigned char[perImgHistogramSize*GLOBAL_HISTOGRAM_FIFO_SIZE];
  m_bestFeatures = new unsigned int[learnBuffsSize];
  m_bestThresholds = new unsigned int[learnBuffsSize];
  m_bestEntropies = new float[learnBuffsSize];

  // Done with OpenCL initialization


  // Init the global histogram:
  // define the global histogram as a vector of per-node histograms. The total size of
  // the global histogram (defined as number of per-node histograms simultaneously kept)
  // is limited by the smaller between maxFrontierSize and
  // GLOBAL_HISTOGRAM_MAX_SIZE/perNodeHistogramSize
  m_histogramSize = std::min(maxFrontierSize,
			     (size_t)floorl((double)GLOBAL_HISTOGRAM_MAX_SIZE/(perNodeHistogramSize*sizeof(unsigned int))));
  // NOTE(review): loop index is int while m_histogramSize derives from
  // size_t — harmless at realistic sizes, but a signed/unsigned mix.
  m_histogram = new unsigned int*[m_histogramSize];
  for (int i=0; i<m_histogramSize; i++) m_histogram[i] = new unsigned int[perNodeHistogramSize];


  // Buffer used to track to-train nodes for each depth
  m_frontier = new int[maxFrontierSize];


  // Note: the histogram for the root node is equal to the training set priors
  if (startDepth==1)
  {
    const TreeNode<FeatType, FeatDim> &rootNode = tree.getNode(0); 
    // Seed the root posterior with the training-set class priors.
    // NOTE(review): rootNode is a const reference, yet std::copy writes
    // through rootNode.m_posterior — this only works if m_posterior is a
    // pointer member (the pointee is mutated, not the node itself);
    // confirm against TreeNode's definition.
    std::copy(trainingSet.getPriors(), trainingSet.getPriors()+nClasses, rootNode.m_posterior);
  }

  // Temporary host-side feature-bound arrays (presumably allocated in the
  // omitted part above, after upload to the device buffers) — TODO confirm.
  delete []tmpFeatUpBounds;
  delete []tmpFeatLowBounds;

  // Done
}
开发者ID:Banus,项目名称:padenti,代码行数:101,代码来源:cl_tree_trainer_impl_init.hpp


注:本文中的TrainingSet::getPriors方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。