

C++ TrainingSet Class Code Examples

This article collects typical usage examples of the C++ TrainingSet class. If you are wondering what TrainingSet is, what it is used for, or how to use it, the curated examples below may help.


The following shows 15 code examples of classes named TrainingSet, sorted by popularity by default. Note that the examples come from unrelated open-source projects, so the TrainingSet interface differs from one example to the next.

Example 1: TEST

TEST(NaiveBayesTest, test2) {
  enum Bird {
    SMALL,
    MIDDLE,
    BIG
  };
  TrainingSet<Bird, 2> trainingSet;
  
  // weight, height
  std::array<double, 2> s1 = {2, 10};
  std::array<double, 2> s2 = {2.3, 12};
  trainingSet.add(SMALL, s1);
  trainingSet.add(SMALL, s2);
  
  std::array<double, 2> m1 = {4, 15};
  std::array<double, 2> m2 = {4.7, 17.2};
  trainingSet.add(MIDDLE, m1);
  trainingSet.add(MIDDLE, m2);
  
  std::array<double, 2> b1 = {7, 23};
  std::array<double, 2> b2 = {8.5, 22.5};
  trainingSet.add(BIG, b1);
  trainingSet.add(BIG, b2);
  
  NaiveBayesClasifier<Bird, 2> clasifier;
  EXPECT_TRUE(clasifier.train(trainingSet));
  
  std::array<double, 2> sample1 = {1.5, 9};
  std::array<double, 2> sample2 = {4.9, 16};
  std::array<double, 2> sample3 = {9, 20};
  
  EXPECT_EQ(clasifier.clasify(sample1), SMALL);
  EXPECT_EQ(clasifier.clasify(sample2), MIDDLE);
  EXPECT_EQ(clasifier.clasify(sample3), BIG);
}
Developer: BogdanCojocar | Project: Naive-Bayes-Clasifier | Lines: 35 | Source: TestNaiveBayes.cpp
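
The TrainingSet used above is templated on a label type and a fixed feature dimension. A minimal sketch of the interface this test exercises (assumptions mine, not the project's actual header; a map from label to samples is one plausible storage choice):

#include <array>
#include <map>
#include <vector>

template <typename Label, std::size_t N>
class TrainingSet {
public:
    // Record one feature vector under the given class label.
    void add(Label label, const std::array<double, N> &features) {
        samples_[label].push_back(features);
    }
    // Expose the grouped samples so a classifier can estimate per-class statistics.
    const std::map<Label, std::vector<std::array<double, N>>> &samples() const {
        return samples_;
    }
private:
    std::map<Label, std::vector<std::array<double, N>>> samples_;
};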

Example 2:

bool TrainingSet::operator!=(const TrainingSet &trset)
{
	return !(trset.getInputs() == inputs && trset.getTargets() == targets);
}
Developer: marlncpe | Project: INSYDE | Lines: 7 | Source: trainingset.cpp
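
A quick usage sketch. The file-path constructor is borrowed from Example 10 below (same INSYDE codebase); the .tsf file names are placeholders:

#include <QString>

TrainingSet a(QString("set1.tsf")); // path constructor as used in Example 10; names are placeholders
TrainingSet b(QString("set2.tsf"));
if (a != b) {
	// the sets differ in inputs or targets
}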

Example 3: left_set

void	ClassificationTree::print_train_log(const TreeNode::PtrSplitNodeBase split, const TrainingSet &train_set) const
{
	MatType				ltype	=	train_set.get_label_type();
	MatType				ftype	=	train_set.get_feature_type();
	int					rows	=	(int)ltype.total();
	cv::Mat_<double>	left_tmp;
	cv::Mat_<double>	right_tmp;
	TrainingSet			left_set(ftype, ltype);
	TrainingSet			right_set(ftype, ltype);

	split->operator()(train_set, left_set, right_set);

	left_set.compute_target_mean(left_tmp);
	right_set.compute_target_mean(right_tmp);

	cv::Mat_<double>	left_dist(rows, 1, (double*)left_tmp.data);
	cv::Mat_<double>	right_dist(rows, 1, (double*)right_tmp.data);

	printf("left dist\n");
	for (unsigned ii = 0; ii < left_dist.total(); ++ii) {
		printf("\tlabel%u:%f\n", ii, left_dist.at<double>(ii) / left_set.size());
	}

	printf("right dist\n");
	for (unsigned ii = 0; ii < right_dist.total(); ++ii) {
		printf("\tlabel%u:%f\n", ii, right_dist.at<double>(ii) / right_set.size());
	}

}
Developer: mrthat | Project: cvpr | Lines: 31 | Source: ClassificationTree.cpp

Example 4: CalculateBP

float CalculateBP(const char * reffn, const char * trainfn, int refNum, int ngram, int bleuType)
{
    TrainingSet * ts = new TrainingSet();
    if( bleuType < 3 )
        ts->LoadRefData(reffn, refNum, ngram, MAX_SENT_NUM);
    ts->LoadTrainingData(trainfn, false);
    float BP = trainer->GetBP(ts, ngram, (BLEU_TYPE)bleuType, 0); // trainer: file-scope object in Interface.cpp
    delete ts;
    return BP;
}
Developer: initial-d | Project: smt_server | Lines: 10 | Source: Interface.cpp
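
CalculateBP computes the BLEU brevity penalty over a reference set and an n-best list. A hypothetical call site (file names and counts are placeholders, and the file-scope trainer must already be initialised, as in Example 6):

// Hypothetical: 4 references per sentence, 4-grams, BLEU type 0.
float bp = CalculateBP("refs.txt", "nbest.txt", 4, 4, 0);
printf("brevity penalty = %f\n", bp);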

Example 5: positives

LogLikelihood::LogLikelihood(TrainingSet &positives,TrainingSet &negatives)
  : positives(positives),
    negatives(negatives),
    numPos(positives.numCases()),
    numNeg(negatives.numCases()),
    numAttributes(positives.getSchema().numAttributes()),
    func(positives.getSchema().numAttributes()),
    ok(true)
{
}
Developer: bmajoros | Project: BioMaLL | Lines: 10 | Source: LogLikelihood.C

Example 6: MERTraining

bool MERTraining(const char * reffn, const char * trainfn, const char * configfn, int ngram, int bleuType)
{
    TrainingSet * ts = new TrainingSet();
    if( bleuType < 3 )
        ts->LoadRefData(reffn, 4, ngram, MAX_SENT_NUM);
    ts->LoadTrainingData(trainfn, false);
    trainer->LoadPara(configfn);
    trainer->OptimzieWeightsWithMERT(ts, ngram, (BLEU_TYPE)bleuType, 0); // (sic: identifier spelled this way in the project)
    delete ts;
    return true;
}
Developer: initial-d | Project: smt_server | Lines: 11 | Source: Interface.cpp

Example 7:

void	ClassificationTree::print_train_log(const TreeNode::PtrLeafNodeBase leaf, const TrainingSet &train_set) const
{
	cv::Mat_<double>	label_dist;

	train_set.compute_target_mean(label_dist);

	printf("leaf dist\n");

	for (unsigned ii = 0; ii < label_dist.total(); ++ii) {
		printf("\tlabel%d:%f\n", ii, label_dist.at<double>(ii) / std::max<double>((double)train_set.size(), 1.0));
	}
}
Developer: mrthat | Project: cvpr | Lines: 12 | Source: ClassificationTree.cpp

Example 8: _initTrain

void CLTreeTrainer<ImgType, nChannels, FeatType, FeatDim, nClasses>::train(
  Tree<FeatType, FeatDim, nClasses> &tree,
  const TrainingSet<ImgType, nChannels> &trainingSet,
  const TreeTrainerParameters<FeatType, FeatDim> &params,
  unsigned int startDepth, unsigned int endDepth)
{
  /** \todo support a starting depth different from 1 */
  if (startDepth!=1) throw "Starting depth must be equal to 1";

  _initTrain(tree, trainingSet, params, startDepth, endDepth);

  for (unsigned int currDepth=startDepth; currDepth<endDepth; currDepth++)
  {
    boost::chrono::steady_clock::time_point perLevelTrainStart = 
      boost::chrono::steady_clock::now(); 

    unsigned int frontierSize = _initFrontier(tree, params, currDepth);
    unsigned int nSlices = _initHistogram(params);

    if (nSlices > 1)
    {
      BOOST_LOG_TRIVIAL(info) << "Maximum allowed global histogram size reached: split in "
                              << nSlices << " slices";
    }

    // Flag all images as to-be-skipped: the flag will be set to false if at least one
    // image pixel is processed
    std::fill_n(m_toSkipTsImg, trainingSet.getImages().size(), true);

    for (unsigned int i=0; i<nSlices; i++)
    {
      _traverseTrainingSet(trainingSet, params, currDepth, i);
      _learnBestFeatThr(tree, params, currDepth, i);
    }

    // Update skipped images flags
    std::copy(m_toSkipTsImg, m_toSkipTsImg + trainingSet.getImages().size(), m_skippedTsImg);

    boost::chrono::duration<double> perLevelTrainTime =
      boost::chrono::duration_cast<boost::chrono::duration<double> >(
        boost::chrono::steady_clock::now() - perLevelTrainStart);

    BOOST_LOG_TRIVIAL(info) << "Depth " << currDepth << " trained in "
                            << perLevelTrainTime.count() << " seconds";
  }

  _cleanTrain();
}
Developer: mUogoro | Project: padenti | Lines: 53 | Source: cl_tree_trainer_impl.hpp

Example 9: InitFoldSets

void DataSet::InitFoldSets(TrainingSet &ts, ValidationSet &vs, int fold)
{
	int vStart = nSamples*(fold - 1) / nFolds;
	int vEnd = nSamples*fold / nFolds;
	ts.Init(nSamples - (vEnd - vStart), nFeatures);
	vs.Init((vEnd - vStart), nFeatures);
	for (int i = 0; i < nSamples; i++)
	{
		if (i >= vStart && i < vEnd)
			vs.PushSample(X[i], Y[i]);
		else
			ts.PushSample(X[i], Y[i]);
	}
}
Developer: felipesfaria | Project: FariaTcc | Lines: 14 | Source: DataSet.cpp
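
InitFoldSets carves out fold number `fold` (1-based) as the validation slice and routes every other sample into the training set. A minimal sketch of the cross-validation loop this implies; the model API and fold count are assumptions, not from the project:

DataSet dataSet;                 // assume loaded elsewhere
const int nFolds = 10;           // assumed; must match the DataSet's configured fold count
for (int fold = 1; fold <= nFolds; fold++)
{
    TrainingSet ts;
    ValidationSet vs;
    dataSet.InitFoldSets(ts, vs, fold);  // fold is 1-based, as the index arithmetic above shows
    model.Train(ts);                     // 'model' stands for any learner with Train/Evaluate (hypothetical)
    double acc = model.Evaluate(vs);     // hypothetical
    printf("fold %d: accuracy = %f\n", fold, acc);
}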

Example 10: entrenamiento

void PruebaPantalla::on_DotMatrixRepresentationButton_clicked()
{
	QString openDir = QFileDialog::getOpenFileName(this, //widget
												  "Abrir conjunto de entrenamiento", //caption
												  "/home/edixon/programacion/INSYDE//samples/TrainingSets", //dir
												  "Conjunto de entrenamiento (*.tsf)", //filter
												  new QString("Conjunto de entrenamiento (*.tsf)"));

	if(openDir == "") return;

	// Note: ts, dmr, and the QString filter above are heap-allocated and never freed (leaked as written).
	TrainingSet *ts = new TrainingSet(openDir);

	DotMatrixRepresentation *dmr = new DotMatrixRepresentation(ts->getInputs()[0]);

	dmr->show();
}
Developer: (not listed) | Project: (not listed) | Lines: 16 | Source: (not listed)

Example 11: build

void Apta::build(TrainingSet trainingSet, bool useWhiteNodes)
{
    if (useWhiteNodes) {
        this->_useWhiteNodes = true;
    }

    // Start with the root of APTA colored red
    this->_addNode(true, this->_rootId, "", "", '\0');

    for (const pair<string, bool> &sample : trainingSet.get()) {
        this->_addPath(this->_rootId, sample.first,
            sample.second ? Apta::ACCEPTED : Apta::REJECTED);
    }
}
Developer: grammarhoard | Project: 2014-rosu-inference-cpp | Lines: 14 | Source: Apta.cpp
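
Apta::build consumes (string, accepted?) pairs from the training set, so a caller might look like the sketch below. The add mutator is an assumed API, not confirmed by the listing:

TrainingSet trainingSet;
trainingSet.add("abab", true);   // accepted word -- assumed API
trainingSet.add("abba", false);  // rejected word -- assumed API

Apta apta;
apta.build(trainingSet, true);   // true: keep white (uncolored) nodes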

Example 12: main

int main(int argc, char *argv[]) {
	QApplication a(argc, argv);

	TrainingSet input;
	input.AddInput(red); // red, green, ... below are colour vectors defined elsewhere in the source file
	input.AddInput(green);
	input.AddInput(dk_green);
	input.AddInput(blue);
	input.AddInput(dk_blue);
	input.AddInput(yellow);
	input.AddInput(orange);
	input.AddInput(purple);
	input.AddInput(black);
	input.AddInput(white);

	std::vector<float> vCol(3);
	int w1 = 40;
	int w2 = 4;

	SOMNetGPU gpu;
	gpu.CreateSOM(3, 1, w1, w1);
	gpu.SetTrainingSet(input);

	SetFcn(&ownFn);
	gpu.SetDistFunction(ownFn);
	// or just: SetFcn(gpu.GetDistFunction());

	gpu.Training(1000);

	SOMReader w(w1, w1, w2);
	for(int x = 0; x < w1*w1; x++) {
		SOMNeuron *pNeur = (SOMNeuron*)((SOMLayer*)gpu.GetOPLayer())->GetNeuron(x);
		vCol[0] = pNeur->GetConI(0)->GetValue();
		vCol[1] = pNeur->GetConI(1)->GetValue();
		vCol[2] = pNeur->GetConI(2)->GetValue();

		w.SetField(QPoint(pNeur->GetPosition()[0], pNeur->GetPosition()[1]), vCol );
	}
	w.Save("SimpFnExtByGPU.png");
	return 0;
}
Developer: AtnesNess | Project: annetgpgpu | Lines: 41 | Source: SimpFnExtGPU.cpp

Example 13: test

void NeuralNetwork::test(TrainingSet &testSet)
{
	vector<InputImage *>* data = testSet.getData();

	int numCorrect = 0;
	for (vector<InputImage *>::iterator testImage = data->begin(); testImage != data->end(); ++testImage)
	{
		Mat *trainingImageMat = (*testImage)->getImage();
		vector<int> *actualLabel = (*testImage)->getLabelVector(); // not used in this loop

		// Get V
		Mat V = parameters * (*trainingImageMat);

		// Compute prediction
		vector<float> predictions(LABEL_SIZE);
		predictHelper(V, predictions);

		// Find max for prediction
		float max = 0;
		int maxInd = 0;
		int count = 0;
		for (vector<float>::iterator it = predictions.begin(); it != predictions.end(); ++it)
		{
			if (*it > max)
			{
				max = *it;
				maxInd = count;
			}
			count++;
		}

		char predictedChar = InputImage::oneHotIndexToChar(maxInd);
		cout << "Predicted: " << predictedChar << " | Actual: " << (*testImage)->getCharLabel() << endl;
		if (tolower(predictedChar) == tolower((*testImage)->getCharLabel()))
		{
			numCorrect++;
		}
	}

	float percentCorrect = ((float)numCorrect / (float)data->size()) * 100;
	cout << "Percent correct: " << (int)percentCorrect << "%" << endl;
}
Developer: utat-uav | Project: cv16-interface | Lines: 42 | Source: NeuralNetwork.cpp
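
Both this example and Example 14 call a predictHelper that is not shown in the listing. A minimal softmax sketch consistent with how it is used (the signature is an assumption; V is a LABEL_SIZE x 1 float Mat of raw scores):

// Sketch only: not the project's actual implementation.
void NeuralNetwork::predictHelper(const Mat &V, vector<float> &predictions)
{
	// Subtract the max logit for numerical stability before exponentiating.
	float maxLogit = V.at<float>(0, 0);
	for (int i = 1; i < V.rows; ++i)
		maxLogit = std::max(maxLogit, V.at<float>(i, 0));

	// Exponentiate shifted scores and normalise so the outputs sum to 1.
	float sum = 0.0f;
	for (int i = 0; i < V.rows; ++i)
	{
		predictions[i] = expf(V.at<float>(i, 0) - maxLogit);
		sum += predictions[i];
	}
	for (int i = 0; i < V.rows; ++i)
		predictions[i] /= sum;
}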

Example 14: train

void NeuralNetwork::train(TrainingSet &trainingSet)
{
	vector<InputImage *>* data = trainingSet.getData();

	vector<float> G; // declared but never used in this function

	// Repeat until convergence
	bool hasConverged = false;
	int count = 0;
	float avgCrossEntropy = 100;
	time_t timer;
	time(&timer);
	int k = 0;
	while (!hasConverged)
	{
		if (count > MIN_TRAIN_TIME) // despite its name, MIN_TRAIN_TIME caps the number of cycles here
		{
			hasConverged = true;
			break;
		}
		count++;

		if (count % 5 == 0)
		{
			cout << count << "th cycle with " << avgCrossEntropy << " avg cross entropy" << endl;
			cout << difftime(time(0), timer) << " seconds elapsed" << endl;
		}

		// Reset average crossentropy
		avgCrossEntropy = 0;

		// Get predictions
		vector<vector<float>> allPredictions;
		vector<InputImage *> inputImages;
		for (int m = k; m < k + BATCH_SIZE; ++m)
		{
			int ind = m % data->size();

			Mat *trainingImageMat = data->at(ind)->getImage();
			vector<int> *actualLabel = data->at(ind)->getLabelVector(); // unused here; labels are re-read from inputImages below

			// Get V
			Mat V = parameters * (*trainingImageMat);

			// Compute prediction
			vector<float> predictions(LABEL_SIZE);
			predictHelper(V, predictions);

			avgCrossEntropy -= (logf(predictions[data->at(ind)->getLabelIndex()]));

			allPredictions.push_back(predictions);
			inputImages.push_back(data->at(ind));
		}

		// Update parameters
		for (int i = 0; i < parameters.rows; ++i)
		{
			for (int j = 0; j < parameters.cols; ++j)
			{
				float grad = 0;
#pragma omp parallel for reduction(+:grad)
				for (int p = 0; p < BATCH_SIZE; p++)
				{
					grad += inputImages.at(p)->getImage()->at<float>(j, 0) * (inputImages.at(p)->getLabelVector()->at(i) - allPredictions[p][i]);
				}

				parameters.at<float>(i, j) += TRAINING_STEP * grad;
			}
		}

		// Average the cross entropy
		avgCrossEntropy /= BATCH_SIZE;

		k += BATCH_SIZE;
	}

	// Save to file
	ofstream nnsave;
	nnsave.open("savednn.txt");
	for (int i = 0; i < parameters.rows; ++i)
	{
		for (int j = 0; j < parameters.cols; ++j)
		{
			nnsave << parameters.at<float>(i, j) << "\t";
		}
		nnsave << endl;
	}
	nnsave << endl;
	nnsave.close();

	//cout << parameters << endl;
}
Developer: utat-uav | Project: cv16-interface | Lines: 92 | Source: NeuralNetwork.cpp
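
The parameter update in the nested loops is the standard batch gradient step for softmax (multinomial logistic) regression on the cross-entropy loss. Written out, with notation mine rather than the project's:

\[
\theta_{ij} \leftarrow \theta_{ij} + \eta \sum_{p=1}^{B} x^{(p)}_j \left( y^{(p)}_i - \hat{y}^{(p)}_i \right)
\]

where \eta is TRAINING_STEP, B is BATCH_SIZE, x^{(p)} is the flattened image, y^{(p)} the one-hot label vector, and \hat{y}^{(p)} the prediction returned by predictHelper.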

Example 15: TrainNeuralNetwork

void BackPropagation::TrainNeuralNetwork(LayeredFeedForwardNeuralNet& networkToTrain, const TrainingSet& trainingSet) const
{
    long trainingIterations = 0;
    double cumulativeNetworkError = DBL_MAX;
    TrainingSet trainingSetCopy = trainingSet;
    
    // get activation derivative function for delta rule
    std::shared_ptr<IUnaryExpressionParser> pExpressionParser = UnaryExpressionParserFactory::CreateDerivativeParser();
    UnaryFunction activationDerivative = pExpressionParser->GetFunctionForExpression(networkToTrain.GetActivationFunction());
    
    while (cumulativeNetworkError > m_targetNetworkError && trainingIterations < m_iterationLimit)
    {
        std::cout << "Enet = " << cumulativeNetworkError << std::endl;
        
        // reset network error for new training set iteration.
        cumulativeNetworkError = 0.0;
        
        // begin a new training cycle: put exemplars in random order
        // (note: std::random_shuffle was removed in C++17; std::shuffle with an engine is the modern replacement)
        std::random_shuffle(trainingSetCopy.begin(), trainingSetCopy.end());
        
        for (const Exemplar& exemplar : trainingSetCopy)
        {
            // fire the neural network and record activations at each layer
            std::vector<VectorXd> layerActivations;
            layerActivations.push_back(exemplar.first);
            for (long layerIndex = 1; layerIndex < networkToTrain.GetLayerCount(); layerIndex++)
            {
                layerActivations.push_back(
                    networkToTrain.FireSingleLayer(layerActivations[layerIndex - 1], layerIndex)
                );
            }
            
            // deque of errors on each layer (so we can add in reverse order)
            std::deque<VectorXd> layerErrors;
            
            // iterate over the layers in reverse order (back propagating), calculating errors.
            // reverse order because error in below layers is dependent on error of above layers.
            for (long layerIndex = networkToTrain.GetLayerCount() - 1; layerIndex > 0; layerIndex--)
            {
                VectorXd currentLayerError; // what we're trying to calculate
                const VectorXd& currentLayerActivation = layerActivations[layerIndex];
                
                if (layerIndex == networkToTrain.GetLayerCount() - 1)
                {
                    // this is the output layer's error, which is calculated against the known exemplar expected output
                    // Eo = (Do - Yo)Yo([1_0..1_n] - Yo)    for sigmoid (we use generalised delta rule and derivative of activation fn)
                    const VectorXd& expectedOutputPattern = exemplar.second;
                    currentLayerError = (expectedOutputPattern - currentLayerActivation).cwiseProduct(currentLayerActivation.unaryExpr(activationDerivative));
                } else {
                    // this is a hidden layer error vector, which is calculated against the above layer's error and input weights.
                    // Ehy = Yh(1 - Yh)Wi^T.Eo    for sigmoid (we use generalised delta rule and derivative of activation fn)
                    MatrixXd aboveLayerInputWeights = networkToTrain.GetLayerInputWeights(layerIndex + 1);
                    const VectorXd& aboveLayerError = layerErrors.front();
                    // when calculating hidden layer errors we don't care about bias weights for the input weights of the above layer.
                    // this is because the "error of the bias unit" in a hidden layer is not used to calculate changes in weights below. so get rid of these to simplify calculation.
                    MatrixXd aboveLayerInputWeightsMinusBias = aboveLayerInputWeights.leftCols(aboveLayerInputWeights.cols() - 1);
                    // note we use cwiseProduct because we want to multiply elements of the weighted error vector against the derivative of current layer activations.
                    currentLayerError = (aboveLayerInputWeightsMinusBias.transpose() * aboveLayerError).cwiseProduct(currentLayerActivation.unaryExpr(activationDerivative));
                }
                layerErrors.push_front(currentLayerError);
            }
            // push a dummy 0 error to error deque so error/activation stl vector elements line up.
            layerErrors.push_front(VectorXd::Zero(exemplar.first.size()));
            
            // next we need to iterate over errors for each layer (excluding dummy input layer), calculating change in input weights.
            for (long layerIndex = 1; layerIndex < networkToTrain.GetLayerCount(); layerIndex++)
            {
                // get weight matrix to adjust
                MatrixXd weightsToAdjust = networkToTrain.GetLayerInputWeights(layerIndex);
                
                // get previous layer's activations (plus bias value)
                VectorXd previousLayerActivationPlusBias(weightsToAdjust.cols());
                previousLayerActivationPlusBias << layerActivations[layerIndex - 1], VectorXd::Constant(1, -1.0);
                
                // calculate change in weights ΔW = η Eo . Yh^T (where . is outer product)
                MatrixXd layerInputWeightsDelta = (layerErrors[layerIndex] * previousLayerActivationPlusBias.transpose()) * m_learningRate;
                
                // update neural net weights
                weightsToAdjust += layerInputWeightsDelta;
                networkToTrain.SetLayerInputWeights(weightsToAdjust, layerIndex);
                
                std::cout << "Weights for layer " << layerIndex << " are now:" << std::endl;
                std::cout << weightsToAdjust << std::endl;
            }
            
            // ok now update the cumulative network error.
            // this is (expected - actual activations) normalised, squared and then halved.
            cumulativeNetworkError += (exemplar.second - layerActivations.back()).squaredNorm() / 2;
            
        } // end for training-set-iteration
        trainingIterations++;
    } // target reached (or iteration limit exceeded). end training.
    
    if (trainingIterations == m_iterationLimit)
    {
        std::cout << "Iteration limit reached - optimisation did not converge on a global minimum." << std::endl;
    } else {
        std::cout << "Target network error reached after " << trainingIterations << " training set iterations." << std::endl;
    }
}
Developer: philmccarthy24 | Project: FFNeuralNet | Lines: 100 | Source: BackPropagation.cpp
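
For reference, the three formulas the comments above sketch, written compactly (notation mine; \odot is the element-wise product, and \sigma'(y) = y(1-y) for the sigmoid used here):

\[
E_o = (D_o - Y_o) \odot \sigma'(Y_o), \qquad
E_h = \left(W_{\mathrm{above}}^{\top} E_o\right) \odot \sigma'(Y_h), \qquad
\Delta W = \eta \, E \, Y_{\mathrm{prev}}^{\top}
\]

The bias column of the above layer's weights is dropped before computing E_h, exactly as the code does with leftCols.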


Note: The TrainingSet class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects; copyright remains with the original authors. Consult each project's license before redistributing or using the code. Do not reproduce without permission.