

C++ CvRTrees::train Method Code Examples

This article collects typical usage examples of the C++ CvRTrees::train method. If you are wondering how to call CvRTrees::train in C++, or what working CvRTrees::train code looks like, the curated examples below should help. You can also browse more usage examples for the CvRTrees class that this method belongs to.


The following presents 15 code examples of the CvRTrees::train method, sorted by popularity.
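Before the collected examples, here is a minimal, self-contained sketch of the call (not drawn from any of the projects below) using the cv::Mat overload of CvRTrees::train from the legacy OpenCV 2.x ml module; the toy data and the parameter values are illustrative assumptions only:

#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>
#include <cstdio>

int main()
{
    // four samples with two numerical features each, one sample per row (CV_ROW_SAMPLE)
    float samples[] = { 0.f, 0.f,   0.f, 1.f,   1.f, 0.f,   1.f, 1.f };
    cv::Mat trainData(4, 2, CV_32FC1, samples);

    // one class label per sample
    float labels[] = { 0.f, 0.f, 1.f, 1.f };
    cv::Mat responses(4, 1, CV_32FC1, labels);

    // declare the two inputs numerical and the response categorical (classification)
    cv::Mat varType(3, 1, CV_8U, cv::Scalar(CV_VAR_NUMERICAL));
    varType.at<uchar>(2, 0) = CV_VAR_CATEGORICAL;

    CvRTParams params(5,      // max depth
                      2,      // min sample count
                      0,      // regression accuracy: N/A here
                      false,  // no surrogate splits
                      10,     // max categories
                      0,      // no priors
                      false,  // no variable importance
                      0,      // nactive_vars: 0 means sqrt(number of features)
                      15,     // max number of trees
                      0.01f,  // forest accuracy
                      CV_TERMCRIT_ITER);

    CvRTrees forest;
    forest.train(trainData, CV_ROW_SAMPLE, responses,
                 cv::Mat(), cv::Mat(), varType, cv::Mat(), params);

    cv::Mat probe = (cv::Mat_<float>(1, 2) << 1.f, 0.f);
    printf("predicted class: %.0f\n", forest.predict(probe));
    return 0;
}

train returns true on success, and predict returns the predicted class label as a float, which is why several of the examples below cast or round its result.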

Example 1: Train_rtrees

void Model::Train_rtrees( const SampleSet& samples )
{
	CvRTrees* model = (CvRTrees*)m_pModel;
	CvRTParams* para = (CvRTParams*)m_trainPara;
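	// the four empty Mats stand for varIdx, sampleIdx, varType and missingDataMask
	// (use all features and all samples, no explicit variable types, no missing data)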
	model->train(samples.Samples(), CV_ROW_SAMPLE, samples.Labels(), 
		cv::Mat(), cv::Mat(), cv::Mat(), cv::Mat(), *para);
}
Developer ID: ElmerNing, Project: OpencvML, Lines: 7, Source: Model.cpp

Example 2: train

  void train()
  {
    int sample_size = pos_data_.size() + neg_data_.size();
    feat_count_ = pos_data_[0].size();

    CvMat* cv_data = cvCreateMat(sample_size, feat_count_, CV_32FC1);
    CvMat* cv_resp = cvCreateMat(sample_size, 1, CV_32S);

    // Put positive data in opencv format.
    int j = 0;
    for (vector< vector<float> >::iterator i = pos_data_.begin();
         i != pos_data_.end();
         i++)
    {
      float* data_row = (float*)(cv_data->data.ptr + cv_data->step * j);
      for (int k = 0; k < feat_count_; k++)
        data_row[k] = (*i)[k];

      cv_resp->data.i[j] = 1;
      j++;
    }

    // Put negative data in opencv format.
    for (vector< vector<float> >::iterator i = neg_data_.begin();
         i != neg_data_.end();
         i++)
    {
      float* data_row = (float*)(cv_data->data.ptr + cv_data->step * j);
      for (int k = 0; k < feat_count_; k++)
        data_row[k] = (*i)[k];

      cv_resp->data.i[j] = -1;
      j++;
    }

    CvMat* var_type = cvCreateMat(1, feat_count_ + 1, CV_8U);
    cvSet(var_type, cvScalarAll(CV_VAR_ORDERED));
    cvSetReal1D(var_type, feat_count_, CV_VAR_CATEGORICAL);

    float priors[] = {1.0, 1.0};

    CvRTParams fparam(8, 20, 0, false, 10, priors, false, 5, 50, 0.001f, CV_TERMCRIT_ITER);
    fparam.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.1);

    forest.train(cv_data, CV_ROW_SAMPLE, cv_resp, 0, 0, var_type, 0,
                 fparam);


    cvReleaseMat(&cv_data);
    cvReleaseMat(&cv_resp);
    cvReleaseMat(&var_type);
  }
Developer ID: xm-project, Project: xm_leg_detector, Lines: 52, Source: train_leg_detector.cpp

Example 3: main

int main()
{
    const int train_sample_count = 300;

//#define LEPIOTA
#ifdef LEPIOTA
    const char* filename = "../../../OpenCV_SVN/samples/c/agaricus-lepiota.data";
#else
    const char* filename = "../../../OpenCV_SVN/samples/c/waveform.data";
#endif

    CvDTree dtree;
    CvBoost boost;
    CvRTrees rtrees;
    CvERTrees ertrees;

    CvMLData data;

    CvTrainTestSplit spl( train_sample_count );
    
    data.read_csv( filename );

#ifdef LEPIOTA
    data.set_response_idx( 0 );     
#else
    data.set_response_idx( 21 );     
    data.change_var_type( 21, CV_VAR_CATEGORICAL );
#endif

    data.set_train_test_split( &spl );
    
    printf("======DTREE=====\n");
    dtree.train( &data, CvDTreeParams( 10, 2, 0, false, 16, 0, false, false, 0 ));
    print_result( dtree.calc_error( &data, CV_TRAIN_ERROR), dtree.calc_error( &data ), dtree.get_var_importance() );

#ifdef LEPIOTA
    printf("======BOOST=====\n");
    boost.train( &data, CvBoostParams(CvBoost::DISCRETE, 100, 0.95, 2, false, 0));
    print_result( boost.calc_error( &data, CV_TRAIN_ERROR ), boost.calc_error( &data ), 0 );
#endif

    printf("======RTREES=====\n");
    rtrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
    print_result( rtrees.calc_error( &data, CV_TRAIN_ERROR), rtrees.calc_error( &data ), rtrees.get_var_importance() );

    printf("======ERTREES=====\n");
    ertrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
    print_result( ertrees.calc_error( &data, CV_TRAIN_ERROR), ertrees.calc_error( &data ), ertrees.get_var_importance() );

    return 0;
}
Developer ID: glo, Project: ee384b, Lines: 51, Source: tree_engine.cpp

Example 4: normalValidation

void normalValidation( DataSet& data, TrainResult& result)
{
	//these vars are not needed - pass empty Mats
	Mat varIdx, missingDataMask;

	Mat sampleIdx;

	result.train_hr = 0;
	result.test_hr = 0;
	result.fpRate = 0;
	result.fnRate = 0;
	
	//	printf( "numSamples %d", data.numSamples);
	
	//CvBoostTree boost;
	
	//define test and trainingsset
	float partTrain = 1.0/8.0;
	sampleIdx = Mat(1,data.numSamples,CV_8U,1.0);
	
	int negIdx = (int)floor(partTrain*data.numNeg);
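	// mark the sixth eighth of the negatives (columns [5*negIdx, 6*negIdx)) as held-out test samples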
	sampleIdx.colRange(negIdx*5, negIdx*6) = 0.0;
	
	
	int posIdx = (int)floor( partTrain*data.numPos );
	sampleIdx.colRange( data.numNeg+posIdx*5, data.numNeg + posIdx*6) = 0.0;
	
	//int numT = (cv::sum( sampleIdx ))[0];
	//printf("sample Idx sum (trainsamples): %d\n",numT);
	
	int numTestSamples = negIdx + posIdx;
	printf("numSamples: %d -- numTrainSamples: %d -- numTestSamples: %d\n",data.numSamples, data.numSamples-numTestSamples, numTestSamples );
	
	
	//training
	forest.train(data.data, CV_ROW_SAMPLE, data.responses, varIdx, sampleIdx, data.varType, missingDataMask, forestParams);
	
	//booster.train(data.data, CV_ROW_SAMPLE, data.responses, varIdx, sampleIdx, data.varType, missingDataMask, boostParams);
	
	//evaluation
	evaluation(forest, data, sampleIdx, result);
	
	
	double sum = (cv::sum(result.var_importance))[0];
	result.var_importance /= sum;
	
	printf( "____\nRecognition rate: train = %.2f%%, test = %.2f%% -- overall FN = %.2f%%, FP = %.2f%%\n",
		   result.train_hr*100., result.test_hr*100. ,result.fnRate*100. ,result.fpRate*100.);
}
Developer ID: crocdialer, Project: libccf, Lines: 51, Source: freshTrainer.cpp

Example 5: train_rf

CvRTrees* train_rf(CvMat* predictors, CvMat* labels)
{
	int stat[2];
	get_stat(labels, stat);
	printf("%d negative samples, %d positive samples\n", stat[0], stat[1]);
	
	const int tree_count = 500;
	const float priors[] = {0.25f,0.75f};
	CvRTrees* rtrees = new CvRTrees();
	CvRTParams rtparams = CvRTParams(5, 10, 0, false, 2, priors, true, 
									 (int)sqrt((float)predictors->cols), tree_count, 1e-6, 
									 CV_TERMCRIT_ITER + CV_TERMCRIT_EPS);
	// var_type is CV_8UC1 (one byte per element), so set it with the cv
	// helpers; writing through an int* would overrun each element and the buffer
	CvMat* var_type = cvCreateMat(predictors->cols + 1, 1, CV_8UC1);
	cvSet(var_type, cvScalarAll(CV_VAR_NUMERICAL));
	cvSetReal1D(var_type, predictors->cols, CV_VAR_CATEGORICAL);
	rtrees->train(predictors, CV_ROW_SAMPLE, labels, 0, 0, var_type, 0, rtparams);
	cvReleaseMat(&var_type);
	return rtrees;
}
Developer ID: PR2, Project: pr2_plugs, Lines: 21, Source: learning.cpp

Example 6: find_decision_boundary_RF

void find_decision_boundary_RF()
{
    img.copyTo( imgDst );

    Mat trainSamples, trainClasses;
    prepare_train_data( trainSamples, trainClasses );

    // learn classifier
    CvRTrees  rtrees;
    CvRTParams  params( 4, // max_depth,
                        2, // min_sample_count,
                        0.f, // regression_accuracy,
                        false, // use_surrogates,
                        16, // max_categories,
                        0, // priors,
                        false, // calc_var_importance,
                        1, // nactive_vars,
                        5, // max_num_of_trees_in_the_forest,
                        0, // forest_accuracy,
                        CV_TERMCRIT_ITER // termcrit_type
                       );

    rtrees.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), Mat(), Mat(), params );

    Mat testSample(1, 2, CV_32FC1 );
    for( int y = 0; y < img.rows; y += testStep )
    {
        for( int x = 0; x < img.cols; x += testStep )
        {
            testSample.at<float>(0) = (float)x;
            testSample.at<float>(1) = (float)y;

            int response = (int)rtrees.predict( testSample );
            circle( imgDst, Point(x,y), 2, classColors[response], 1 );
        }
    }
}
Developer ID: 406089450, Project: opencv, Lines: 37, Source: points_classifier.cpp

Example 7: main

Int_t main()
{
    // Access ntuples
    TFile* file[2];
    file[0] = new TFile("~/SingleMuon_pT_501_500.root");
    file[1] = new TFile("~/SingleMuon_pT_200_150.root");
    TTree* tree[2];
    tree[0] = (TTree*)file[0]->Get("trees");
    tree[1] = (TTree*)file[1]->Get("trees");

    // Declare variables and set branch addresses
    Double_t ptR[2] = {0, 0}, ptER[2] = {0, 0}, chi2R[2] = {0, 0}, d0R[2] = {0, 0}, dXYR[2] = {0, 0}, dZR[2] = {0, 0}, d0ER[2] = {0, 0}, etaR[2] = {0, 0}, etaER[2] = {0, 0}, phiR[2] = {0, 0}, resXR[2] = {0, 0}, resYR[2] = {0, 0};
    Double_t ptG[2] = {0, 0}, etaG[2] = {0, 0}, phiG[2] = {0, 0};
    Double_t globalTrkX[2] = {0, 0}, globalTrkY[2] = {0, 0}, globalTrkZ[2] = {0, 0}, hitPosX[2] = {0, 0}, hitPosY[2] = {0, 0}, hitPosZ[2] = {0, 0}, transImpPar4RecHits[2] = {0, 0};
    Int_t foundR[2] = {0, 0}, lostR[2] = {0, 0}, ndofR[2] = {0, 0}, idG[2] = {0, 0}, eventN[2] = {0, 0}, nRepeats[2] = {0, 0}, nMuons[2] = {0, 0};
    Int_t muonHits[2] = {0, 0}, dtHits[2] = {0, 0}, cscHits[2] = {0, 0}, rpcHits[2] = {0, 0};	// Muon hits
    Int_t pixelHits[2] = {0, 0}, barrelHits[2] = {0, 0}, endcapHits[2] = {0, 0};	// Pixel hits
    Int_t stripHits[2] = {0, 0}, tibHits[2] = {0, 0}, tidHits[2] = {0, 0}, tobHits[2] = {0, 0}, tecHits[2] = {0, 0};	// Strip hits
    Bool_t hQualR[2] = {0, 0}, repeatFlag[2] = {0, 0};
    // Missing folder items in ntuple??

    Long64_t event[2];
    for (Int_t t = 0; t < 2; t++)
    {
	event[t] = tree[t]->GetEntries();
	tree[t]->SetBranchAddress("ptR", &ptR[t]);
	tree[t]->SetBranchAddress("ptER", &ptER[t]);
	tree[t]->SetBranchAddress("chi2R", &chi2R[t]);
	tree[t]->SetBranchAddress("d0R", &d0R[t]);
	tree[t]->SetBranchAddress("dXYR", &dXYR[t]);
	tree[t]->SetBranchAddress("dZR", &dZR[t]);
	tree[t]->SetBranchAddress("d0ER", &d0ER[t]);
	tree[t]->SetBranchAddress("foundR", &foundR[t]);
	tree[t]->SetBranchAddress("lostR", &lostR[t]);
	tree[t]->SetBranchAddress("etaR", &etaR[t]);
	tree[t]->SetBranchAddress("etaER", &etaER[t]);
	tree[t]->SetBranchAddress("phiR", &phiR[t]);
	tree[t]->SetBranchAddress("hQualR", &hQualR[t]);
	tree[t]->SetBranchAddress("ndofR", &ndofR[t]);
	tree[t]->SetBranchAddress("ptG", &ptG[t]);
	tree[t]->SetBranchAddress("etaG", &etaG[t]);
	tree[t]->SetBranchAddress("phiG", &phiG[t]);
	tree[t]->SetBranchAddress("idG", &idG[t]);
	tree[t]->SetBranchAddress("residualXR", &resXR[t]);
	tree[t]->SetBranchAddress("residualYR", &resYR[t]);
	tree[t]->SetBranchAddress("globalTrkX", &globalTrkX[t]);
	tree[t]->SetBranchAddress("globalTrkY", &globalTrkY[t]);
	tree[t]->SetBranchAddress("globalTrkZ", &globalTrkZ[t]);
	tree[t]->SetBranchAddress("numberOfValidMuonHits", &muonHits[t]);
	tree[t]->SetBranchAddress("numberOfValidPixelHits", &pixelHits[t]);
	tree[t]->SetBranchAddress("numberOfValidPixelBarrelHits", &barrelHits[t]);
	tree[t]->SetBranchAddress("numberOfValidPixelEndcapHits", &endcapHits[t]);
	tree[t]->SetBranchAddress("numberOfValidStripHits", &stripHits[t]);
	tree[t]->SetBranchAddress("numberOfValidStripTIBHits", &tibHits[t]);
	tree[t]->SetBranchAddress("numberOfValidStripTIDHits", &tidHits[t]);
	tree[t]->SetBranchAddress("numberOfValidStripTOBHits", &tobHits[t]);
	tree[t]->SetBranchAddress("numberOfValidStripTECHits", &tecHits[t]);
	tree[t]->SetBranchAddress("numberOfValidMuonDTHits", &dtHits[t]);
	tree[t]->SetBranchAddress("numberOfValidMuonCSCHits", &cscHits[t]);
	tree[t]->SetBranchAddress("numberOfValidMuonRPCHits", &rpcHits[t]);
	tree[t]->SetBranchAddress("eventN", &eventN[t]);
	tree[t]->SetBranchAddress("repeatFlag", &repeatFlag[t]);
	tree[t]->SetBranchAddress("numbRepeats", &nRepeats[t]);
	tree[t]->SetBranchAddress("numbMuons", &nMuons[t]);
	tree[t]->SetBranchAddress("hitPosX", &hitPosX[t]);
	tree[t]->SetBranchAddress("hitPosY", &hitPosY[t]);
	tree[t]->SetBranchAddress("hitPosZ", &hitPosZ[t]);
	tree[t]->SetBranchAddress("transImpPar4RecHits", &transImpPar4RecHits[t]);
    }

    // Forest parameters
    const Int_t VARS = 4;
    Int_t train_good = 200;	// out of 791
    Int_t train_bad = 200;	// out of 678
    Int_t max_trees = 50;
    Int_t max_depth = 15;
    Int_t nactive_vars = 0;	// 0 for sqrt(VARS)
    Int_t min_sample_count = 10;
    Float_t regression_accuracy = 0;
    Bool_t use_surrogates = false;
    Int_t max_categories = 2;
    Float_t priors[] = {1., 1.};
    Bool_t calc_var_importance = false;
    Float_t forest_accuracy = 0.01;
    Int_t termcrit_type = CV_TERMCRIT_ITER;	// CV_TERMCRIT_EPS or ITER

    // Create canvases
    TCanvas* c1 = new TCanvas("c1", "Histogram and ROC curve", 1280, 480);
    c1->Divide(2, 1);
    c1->SetGrid();
    c1->SetLogx();
    c1->SetLogy();
    TCanvas* c2 = new TCanvas("c2", "Feature histograms", 1280, 720);
    c2->Divide(2, 2);
    TFile* canvas = new TFile("canvas.root", "RECREATE");
    TLegend* legend = new TLegend(0.11, 0.7, 0.4, 0.89);

    // Create histogram and graph arrays
    const Int_t BINS = 101;
    Double_t xmin = -0.01;
//......... (remainder of code omitted) .........
Developer ID: jlrainbolt, Project: OpenCVProjects, Lines: 101, Source: trackingrf.cpp

Example 8: main

int main()
{
	//----------------------------------- read images ------------------------
	IplImage*** imgs = new IplImage**[END - START + 1];
	IplImage*** tex_imgs = new IplImage**[END - START + 1];
	for (int i = 0; i < END - START + 1; i++)
	{
		imgs[i] = new IplImage*[5];
		tex_imgs[i] = new IplImage*[4];
	}
	for (int i = 0; i < END - START + 1; i++)
	{
		for (int j = 0; j < 4; j++)
		{
			imgs[i][j] = NULL;
			tex_imgs[i][j] = NULL;
		}
		imgs[i][4] = NULL;
	}
	//----------------------------------------
	cout << "read image..........." << endl;
	for (int i = START; i <= END; i++)
	{
		char flairname[100], t1name[100], t1cname[100], t2name[100], truthname[100];
		memset(flairname, 0, 100); memset(t1name, 0, 100); memset(t1cname, 0, 100); memset(t2name, 0, 100); memset(truthname, 0, 100);

		sprintf(flairname, "BRATS_HG0005_FLAIR/BRATS_HG0005_FLAIR_%d.png", i);
		sprintf(t1name, "BRATS_HG0005_T1/BRATS_HG0005_T1_%d.png", i);
		sprintf(t1cname, "BRATS_HG0005_T1C/BRATS_HG0005_T1C_%d.png", i);
		sprintf(t2name, "BRATS_HG0005_T2/BRATS_HG0005_T2_%d.png", i);
		sprintf(truthname, "BRATS_HG0005_truth/BRATS_HG0005_truth_%d.png", i);

		IplImage* flair_img = RGB2GRAY(cvLoadImage(flairname));
		IplImage* t1_img = RGB2GRAY(cvLoadImage(t1name));
		IplImage* t1c_img = RGB2GRAY(cvLoadImage(t1cname));
		IplImage* t2_img = RGB2GRAY(cvLoadImage(t2name));
		IplImage* truth_img = RGB2GRAY(cvLoadImage(truthname));

		imgs[i - START][0] = flair_img;
		imgs[i - START][1] = t1_img;
		imgs[i - START][2] = t1c_img;
		imgs[i - START][3] = t2_img;
		imgs[i - START][4] = truth_img;
		// compute the texture images
		IplImage* flair_tex = cvCreateImage(cvGetSize(flair_img), IPL_DEPTH_8U, 1);
		IplImage* t1_tex = cvCreateImage(cvGetSize(t1_img), IPL_DEPTH_8U, 1);
		IplImage* t1c_tex = cvCreateImage(cvGetSize(t1c_img), IPL_DEPTH_8U, 1);
		IplImage* t2_tex = cvCreateImage(cvGetSize(t2_img), IPL_DEPTH_8U, 1);
		LBP(flair_img, flair_tex);
		LBP(t1_img, t1_tex);
		LBP(t1c_img, t1c_tex);
		LBP(t2_img, t2_tex);

		tex_imgs[i - START][0] = flair_tex;
		tex_imgs[i - START][1] = t1_tex;
		tex_imgs[i - START][2] = t1c_tex;
		tex_imgs[i - START][3] = t2_tex;
	}
	//----------------------------------------------------------
	cout << "read training data............" << endl;
	Mat train_datas(HEIGHT*WIDTH*(END - START + 1), ATTRIBUTES_PER_SAMPLE, CV_32FC1);    
	Mat responses(HEIGHT*WIDTH*(END - START + 1), 1, CV_32SC1);
	//--- read the training data ---
	int dataline=read_training_data(imgs,tex_imgs, train_datas, responses);
	Mat _train_datas(dataline, ATTRIBUTES_PER_SAMPLE, CV_32FC1);
	Mat _responses(dataline, 1, CV_32SC1);
	// keep only the first dataline training rows
	for (int i = 0; i < dataline; i++)
	{
		float* float_data = train_datas.ptr<float>(i);
		int* int_data = responses.ptr<int>(i);

		_train_datas.at<float>(i, 0) = float_data[0];
		_train_datas.at<float>(i, 1) = float_data[1];
		_train_datas.at<float>(i, 2) = float_data[2];
		_train_datas.at<float>(i, 3) = float_data[3];

		_train_datas.at<float>(i, 4) = float_data[4];
		_train_datas.at<float>(i, 5) = float_data[5];
		_train_datas.at<float>(i, 6) = float_data[6];
		_train_datas.at<float>(i, 7) = float_data[7];

		_train_datas.at<float>(i, 8) = float_data[8];

		_responses.at<int>(i, 0) = int_data[0];
	}
	//---- set the variable types ----
	Mat var_type = Mat(ATTRIBUTES_PER_SAMPLE+1, 1, CV_8U);
	var_type.setTo(Scalar(CV_VAR_NUMERICAL)); // all inputs are numerical  
	var_type.at<uchar>(ATTRIBUTES_PER_SAMPLE, 0) = CV_VAR_CATEGORICAL;
	//--- train ---
	cout << "training......." << endl;
	float priors[NUMBER_OF_CLASSES] = { 1, 1 };
	CvRTParams params = CvRTParams(25, // max depth  
	                       4, // min sample count  
	                       0, // regression accuracy: N/A here  
                           false, // compute surrogate split, no missing data  
		                   5, // max number of categories (use sub-optimal algorithm for larger numbers)  
	                        priors, // the array of priors  
	                        false,  // calculate variable importance  
//......... (remainder of code omitted) .........
Developer ID: yuki252111, Project: computerVision, Lines: 101, Source: brain.cpp

Example 9: build_rtrees_classifier

static
int build_rtrees_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
{
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* var_type = 0;
    CvMat* sample_idx = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int i = 0;
    double train_hr = 0, test_hr = 0;
    CvRTrees forest;
    CvMat* var_importance = 0;

    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.8);

    // Create or load Random Trees classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        forest.load( filename_to_load );
        ntrain_samples = 0;
        if( forest.get_tree_count() == 0 )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        printf( "The classifier %s is loaded.\n", data_filename );
    }
    else
    {
        // create classifier by using <data> and <responses>
        printf( "Training the classifier ...\n");

        // 1. create type mask
        var_type = cvCreateMat( data->cols + 1, 1, CV_8U );
        cvSet( var_type, cvScalarAll(CV_VAR_ORDERED) );
        cvSetReal1D( var_type, data->cols, CV_VAR_CATEGORICAL );

        // 2. create sample_idx
        sample_idx = cvCreateMat( 1, nsamples_all, CV_8UC1 );
        {
            CvMat mat;
            cvGetCols( sample_idx, &mat, 0, ntrain_samples );
            cvSet( &mat, cvRealScalar(1) );

            cvGetCols( sample_idx, &mat, ntrain_samples, nsamples_all );
            cvSetZero( &mat );
        }

        // 3. train classifier
        forest.train( data, CV_ROW_SAMPLE, responses, 0, sample_idx, var_type, 0,
            CvRTParams(10,10,0,false,15,0,true,4,100,0.01f,CV_TERMCRIT_ITER));
        printf( "\n");
    }

    // compute prediction error on train and test data
    for( i = 0; i < nsamples_all; i++ )
    {
        double r;
        CvMat sample;
        cvGetRow( data, &sample, i );

        r = forest.predict( &sample );
        r = fabs((double)r - responses->data.fl[i]) <= FLT_EPSILON ? 1 : 0;

        if( i < ntrain_samples )
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= (double)(nsamples_all-ntrain_samples);
    train_hr /= (double)ntrain_samples;
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
            train_hr*100., test_hr*100. );

    printf( "Number of trees: %d\n", forest.get_tree_count() );

    // Print variable importance
    var_importance = (CvMat*)forest.get_var_importance();
    if( var_importance )
    {
        double rt_imp_sum = cvSum( var_importance ).val[0];
        printf("var#\timportance (in %%):\n");
        for( i = 0; i < var_importance->cols; i++ )
            printf( "%-2d\t%-4.1f\n", i,
            100.f*var_importance->data.fl[i]/rt_imp_sum);
    }

//......... (remainder of code omitted) .........
Developer ID: Ashwini7, Project: smart-python-programs, Lines: 101, Source: letter_recog.cpp

Example 10: train

int RandomTrees::train(const char* samples_filename, const char* model_filename, const double ratio, double &train_error, double &test_error)
{
	CvMat* data = 0;
	CvMat* responses = 0;
	CvMat* var_type = 0;
	CvMat* sample_idx = 0;

	this->tree_parameters_.nactive_vars = (int)sqrt(this->number_of_features_);

	int ok = read_num_class_data( samples_filename, this->number_of_features_, &data, &responses );
	int nsamples_all = 0, ntrain_samples = 0;
	int i = 0;
	double train_hr = 0, test_hr = 0;
	CvRTrees forest;
	CvMat* var_importance = 0;

	if( !ok )
	{
		cout << "Could not read the sample in" << samples_filename << endl;;
		return -1;
	}

	cout << "The sample file " << samples_filename << " is loaded." << endl;
	nsamples_all = data->rows;
	ntrain_samples = (int)(nsamples_all * ratio);


	// create classifier by using <data> and <responses>
	cout << "Training the classifier ..." << endl;

	// 1. create type mask
	var_type = cvCreateMat( data->cols + 1, 1, CV_8U );
	cvSet( var_type, cvScalarAll(CV_VAR_ORDERED) );
	cvSetReal1D( var_type, data->cols, CV_VAR_CATEGORICAL );

	// 2. create sample_idx
	sample_idx = cvCreateMat( 1, nsamples_all, CV_8UC1 );
	{
		CvMat mat;
		cvGetCols( sample_idx, &mat, 0, ntrain_samples );
		cvSet( &mat, cvRealScalar(1) );

		cvGetCols( sample_idx, &mat, ntrain_samples, nsamples_all );
		cvSetZero( &mat );
	}

	// 3. train classifier
	forest.train( data, CV_ROW_SAMPLE, responses, 0, sample_idx, var_type, 0, this->tree_parameters_);
	cout << endl;


	// compute prediction error on train and test data
	for( i = 0; i < nsamples_all; i++ )
	{
		double r;
		CvMat sample;
		cvGetRow( data, &sample, i );

		r = forest.predict( &sample );
		r = fabs((double)r - responses->data.fl[i]) <= FLT_EPSILON ? 1 : 0;

		if( i < ntrain_samples )
			train_hr += r;
		else
			test_hr += r;
	}

	test_hr /= (double)(nsamples_all-ntrain_samples);
	train_hr /= (double)ntrain_samples;

	train_error = 1 - train_hr;
	test_error = 1 - test_hr;

	cout << "Recognition rate: train = " << train_hr*100 << ", test = " << test_hr*100 << endl;
	cout << "Number of trees: " << forest.get_tree_count() << endl;

	// Print variable importance
	var_importance = (CvMat*)forest.get_var_importance();
	if( var_importance )
	{
		double rt_imp_sum = cvSum( var_importance ).val[0];
		printf("var#\timportance (in %%):\n");
		for( i = 0; i < var_importance->cols; i++ )
			printf( "%-2d\t%-4.1f\n", i,100.f*var_importance->data.fl[i]/rt_imp_sum);
	}

	// Save Random Trees classifier to file if needed
	if( model_filename )
		forest.save( model_filename );

	//cvReleaseMat( &var_importance );		//causes a segmentation fault
	cvReleaseMat( &sample_idx );
	cvReleaseMat( &var_type );
	cvReleaseMat( &data );
	cvReleaseMat( &responses );

	return 0;
}
Developer ID: EduFill, Project: hbrs-ros-pkg, Lines: 98, Source: random_trees.cpp

Example 11: mexFunction

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    ASSERT_NUM_RHS_ARGS_GTE(2);
    ASSERT_NUM_LHS_ARGS_LT(3);

    const mxArray* dataMtx = prhs[0];
    const mxArray* targetValueVec = prhs[1];
    
    //see if we have been provided a struct containing options for the training. 
    //if not, then use defaults provided by opencv
    CvRTParams* rtParams;
    if (nrhs > 2) {
        mexPrintf("Parsing struct argument for parameters\n");
        rtParams = parse_struct_to_forest_config(prhs[2]);
    }
    else {
        mexPrintf("Using default parameters\n");
        rtParams = parse_struct_to_forest_config(NULL);
    }

    mexPrintf("Parameters:\n");
    print_forest_params(rtParams);
    
    unsigned int numSamples, numVariables;

    CvMat* dataCvMtx = matlab_matrix_to_opencv_matrix(dataMtx);
    numSamples = dataCvMtx->rows;
    numVariables = dataCvMtx->cols;
    mexPrintf("training data converted to opencv format. %d samples, each with %d variables\n",
              numSamples, numVariables);
#ifdef PRINT_INPUTS
    print_opencv_matrix(dataCvMtx);
#endif

    CvMat* targetCvMtx = matlab_array_to_opencv_array(targetValueVec);
    if (targetCvMtx->rows != numSamples) {
		MEX_ERR_PRINTF("training data had %d samples, labels contain %d values.", 
		               numSamples, targetCvMtx->rows);
    }
    mexPrintf("training labels converted to opencv format.\n");
#ifdef PRINT_INPUTS
    print_opencv_matrix(targetCvMtx);
#endif

    //specify the type of our variables. In this case, all our variables
    //(including the response) are ordered/numerical, so the forest does regression
    CvMat* var_type = cvCreateMat(dataCvMtx->cols + 1, 1, CV_8U);
    cvSet(var_type, cvScalarAll(CV_VAR_ORDERED));

    //actually make the forest and do the training
    clock_t start_time, end_time;
    mexPrintf("training now...");
    start_time = clock();
    CvRTrees *forest = new CvRTrees;
    forest->train(dataCvMtx, CV_ROW_SAMPLE, targetCvMtx, NULL, NULL, var_type, NULL, *rtParams);
    end_time = clock();
	clock_t diff_time = end_time - start_time;
	double seconds_passed = ((float)diff_time) / CLOCKS_PER_SEC;
    mexPrintf("training done in %fs\n", seconds_passed);

    //pack the pointer and return it to matlab
    plhs[0] = pack_pointer((void *)forest);

	// If the user supplied a second lhs argument, return them the time taken to train
	if (nlhs > 1) {
		plhs[1] = mxCreateDoubleScalar(seconds_passed);
	}
    
    cvReleaseMat(&var_type);
    cvReleaseMat(&dataCvMtx);
    cvReleaseMat(&targetCvMtx);
} 
Developer ID: malcolmreynolds, Project: matlab-opencv-interop, Lines: 70, Source: rf_train.cpp

Example 12: main

int main(int argc, char** argv)
{
  // std::cout<<FLT_EPSILON<<std::endl; 
  cv::Mat training_data, training_labels,testing_data, testing_labels;
  
  training_data = read_rgbd_data_cv(argv[1],NUMBER_OF_TRAINING_SAMPLES);
  training_labels = read_rgbd_data_cv(argv[2], NUMBER_OF_TRAINING_SAMPLES);
  testing_data = read_rgbd_data_cv(argv[3],NUMBER_OF_TESTING_SAMPLES);
  testing_labels = read_rgbd_data_cv(argv[4], NUMBER_OF_TESTING_SAMPLES);
  
 
  printf("dataset specs: %d samples with %d features\n", training_data.rows, training_data.cols);

  // define all the attributes as numerical
  // alternatives are CV_VAR_CATEGORICAL or CV_VAR_ORDERED(=CV_VAR_NUMERICAL)
  // that can be assigned on a per attribute basis

  cv::Mat var_type = cv::Mat(training_data.cols + 1, 1, CV_8U );
  var_type.setTo(cv::Scalar(CV_VAR_NUMERICAL) ); // all inputs are numerical
  var_type.at<uchar>(training_data.cols, 0) = CV_VAR_CATEGORICAL; // the labels are categorical

  /******************************** Step 1: define and initialize the Random Trees parameters ******************************/
  float priors[] = {1,1,1,1,1};  // per-class weights (all equal)
  CvRTParams params = CvRTParams(25, // max depth
				 50, // min sample count
				 0, // regression accuracy: N/A here
				 false, // compute surrogate split, no missing data
				 15, // max number of categories (use sub-optimal algorithm for larger numbers)
				 priors, // the array of priors
				 false,  // calculate variable importance
				 20,       // number of variables randomly selected at node and used to find the best split(s).
				 NUMBER_OF_TREES,	 // max number of trees in the forest
				 0.01f,				// forest accuracy
				 CV_TERMCRIT_ITER |	CV_TERMCRIT_EPS // termination criteria
				 );
  
  /**************************** Step 2: train the Random Decision Forest (RDF) classifier *********************/
  // printf( "\nUsing training database: %s\n\n", argv[1]);
  CvRTrees* rtree = new CvRTrees;
  rtree->train(training_data, CV_ROW_SAMPLE, training_labels,
	       cv::Mat(), cv::Mat(), var_type, cv::Mat(), params);
  
  // perform classifier testing and report results
  cv::Mat test_sample, train_sample;
  int correct_class = 0;
  int wrong_class = 0;
  int result;
  int label;
  int false_positives [NUMBER_OF_CLASSES] = {0,0,0,0,0};
  int false_negatives [NUMBER_OF_CLASSES] = {0,0,0,0,0};

  CvDTreeNode* leaf_nodes [training_data.rows];
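  // (the runtime-sized array above is a VLA, a compiler extension rather than standard C++)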

  for (int tsample = 0; tsample < training_data.rows; tsample++)
    {
      train_sample = training_data.row(tsample);
      CvForestTree* tree = rtree->get_tree(1);
      CvDTreeNode* leaf_node = tree->predict(train_sample, cv::Mat());
      leaf_nodes[tsample] = leaf_node; 
    }

  // printf( "\nUsing testing database: %s\n\n", argv[2]);

  for (int tsample = 0; tsample < testing_data.rows; tsample++)
    {	       
      // extract a row from the testing matrix
      test_sample = testing_data.row(tsample);
      // train on the testing data:
      // test_sample = training_data.row(tsample);
      /******************************** Step 3: predict *********************************************/

      result = (int) rtree->predict(test_sample, cv::Mat());
      label = (int) testing_labels.at<float>(tsample, 0);

      printf("Testing Sample %i -> class result (digit %d) - label (digit %d)\n", tsample, result, label);

      // get the leaf nodes of the first tree in the forest
      /*CvForestTree* tree = rtree->get_tree(0);
      std::list<const CvDTreeNode*> leaf_list;
      leaf_list = get_leaf_node( tree );
      printf("Number of Leaf nodes: %ld\n", leaf_list.size());*/

      // if the prediction and the (true) testing classification are the same
      // (N.B. openCV uses a floating point decision tree implementation!)
      if (fabs(result - label)
	  >= FLT_EPSILON)
	{
	  // if they differ more than floating point error => wrong class
	  wrong_class++;
	  false_positives[(int) result]++;
	  false_negatives[(int) testing_labels.at<float>(tsample, 0)]++;
	}
      else
	{
	  // otherwise correct
	  correct_class++;
	}
    }

  printf( // "\nResults on the testing database: %s\n"
//......... (remainder of code omitted) .........
Developer ID: far-ad, Project: GP-RF, Lines: 101, Source: test_rgbd.cpp

Example 13: main

int main( int argc, char** argv )
{
    // lets just check the version first

    printf ("OpenCV version %s (%d.%d.%d)\n",
            CV_VERSION,
            CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);
    
    if(argc != 4)
    {
     	printf("Usage: %s file_training file_testing number_of_classes", argv[0]);
        exit(0);
    }

    //define number of training and testing samples and number of attributes
    int* results = find_parameters_from_csv(argv[1], argv[2]);
    
    int NUMBER_OF_TRAINING_SAMPLES = results[0] - 1;
    int NUMBER_OF_TESTING_SAMPLES = results[1] -1 ;
    int ATTRIBUTES_PER_SAMPLE = results[2];

    int NUMBER_OF_CLASSES = atoi(argv[3]);

    printf("N° of training samples: %d \nN° testing of samples: %d \nN° of attributes: %d \nN° of classes: %d \n", NUMBER_OF_TRAINING_SAMPLES,NUMBER_OF_TESTING_SAMPLES,ATTRIBUTES_PER_SAMPLE,NUMBER_OF_CLASSES );

    // define training data storage matrices (one for attribute examples, one
    // for classifications)

    Mat training_data = Mat(NUMBER_OF_TRAINING_SAMPLES, ATTRIBUTES_PER_SAMPLE, CV_32FC1);
    Mat training_classifications = Mat(NUMBER_OF_TRAINING_SAMPLES, 1, CV_32FC1);

    //define testing data storage matrices

    Mat testing_data = Mat(NUMBER_OF_TESTING_SAMPLES, ATTRIBUTES_PER_SAMPLE, CV_32FC1);
    Mat testing_classifications = Mat(NUMBER_OF_TESTING_SAMPLES, 1, CV_32FC1);

    // define all the attributes as numerical
    // alternatives are CV_VAR_CATEGORICAL or CV_VAR_ORDERED(=CV_VAR_NUMERICAL)
    // that can be assigned on a per attribute basis

    Mat var_type = Mat(ATTRIBUTES_PER_SAMPLE + 1, 1, CV_8U );
    var_type.setTo(Scalar(CV_VAR_NUMERICAL) ); // all inputs are numerical

    // this is a classification problem (i.e. predict a discrete number of class
    // outputs) so reset the last (+1) output var_type element to CV_VAR_CATEGORICAL

    var_type.at<uchar>(ATTRIBUTES_PER_SAMPLE, 0) = CV_VAR_CATEGORICAL;

    double result; // value returned from a prediction

    // load training and testing data sets

    if (read_data_from_csv(argv[1], training_data, training_classifications, NUMBER_OF_TRAINING_SAMPLES, ATTRIBUTES_PER_SAMPLE) &&
            read_data_from_csv(argv[2], testing_data, testing_classifications, NUMBER_OF_TESTING_SAMPLES, ATTRIBUTES_PER_SAMPLE))
    {
        // define the parameters for training the random forest (trees)

  	// per-class weights
        // (all equal, assuming balanced samples of each class)
        float priors[NUMBER_OF_CLASSES];
	for (int z = 0; z < NUMBER_OF_CLASSES; z++)
	{
		priors[z] = 1;
	}
	// give every class an initial weight of 1

        CvRTParams params = CvRTParams(25, // max depth
                                       2, // min sample count
                                       0, // regression accuracy: N/A here
                                       false, // compute surrogate split, no missing data
                                       15, // max number of categories (use sub-optimal algorithm for larger numbers)
                                       priors, // the array of priors
                                       false,  // calculate variable importance
                                       4,       // number of variables randomly selected at node and used to find the best split(s).
                                       100,	 // max number of trees in the forest
                                       0.01f,				// forest accuracy
                                       CV_TERMCRIT_ITER |	CV_TERMCRIT_EPS // termination criteria
                                      );

        // train random forest classifier (using training data)

        printf( "\nUsing training database: %s\n\n", argv[1]);
        CvRTrees* rtree = new CvRTrees;

        rtree->train(training_data, CV_ROW_SAMPLE, training_classifications,
                     Mat(), Mat(), var_type, Mat(), params);

        // perform classifier testing and report results

        Mat test_sample;
        int correct_class = 0;
        int wrong_class = 0;
        int false_positives [NUMBER_OF_CLASSES];

	//initialize every element in false_positives to 0
	for (int z = 0; z < NUMBER_OF_CLASSES; z++)
        {
		false_positives[z] = 0;
	}

//......... (remainder of code omitted) .........
Developer ID: SimoneNigro, Project: OpenCVMachineLearningTest, Lines: 101, Source: training_automatico.cpp

Example 14: doCrossValidation

void doCrossValidation( DataSet& data, TrainResult& result)
{
	//these vars not needed - use empty Mat
	Mat varIdx, missingDataMask;

//	BoostParams forestParams = cv::BoostParams(cv::Boost::DEFAULT, 100, 0.95, 5, false, 0 );

	Mat sampleIdx;
	int nFold = 5;
	result.train_hr = 0;
	result.test_hr = 0;
	result.fpRate = 0;
	result.fnRate = 0;

//	printf( "numSamples %d", data.numSamples);

	
	// define training/test-sets within trainData
	for(int round = 0; round < nFold; round++)
	{


		//define test and trainingsset
		float partTrain = 1.0/nFold;
		sampleIdx = Mat(1,data.numSamples,CV_8U,1.0);

		int negIdx = (int)floor(partTrain*data.numNeg);
		sampleIdx.colRange(negIdx*round, negIdx*(round+1)) = 0.0;


		int posIdx = (int)floor( partTrain*data.numPos );
		sampleIdx.colRange( data.numNeg+posIdx*round, data.numNeg + posIdx*(round+1)) = 0.0;

		//int numT = (cv::sum( sampleIdx ))[0];
		//printf("sample Idx sum (trainsamples): %d\n",numT);
		
		int numTestSamples = negIdx + posIdx;
		printf("numSamples: %d -- numTrainSamples: %d -- numTestSamples: %d\n",data.numSamples, data.numSamples-numTestSamples, numTestSamples );


		//training
		forest.train(data.data, CV_ROW_SAMPLE, data.responses, varIdx, sampleIdx, data.varType, missingDataMask, forestParams);


		//evaluation
		TrainResult roundResult;
		evaluation(forest, data, sampleIdx, roundResult);

		result.fnRate 	+= roundResult.fnRate;
		result.fpRate 	+= roundResult.fpRate;
		result.test_hr 	+= roundResult.test_hr;
		result.train_hr += roundResult.train_hr;
		if( round == 0 )
			result.var_importance = roundResult.var_importance.clone();
		else
			result.var_importance += roundResult.var_importance;

		printf( "Round %d.Recognition rate: train = %.2f%%, test = %.2f%% -- overall FN = %.2f%%, FP = %.2f%%\n",
				round, roundResult.train_hr*100., roundResult.test_hr*100. ,roundResult.fnRate*100. ,roundResult.fpRate*100.);
	}
	result.fnRate 	/= nFold;
	result.fpRate 	/= nFold;
	result.test_hr 	/= nFold;
	result.train_hr /= nFold;
	result.var_importance /= nFold;
	double sum = (cv::sum(result.var_importance))[0];
	result.var_importance /= sum;

	printf( "____\nRecognition rate: train = %.2f%%, test = %.2f%% -- overall FN = %.2f%%, FP = %.2f%%\n",
			result.train_hr*100., result.test_hr*100. ,result.fnRate*100. ,result.fpRate*100.);
}
Developer ID: crocdialer, Project: libccf, Lines: 71, Source: freshTrainer.cpp

Example 15: main

int main()
{
    const int train_sample_count = 300;
    bool is_regression = false;

    const char* filename = "data/waveform.data";
    int response_idx = 21;

    CvMLData data;

    CvTrainTestSplit spl( train_sample_count );
    
    if(data.read_csv(filename) != 0)
    {
        printf("couldn't read %s\n", filename);
        exit(0);
    }

    data.set_response_idx(response_idx);
    data.change_var_type(response_idx, CV_VAR_CATEGORICAL);
    data.set_train_test_split( &spl );

    const CvMat* values = data.get_values();
    const CvMat* response = data.get_responses();
    const CvMat* missing = data.get_missing();
    const CvMat* var_types = data.get_var_types();
    const CvMat* train_sidx = data.get_train_sample_idx();
    const CvMat* var_idx = data.get_var_idx();
    CvMat* response_map;
    CvMat* ordered_response = cv_preprocess_categories(response, var_idx, response->rows, &response_map, NULL);
    int num_classes = response_map->cols;
    
    CvDTree dtree;
    printf("======DTREE=====\n");
    CvDTreeParams cvd_params( 10, 1, 0, false, 16, 0, false, false, 0);
    dtree.train( &data, cvd_params);
    print_result( dtree.calc_error( &data, CV_TRAIN_ERROR), dtree.calc_error( &data, CV_TEST_ERROR ), dtree.get_var_importance() );

#if 0
    /* boosted trees are only implemented for two classes */
    printf("======BOOST=====\n");
    CvBoost boost;
    boost.train( &data, CvBoostParams(CvBoost::DISCRETE, 100, 0.95, 2, false, 0));
    print_result( boost.calc_error( &data, CV_TRAIN_ERROR ), boost.calc_error( &data, CV_TEST_ERROR), 0 );
#endif

    printf("======RTREES=====\n");
    CvRTrees rtrees;
    rtrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
    print_result( rtrees.calc_error( &data, CV_TRAIN_ERROR), rtrees.calc_error( &data, CV_TEST_ERROR ), rtrees.get_var_importance() );

    printf("======ERTREES=====\n");
    CvERTrees ertrees;
    ertrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
    print_result( ertrees.calc_error( &data, CV_TRAIN_ERROR), ertrees.calc_error( &data, CV_TEST_ERROR ), ertrees.get_var_importance() );

    printf("======GBTREES=====\n");
    CvGBTrees gbtrees;
    CvGBTreesParams gbparams;
    gbparams.loss_function_type = CvGBTrees::DEVIANCE_LOSS; // classification, not regression
    gbtrees.train( &data, gbparams);
    
    //gbt_print_error(&gbtrees, values, response, response_idx, train_sidx);
    print_result( gbtrees.calc_error( &data, CV_TRAIN_ERROR), gbtrees.calc_error( &data, CV_TEST_ERROR ), 0);

    printf("======KNEAREST=====\n");
    CvKNearest knearest;
    //bool CvKNearest::train( const Mat& _train_data, const Mat& _responses,
    //                const Mat& _sample_idx, bool _is_regression,
    //                int _max_k, bool _update_base )
    bool is_classifier = var_types->data.ptr[var_types->cols-1] == CV_VAR_CATEGORICAL;
    assert(is_classifier);
    int max_k = 10;
    knearest.train(values, response, train_sidx, is_regression, max_k, false);

    CvMat* new_response = cvCreateMat(response->rows, 1, values->type);
    //print_types();

    //const CvMat* train_sidx = data.get_train_sample_idx();
    knearest.find_nearest(values, max_k, new_response, 0, 0, 0);

    print_result(knearest_calc_error(values, response, new_response, train_sidx, is_regression, CV_TRAIN_ERROR),
                 knearest_calc_error(values, response, new_response, train_sidx, is_regression, CV_TEST_ERROR), 0);

    printf("======== RBF SVM =======\n");
    //printf("indexes: %d / %d, responses: %d\n", train_sidx->cols, var_idx->cols, values->rows);
    CvMySVM svm1;
    CvSVMParams params1 = CvSVMParams(CvSVM::C_SVC, CvSVM::RBF,
                                     /*degree*/0, /*gamma*/1, /*coef0*/0, /*C*/1,
                                     /*nu*/0, /*p*/0, /*class_weights*/0,
                                     cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON));
    //svm1.train(values, response, train_sidx, var_idx, params1);
    svm1.train_auto(values, response, var_idx, train_sidx, params1);
    svm_print_error(&svm1, values, response, response_idx, train_sidx);

    printf("======== Linear SVM =======\n");
    CvMySVM svm2;
    CvSVMParams params2 = CvSVMParams(CvSVM::C_SVC, CvSVM::LINEAR,
                                     /*degree*/0, /*gamma*/1, /*coef0*/0, /*C*/1,
                                     /*nu*/0, /*p*/0, /*class_weights*/0,
//......... (remainder of code omitted) .........
Developer ID: JackieXie168, Project: mrscake, Lines: 101, Source: test_cv.cpp


Note: The CvRTrees::train examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.