This article collects typical usage examples of the C++ method CvANN_MLP::predict. If you have been wondering how exactly CvANN_MLP::predict is used in C++, how to call it, or what working CvANN_MLP::predict code looks like, the curated examples here may help. You can also browse further usage examples of the containing class, CvANN_MLP.
A total of 15 code examples of the CvANN_MLP::predict method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
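As a quick orientation, the snippet below is a minimal, self-contained sketch of the call pattern these examples share, using the legacy CvANN_MLP class from OpenCV 2.x (opencv2/ml). The function name predictSketch, the layer sizes, the termination criteria, and the trainData/trainTargets matrices are illustrative placeholders rather than values taken from any particular example.

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>

// Minimal sketch: build a small MLP, train it with backpropagation, and call predict().
// trainData:    CV_32F matrix, one sample per row
// trainTargets: CV_32F matrix, one target row per sample
void predictSketch(const cv::Mat& trainData, const cv::Mat& trainTargets)
{
    // Topology (placeholder sizes): trainData.cols inputs -> 8 hidden units -> 1 output.
    cv::Mat layers = (cv::Mat_<int>(3, 1) << trainData.cols, 8, 1);

    CvANN_MLP mlp;
    mlp.create(layers, CvANN_MLP::SIGMOID_SYM);

    CvANN_MLP_TrainParams params;
    params.train_method = CvANN_MLP_TrainParams::BACKPROP;
    params.bp_dw_scale = 0.1;
    params.bp_moment_scale = 0.1;
    params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, 1e-5);

    mlp.train(trainData, trainTargets, cv::Mat(), cv::Mat(), params);

    // predict() writes one output row per input row; here we run it on the first training sample.
    cv::Mat response;
    mlp.predict(trainData.row(0), response);
    std::cout << "prediction: " << response.at<float>(0, 0) << std::endl;
}

The examples below all follow this same pattern; they differ mainly in how the network is obtained (trained in place, or loaded from an XML file) and in how the output activations are turned into a class index, usually by taking the column with the largest value.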
Example 1: mlp
// Neural Network - Multi-Layer Perceptron
void mlp(Mat& trainingData, Mat& trainingClasses, Mat& testData, Mat& testClasses) {
    // Network topology: 2 inputs, two hidden layers of 10 and 15 units, 1 output.
    Mat layers = Mat(4, 1, CV_32SC1);
    layers.row(0) = Scalar(2);
    layers.row(1) = Scalar(10);
    layers.row(2) = Scalar(15);
    layers.row(3) = Scalar(1);

    CvANN_MLP mlp;
    CvANN_MLP_TrainParams params;
    CvTermCriteria criteria;
    criteria.max_iter = 100;
    criteria.epsilon = 0.00001f;
    criteria.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
    params.train_method = CvANN_MLP_TrainParams::BACKPROP;
    params.bp_dw_scale = 0.05f;
    params.bp_moment_scale = 0.05f;
    params.term_crit = criteria;

    mlp.create(layers);
    // train
    mlp.train(trainingData, trainingClasses, Mat(), Mat(), params);

    // predict each test sample and collect the responses
    Mat predicted(testClasses.rows, 1, CV_32F);
    for (int i = 0; i < testData.rows; i++) {
        Mat response(1, 1, CV_32FC1);
        Mat sample = testData.row(i);
        mlp.predict(sample, response);
        predicted.at<float>(i, 0) = response.at<float>(0, 0);
    }

    cout << "Accuracy_{MLP} = " << evaluate(predicted, testClasses) << endl;
    plot_binary(testData, predicted, "Predictions Backpropagation");
}
Example 2: ANN_Region
void CImageProcess::ANN_Region()
{
char path[512] = {0};
float obj[MAX_TRAIN_COLS] = {0};
Sample("./sources/0 (1).bmp", obj, MAX_TRAIN_COLS);
CvANN_MLP bpANN;
CvANN_MLP_TrainParams param;
param.term_crit = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5000,0.01);
param.train_method = CvANN_MLP_TrainParams::BACKPROP;
param.bp_dw_scale = 0.1;
param.bp_moment_scale = 0.1;
Mat layerSize = (Mat_<int>(1,3)<<MAX_TRAIN_COLS ,MAX_OBJ_COLS,MAX_OBJ_COLS);
bpANN.create(layerSize, CvANN_MLP::SIGMOID_SYM);
// Note: the network must be trained, or its weights loaded (e.g. via the load() call below), before predict() returns meaningful output.
//m_bpANN.load("./sources/mlp.xml");
Mat input(1, MAX_TRAIN_COLS, CV_32FC1, obj);
float _obj[MAX_OBJ_COLS] ={0};
Mat out(1, MAX_OBJ_COLS, CV_32FC1, _obj);
//Mat out;
bpANN.predict(input,out);
int i=0;
i+=i;
}
Example 3: Predict_mlp
void Model::Predict_mlp( const SampleSet& samples, SampleSet& outError )
{
int true_resp = 0;
CvANN_MLP *model = (CvANN_MLP*)m_pModel;
cv::Mat result;
model->predict(samples.Samples(), result);
for (int i = 0; i < samples.N(); i++)
{
float maxcol = -1;
int index = -1;
for (int j = 0; j < result.cols; j++)
{
if (result.at<float>(i,j) > maxcol)
{
maxcol = result.at<float>(i,j);
index = j;
}
}
float label = samples.Classes()[index];
if (label != samples.GetLabelAt(i))
{
outError.Add(samples.GetSampleAt(i), samples.GetLabelAt(i));
}
else
{
true_resp++;
}
}
printf("%d samples, %d classified correctly\n", samples.N(), true_resp);
}
Example 4: classify
int classify(Mat f){
Mat output(1, numberCharacters, CV_32FC1);
ann.predict(f, output);
Point maxLoc;
double maxVal;
minMaxLoc(output, 0, &maxVal, 0, &maxLoc);
// We need to know where the maximum value is in the output; its x coordinate (column) is the class.
cout<<maxLoc.x<<endl;
return maxLoc.x;
}
Example 5: predict
void predict(int nbSamples, int size)
{
    // Load the trained network from XML.
    CvANN_MLP network;
    CvFileStorage* storage = cvOpenFileStorage("data/neural_model.xml", 0, CV_STORAGE_READ);
    CvFileNode *n = cvGetFileNodeByName(storage, 0, "neural_model");
    network.read(storage, n);

    // Read the samples to classify: one row of size*size pixel values per sample.
    Mat toPredict(nbSamples, size * size, CV_32F);
    float pixel;
    FILE *file = fopen("data/predict.txt", "r");
    for(int i = 0; i < nbSamples; i++){
        for(int j = 0; j < size * size; j++){
            fscanf(file, "%f,", &pixel);
            toPredict.at<float>(i, j) = pixel;
        }
    }
    fclose(file);

    // One output row per sample, 62 class activations each.
    Mat classOut(nbSamples, 62, CV_32F);
    network.predict(toPredict, classOut);

    // For every sample, report the class whose activation is highest.
    for(int k = 0; k < nbSamples; k++)
    {
        int maxIndex = 0;
        float maxValue = classOut.at<float>(k, 0);
        for(int index = 1; index < 62; index++){
            float value = classOut.at<float>(k, index);
            if(value > maxValue){
                maxValue = value;
                maxIndex = index;
            }
        }
        cout << "Index predicted : " << maxIndex + 1 << endl;
    }
    cvReleaseFileStorage(&storage);
}
Example 6: Predict
// Predict the output with the trained ANN given the two inputs.
void Predict(float data1, float data2)
{
float _sample[2];
CvMat sample = cvMat(1, 2, CV_32FC1, _sample);
float _predout[1];
CvMat predout = cvMat(1, 1, CV_32FC1, _predout);
sample.data.fl[0] = data1;
sample.data.fl[1] = data2;
machineBrain.predict(&sample, &predout);
printf("%f %f -> %f \n", data1, data2, predout.data.fl[0]);
}
Example 7: CheckCircle
int CheckCircle( Mat src) // pass in the Mat together with its detected circle
{
//Load the XML file and build the neural network
CvANN_MLP nnetwork;
CvFileStorage* storage = cvOpenFileStorage( "param.xml", 0, CV_STORAGE_READ );
CvFileNode *n = cvGetFileNodeByName(storage,0,"DigitOCR");
nnetwork.read(storage,n);
cvReleaseFileStorage(&storage);
//Generate the feature vector
int index;
float train[64];
for(int i=0; i<64; i++) train[i] = 0;
Mat norm(src.size(), src.type());
Mat sample(src.size(), src.type());
normalize(src, norm, 0, 255, NORM_MINMAX, CV_8UC3);
for(int y=0; y<sample.rows; y++){
for(int x=0; x<sample.cols; x++){
index = y*sample.step+x*sample.elemSize();
int color = (norm.data[index+0]/64)+
(norm.data[index+1]/64)*4+
(norm.data[index+2]/64)*16;
train[color]+=1;
}
}
int pixel = sample.cols * sample.rows;
for(int i=0; i<64; i++){
train[i] /= pixel;
}
//Run the classification
Mat data(1, ATTRIBUTES, CV_32F);
for(int col=0; col<ATTRIBUTES; col++){
data.at<float>(0,col) = train[col];
}
int maxIndex = 0;
Mat classOut(1,CLASSES,CV_32F);
nnetwork.predict(data, classOut);
float value;
float maxValue=classOut.at<float>(0,0);
for(int index=1;index<CLASSES;index++){
value = classOut.at<float>(0,index);
if(value > maxValue){
maxValue = value;
maxIndex=index;
}
}
return maxIndex;
}
Example 8: predict
// Predict the outputs with the trained ANN for each sample in the sonar test set.
void predict()
{
int test_sample_count = 78;
//The test data matrix.
float td[78][61];
float _sample[60];
CvMat sample = cvMat(1, 60, CV_32FC1, _sample);
float _predout[1];
CvMat predout = cvMat(1, 1, CV_32FC1, _predout);
//Read the test file
FILE *fin;
fin = fopen("data/sonar_test.csv", "r");
for (int i=0; i<test_sample_count; i++)
    for (int j=0; j<=60; j++)   // 60 features followed by the label in column 60
        fscanf(fin, "%f,", &td[i][j]);
fclose(fin);
int fnCount = 0;
int fpCount = 0;
for (int i=0; i < test_sample_count; i++)
{
for (int j=0; j < 60; j++) {
sample.data.fl[j] = td[i][j];
}
float actual = td[i][60];
ann.predict(&sample, &predout);
float predicted = predout.data.fl[0];
if (actual == 1.0f && predicted < 0.0f)
{
fnCount++;
std::cout << "BOOM! ";
}
else if (actual == -1.0f && predicted > 0.0f)
{
fpCount++;
}
printf("predicted: %f, actual: %f\n", predicted, actual);
}
std::cout << "False Negative %: " << ((float)fnCount / test_sample_count)*100 << "%\n";
std::cout << "False Positive %: " << ((float)fpCount / test_sample_count)*100 << "%\n";
std::cout << "Total Misses: " << ((float)(fpCount+fnCount) / test_sample_count)*100 << "%\n";
}
Example 9: Predict
// Predict the output with the trained ANN given the six inputs.
void Predict(float data0, float data1, float data2, float data3, float data4, float data5)
{
float _sample[6];
CvMat sample = cvMat(1, 6, CV_32FC1, _sample);
float _predout[1];
CvMat predout = cvMat(1, 1, CV_32FC1, _predout);
sample.data.fl[0] = data0;
sample.data.fl[1] = data1;
sample.data.fl[2] = data2;
sample.data.fl[3] = data3;
sample.data.fl[4] = data4;
sample.data.fl[5] = data5;
machineBrain.predict(&sample, &predout);
printf("%f \n",predout.data.fl[0]);
}
Example 10: classify_emotion
int classify_emotion(Mat& face, const char* ann_file, int tagonimg)
{
int ret = 0;
Mat output(1, OUTPUT_SIZE, CV_64FC1);
Mat data(1, nn_input_size, CV_64FC1);
CvANN_MLP nnetwork;
nnetwork.load(ann_file, "facial_ann");
vector<Point_<double> > points;
vector<double> distances;
if(!get_facial_points(face, points)) {
return -1;
}
get_euler_distance_sets(points, distances);
int j = 0;
while(!distances.empty()) {
data.at<double>(0,j) = distances.back();
distances.pop_back();
j++;
}
nnetwork.predict(data, output);
/* Find the biggest value in the output vector, that is what we want. */
double b = 0;
int k = 1;
for (j = 0; j < OUTPUT_SIZE; j++) {
cout<<output.at<double>(0, j)<<" ";
if (b < output.at<double>(0, j)) {
b = output.at<double>(0, j);
k = j + 1;
}
}
/* Print the result on the image. */
if (tagonimg) {
putText(face, get_emotion(k), Point(30, 30), FONT_HERSHEY_SIMPLEX,
0.7, Scalar(0, 255, 0), 2);
draw_distance(face, points);
}
return k;
}
Example 11: testModel
void Training::testModel(string testPath,CvANN_MLP &neural_network){
int success(0),fail(0);
loadDataSet(testPath, testSet, testClassification, NB_TEST_SAMPLES);
cout<<"Test set loaded"<<endl;
cv::Mat classificationResult(1, CLASSES, CV_32F);
Mat testSample;
for(int i=0; i < NB_TEST_SAMPLES;i++){
testSample = testSet.row(i);
//predict
neural_network.predict(testSample, classificationResult);
int maxIndex = 0;
float value = 0.0f;
float maxValue = classificationResult.at<float>(0,0);
for(int j=1; j<CLASSES;j++){
value=classificationResult.at<float>(0,j);
if(value>maxValue){
maxValue = value;
maxIndex = j;
}
}
if(testClassification.at<float>(i,maxIndex)!=1.0f)
fail++;
else
success++;
}
cout<<"Successfully classified : "<<success<<endl;
cout<<"Wrongly classified ! "<<fail<<endl;
cout<<"Succes % : "<<success * 100 / NB_TEST_SAMPLES<<endl;
}
Example 12: On_Execute
//......... part of the code omitted here .........
}
else
{
model.create(mat_neuralLayers, CvANN_MLP::GAUSSIAN, d_alpha, d_beta);
}
//-------------------------------------------------
// Now train the network
// TODO: Integrate init weights and indices for record selection
// mat_Weights = GetMatrix(t_Weights, i_matType);
// mat_Indices = GetMatrix(t_Indices, i_matType);
//model.train(mat_TrainInput, mat_TrainOutput, NULL, NULL, tp_trainParams);
model.train(mat_data[0], mat_data[1], NULL, NULL, tp_trainParams);
//-------------------------------------------------
// Predict data
if (e_dataType == TABLE)
{
// Get the evaluation/test matrix from the eval table
mat_EvalInput = GetEvalMatrix(t_EvalInput, i_matType);
}
else
{
// Train and eval data overlap in grid mode
mat_EvalInput = GetEvalMatrix(gl_TrainInputs, i_matType);
}
// Prepare output matrix
mat_EvalOutput = cvCreateMat(mat_EvalInput->rows, i_outputFeatureCount, i_matType);
// Start prediction
model.predict(mat_EvalInput, mat_EvalOutput);
Message_Add(_TL("Successfully trained the network and predicted the values. Here comes the output."));
//-------------------------------------------------
// Save and print results
if (e_dataType == TABLE)
{
// DEBUG -> Save results to output table and print results
for (int i = 0; i < i_outputFeatureCount; i++)
{
t_EvalOutput->Add_Field(CSG_String(t_TrainInput->Get_Field_Name(i_outputFeatureIdxs[i])), SG_DATATYPE_Float);
}
for (int i = 0; i < mat_EvalOutput->rows; i++)
{
CSG_Table_Record* tr_record = t_EvalOutput->Add_Record();
for (int j = 0; j < i_outputFeatureCount; j++)
{
float f_targetValue = mat_EvalOutput->data.fl[i*i_outputFeatureCount+j];
tr_record->Set_Value(j, f_targetValue);
}
}
}
else
{
// Fill the output table output
for (int i = 0; i < i_outputFeatureCount; i++)
{
// TODO: Get the class name
t_EvalOutput->Add_Field(CSG_String::Format(SG_T("CLASS_%d"), i), SG_DATATYPE_Float);
}
Example 13: main
//......... part of the code omitted here .........
bool uniform = true; bool accumulate = false;
cv::calcHist(&f, 1, 0, cv::Mat(), grayHist, 1, &histSize, &histRange, uniform, accumulate);
for (int j = 0; j < 256; j++)
{
trainingData[itemIndex][j] = grayHist.at<float>(j); // copy each of the 256 histogram bins into the feature vector
}
itemIndex++;
/*
// Create a canvas for drawing the histogram
int hist_w = 400; int hist_h = 400;
int bin_w = cvRound((double)hist_w / histSize);
cv::Mat histImage(hist_w, hist_h, CV_8UC3, cv::Scalar(0, 0, 0));
/// Normalize the histogram to the range [ 0, histImage.rows ]
cv::normalize(grayHist, grayHist, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());
/// Draw the histogram on the canvas
for (int i = 1; i < histSize; i++)
{
line(histImage, cv::Point(bin_w*(i - 1), hist_h - cvRound(grayHist.at<float>(i - 1))),
cv::Point(bin_w*(i), hist_h - cvRound(grayHist.at<float>(i))),
cv::Scalar(0, 0, 255), 2, 8, 0);
}
stringstream s;
s << "samples\\反相正规化直方图\\" << str_dir[index] << "\\";
//s << "samples\\正规化直方图\\" << str_dir[index] << "\\";
//s << "samples\\均衡化直方图\\" << str_dir[index] << "\\";
//s << "samples\\直方图\\" << str_dir[index] << "\\";
//string dir = s.str();
//char* c;
//int len = dir.length();
//c = new char[len + 1];
//strcpy(c, dir.c_str());
//CheckDir(c);
s << "" << num << ".jpg";
s >> path;
cv::imwrite(path, histImage);
s.clear();
s << "samples\\反相正规化直方图\\" << str_dir[index] << "\\" << "Hist_" << num << ".jpg";
//s << "samples\\正规化直方图\\" << str_dir[index] << "\\" << "Hist_" << num << ".jpg";
//s << "samples\\均衡化直方图\\" << str_dir[index] << "\\" << "Hist_" << num << ".jpg";
//s << "samples\\直方图\\" << str_dir[index] << "\\" << "Hist_" << num << ".jpg";
s >> path;
cv::imwrite(path, grayHist);
/// Display the histogram
//cv::namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE);
//cv::imshow("calcHist Demo", histImage);
//cv::waitKey(0);
*/
}
}
//Create a network
cv::Mat layerSizes = (cv::Mat_<int>(1, 3) << featureCount, 25, sampleTypeCount);//a three-layer network: featureCount inputs, IDC_EDIT_YinCangCount (here 25) hidden units, sampleTypeCount outputs
CvANN_MLP_TrainParams param;
param.term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 5000, 0.01);
param.train_method = CvANN_MLP_TrainParams::BACKPROP;
param.bp_dw_scale = 0.2;
param.bp_moment_scale = 0.1;
cv::Mat inputs(sampleAllCount, featureCount, CV_32FC1, trainingData);//total sample count, feature dimension, stored data type
cv::Mat outputs(sampleAllCount, sampleTypeCount, CV_32FC1, outputData);
bp.create(layerSizes, CvANN_MLP::SIGMOID_SYM);
bp.train(inputs, outputs, cv::Mat(), cv::Mat(), param);
bp.save("ANN_mlp.xml");
itemIndex = 0;
for (int index = 0; index < 7; index++)
{
for (int i = 1; i <= 50; i++)
{
cv::Mat sampleMat(1, featureCount, CV_32FC1, trainingData[itemIndex]);//one sample row: feature dimension, stored data type
cv::Mat nearest(1, sampleTypeCount, CV_32FC1, cv::Scalar(0));
bp.predict(sampleMat, nearest);
float possibility = -1;
int outindex = 0;
for (int i = 0; i < nearest.size().width; i++){
float x = nearest.at<float>(0, i);
if (x>possibility){
possibility = x;
outindex = i;
}
}
cout << str_dir[index] << "_" << i << ":" << outindex << "->" << possibility << "->" << str_dir[outindex] << endl;
itemIndex++;
}
}
return 0;
}
Example 14: on_pushButton_test_clicked
void MainWindow::on_pushButton_test_clicked()
{
QString str = QFileDialog::getExistingDirectory();
QByteArray ba = str.toLocal8Bit();
char *c_str = ba.data();
string slash = "/";
Mat training;
Mat response;
read_num_class_data("train.txt", 4, training, response);
cout<<training.rows<<endl;
cout<<response.rows<<endl;
ofstream output_file;
output_file.open("Ratio.txt");
Mat layers = Mat(3,1,CV_32SC1);
int sz = training.cols ;
layers.row(0) = Scalar(sz);
layers.row(1) = Scalar(16);
layers.row(2) = Scalar(1);
CvANN_MLP mlp;
CvANN_MLP_TrainParams params;
CvTermCriteria criteria;
criteria.max_iter = 1000;
criteria.epsilon = 0.00001f;
criteria.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
params.train_method = CvANN_MLP_TrainParams::BACKPROP;
params.bp_dw_scale = 0.1f;
params.bp_moment_scale = 0.1f;
params.term_crit = criteria;
mlp.create(layers,CvANN_MLP::SIGMOID_SYM);
int i = mlp.train(training, response, Mat(),Mat(),params); // Train dataset
FileStorage fs("mlp.xml", FileStorage::WRITE); // or xml
mlp.write(*fs, "mlp"); // the dereference yields the underlying CvFileStorage* expected by the legacy write()
ui->label_training->setText("Training finish");
//mlp.load("mlp.xml","mlp"); //Load ANN weights for each layer
vector<string> img_name;
string output_directory = "output_img/";
img_name = listFile(c_str);
Mat testing(1, 3, CV_32FC1);
Mat predict(1, 1, CV_32F);
int file_num = 0;
for(int i = 0; i < img_name.size(); i++) //size of the img_name
{
ui->progressBar->setValue(i*100/img_name.size());
string file_name = c_str + slash + img_name[i];
Mat img_test = imread(file_name);
Mat img_test_clone = img_test.clone();
Mat img_thresh, img_thresh_copy, img_HSV, img_gray;
vector<Mat> img_split;
cvtColor(img_test_clone, img_HSV, CV_RGB2HSV);
cvtColor(img_test_clone, img_gray, CV_RGB2GRAY);
split(img_HSV, img_split);
threshold(img_split[0], img_thresh, 75, 255, CV_THRESH_BINARY);
img_thresh_copy = img_thresh.clone();
Mat hole = img_thresh_copy.clone();
floodFill(hole, Point(0,0), Scalar(255));
bitwise_not(hole, hole);
img_thresh_copy = (img_thresh_copy | hole);
Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
Mat open_result;
morphologyEx(img_thresh, open_result, MORPH_CLOSE, element );
int infected_num = 0;
int total_pixels = 0;
if(img_test.data)
{
file_num++;
for (int m = 0; m < img_test.rows; m++)
{
for (int n = 0; n < img_test.cols; n++)
{
if (img_thresh_copy.at<uchar>(m, n) == 255)
{
total_pixels++;
testing.at<float>(0, 0) = (float)img_test.at<Vec3b>(m, n)[0];
testing.at<float>(0, 1) = (float)img_test.at<Vec3b>(m, n)[1];
testing.at<float>(0, 2) = (float)img_test.at<Vec3b>(m, n)[2];
mlp.predict(testing,predict);
float a = predict.at<float>(0,0);
//......... part of the code omitted here .........
Example 15: test
double test(cv::Mat &vocabulary, void *src)
{
// Test
std::vector<BOWImg> images;
conf.max_num = conf.max_num * 2;
std::cout<<"--->Loading testing images ... "<<std::endl;
int numImages = imgRead(images);
std::cout<<" "<<numImages<<" images loaded."<<std::endl;
if(numImages < 0)
return -1;
printf("--->Extracting %s features ...\n", conf.extractor.c_str());
features(images, conf.extractor, conf.detector);
std::cout<<"--->Extracting BOW features ..."<<std::endl;
bowFeatures(images, vocabulary, conf.extractor);
cv::Mat rawData;
for(std::vector<BOWImg>::iterator iter = images.begin();iter != images.end(); iter++)
rawData.push_back(iter->BOWDescriptor);
//PCA
#ifdef _USE_PCA_
float factor = 1;
int maxComponentsNum = static_cast<float>(conf.numClusters) * factor;
cv::PCA pca(rawData, Mat(),CV_PCA_DATA_AS_ROW, maxComponentsNum);
cv::Mat pcaData;
for(int i = 0;i<rawData.rows;i++)
{
cv::Mat vec = rawData.row(i);
cv::Mat coeffs = pca.project(vec);
pcaData.push_back(coeffs);
}
cv::Mat testData = pcaData;
#else
cv::Mat testData = rawData;
#endif
std::cout<<"--->Executing predictions ..."<<std::endl;
cv::Mat output;
double ac = 0;
double ac_rate = 0;
if(conf.classifier == "BP")
{
CvANN_MLP *classifier = (CvANN_MLP *)src;
classifier->predict(testData,output);
cout<<"--->Predict answer: "<<std::endl;
for(int i = 0;i < output.rows;i++)
{
float *p = output.ptr<float>(i);
int k = 0;
float best = p[0];  // largest activation seen so far (kept as float, since outputs can be fractional or negative)
for(int j = 1;j < output.cols;j++)
{
if(p[j] > best)
{
best = p[j];
k = j;
}
}
std::cout<<" "<<images[i].imgName<<" ---- "<<conf.classes[k]<<endl;
if(images[i].label == k+1)
ac++;
}
ac_rate = ac / static_cast<double>(output.rows);
}
else if(conf.classifier == "SVM")
{
CvSVM *classifier = (CvSVM *)src;
classifier->predict(testData,output);
cout<<"--->Predict answer: "<<std::endl;
for(int i = 0;i < output.rows;i++)
{
int k = (int)output.ptr<float>()[i]-1;
std::cout<<" "<<images[i].imgName<<" ---- "<<conf.classes[k]<<endl;
if(images[i].label == k+1)
ac++;
}
ac_rate = ac / static_cast<double>(output.rows);
}
else {
std::cout<<"--->Error: wrong classifier."<<std::endl;
}
return ac_rate;
}