This article collects typical usage examples of the C++ method CvANN_MLP::save. If you are unsure what CvANN_MLP::save does or how to call it, the hand-picked code samples below may help; you can also look further into other usage examples of its containing class, CvANN_MLP.
Five code examples of CvANN_MLP::save are shown below, sorted by popularity by default.
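Before the full examples, a minimal self-contained sketch of the save/load round trip with the OpenCV 2.x CvANN_MLP class may be useful: build a tiny network, train it on toy data, write it to XML with save, and restore it with load. The file name mlp_demo.xml and the toy data are illustrative assumptions, not taken from the examples below.

#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>

int main()
{
    // layer layout: 2 inputs -> 4 hidden -> 1 output
    cv::Mat layerSizes = (cv::Mat_<int>(1, 3) << 2, 4, 1);

    CvANN_MLP mlp;
    mlp.create(layerSizes, CvANN_MLP::SIGMOID_SYM);

    // toy XOR-like training data (illustrative only)
    float in[4][2]  = { {0, 0}, {0, 1}, {1, 0}, {1, 1} };
    float out[4][1] = { {-1}, {1}, {1}, {-1} };
    cv::Mat inputs(4, 2, CV_32FC1, in);
    cv::Mat outputs(4, 1, CV_32FC1, out);

    CvANN_MLP_TrainParams params(
        cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, 0.001),
        CvANN_MLP_TrainParams::BACKPROP, 0.1, 0.1);
    mlp.train(inputs, outputs, cv::Mat(), cv::Mat(), params);

    // persist the trained network to XML, then restore it
    mlp.save("mlp_demo.xml");            // file name is an arbitrary choice

    CvANN_MLP restored;
    restored.load("mlp_demo.xml");
    cv::Mat response(1, 1, CV_32FC1);
    restored.predict(inputs.row(0), response);
    return 0;
}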
Example 1: build_mlp_classifier
static
int build_mlp_classifier( char* data_filename,
char* filename_to_save, char* filename_to_load )
{
const int class_count = 26;
CvMat* data = 0;
CvMat train_data;
CvMat* responses = 0;
CvMat* mlp_response = 0;
int ok = read_num_class_data( data_filename, 16, &data, &responses );
int nsamples_all = 0, ntrain_samples = 0;
int i, j;
double train_hr = 0, test_hr = 0;
CvANN_MLP mlp;
if( !ok )
{
printf( "Could not read the database %s\n", data_filename );
return -1;
}
printf( "The database %s is loaded.\n", data_filename );
nsamples_all = data->rows;
ntrain_samples = (int)(nsamples_all*0.8);
// Create or load MLP classifier
if( filename_to_load )
{
// load classifier from the specified file
mlp.load( filename_to_load );
ntrain_samples = 0;
if( !mlp.get_layer_count() )
{
printf( "Could not read the classifier %s\n", filename_to_load );
return -1;
}
printf( "The classifier %s is loaded.\n", data_filename );
}
else
{
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// MLP does not support categorical variables explicitly.
// So, instead of the output class label, we will use
// a binary vector of <class_count> components for training and,
// therefore, MLP will give us a vector of "probabilities" at the
// prediction stage
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
CvMat* new_responses = cvCreateMat( ntrain_samples, class_count, CV_32F );
// 1. unroll the responses
printf( "Unrolling the responses...\n");
for( i = 0; i < ntrain_samples; i++ )
{
int cls_label = cvRound(responses->data.fl[i]) - 'A';
float* bit_vec = (float*)(new_responses->data.ptr + i*new_responses->step);
for( j = 0; j < class_count; j++ )
bit_vec[j] = 0.f;
bit_vec[cls_label] = 1.f;
}
cvGetRows( data, &train_data, 0, ntrain_samples );
// 2. train classifier
int layer_sz[] = { data->cols, 100, 100, class_count };
CvMat layer_sizes =
cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
mlp.create( &layer_sizes );
printf( "Training the classifier (may take a few minutes)...\n");
mlp.train( &train_data, new_responses, 0, 0,
CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,300,0.01),
#if 1
CvANN_MLP_TrainParams::BACKPROP,0.001));
#else
CvANN_MLP_TrainParams::RPROP,0.05));
#endif
cvReleaseMat( &new_responses );
printf("\n");
}
mlp_response = cvCreateMat( 1, class_count, CV_32F );
// compute prediction error on train and test data
for( i = 0; i < nsamples_all; i++ )
{
int best_class;
CvMat sample;
cvGetRow( data, &sample, i );
CvPoint max_loc = {0,0};
mlp.predict( &sample, mlp_response );
cvMinMaxLoc( mlp_response, 0, 0, 0, &max_loc, 0 );
best_class = max_loc.x + 'A';
int r = fabs((double)best_class - responses->data.fl[i]) < FLT_EPSILON ? 1 : 0;
if( i < ntrain_samples )
train_hr += r;
else
//......... part of this code is omitted here .........
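The tail of this function is omitted above. In the OpenCV letter_recog.cpp sample that this example follows, the loop finishes accumulating the test hit rate, the recognition rates are printed, and the trained network is written out with CvANN_MLP::save when an output file name was supplied. A hedged sketch of that ending (exact wording and cleanup order are assumptions):

// (sketch) typical ending of build_mlp_classifier
test_hr += r;
}
test_hr /= (double)(nsamples_all - ntrain_samples);
train_hr /= (double)ntrain_samples;
printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n", train_hr*100., test_hr*100. );
// save the classifier if an output file was requested
if( filename_to_save )
mlp.save( filename_to_save );
cvReleaseMat( &mlp_response );
cvReleaseMat( &data );
cvReleaseMat( &responses );
return 0;
}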
Example 2: excuteTrain
int CTrain::excuteTrain()
{
// read the responses (labels) and the feature data
FILE* f = fopen( "batch", "rb" );
fseek(f, 0l, SEEK_END);
long size = ftell(f);
fseek(f, 0l, SEEK_SET);
int count = size/4/(36+256);// number of samples: 4 bytes per float, 36 outputs + 256 inputs per sample
CvMat* batch = cvCreateMat( count, 36+256, CV_32F );
fread(batch->data.fl, size-1, 1, f);
CvMat outputs, inputs;
cvGetCols(batch, &outputs, 0, 36);
cvGetCols(batch, &inputs, 36, 36+256);
fclose(f);
// create the MLP
CvANN_MLP mlp;
int layer_sz[] = { 256, 20, 36 };
CvMat layer_sizes = cvMat( 1, 3, CV_32S, layer_sz );
mlp.create( &layer_sizes );
// train
//system( "time" );
mlp.train( &inputs, &outputs, NULL, NULL,
CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,300,0.01), CvANN_MLP_TrainParams::RPROP, 0.01)
);
//system( "time" );
// save the MLP
mlp.save( "mpl.xml" );
// test
int right = 0;
CvMat* output = cvCreateMat( 1, 36, CV_32F );
for(int i=0; i<count; i++)
{
CvMat input;
cvGetRow( &inputs, &input, i );
mlp.predict( &input, output );
CvPoint max_loc = {0,0};
cvMinMaxLoc( output, NULL, NULL, NULL, &max_loc, NULL );
int best = max_loc.x;// predicted class
int ans = -1;// actual (ground-truth) class
for(int j=0; j<36; j++)
{
if( outputs.data.fl[i*(outputs.step/4)+j] == 1.0f )
{
ans = j;
break;
}
}
cout<<(char)( best<10 ? '0'+best : 'A'+best-10 );
cout<<(char)( ans<10 ? '0'+ans : 'A'+ans-10 );
if( best==ans )
{
cout<<"+";
right++;
}
//cin.get();
cout<<endl;
}
cvReleaseMat( &output );
cout<<endl<<right<<"/"<<count<<endl;
cvReleaseMat( &batch );
system( "pause" );
return 0;
}
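Example 2 writes the trained network to mpl.xml with mlp.save. To use that file later, one can reload it into a fresh CvANN_MLP and classify a single feature row. A minimal sketch; the helper classify_row is hypothetical, and its input is assumed to be a 1x256 CV_32F cv::Mat prepared the same way as the rows of inputs above:

#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>

// Sketch: reload the network written by excuteTrain() and classify one
// 1x256 CV_32F feature row (hypothetical helper, not part of the original).
char classify_row( const cv::Mat& input )
{
    CvANN_MLP mlp;
    mlp.load( "mpl.xml" );                        // model file written by mlp.save()

    cv::Mat response( 1, 36, CV_32F );
    mlp.predict( input, response );               // one activation per output neuron

    cv::Point max_loc;
    cv::minMaxLoc( response, 0, 0, 0, &max_loc ); // strongest output wins
    int best = max_loc.x;                         // 0-9 -> '0'-'9', 10-35 -> 'A'-'Z'
    return best < 10 ? '0' + best : 'A' + best - 10;
}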
Example 3: main
//......... part of this code is omitted here .........
bool uniform = true; bool accumulate = false;
cv::calcHist(&f, 1, 0, cv::Mat(), grayHist, 1, &histSize, &histRange, uniform, accumulate);
for (int j = 0; j < 256; j++)
{
trainingData[itemIndex][j] = grayHist.ptr<float>(j)[0];// copy histogram bin j into feature j
}
itemIndex++;
/*
// create a canvas image for the histogram
int hist_w = 400; int hist_h = 400;
int bin_w = cvRound((double)hist_w / histSize);
cv::Mat histImage(hist_w, hist_h, CV_8UC3, cv::Scalar(0, 0, 0));
/// normalize the histogram to the range [ 0, histImage.rows ]
cv::normalize(grayHist, grayHist, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());
/// draw the histogram on the canvas
for (int i = 1; i < histSize; i++)
{
line(histImage, cv::Point(bin_w*(i - 1), hist_h - cvRound(grayHist.at<float>(i - 1))),
cv::Point(bin_w*(i), hist_h - cvRound(grayHist.at<float>(i))),
cv::Scalar(0, 0, 255), 2, 8, 0);
}
stringstream s;
s << "samples\\反相正规化直方图\\" << str_dir[index] << "\\";
//s << "samples\\正规化直方图\\" << str_dir[index] << "\\";
//s << "samples\\均衡化直方图\\" << str_dir[index] << "\\";
//s << "samples\\直方图\\" << str_dir[index] << "\\";
//string dir = s.str();
//char* c;
//int len = dir.length();
//c = new char[len + 1];
//strcpy(c, dir.c_str());
//CheckDir(c);
s << "" << num << ".jpg";
s >> path;
cv::imwrite(path, histImage);
s.clear();
s << "samples\\反相正规化直方图\\" << str_dir[index] << "\\" << "Hist_" << num << ".jpg";
//s << "samples\\正规化直方图\\" << str_dir[index] << "\\" << "Hist_" << num << ".jpg";
//s << "samples\\均衡化直方图\\" << str_dir[index] << "\\" << "Hist_" << num << ".jpg";
//s << "samples\\直方图\\" << str_dir[index] << "\\" << "Hist_" << num << ".jpg";
s >> path;
cv::imwrite(path, grayHist);
/// display the histogram
//cv::namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE);
//cv::imshow("calcHist Demo", histImage);
//cv::waitKey(0);
*/
}
}
//create the network
cv::Mat layerSizes = (cv::Mat_<int>(1, 3) << featureCount, 25, sampleTypeCount);//a three-layer network: featureCount inputs, 25 hidden nodes (the count originally came from the IDC_EDIT_YinCangCount edit box), sampleTypeCount outputs
CvANN_MLP_TrainParams param;
param.term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 5000, 0.01);
param.train_method = CvANN_MLP_TrainParams::BACKPROP;
param.bp_dw_scale = 0.2;
param.bp_moment_scale = 0.1;
cv::Mat inputs(sampleAllCount, featureCount, CV_32FC1, trainingData);//number of samples, feature dimension, element type
cv::Mat outputs(sampleAllCount, sampleTypeCount, CV_32FC1, outputData);
bp.create(layerSizes, CvANN_MLP::SIGMOID_SYM);
bp.train(inputs, outputs, cv::Mat(), cv::Mat(), param);
bp.save("ANN_mlp.xml");
itemIndex = 0;
for (int index = 0; index < 7; index++)
{
for (int i = 1; i <= 50; i++)
{
cv::Mat sampleMat(1, featureCount, CV_32FC1, trainingData[itemIndex]);//a single sample row: feature dimension and element type
cv::Mat nearest(1, sampleTypeCount, CV_32FC1, cv::Scalar(0));
bp.predict(sampleMat, nearest);
float possibility = -1;
int outindex = 0;
for (int i = 0; i < nearest.size().width; i++){
float x = nearest.at<float>(0, i);
if (x>possibility){
possibility = x;
outindex = i;
}
}
cout << str_dir[index] << "_" << i << ":" << outindex << "->" << possibility << "->" << str_dir[outindex] << endl;
itemIndex++;
}
}
return 0;
}
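The beginning of this example is omitted, including the setup of the histRange argument passed to cv::calcHist above. For reference, here is a sketch of one way to compute the 256-bin grayscale histogram (256 bins matching featureCount in these examples) and copy it into a feature row; the helper name fill_histogram_row and its range setup are assumptions, not code from the original:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Compute a 256-bin grayscale histogram of `gray` and copy it into `row`
// (a float array with at least 256 elements). Illustrative helper only.
static void fill_histogram_row(const cv::Mat& gray, float* row)
{
    int histSize = 256;
    float range[] = { 0.f, 256.f };
    const float* histRange = range;

    cv::Mat hist;
    cv::calcHist(&gray, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange,
                 /*uniform=*/true, /*accumulate=*/false);

    // hist is a 256x1 CV_32F column; copy bin j into feature j
    for (int j = 0; j < histSize; j++)
        row[j] = hist.at<float>(j);
}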
Example 4: main
int main()
{
const int sampleTypeCount = 7; //how many sample categories there are
const int sampleCount = 50; //number of samples per category
const int sampleAllCount = sampleCount*sampleTypeCount;
const int featureCount = 256; //feature dimension
CvANN_MLP bp;// = CvANN_MLP(layerSizes,CvANN_MLP::SIGMOID_SYM,1,1);
string str_dir[sampleTypeCount];
str_dir[0] = "A水滴渍";
str_dir[1] = "B水纹";
str_dir[2] = "C指纹";
str_dir[3] = "D釉面凹凸";
str_dir[4] = "X凹点";
str_dir[5] = "Y杂质";
str_dir[6] = "Z划痕";
float trainingData[sampleAllCount][featureCount] = { 0 };
float outputData[sampleAllCount][sampleTypeCount] = { 0 };
int itemIndex = 0;
for (int index = 0; index < 7; index++)
{
for (int i = 1; i <= 50; i++)
{
outputData[itemIndex][index] = 1;
cout << str_dir[index] << "_" << i << endl;
stringstream ss;
char num[4];
sprintf(num, "%03d", i);
ss << "特征样本库\\" << str_dir[index] << "\\" << num << ".jpg";
string path;
ss >> path;
//read the image as grayscale so the gray-level histogram can be computed
cv::Mat f = cv::imread(path, 0);
cv::Mat grayHist;
// set the number of bins, i.e. the gray levels; 0-255 is used here
int histSize = 256;
//cv::equalizeHist(f, f);
cv::normalize(f, f, histSize, 0, cv::NORM_MINMAX);
//cv::bitwise_xor(f, cv::Scalar(255), f);//invert
FeatureMaker::GetGrayHist(f, grayHist, histSize);
for (int j = 0; j < 256; j++)
{
trainingData[itemIndex][j] = grayHist.ptr<float>(j)[0];
}
itemIndex++;
}
}
//create the network
cv::Mat layerSizes = (cv::Mat_<int>(1, 3) << featureCount, 25, sampleTypeCount);//a three-layer network: featureCount inputs, 25 hidden nodes (the count originally came from the IDC_EDIT_YinCangCount edit box), sampleTypeCount outputs
CvANN_MLP_TrainParams param;
param.term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 50000, 0.002);
param.train_method = CvANN_MLP_TrainParams::BACKPROP;
param.bp_dw_scale = 0.01;//weight update rate (learning rate)
param.bp_moment_scale = 0.03;//weight update momentum
cv::Mat inputs(sampleAllCount, featureCount, CV_32FC1, trainingData);//number of samples, feature dimension, element type
cv::Mat outputs(sampleAllCount, sampleTypeCount, CV_32FC1, outputData);
bp.create(layerSizes, CvANN_MLP::SIGMOID_SYM);
bp.train(inputs, outputs, cv::Mat(), cv::Mat(), param);
bp.save("ANN_mlp.xml");
itemIndex = 0;
int zhengque = 0;// counter of correctly classified training samples
for (int index = 0; index < 7; index++)
{
for (int i = 1; i <= 50; i++)
{
cv::Mat sampleMat(1, featureCount, CV_32FC1, trainingData[itemIndex]);//a single sample row: feature dimension and element type
cv::Mat nearest(1, sampleTypeCount, CV_32FC1, cv::Scalar(0));
bp.predict(sampleMat, nearest);
float possibility = -1;
int outindex = 0;
for (int i = 0; i < nearest.size().width; i++){
float x = nearest.at<float>(0, i);
if (x>possibility){
possibility = x;
outindex = i;
}
}
if (outindex == index)
zhengque++;
cout << str_dir[index] << "_" << i << ":" << outindex << "->" << possibility << "->" << str_dir[outindex] << endl;
itemIndex++;
}
}
//......... part of this code is omitted here .........
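The omitted tail of this example presumably reports the zhengque counter accumulated above. A sketch of such a summary, plus an optional check that the file written by bp.save can be read back; the wording and the extra check are assumptions, not the original code:

// (sketch) possible ending of this main()
cout << endl << "correct: " << zhengque << "/" << sampleAllCount << endl;
// optional sanity check that ANN_mlp.xml written by bp.save() is readable
CvANN_MLP check;
check.load("ANN_mlp.xml");
if (check.get_layer_count() == 0)
cout << "warning: failed to reload ANN_mlp.xml" << endl;
return 0;
}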
Example 5: main
//......... part of this code is omitted here .........
contador++;
}
}
fputs("indefenido\n", pFile);
for (int i = 0; i<11*410; i++) {
treino[i+(410*(49+26+58))] = indef[i];
printf("%d TREINO - %.2f \n",i+(410*(49+26+58)),treino[i+(410*(49+26+58))]);
}
float labels[144];
float trainingData[144][410];
int cont = 0;
for(int i = 0; i<144; i++) {
// printf("I - %d \n",i);
if(i < 49) {
labels[i] = 1;
} else if(i >= 49 && i<75) {
labels[i] = 2;
} else if(i >= 75 && i<133) {
labels[i] = 3;
} else {
labels[i] = 4;
}
for (int j = 0; j< 410; j++) {
trainingData[i][j] = treino[(410*cont)+j];
//printf("J*i - %d \n",j*i);
// printf("trainingData - %.2f",trainingData[i][j]);
if (j==409) {
cont=cont+1;
}
}
}
cv::Mat layers = cv::Mat(11, 1, CV_32S);
layers.at<int>(0,0) = 410;//input layer
layers.at<int>(1,0) = 400;
layers.at<int>(2,0) = 400;
layers.at<int>(3,0) = 400;
layers.at<int>(4,0) = 400;
layers.at<int>(5,0) = 400;
layers.at<int>(6,0) = 400;
layers.at<int>(7,0) = 400;
layers.at<int>(8,0) = 400;
layers.at<int>(9,0) = 400;
layers.at<int>(10,0) = 1;
Mat labelsMat(144, 1, CV_32FC1, labels);
Mat trainingDataMat(144, 410, CV_32FC1, trainingData);
printf("%lu - %lu ",trainingDataMat.total(),labelsMat.total());
CvANN_MLP ann;
//ANN criteria for termination
CvTermCriteria criter;
criter.max_iter = 500;
criter.type = CV_TERMCRIT_ITER;
//ANN parameters
CvANN_MLP_TrainParams params;
params.train_method = CvANN_MLP_TrainParams::BACKPROP;
params.bp_dw_scale = 0.1;
params.bp_moment_scale = 0.1;
params.term_crit = criter;
ann.create(layers,CvANN_MLP::SIGMOID_SYM);
printf("Erroyo");
ann.train(trainingDataMat, labelsMat, cv::Mat(), cv::Mat(), params);
ann.save("treino");
}
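This last example trains the network and saves it to "treino" but never evaluates it. A sketch of a training-set check follows; count_correct is a hypothetical helper meant to be called with the ann, trainingDataMat and labelsMat objects built in main(), and the 0.5 tolerance together with the assumption that the single output neuron tracks the numeric labels 1-4 are illustrative choices, not guaranteed by the original code:

#include <cmath>
#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>

// Sketch of a training-set check this example never performs (hypothetical helper).
static int count_correct(const CvANN_MLP& ann, const cv::Mat& inputs, const cv::Mat& labels)
{
    cv::Mat responses(inputs.rows, 1, CV_32FC1);
    ann.predict(inputs, responses);              // one output row per input row

    int correct = 0;
    for (int i = 0; i < inputs.rows; i++) {
        // the single output neuron is trained to track the numeric label 1..4
        float predicted = responses.at<float>(i, 0);
        if (std::fabs(predicted - labels.at<float>(i, 0)) < 0.5f)  // arbitrary tolerance
            correct++;
    }
    return correct;
}

Called as count_correct(ann, trainingDataMat, labelsMat) right after ann.save("treino"), it would report how well the saved network fits its own training data.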