This article collects typical usage examples of the C++ CvANN_MLP::read method. If you are wondering how to use CvANN_MLP::read in C++, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of its containing class, CvANN_MLP.
Shown below are 4 code examples of the CvANN_MLP::read method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps recommend better C++ code examples.
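All four examples follow the same pattern: a trained network is serialized to an XML file under a named node with CvANN_MLP::write, and CvANN_MLP::read later restores it from that node before predict() is called. The following minimal sketch shows that round trip in isolation, assuming the legacy OpenCV 2.x API; the file name "mlp_model.xml", the node name "mlp", and the helper function saveAndReloadMlp are illustrative placeholders, not taken from the examples below.

// Minimal sketch of the CvANN_MLP write/read round trip (legacy OpenCV 2.x API).
// The file name "mlp_model.xml" and node name "mlp" are placeholders.
#include <opencv2/core/core_c.h>  // cvOpenFileStorage, cvGetFileNodeByName, cvReleaseFileStorage
#include <opencv2/ml/ml.hpp>      // CvANN_MLP

void saveAndReloadMlp(CvANN_MLP& trained)
{
    // Save the trained model under a named node in an XML file.
    CvFileStorage* out = cvOpenFileStorage("mlp_model.xml", 0, CV_STORAGE_WRITE);
    trained.write(out, "mlp");
    cvReleaseFileStorage(&out);

    // Load it back: open the storage for reading, locate the node by name,
    // and pass both to CvANN_MLP::read.
    CvANN_MLP restored;
    CvFileStorage* in = cvOpenFileStorage("mlp_model.xml", 0, CV_STORAGE_READ);
    CvFileNode* node = cvGetFileNodeByName(in, 0, "mlp");
    restored.read(in, node);
    cvReleaseFileStorage(&in);
    // "restored" can now be used with predict(), as in the examples below.
}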
Example 1: predict
void predict(int nbSamples, int size)
{
    // Load the trained network from the XML file.
    CvANN_MLP network;
    CvFileStorage* storage = cvOpenFileStorage("data/neural_model.xml", 0, CV_STORAGE_READ);
    CvFileNode* n = cvGetFileNodeByName(storage, 0, "neural_model");
    network.read(storage, n);

    // Read the samples to classify: size*size comma-separated pixel values per sample.
    Mat toPredict(nbSamples, size * size, CV_32F);
    float pixel;
    FILE* file = fopen("data/predict.txt", "r");
    for (int i = 0; i < nbSamples; i++) {
        for (int j = 0; j < size * size; j++) {
            fscanf(file, "%f,", &pixel);
            toPredict.at<float>(i, j) = pixel;
        }
    }
    fclose(file);

    // Run the network and report, for each sample, the output neuron with the highest response.
    Mat classOut(nbSamples, 62, CV_32F);
    network.predict(toPredict, classOut);
    for (int k = 0; k < nbSamples; k++) {
        int maxIndex = 0;
        float maxValue = classOut.at<float>(k, 0);
        for (int index = 1; index < 62; index++) {
            float value = classOut.at<float>(k, index);
            if (value > maxValue) {
                maxValue = value;
                maxIndex = index;
            }
        }
        cout << "Index predicted : " << maxIndex + 1 << endl;
    }
    cvReleaseFileStorage(&storage);
}
Example 2: CheckCircle
int CheckCircle(Mat src)  // pass a Mat and its circle as a pair
{
    // Load the XML file and build the neural network.
    CvANN_MLP nnetwork;
    CvFileStorage* storage = cvOpenFileStorage("param.xml", 0, CV_STORAGE_READ);
    CvFileNode* n = cvGetFileNodeByName(storage, 0, "DigitOCR");
    nnetwork.read(storage, n);
    cvReleaseFileStorage(&storage);

    // Build the feature vector: a 64-bin color histogram
    // (each BGR channel quantized to 4 levels).
    float train[64];
    for (int i = 0; i < 64; i++) train[i] = 0;
    Mat norm(src.size(), src.type());
    normalize(src, norm, 0, 255, NORM_MINMAX, CV_8UC3);
    for (int y = 0; y < norm.rows; y++) {
        for (int x = 0; x < norm.cols; x++) {
            int index = y * norm.step + x * norm.elemSize();
            int color = (norm.data[index + 0] / 64) +
                        (norm.data[index + 1] / 64) * 4 +
                        (norm.data[index + 2] / 64) * 16;
            train[color] += 1;
        }
    }
    int pixel = norm.cols * norm.rows;
    for (int i = 0; i < 64; i++) {
        train[i] /= pixel;
    }

    // Run the classification.
    Mat data(1, ATTRIBUTES, CV_32F);
    for (int col = 0; col < ATTRIBUTES; col++) {
        data.at<float>(0, col) = train[col];
    }
    Mat classOut(1, CLASSES, CV_32F);
    nnetwork.predict(data, classOut);

    // Return the index of the output neuron with the highest response.
    int maxIndex = 0;
    float maxValue = classOut.at<float>(0, 0);
    for (int index = 1; index < CLASSES; index++) {
        float value = classOut.at<float>(0, index);
        if (value > maxValue) {
            maxValue = value;
            maxIndex = index;
        }
    }
    return maxIndex;
}
Example 3: main
int main( int argc, char** argv ) {
    // To avoid preprocessing the images every time.
    std::cout << "Have you preprocessed files already? Y/N ";
    char temp = toupper( getchar() ); std::cin.get();
    if (temp == 'N'){
        std::cout << "Preprocessing images";
        if (preprocess(ImageSize, alphabetSize, dataSet, letters) == 0){
            std::cout << "\nSomething went wrong when preprocessing the files" << endl;
            std::cin.get(); return -1;
        }
    }
    std::cout << "\nDo you need to make and train a new Neural Net? Y/N ";
    temp = toupper( getchar() );
    std::cin.get();
    if (temp == 'Y'){
        Mat training_set = Mat::zeros(trainingSamples, attributes, CV_32F);        // zeroed matrix to hold the training samples
        Mat training_results = Mat::zeros(trainingSamples, alphabetSize, CV_32F);  // zeroed matrix to hold the training results
        Mat test_set = Mat::zeros(testSamples, attributes, CV_32F);                // zeroed matrix to hold the test samples
        Mat test_results = Mat::zeros(testSamples, alphabetSize, CV_32F);          // zeroed matrix to hold the test results
        std::cout << "\nReading training data";
        if (readPreprocessed(training_set, training_results, ImageSize, alphabetSize, letters, (dataSet-dataSet+1), trainingSet) == 0) {
            std::cout << "\nSomething went wrong when opening preprocessed files" << endl;
            std::cin.get(); return -1;
        }
        std::cout << "\nReading test data";
        if (readPreprocessed(test_set, test_results, ImageSize, alphabetSize, letters, (dataSet-trainingSet+1), dataSet) == 0) {
            std::cout << "\nSomething went wrong when opening preprocessed files" << endl;
            std::cin.get(); return -1;
        }
        std::cout << "\nSetting up Neural Net";
        Mat layers(numberOfLayers, 1, CV_32S);
        layers.at<int>(0,0) = attributes;        // input layer
        layers.at<int>(1,0) = sizeOfHiddenLayer; // hidden layer
        layers.at<int>(2,0) = alphabetSize;      // output layer
        // Create the neural network.
        CvANN_MLP NeuralNet(layers, CvANN_MLP::SIGMOID_SYM, alpha, beta);
        // Terminate the training after either 10 000 iterations or a change in the
        // network weights smaller than the specified value. Use backpropagation for
        // training; the backpropagation coefficients are the recommended values from
        // http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-trainparams
        CvANN_MLP_TrainParams params( cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 10000, 0.005), CvANN_MLP_TrainParams::BACKPROP, 0.1, 0.5 );
        std::cout << "\nTraining Neural Net" << endl;
        int iterations = NeuralNet.train(training_set, training_results, Mat(), Mat(), params);
        std::cout << "\nCompleted after " << iterations << " iterations through the training data set." << endl;
        // Save the generated model into an XML file.
        std::cout << "\nWriting to file param.xml ..." << endl;
        CvFileStorage* storage = cvOpenFileStorage( "param.xml", 0, CV_STORAGE_WRITE );
        NeuralNet.write(storage, "DigitOCR");
        cvReleaseFileStorage(&storage);
        std::cout << "\t\t ...Done." << endl; std::cin.get();
        cv::Mat classificationResult(1, alphabetSize, CV_32F);
        // Test the generated model with the test samples.
        cv::Mat test_sample;
        int correct_class = 0;  // count of correct predictions
        int wrong_class = 0;    // count of wrong predictions
        // The classification matrix gives the count of classes to which the samples were classified.
        int classification_matrix[alphabetSize][alphabetSize] = {{}};
        // For each sample in the test set:
        for (int tsample = 0; tsample < testSamples; tsample++) {
            // Extract the sample and try to predict its class.
            test_sample = test_set.row(tsample);
            NeuralNet.predict(test_sample, classificationResult);
            // The classification result matrix holds the weight of each class;
            // we take the class with the highest weight as the resultant class.
            int maxIndex = 0;
            float value = 0.0f;
            float maxValue = classificationResult.at<float>(0,0);
            for(int index = 1; index < alphabetSize; index++) {
                value = classificationResult.at<float>(0,index);
//......... the rest of the code is omitted here .........
Example 4: predictDigits
string predictDigits(Mat &originalImage) {
    string numbers = "";
    Mat clon = originalImage.clone();
    // Read the model from the XML file and create the neural network.
    CvANN_MLP nnetwork;
    CvFileStorage* storage = cvOpenFileStorage(
            "/home/andersson/Escritorio/Temporales/neural_network.xml", 0,
            CV_STORAGE_READ);
    CvFileNode* n = cvGetFileNodeByName(storage, 0, "DigitOCR");
    nnetwork.read(storage, n);
    cvReleaseFileStorage(&storage);

    int rows = originalImage.rows;
    int cols = originalImage.cols;
    int lx = 0;
    int ty = 0;
    int by = 0;
    int rx = 0;
    int flag = 0;
    int currentColumn = 1;
    bool temp = false;
    while (!temp) {
        /* Left X */
        for (int i = currentColumn; i < cols; i++) {
            for (int j = 1; j < rows; j++) {
                if (i != (cols - 1)) {
                    if (originalImage.at<uchar>(j, i) == 0) {
                        lx = i;
                        flag = 1;
                        break;
                    }
                } else {
                    temp = true;
                    break;
                }
            }
            if (!temp) {
                if (flag == 1) {
                    flag = 0;
                    break;
                }
            } else {
                break;
            }
        }
        if (temp) {
            continue;
        }
        /* Right X */
        int tempNum;
        for (int i = lx; i < cols; i++) {
            tempNum = 0;
            for (int j = 1; j < rows; j++) {
                if (originalImage.at<uchar>(j, i) == 0) {
                    tempNum += 1;
                }
            }
            if (tempNum == 0) {
                rx = (i - 1);
                break;
            }
        }
        currentColumn = rx + 1;
        /* Top Y */
        for (int i = 1; i < rows; i++) {
            for (int j = lx; j <= rx; j++) {
                if (originalImage.at<uchar>(i, j) == 0) {
                    ty = i;
                    flag = 1;
                    break;
                }
            }
            if (flag == 1) {
                flag = 0;
                break;
            }
        }
        /* Bottom Y */
        for (int i = (rows - 1); i >= 1; i--) {
            for (int j = lx; j <= rx; j++) {
                if (originalImage.at<uchar>(i, j) == 0) {
                    by = i;
                    flag = 1;
                    break;
                }
            }
            if (flag == 1) {
                flag = 0;
//......... the rest of the code is omitted here .........