This page collects typical usage examples of the C++ method TrainingSet::getData. If you are unsure how TrainingSet::getData is used in C++, what it does, or what calling it looks like in practice, the hand-picked examples below may help. You can also read more about the enclosing class TrainingSet.
Two code examples of TrainingSet::getData are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
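The examples come from a small OpenCV-based character-recognition project, so the surrounding types are not shown on this page. As a rough sketch (these declarations are assumptions inferred from the call sites, not the project's actual headers), the interface the examples rely on looks roughly like this:
// Hypothetical sketch of the types assumed by the examples below;
// the real project may declare these members differently.
#include <opencv2/core/core.hpp>
#include <vector>

class InputImage {
public:
    cv::Mat *getImage();                    // flattened image as a column vector
    std::vector<int> *getLabelVector();     // one-hot label vector
    int getLabelIndex();                    // index of the 1 in the one-hot label
    char getCharLabel();                    // label as a character
    static char oneHotIndexToChar(int ind); // map a one-hot index back to a character
};

class TrainingSet {
public:
    // The method this page documents: returns the loaded images.
    std::vector<InputImage *> *getData();
};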
Example 1: test
void NeuralNetwork::test(TrainingSet &testSet)
{
    vector<InputImage *> *data = testSet.getData();
    int numCorrect = 0;
    for (vector<InputImage *>::iterator testImage = data->begin(); testImage != data->end(); ++testImage)
    {
        Mat *trainingImageMat = (*testImage)->getImage();
        vector<int> *actualLabel = (*testImage)->getLabelVector();

        // Get V
        Mat V = parameters * (*trainingImageMat);

        // Compute prediction
        vector<float> predictions(LABEL_SIZE);
        predictHelper(V, predictions);

        // Find max for prediction
        float max = 0;
        int maxInd = 0;
        int count = 0;
        for (vector<float>::iterator it = predictions.begin(); it != predictions.end(); ++it)
        {
            if (*it > max)
            {
                max = *it;
                maxInd = count;
            }
            count++;
        }

        char predictedChar = InputImage::oneHotIndexToChar(maxInd);
        cout << "Predicted: " << predictedChar << " | Actual: " << (*testImage)->getCharLabel() << endl;
        if (tolower(predictedChar) == tolower((*testImage)->getCharLabel()))
        {
            numCorrect++;
        }
    }
    float percentCorrect = ((float)numCorrect / (float)data->size()) * 100;
    cout << "Percent correct: " << (int)percentCorrect << "%" << endl;
}
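Both examples pass the score vector V to predictHelper to obtain per-class probabilities. The cross-entropy and gradient code in Example 2 below suggests this helper is a softmax; a minimal sketch under that assumption (the signature is only inferred from the call sites, not taken from the project) could look like:
// Hypothetical softmax helper, inferred from how the examples use it;
// the project's actual predictHelper may differ.
void NeuralNetwork::predictHelper(Mat &V, vector<float> &predictions)
{
    // Subtract the largest score before exponentiating, for numerical stability.
    float maxScore = V.at<float>(0, 0);
    for (int i = 1; i < (int)predictions.size(); ++i)
        if (V.at<float>(i, 0) > maxScore)
            maxScore = V.at<float>(i, 0);

    float sum = 0.0f;
    for (int i = 0; i < (int)predictions.size(); ++i)
    {
        predictions[i] = expf(V.at<float>(i, 0) - maxScore);
        sum += predictions[i];
    }
    for (int i = 0; i < (int)predictions.size(); ++i)
        predictions[i] /= sum; // probabilities now sum to 1
}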
Example 2: train
void NeuralNetwork::train(TrainingSet &trainingSet)
{
    vector<InputImage *> *data = trainingSet.getData();
    vector<float> G;

    // Repeat until convergence
    bool hasConverged = false;
    int count = 0;
    float avgCrossEntropy = 100;
    time_t timer;
    time(&timer);
    int k = 0;
    while (!hasConverged)
    {
        if (count > MIN_TRAIN_TIME)
        {
            hasConverged = true;
            break;
        }
        count++;
        if (count % 5 == 0)
        {
            cout << count << "th cycle with " << avgCrossEntropy << " avg cross entropy" << endl;
            cout << difftime(time(0), timer) << " seconds elapsed" << endl;
        }

        // Reset average cross entropy
        avgCrossEntropy = 0;

        // Get predictions
        vector<vector<float>> allPredictions;
        vector<InputImage *> inputImages;
        for (int m = k; m < k + BATCH_SIZE; ++m)
        {
            int ind = m % data->size();
            Mat *trainingImageMat = data->at(ind)->getImage();
            vector<int> *actualLabel = data->at(ind)->getLabelVector();

            // Get V
            Mat V = parameters * (*trainingImageMat);

            // Compute prediction
            vector<float> predictions(LABEL_SIZE);
            predictHelper(V, predictions);

            avgCrossEntropy -= logf(predictions[data->at(ind)->getLabelIndex()]);
            allPredictions.push_back(predictions);
            inputImages.push_back(data->at(ind));
        }

        // Update parameters
        for (int i = 0; i < parameters.rows; ++i)
        {
            for (int j = 0; j < parameters.cols; ++j)
            {
                float grad = 0;
                #pragma omp parallel for reduction(+:grad)
                for (int p = 0; p < BATCH_SIZE; p++)
                {
                    grad += inputImages.at(p)->getImage()->at<float>(j, 0) * (inputImages.at(p)->getLabelVector()->at(i) - allPredictions[p][i]);
                }
                parameters.at<float>(i, j) += TRAINING_STEP * grad;
            }
        }

        // Average the cross entropy
        avgCrossEntropy /= BATCH_SIZE;
        k += BATCH_SIZE;
    }

    // Save to file
    ofstream nnsave;
    nnsave.open("savednn.txt");
    for (int i = 0; i < parameters.rows; ++i)
    {
        for (int j = 0; j < parameters.cols; ++j)
        {
            nnsave << parameters.at<float>(i, j) << "\t";
        }
        nnsave << endl;
    }
    nnsave << endl;
    nnsave.close();
    //cout << parameters << endl;
}
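A driver that exercises both methods might look like the sketch below. The TrainingSet construction and loading steps are assumptions for illustration only; this page does not document how the project actually builds its data sets.
// Hypothetical driver; treat the TrainingSet setup as a placeholder.
int main()
{
    TrainingSet trainingSet; // assumed to load/own the training images
    TrainingSet testSet;     // assumed to load/own the held-out images

    NeuralNetwork network;
    network.train(trainingSet); // Example 2: batched gradient updates, saves savednn.txt
    network.test(testSet);      // Example 1: prints per-image predictions and accuracy
    return 0;
}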