本文整理汇总了C++中LabelledClassificationData::getClassData方法的典型用法代码示例。如果您正苦于以下问题:C++ LabelledClassificationData::getClassData方法的具体用法?C++ LabelledClassificationData::getClassData怎么用?C++ LabelledClassificationData::getClassData使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类LabelledClassificationData
的用法示例。
在下文中一共展示了LabelledClassificationData::getClassData方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: train
bool MinDist::train(LabelledClassificationData &labelledTrainingData,double gamma){

    //Trains one MinDist cluster model per class in the labelled dataset.
    //Params: labelledTrainingData - the labelled training samples; gamma - the null-rejection gamma passed to each class model.
    //Returns: true if every class model was trained successfully, false otherwise (errorLog explains the failure).
    const unsigned int M = labelledTrainingData.getNumSamples();    //total number of training samples
    const unsigned int N = labelledTrainingData.getNumDimensions(); //number of input dimensions (features)
    const unsigned int K = labelledTrainingData.getNumClasses();    //number of distinct classes

    //Reset any previous model state before training
    trained = false;
    models.clear();
    classLabels.clear();

    if( M == 0 ){
        errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Training data has zero samples!" << endl;
        return false;
    }

    //Each class model clusters its samples, so there must be more samples than clusters
    if( M <= numClusters ){
        errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - There are not enough training samples for the number of clusters. Either reduce the number of clusters or increase the number of training samples!" << endl;
        return false;
    }

    numFeatures = N;
    numClasses = K;
    models.resize(K);
    classLabels.resize(K);
    ranges = labelledTrainingData.getRanges();

    //Train each of the models
    for(UINT k=0; k<numClasses; k++){

        //Get the class label for the kth class
        UINT classLabel = labelledTrainingData.getClassTracker()[k].classLabel;

        //Set the kth class label
        classLabels[k] = classLabel;

        //Get all the training data for this class
        LabelledClassificationData classData = labelledTrainingData.getClassData(classLabel);
        MatrixDouble data(classData.getNumSamples(),N);

        //Copy the training data into a matrix, scaling the training data if needed
        for(UINT i=0; i<data.getNumRows(); i++){
            for(UINT j=0; j<data.getNumCols(); j++){
                if( useScaling ){
                    //Scale each value into the [0,1] range used by the MinDist models
                    data[i][j] = scale(classData[i][j],ranges[j].minValue,ranges[j].maxValue,0,1);
                }else data[i][j] = classData[i][j];
            }
        }

        //Train the model for this class
        models[k].setGamma( gamma );
        if( !models[k].train(classLabel,data,numClusters) ){
            errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Failed to train model for class: " << classLabel;
            //FIX: corrected grammar in the error message ("This is might be" -> "This might be")
            errorLog << ". This might be because this class does not have enough training samples! You should reduce the number of clusters or increase the number of training samples for this class." << endl;
            //FIX: also clear classLabels (not just models) so the classifier is left in a
            //consistent untrained state instead of holding labels for models that no longer exist
            models.clear();
            classLabels.clear();
            return false;
        }
    }

    //All class models trained successfully
    trained = true;
    return true;
}
示例2: train
bool ANBC::train(LabelledClassificationData &labelledTrainingData,double gamma) {
const unsigned int M = labelledTrainingData.getNumSamples();
const unsigned int N = labelledTrainingData.getNumDimensions();
const unsigned int K = labelledTrainingData.getNumClasses();
trained = false;
models.clear();
classLabels.clear();
if( M == 0 ) {
errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Training data has zero samples!" << endl;
return false;
}
if( weightsDataSet ) {
if( weightsData.getNumDimensions() != N ) {
errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - The number of dimensions in the weights data (" << weightsData.getNumDimensions() << ") is not equal to the number of dimensions of the training data (" << N << ")" << endl;
return false;
}
}
numFeatures = N;
numClasses = K;
models.resize(K);
classLabels.resize(K);
ranges = labelledTrainingData.getRanges();
//Train each of the models
for(UINT k=0; k<numClasses; k++) {
//Get the class label for the kth class
UINT classLabel = labelledTrainingData.getClassTracker()[k].classLabel;
//Set the kth class label
classLabels[k] = classLabel;
//Get the weights for this class
VectorDouble weights(numFeatures);
if( weightsDataSet ) {
bool weightsFound = false;
for(UINT i=0; i<weightsData.getNumSamples(); i++) {
if( weightsData[i].getClassLabel() == classLabel ) {
weights = weightsData[i].getSample();
weightsFound = true;
break;
}
}
if( !weightsFound ) {
errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Failed to find the weights for class " << classLabel << endl;
return false;
}
} else {
//If the weights data has not been set then all the weights are 1
for(UINT j=0; j<numFeatures; j++) weights[j] = 1.0;
}
//Get all the training data for this class
LabelledClassificationData classData = labelledTrainingData.getClassData(classLabel);
MatrixDouble data(classData.getNumSamples(),N);
//Copy the training data into a matrix, scaling the training data if needed
for(UINT i=0; i<data.getNumRows(); i++) {
for(UINT j=0; j<data.getNumCols(); j++) {
if( useScaling ) {
data[i][j] = scale(classData[i][j],ranges[j].minValue,ranges[j].maxValue,MIN_SCALE_VALUE,MAX_SCALE_VALUE);
} else data[i][j] = classData[i][j];
}
}
//Train the model for this class
models[k].gamma = gamma;
if( !models[k].train(classLabel,data,weights) ) {
errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Failed to train model for class: " << classLabel << endl;
//Try and work out why the training failed
if( models[k].N == 0 ) {
errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - N == 0!" << endl;
models.clear();
return false;
}
for(UINT j=0; j<numFeatures; j++) {
if( models[k].mu[j] == 0 ) {
errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - The mean of column " << j+1 << " is zero! Check the training data" << endl;
models.clear();
return false;
}
}
models.clear();
return false;
}
}
//Store the null rejection thresholds
nullRejectionThresholds.resize(numClasses);
for(UINT k=0; k<numClasses; k++) {
nullRejectionThresholds[k] = models[k].threshold;
}
//.........这里部分代码省略.........
示例3: train
bool GMM::train(LabelledClassificationData trainingData){
//Clear any old models
models.clear();
trained = false;
numFeatures = 0;
numClasses = 0;
if( trainingData.getNumSamples() == 0 ){
errorLog << "train(LabelledClassificationData &trainingData) - Training data is empty!" << endl;
return false;
}
//Set the number of features and number of classes and resize the models buffer
numFeatures = trainingData.getNumDimensions();
numClasses = trainingData.getNumClasses();
models.resize(numClasses);
if( numFeatures >= 6 ){
warningLog << "train(LabelledClassificationData &trainingData) - The number of features in your training data is high (" << numFeatures << "). The GMMClassifier does not work well with high dimensional data, you might get better results from one of the other classifiers." << endl;
}
//Get the ranges of the training data if the training data is going to be scaled
if( useScaling ){
ranges = trainingData.getRanges();
}
//Fit a Mixture Model to each class (independently)
for(UINT k=0; k<numClasses; k++){
UINT classLabel = trainingData.getClassTracker()[k].classLabel;
LabelledClassificationData classData = trainingData.getClassData( classLabel );
//Scale the training data if needed
if( useScaling ){
if( !classData.scale(ranges,GMM_MIN_SCALE_VALUE, GMM_MAX_SCALE_VALUE) ){
errorLog << "train(LabelledClassificationData &trainingData) - Failed to scale training data!" << endl;
return false;
}
}
//Convert the labelled data to unlabelled data
UnlabelledClassificationData unlabelledData = classData.reformatAsUnlabelledClassificationData();
//Train the Mixture Model for this class
GaussianMixtureModels gaussianMixtureModel;
gaussianMixtureModel.setMinChange( minChange );
gaussianMixtureModel.setMaxIter( maxIter );
if( !gaussianMixtureModel.train(unlabelledData, numMixtureModels) ){
errorLog << "train(LabelledClassificationData &trainingData) - Failed to train Mixture Model for class " << classLabel << endl;
return false;
}
//Setup the model container
models[k].resize( numMixtureModels );
models[k].setClassLabel( classLabel );
//Store the mixture model in the container
for(UINT j=0; j<numMixtureModels; j++){
models[k][j].mu = gaussianMixtureModel.getMu().getRowVector(j);
models[k][j].sigma = gaussianMixtureModel.getSigma()[j];
//Compute the determinant and invSigma for the realtime prediction
LUDecomposition ludcmp(models[k][j].sigma);
if( !ludcmp.inverse( models[k][j].invSigma ) ){
models.clear();
errorLog << "train(LabelledClassificationData &trainingData) - Failed to invert Matrix for class " << classLabel << "!" << endl;
return false;
}
models[k][j].det = ludcmp.det();
}
//Compute the normalize factor
models[k].recomputeNormalizationFactor();
//Compute the rejection thresholds
double mu = 0;
double sigma = 0;
VectorDouble predictionResults(classData.getNumSamples(),0);
for(UINT i=0; i<classData.getNumSamples(); i++){
vector< double > sample = classData[i].getSample();
predictionResults[i] = models[k].computeMixtureLikelihood( sample );
mu += predictionResults[i];
}
//Update mu
mu /= double( classData.getNumSamples() );
//Calculate the standard deviation
for(UINT i=0; i<classData.getNumSamples(); i++)
sigma += SQR( (predictionResults[i]-mu) );
sigma = sqrt( sigma / (double(classData.getNumSamples())-1.0) );
sigma = 0.2;
//Set the models training mu and sigma
models[k].setTrainingMuAndSigma(mu,sigma);
if( !models[k].recomputeNullRejectionThreshold(nullRejectionCoeff) && useNullRejection ){
warningLog << "train(LabelledClassificationData &trainingData) - Failed to recompute rejection threshold for class " << classLabel << " - the nullRjectionCoeff value is too high!" << endl;
}
//.........这里部分代码省略.........