This article collects typical usage examples of the C++ TimeSeriesClassificationData class. If you are wondering what TimeSeriesClassificationData is for, or how and where to use it, the selected class examples below should help.
The following shows 15 code examples of the TimeSeriesClassificationData class, sorted by popularity by default.
Example 1: getTrainingFoldData
TimeSeriesClassificationData TimeSeriesClassificationData::getTrainingFoldData(const UINT foldIndex) const {
TimeSeriesClassificationData trainingData;
if( !crossValidationSetup ){
errorLog << "getTrainingFoldData(UINT foldIndex) - Cross Validation has not been setup! You need to call the spiltDataIntoKFolds(UINT K,bool useStratifiedSampling) function first before calling this function!" << std::endl;
return trainingData;
}
if( foldIndex >= kFoldValue ) return trainingData;
trainingData.setNumDimensions( numDimensions );
//Add the data to the training set, this will consist of all the data that is NOT in the foldIndex
UINT index = 0;
for(UINT k=0; k<kFoldValue; k++){
if( k != foldIndex ){
for(UINT i=0; i<crossValidationIndexs[k].size(); i++){
index = crossValidationIndexs[k][i];
trainingData.addSample( data[ index ].getClassLabel(), data[ index ].getData() );
}
}
}
return trainingData;
}
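getTrainingFoldData and its counterpart getTestFoldData (Example 5 below) are normally used together after spiltDataIntoKFolds has been called. The following is a minimal sketch of such a cross-validation loop, not part of the original examples: the dataset filename, the fold count, and the choice of DTW as the classifier are all illustrative assumptions.
#include <GRT/GRT.h>
using namespace GRT;
using namespace std;

int main(){
    //Load an existing time-series dataset (the filename is an assumption for this sketch)
    TimeSeriesClassificationData data;
    if( !data.load( "CrossValidationData.grt" ) ) return EXIT_FAILURE;

    //Split the dataset into K folds, using stratified sampling so each fold keeps the class balance
    const UINT K = 5;
    if( !data.spiltDataIntoKFolds( K, true ) ) return EXIT_FAILURE;

    double averageAccuracy = 0;
    for(UINT fold=0; fold<K; fold++){
        //Everything except the current fold is returned as the training set
        TimeSeriesClassificationData trainData = data.getTrainingFoldData( fold );
        //The current fold is returned as the test set
        TimeSeriesClassificationData testData = data.getTestFoldData( fold );

        DTW dtw; //any GRT time-series classifier could be used here
        if( !dtw.train( trainData ) ) return EXIT_FAILURE;

        double numCorrect = 0;
        for(UINT i=0; i<testData.getNumSamples(); i++){
            dtw.predict( testData[i].getData() );
            if( dtw.getPredictedClassLabel() == testData[i].getClassLabel() ) numCorrect++;
        }
        averageAccuracy += numCorrect / double( testData.getNumSamples() );
    }
    cout << "Average cross-validation accuracy: " << averageAccuracy / double(K) * 100.0 << "%" << endl;
    return EXIT_SUCCESS;
}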
Example 2: train_
bool ParticleClassifier::train_(TimeSeriesClassificationData &trainingData){
clear();
numClasses = trainingData.getNumClasses();
numInputDimensions = trainingData.getNumDimensions();
ranges = trainingData.getRanges();
//Scale the training data if needed
if( useScaling ){
trainingData.scale(0, 1);
}
//Train the particle filter
particleFilter.train( numParticles, trainingData, sensorNoise, transitionSigma, phaseSigma, velocitySigma );
classLabels.resize(numClasses);
classLikelihoods.resize(numClasses,0);
classDistances.resize(numClasses,0);
for(unsigned int i=0; i<numClasses; i++){
classLabels[i] = trainingData.getClassTracker()[i].classLabel;
}
trained = true;
return trained;
}
Example 3: convertDataToObservationSequence
bool HMM::convertDataToObservationSequence( TimeSeriesClassificationData &classData, vector< vector< UINT > > &observationSequences ){
observationSequences.resize( classData.getNumSamples() );
for(UINT i=0; i<classData.getNumSamples(); i++){
MatrixDouble &timeseries = classData[i].getData();
observationSequences[i].resize( timeseries.getNumRows() );
for(UINT j=0; j<timeseries.getNumRows(); j++){
if( timeseries[j][0] >= numSymbols ){
errorLog << "train(TimeSeriesClassificationData &trainingData) - Found an observation sequence with a value outside of the symbol range! Value: " << timeseries[j][0] << endl;
return false;
}
observationSequences[i][j] = (UINT)timeseries[j][0];
}
}
return true;
}
Example 4: getTimeSeriesClassificationData
TimeSeriesClassificationData TimeSeriesClassificationDataStream::getTimeSeriesClassificationData( const bool includeNullGestures ) const {
TimeSeriesClassificationData tsData;
tsData.setNumDimensions( getNumDimensions() );
tsData.setAllowNullGestureClass( includeNullGestures );
bool addSample = false;
const UINT numTimeseries = (UINT)timeSeriesPositionTracker.size();
for(UINT i=0; i<numTimeseries; i++){
addSample = includeNullGestures ? true : timeSeriesPositionTracker[i].getClassLabel() != GRT_DEFAULT_NULL_CLASS_LABEL;
if( addSample ){
tsData.addSample(timeSeriesPositionTracker[i].getClassLabel(), getTimeSeriesData( timeSeriesPositionTracker[i] ) );
}
}
return tsData;
}
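Example 4 is typically used after a continuous stream has been recorded sample-by-sample, with the null-gesture label (0) marking the gaps between gestures. Below is a minimal sketch of that recording and conversion flow, not taken from the original examples: it assumes the stream's addSample( classLabel, sample ) recording call and uses synthetic random data purely for illustration.
#include <GRT/GRT.h>
using namespace GRT;
using namespace std;

int main(){
    //Record a short synthetic stream with 3 dimensions
    TimeSeriesClassificationDataStream stream;
    stream.setNumDimensions( 3 );

    Random random;
    VectorDouble sample( 3 );
    for(UINT i=0; i<1000; i++){
        //Label the first half of the stream as gesture 1 and the rest as the null gesture (class 0)
        UINT classLabel = i < 500 ? 1 : 0;
        for(UINT j=0; j<sample.size(); j++) sample[j] = random.getRandomNumberUniform(-1.0,1.0);
        stream.addSample( classLabel, sample );
    }

    //Convert the labelled stream into segmented time series, dropping the null-gesture sections
    TimeSeriesClassificationData segmented = stream.getTimeSeriesClassificationData( false );
    cout << "Extracted " << segmented.getNumSamples() << " time series" << endl;
    return EXIT_SUCCESS;
}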
Example 5: getTestFoldData
TimeSeriesClassificationData TimeSeriesClassificationData::getTestFoldData(const UINT foldIndex) const {
TimeSeriesClassificationData testData;
if( !crossValidationSetup ) return testData;
if( foldIndex >= kFoldValue ) return testData;
//Add the data to the test set, this will consist of all the data in the foldIndex
testData.setNumDimensions( numDimensions );
UINT index = 0;
for(UINT i=0; i<crossValidationIndexs[ foldIndex ].size(); i++){
index = crossValidationIndexs[ foldIndex ][i];
testData.addSample( data[ index ].getClassLabel(), data[ index ].getData() );
}
return testData;
}
Example 6: main
int main (int argc, const char * argv[])
{
TimeSeriesClassificationData trainingData; //This will store our training data
GestureRecognitionPipeline pipeline; //This is a wrapper for our classifier and any pre/post processing modules
string dirPath = "/home/vlad/AndroidStudioProjects/DataCapture/dataSetGenerator/build";
if (!trainingData.loadDatasetFromFile(dirPath + "/acc-training-set-segmented.data")) {
printf("Cannot open training set\n");
return EXIT_FAILURE;
}
printf("Successfully opened training data set ...\n");
HMM hmm;
hmm.setHMMType( HMM_CONTINUOUS );
hmm.setDownsampleFactor( 5 );
hmm.setAutoEstimateSigma( true );
hmm.setSigma( 20.0 );
hmm.setModelType( HMM_LEFTRIGHT );
hmm.setDelta( 1 );
// LowPassFilter lpf(0.1, 1, 3);
// pipeline.setPreProcessingModule(lpf);
pipeline.setClassifier( hmm );
pipeline.train(trainingData, 20);
//You can then get the accuracy of how well the pipeline performed during the k-fold cross validation testing
double accuracy = pipeline.getCrossValidationAccuracy();
printf("Accuracy: %f\n", accuracy);
}
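After the k-fold training step the same pipeline can be evaluated against a separate, held-out dataset. Below is a minimal sketch of such a helper, not part of the original example: the test-set filename is an assumption, and it relies on the pipeline's test() / getTestAccuracy() interface.
#include <GRT/GRT.h>
#include <cstdio>
using namespace GRT;

//Sketch: evaluate an already-trained pipeline on a held-out segmented test set.
//The filename passed in is illustrative and not part of the original example.
bool evaluatePipeline( GestureRecognitionPipeline &pipeline, const std::string &testFile ){
    TimeSeriesClassificationData testData;
    if( !testData.loadDatasetFromFile( testFile ) ) return false;
    if( !pipeline.test( testData ) ) return false; //runs a prediction for every test time series
    printf( "Test accuracy: %f\n", pipeline.getTestAccuracy() );
    return true;
}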
Example 7: labelledData
bool TimeSeriesClassificationData::merge(const TimeSeriesClassificationData &labelledData){
if( labelledData.getNumDimensions() != numDimensions ){
errorLog << "merge(TimeSeriesClassificationData &labelledData) - The number of dimensions in the labelledData (" << labelledData.getNumDimensions() << ") does not match the number of dimensions of this dataset (" << numDimensions << ")" << std::endl;
return false;
}
//The dataset has changed so flag that any previous cross validation setup will now not work
crossValidationSetup = false;
crossValidationIndexs.clear();
//Add the data from the labelledData to this instance
for(UINT i=0; i<labelledData.getNumSamples(); i++){
addSample(labelledData[i].getClassLabel(), labelledData[i].getData());
}
//Set the class names from the dataset
Vector< ClassTracker > classTracker = labelledData.getClassTracker();
for(UINT i=0; i<classTracker.size(); i++){
setClassNameForCorrespondingClassLabel(classTracker[i].className, classTracker[i].classLabel);
}
return true;
}
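merge is most often used to combine recordings from separate sessions into a single training set. A minimal sketch, not from the original examples (the session filenames are assumptions):
#include <GRT/GRT.h>
using namespace GRT;

int main(){
    //Load two previously recorded datasets (filenames are illustrative)
    TimeSeriesClassificationData session1, session2;
    if( !session1.load( "Session1.grt" ) ) return EXIT_FAILURE;
    if( !session2.load( "Session2.grt" ) ) return EXIT_FAILURE;

    //Both datasets must have the same number of dimensions, otherwise merge() fails
    if( !session1.merge( session2 ) ) return EXIT_FAILURE;

    //Save the combined dataset; note that merging invalidates any previous k-fold setup
    return session1.save( "MergedSessions.grt" ) ? EXIT_SUCCESS : EXIT_FAILURE;
}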
Example 8: train_
bool KMeansFeatures::train_(TimeSeriesClassificationData &trainingData){
MatrixDouble data = trainingData.getDataAsMatrixDouble();
return train_( data );
}
Example 9: main
int main (int argc, const char * argv[])
{
//Create a new DTW instance, using the default parameters
DTW dtw;
//Load some training data to train the classifier - the DTW uses TimeSeriesClassificationData
TimeSeriesClassificationData trainingData;
if( !trainingData.load("DTWTrainingData.grt") ){
cout << "Failed to load training data!\n";
return EXIT_FAILURE;
}
//Use 20% of the training dataset to create a test dataset
TimeSeriesClassificationData testData = trainingData.partition( 80 );
//Trim the training data for any sections of non-movement at the start or end of the recordings
dtw.enableTrimTrainingData(true,0.1,90);
//Train the classifier
if( !dtw.train( trainingData ) ){
cout << "Failed to train classifier!\n";
return EXIT_FAILURE;
}
//Save the DTW model to a file
if( !dtw.save("DTWModel.grt") ){
cout << "Failed to save the classifier model!\n";
return EXIT_FAILURE;
}
//Load the DTW model from a file
if( !dtw.load("DTWModel.grt") ){
cout << "Failed to load the classifier model!\n";
return EXIT_FAILURE;
}
//Use the test dataset to test the DTW model
double accuracy = 0;
for(UINT i=0; i<testData.getNumSamples(); i++){
//Get the i'th test sample - this is a timeseries
UINT classLabel = testData[i].getClassLabel();
MatrixDouble timeseries = testData[i].getData();
//Perform a prediction using the classifier
if( !dtw.predict( timeseries ) ){
cout << "Failed to perform prediction for test sampel: " << i <<"\n";
return EXIT_FAILURE;
}
//Get the predicted class label
UINT predictedClassLabel = dtw.getPredictedClassLabel();
double maximumLikelihood = dtw.getMaximumLikelihood();
VectorDouble classLikelihoods = dtw.getClassLikelihoods();
VectorDouble classDistances = dtw.getClassDistances();
//Update the accuracy
if( classLabel == predictedClassLabel ) accuracy++;
cout << "TestSample: " << i << "\tClassLabel: " << classLabel << "\tPredictedClassLabel: " << predictedClassLabel << "\tMaximumLikelihood: " << maximumLikelihood << endl;
}
cout << "Test Accuracy: " << accuracy/double(testData.getNumSamples())*100.0 << "%" << endl;
return EXIT_SUCCESS;
}
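A common extension of this example is to enable null rejection, so that DTW labels time series that do not match any trained gesture as class 0 instead of forcing a prediction. The sketch below shows the extra configuration calls, which must be made before dtw.train(); the rejection coefficient is an illustrative starting value, not a recommendation.
#include <GRT/GRT.h>
using namespace GRT;

//Sketch: configure a DTW instance to reject unknown gestures (call before training)
void configureDTWWithNullRejection( DTW &dtw ){
    dtw.enableTrimTrainingData( true, 0.1, 90 ); //same trimming as in the example above
    dtw.enableNullRejection( true );             //unmatched time series will be assigned the null class (0)
    dtw.setNullRejectionCoeff( 3 );              //illustrative value; larger values reject fewer samples
}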
Example 10: train_
bool RBMQuantizer::train_(TimeSeriesClassificationData &trainingData){
MatrixFloat data = trainingData.getDataAsMatrixFloat();
return train_( data );
}
Example 11: clear
bool HMM::train_continuous(TimeSeriesClassificationData &trainingData){
clear();
if( trainingData.getNumSamples() == 0 ){
errorLog << "train_continuous(TimeSeriesClassificationData &trainingData) - There are no training samples to train the CHMM classifer!" << endl;
return false;
}
//Reset the CHMM
numInputDimensions = trainingData.getNumDimensions();
numClasses = trainingData.getNumClasses();
classLabels.resize( numClasses );
for(UINT k=0; k<numClasses; k++){
classLabels[k] = trainingData.getClassTracker()[k].classLabel;
}
//Scale the training data if needed
ranges = trainingData.getRanges();
if( useScaling ){
trainingData.scale(0, 1);
}
//Setup the models, there will be 1 model for each training sample
const UINT numTrainingSamples = trainingData.getNumSamples();
continuousModels.resize( numTrainingSamples );
//Train each of the models
for(UINT k=0; k<numTrainingSamples; k++){
//Init the model
continuousModels[k].setDownsampleFactor( downsampleFactor );
continuousModels[k].setModelType( modelType );
continuousModels[k].setDelta( delta );
continuousModels[k].setSigma( sigma );
continuousModels[k].setAutoEstimateSigma( autoEstimateSigma );
continuousModels[k].enableScaling( false ); //Scaling should always be off for the models as we do any scaling in the CHMM
//Train the model
if( !continuousModels[k].train_( trainingData[k] ) ){
errorLog << "train_continuous(TimeSeriesClassificationData &trainingData) - Failed to train CHMM for sample " << k << endl;
return false;
}
}
if( committeeSize > trainingData.getNumSamples() ){
committeeSize = trainingData.getNumSamples();
warningLog << "train_continuous(TimeSeriesClassificationData &trainingData) - The committeeSize is larger than the number of training sample. Setting committeeSize to number of training samples: " << trainingData.getNumSamples() << endl;
}
//Flag that the model has been trained
trained = true;
//Compute any null rejection thresholds if needed
if( useNullRejection ){
//Compute the rejection thresholds
nullRejectionThresholds.resize(numClasses);
}
return true;
}
Example 12: main
int main() {
vector<string> gestures(0,"");
GetFilesInDirectory(gestures, "rawdata");
CreateDirectory("processed", NULL);
sort(gestures.begin(), gestures.end());
data = vector<vector<vector<double > > >(gestures.size(), vector<vector<double > >(0,vector<double>(0,0)));
for(size_t i = 0; i < gestures.size(); i++) {
ifstream fin(gestures[i]);
int n; fin >> n;
// cerr << gestures[i] << endl;
// cerr << n << endl;
data[i] = vector<vector<double> >(n, vector<double>(NUMPARAM, 0));
for(int j = 0; j < n; j++) {
for(int k = 0; k < NUMPARAM; k++) {
fin >> data[i][j][k];
}
}
fin.close();
}
//Create a new instance of the TimeSeriesClassificationDataStream
TimeSeriesClassificationData trainingData;
// ax, ay, az
trainingData.setNumDimensions(3);
trainingData.setDatasetName("processed\\GestureTrainingData.txt");
ofstream labelfile("processed\\GestureTrainingDataLabels.txt");
UINT currLabel = 1;
Random random;
map<string, int> gesturenames;
for(size_t overall = 0; overall < gestures.size(); overall++) {
string nam = gestures[overall].substr(8,gestures[overall].find_first_of('_')-8);
if(gesturenames.count(nam)) currLabel = gesturenames[nam];
else {
currLabel = gesturenames.size()+1;
gesturenames[nam] = currLabel;
labelfile << currLabel << " " << nam << endl;
}
MatrixDouble trainingSample;
VectorDouble currVec( trainingData.getNumDimensions() );
for(size_t k = 1; k < data[overall].size(); k++) {
for(UINT j=0; j<currVec.size(); j++){
currVec[j] = data[overall][k][j];
}
trainingSample.push_back(currVec);
}
trainingData.addSample(currLabel, trainingSample);
}
for(size_t i = 0; i < gestures.size(); i++) {
MatrixDouble trainingSample;
VectorDouble currVec(trainingData.getNumDimensions());
for(UINT j = 0; j < currVec.size(); j++) {
currVec[j] = random.getRandomNumberUniform(-1.0, 1.0);
}
for(size_t k = 0; k < 100; k++) {
trainingSample.push_back(currVec);
}
trainingData.addSample(0, trainingSample);
}
//After recording your training data you can then save it to a file
if( !trainingData.save( "processed\\TrainingData.grt" ) ){
cout << "ERROR: Failed to save dataset to file!\n";
return EXIT_FAILURE;
}
//This can then be loaded later
if( !trainingData.load( "processed\\TrainingData.grt" ) ){
cout << "ERROR: Failed to load dataset from file!\n";
return EXIT_FAILURE;
}
//This is how you can get some stats from the training data
string datasetName = trainingData.getDatasetName();
string infoText = trainingData.getInfoText();
UINT numSamples = trainingData.getNumSamples();
UINT numDimensions = trainingData.getNumDimensions();
UINT numClasses = trainingData.getNumClasses();
cout << "Dataset Name: " << datasetName << endl;
cout << "InfoText: " << infoText << endl;
cout << "NumberOfSamples: " << numSamples << endl;
cout << "NumberOfDimensions: " << numDimensions << endl;
cout << "NumberOfClasses: " << numClasses << endl;
//You can also get the minimum and maximum ranges of the data
vector< MinMax > ranges = trainingData.getRanges();
cout << "The ranges of the dataset are: \n";
for(UINT j=0; j<ranges.size(); j++){
cout << "Dimension: " << j << " Min: " << ranges[j].minValue << " Max: " << ranges[j].maxValue << endl;
}
DTW dtw;
if( !dtw.train( trainingData ) ){
cerr << "Failed to train classifier!\n";
//......... the rest of the code is omitted here .........
Example 13: main
int main (int argc, const char * argv[])
{
//Create a new instance of the TimeSeriesClassificationData
TimeSeriesClassificationData trainingData;
//Set the dimensionality of the data (you need to do this before you can add any samples)
trainingData.setNumDimensions( 3 );
//You can also give the dataset a name (the name should have no spaces)
trainingData.setDatasetName("DummyData");
//You can also add some info text about the data
trainingData.setInfoText("This data contains some dummy timeseries data");
//Here you would record a time series, when you have finished recording the time series then add the training sample to the training data
UINT gestureLabel = 1;
MatrixDouble trainingSample;
//For now we will just add 10 x 20 random walk data timeseries
Random random;
for(UINT k=0; k<10; k++){//For the number of classes
gestureLabel = k+1;
//Get the init random walk position for this gesture
VectorDouble startPos( trainingData.getNumDimensions() );
for(UINT j=0; j<startPos.size(); j++){
startPos[j] = random.getRandomNumberUniform(-1.0,1.0);
}
//Generate the 20 time series
for(UINT x=0; x<20; x++){
//Clear any previous timeseries
trainingSample.clear();
//Generate the random walk
UINT randomWalkLength = random.getRandomNumberInt(90, 110);
VectorDouble sample = startPos;
for(UINT i=0; i<randomWalkLength; i++){
for(UINT j=0; j<startPos.size(); j++){
sample[j] += random.getRandomNumberUniform(-0.1,0.1);
}
//Add the sample to the training sample
trainingSample.push_back( sample );
}
//Add the training sample to the dataset
trainingData.addSample( gestureLabel, trainingSample );
}
}
//After recording your training data you can then save it to a file
if( !trainingData.saveDatasetToFile( "TrainingData.txt" ) ){
cout << "Failed to save dataset to file!\n";
return EXIT_FAILURE;
}
//This can then be loaded later
if( !trainingData.loadDatasetFromFile( "TrainingData.txt" ) ){
cout << "Failed to load dataset from file!\n";
return EXIT_FAILURE;
}
//This is how you can get some stats from the training data
string datasetName = trainingData.getDatasetName();
string infoText = trainingData.getInfoText();
UINT numSamples = trainingData.getNumSamples();
UINT numDimensions = trainingData.getNumDimensions();
UINT numClasses = trainingData.getNumClasses();
cout << "Dataset Name: " << datasetName << endl;
cout << "InfoText: " << infoText << endl;
cout << "NumberOfSamples: " << numSamples << endl;
cout << "NumberOfDimensions: " << numDimensions << endl;
cout << "NumberOfClasses: " << numClasses << endl;
//You can also get the minimum and maximum ranges of the data
vector< MinMax > ranges = trainingData.getRanges();
cout << "The ranges of the dataset are: \n";
for(UINT j=0; j<ranges.size(); j++){
cout << "Dimension: " << j << " Min: " << ranges[j].minValue << " Max: " << ranges[j].maxValue << endl;
}
//If you want to partition the dataset into a training dataset and a test dataset then you can use the partition function
//A value of 80 means that 80% of the original data will remain in the training dataset and 20% will be returned as the test dataset
TimeSeriesClassificationData testData = trainingData.partition( 80 );
//If you have multiple datasets that you want to merge together then use the merge function
if( !trainingData.merge( testData ) ){
cout << "Failed to merge datasets!\n";
return EXIT_FAILURE;
}
//If you want to run K-Fold cross validation using the dataset then you should first split the dataset into K-Folds
//A value of 10 splits the dataset into 10 folds and the true parameter signals that stratified sampling should be used
if( !trainingData.spiltDataIntoKFolds( 10, true ) ){
cout << "Failed to spiltDataIntoKFolds!\n";
//......... the rest of the code is omitted here .........
Example 14: main
int main(int argc, const char * argv[]){
//Load the training data
TimeSeriesClassificationData trainingData;
if( !trainingData.loadDatasetFromFile("HMMTrainingData.grt") ){
cout << "ERROR: Failed to load training data!\n";
return false;
}
//Remove 20% of the training data to use as test data
TimeSeriesClassificationData testData = trainingData.partition( 80 );
//The input to the HMM must be a quantized discrete value
//We therefore use a KMeansQuantizer to convert the N-dimensional continuous data into 1-dimensional discrete data
const UINT NUM_SYMBOLS = 10;
KMeansQuantizer quantizer( NUM_SYMBOLS );
//Train the quantizer using the training data
if( !quantizer.train( trainingData ) ){
cout << "ERROR: Failed to train quantizer!\n";
return false;
}
//Quantize the training data
TimeSeriesClassificationData quantizedTrainingData( 1 );
for(UINT i=0; i<trainingData.getNumSamples(); i++){
UINT classLabel = trainingData[i].getClassLabel();
MatrixDouble quantizedSample;
for(UINT j=0; j<trainingData[i].getLength(); j++){
quantizer.quantize( trainingData[i].getData().getRowVector(j) );
quantizedSample.push_back( quantizer.getFeatureVector() );
}
if( !quantizedTrainingData.addSample(classLabel, quantizedSample) ){
cout << "ERROR: Failed to quantize training data!\n";
return false;
}
}
//Create a new HMM instance
HMM hmm;
//Set the number of states in each model
hmm.setNumStates( 4 );
//Set the number of symbols in each model, this must match the number of symbols in the quantizer
hmm.setNumSymbols( NUM_SYMBOLS );
//Set the HMM model type to LEFTRIGHT with a delta of 1
hmm.setModelType( HiddenMarkovModel::LEFTRIGHT );
hmm.setDelta( 1 );
//Set the training parameters
hmm.setMinImprovement( 1.0e-5 );
hmm.setMaxNumIterations( 100 );
hmm.setNumRandomTrainingIterations( 20 );
//Train the HMM model
if( !hmm.train( quantizedTrainingData ) ){
cout << "ERROR: Failed to train the HMM model!\n";
return false;
}
//Save the HMM model to a file
if( !hmm.save( "HMMModel.grt" ) ){
cout << "ERROR: Failed to save the model to a file!\n";
return false;
}
//Load the HMM model from a file
if( !hmm.load( "HMMModel.grt" ) ){
cout << "ERROR: Failed to load the model from a file!\n";
return false;
}
//Quantize the test data
TimeSeriesClassificationData quantizedTestData( 1 );
for(UINT i=0; i<testData.getNumSamples(); i++){
UINT classLabel = testData[i].getClassLabel();
MatrixDouble quantizedSample;
for(UINT j=0; j<testData[i].getLength(); j++){
quantizer.quantize( testData[i].getData().getRowVector(j) );
quantizedSample.push_back( quantizer.getFeatureVector() );
}
if( !quantizedTestData.addSample(classLabel, quantizedSample) ){
cout << "ERROR: Failed to quantize training data!\n";
return false;
}
}
//......... the rest of the code is omitted here .........
Example 15: main
int main(int argc, const char * argv[]){
//Load the training data
TimeSeriesClassificationData trainingData;
if( !trainingData.load("HMMTrainingData.grt") ){
cout << "ERROR: Failed to load training data!\n";
return false;
}
//Remove 20% of the training data to use as test data
TimeSeriesClassificationData testData = trainingData.partition( 80 );
//Create a new HMM instance
HMM hmm;
//Set the HMM as a Continuous HMM
hmm.setHMMType( HMM_CONTINUOUS );
//Set the downsample factor, a higher downsample factor will speed up the prediction time, but might reduce the classification accuracy
hmm.setDownsampleFactor( 5 );
//Set the committee size, this sets the number of top models that will be used to make a prediction
hmm.setCommitteeSize( 10 );
//Tell the hmm algorithm that we want it to estimate sigma from the training data
hmm.setAutoEstimateSigma( true );
//Set the minimum value for sigma, you might need to adjust this based on the range of your data
//If you set setAutoEstimateSigma to false, then all sigma values will use the value below
hmm.setSigma( 20.0 );
//Set the HMM model type to LEFTRIGHT with a delta of 1, this means the HMM can only move from the left-most state to the right-most state
//in steps of 1
hmm.setModelType( HMM_LEFTRIGHT );
hmm.setDelta( 1 );
//Train the HMM model
if( !hmm.train( trainingData ) ){
cout << "ERROR: Failed to train the HMM model!\n";
return false;
}
//Save the HMM model to a file
if( !hmm.save( "HMMModel.grt" ) ){
cout << "ERROR: Failed to save the model to a file!\n";
return false;
}
//Load the HMM model from a file
if( !hmm.load( "HMMModel.grt" ) ){
cout << "ERROR: Failed to load the model from a file!\n";
return false;
}
//Compute the accuracy of the HMM models using the test data
double numCorrect = 0;
double numTests = 0;
for(UINT i=0; i<testData.getNumSamples(); i++){
UINT classLabel = testData[i].getClassLabel();
hmm.predict( testData[i].getData() );
if( classLabel == hmm.getPredictedClassLabel() ) numCorrect++;
numTests++;
VectorFloat classLikelihoods = hmm.getClassLikelihoods();
VectorFloat classDistances = hmm.getClassDistances();
cout << "ClassLabel: " << classLabel;
cout << " PredictedClassLabel: " << hmm.getPredictedClassLabel();
cout << " MaxLikelihood: " << hmm.getMaximumLikelihood();
cout << " ClassLikelihoods: ";
for(UINT k=0; k<classLikelihoods.size(); k++){
cout << classLikelihoods[k] << "\t";
}
cout << "ClassDistances: ";
for(UINT k=0; k<classDistances.size(); k++){
cout << classDistances[k] << "\t";
}
cout << endl;
}
cout << "Test Accuracy: " << numCorrect/numTests*100.0 << endl;
return true;
}