

C++ MatrixDouble Class Code Examples

This article collects typical usage examples of the C++ MatrixDouble class. If you have been wondering how MatrixDouble is used in practice, the selected class code examples below may help.


The following presents 15 code examples of the MatrixDouble class, sorted by popularity by default.

Example 1: restingDataCollected

CalibrateResult restingDataCollected(const MatrixDouble& data)
{
    // take average of X and Y acceleration as the zero G value
    zeroG = (data.getMean()[0] + data.getMean()[1]) / 2;
    oneG = data.getMean()[2]; // use Z acceleration as one G value
    
    double range = abs(oneG - zeroG);
    vector<double> stddev = data.getStdDev();
    
    if (stddev[0] / range > 0.05 ||
        stddev[1] / range > 0.05 ||
        stddev[2] / range > 0.05)
        return CalibrateResult(CalibrateResult::WARNING,
            "Accelerometer seemed to be moving; consider recollecting the "
            "calibration sample.");
    
    if (abs(data.getMean()[0] - data.getMean()[1]) / range > 0.1)
        return CalibrateResult(CalibrateResult::WARNING,
            "X and Y axes differ by " + std::to_string(
            abs(data.getMean()[0] - data.getMean()[1]) / range * 100) +
            " percent. Check that accelerometer is flat.");

    return CalibrateResult::SUCCESS;
}
Developer: damellis, Project: ESP, Lines: 24, Source: user_accelerometer_walk_detection.cpp
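The calibration above stores zeroG (the average of the resting X and Y means) and oneG (the resting Z mean) in globals used elsewhere in the sketch. As a hedged illustration of how such values are typically applied, the helper below converts a raw reading into units of g; the function name and the assumption that zeroG and oneG are accessible globals are illustrative only and not part of the original ESP example.

// Illustrative helper (assumption, not part of the ESP example):
// convert a raw accelerometer reading into units of g using the
// calibration values computed by restingDataCollected().
double rawToG(double raw)
{
    // zeroG ~ reading at 0 g, oneG ~ reading at 1 g (gravity on the Z axis)
    return (raw - zeroG) / (oneG - zeroG);
}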

Example 2: sample

bool TimeSeriesClassificationData::addSample(const UINT classLabel,const MatrixDouble &trainingSample){
	
    if( trainingSample.getNumCols() != numDimensions ){
        errorLog << "addSample(UINT classLabel, MatrixDouble trainingSample) - The dimensionality of the training sample (" << trainingSample.getNumCols() << ") does not match that of the dataset (" << numDimensions << ")" << endl;
        return false;
    }
    
    //The class label must be greater than zero (as zero is used for the null rejection class label)
    if( classLabel == GRT_DEFAULT_NULL_CLASS_LABEL && !allowNullGestureClass ){
        errorLog << "addSample(UINT classLabel, MatrixDouble sample) - the class label can not be 0!" << endl;
        return false;
    }

    TimeSeriesClassificationSample newSample(classLabel,trainingSample);
    data.push_back( newSample );
    totalNumSamples++;

    if( classTracker.size() == 0 ){
        ClassTracker tracker(classLabel,1);
        classTracker.push_back(tracker);
    }else{
        bool labelFound = false;
        for(UINT i=0; i<classTracker.size(); i++){
            if( classLabel == classTracker[i].classLabel ){
                classTracker[i].counter++;
                labelFound = true;
                break;
            }
        }
        if( !labelFound ){
            ClassTracker tracker(classLabel,1);
            classTracker.push_back(tracker);
        }
    }
    return true;
}
Developer: kodojong, Project: SignLanguage-Recognition, Lines: 36, Source: TimeSeriesClassificationData.cpp
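Callers are expected to assemble the trainingSample matrix one row per time step before passing it in. A minimal usage sketch, assuming a 3-dimensional dataset and using only calls that appear elsewhere in this article (setNumDimensions, MatrixDouble::push_back, addSample):

TimeSeriesClassificationData dataset;
dataset.setNumDimensions( 3 );              // each row must have 3 columns

MatrixDouble timeseries;
for(UINT t=0; t<100; t++){
    VectorDouble row( 3, 0.0 );             // one time step, e.g. ax, ay, az
    // ... fill row from your sensor here ...
    timeseries.push_back( row );            // append the time step as a new row
}

if( !dataset.addSample( 1, timeseries ) ){  // class label 1
    cout << "Failed to add the sample!" << endl;
}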

Example 3: vector

bool PrincipalComponentAnalysis::project(const MatrixDouble &data,MatrixDouble &prjData) {

    if( !trained ) {
        warningLog << "project(const MatrixDouble &data,MatrixDouble &prjData) - The PrincipalComponentAnalysis module has not been trained!" << endl;
        return false;
    }
    if( data.getNumCols() != numInputDimensions ) {
        warningLog << "project(const MatrixDouble &data,MatrixDouble &prjData) - The number of columns in the input vector (" << data.getNumCols() << ") does not match the number of input dimensions (" << numInputDimensions << ")!" << endl;
        return false;
    }

    MatrixDouble msData( data );
    prjData.resize(data.getNumRows(),numPrincipalComponents);

    if( normData ) {
        //Mean subtract the data
        for(UINT i=0; i<data.getNumRows(); i++)
            for(UINT j=0; j<numInputDimensions; j++)
                msData[i][j] = (msData[i][j]-mean[j])/stdDev[j];
    } else {
        //Mean subtract the data
        for(UINT i=0; i<data.getNumRows(); i++)
            for(UINT j=0; j<numInputDimensions; j++)
                msData[i][j] -= mean[j];
    }

    //Projected Data
    for(UINT row=0; row<msData.getNumRows(); row++) { //For each row in the final data
        for(UINT i=0; i<numPrincipalComponents; i++) { //For each PC
            prjData[row][i]=0;
            for(UINT j=0; j<data.getNumCols(); j++)//For each feature
                prjData[row][i] += msData[row][j] * eigenvectors[j][sortedEigenvalues[i].index];
        }
    }

    return true;
}
Developer: GaoXiaojian, Project: grt, Lines: 37, Source: PrincipalComponentAnalysis.cpp
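Written out, the projection loop computes, for each row $r$ of the (optionally standardized) data and each retained component $i$,

$$y_{ri} \;=\; \sum_{j=1}^{D} \tilde{x}_{rj}\, e_{j,\pi(i)}, \qquad \tilde{x}_{rj} = \begin{cases} \dfrac{x_{rj}-\mu_j}{\sigma_j} & \text{if normData} \\ x_{rj}-\mu_j & \text{otherwise,} \end{cases}$$

where $D$ is numInputDimensions, $e$ is the eigenvectors matrix, and $\pi(i)$ = sortedEigenvalues[i].index selects the eigenvector paired with the $i$-th largest eigenvalue.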

Example 4: main

int main(int argc, const char * argv[]){
    
    //Load the training data
    TimeSeriesClassificationData trainingData;
    
    if( !trainingData.loadDatasetFromFile("HMMTrainingData.grt") ){
        cout << "ERROR: Failed to load training data!\n";
        return false;
    }
    
    //Remove 20% of the training data to use as test data
    TimeSeriesClassificationData testData = trainingData.partition( 80 );
    
    //The input to the HMM must be a quantized discrete value
    //We therefore use a KMeansQuantizer to convert the N-dimensional continuous data into 1-dimensional discrete data
    const UINT NUM_SYMBOLS = 10;
    KMeansQuantizer quantizer( NUM_SYMBOLS );
    
    //Train the quantizer using the training data
    if( !quantizer.train( trainingData ) ){
        cout << "ERROR: Failed to train quantizer!\n";
        return false;
    }
    
    //Quantize the training data
    TimeSeriesClassificationData quantizedTrainingData( 1 );
    
    for(UINT i=0; i<trainingData.getNumSamples(); i++){
        
        UINT classLabel = trainingData[i].getClassLabel();
        MatrixDouble quantizedSample;
        
        for(UINT j=0; j<trainingData[i].getLength(); j++){
            quantizer.quantize( trainingData[i].getData().getRowVector(j) );
            
            quantizedSample.push_back( quantizer.getFeatureVector() );
        }
        
        if( !quantizedTrainingData.addSample(classLabel, quantizedSample) ){
            cout << "ERROR: Failed to quantize training data!\n";
            return false;
        }
        
    }
    
    //Create a new HMM instance
    HMM hmm;
    
    //Set the number of states in each model
    hmm.setNumStates( 4 );
    
    //Set the number of symbols in each model, this must match the number of symbols in the quantizer
    hmm.setNumSymbols( NUM_SYMBOLS );
    
    //Set the HMM model type to LEFTRIGHT with a delta of 1
    hmm.setModelType( HiddenMarkovModel::LEFTRIGHT );
    hmm.setDelta( 1 );
    
    //Set the training parameters
    hmm.setMinImprovement( 1.0e-5 );
    hmm.setMaxNumIterations( 100 );
    hmm.setNumRandomTrainingIterations( 20 );
    
    //Train the HMM model
    if( !hmm.train( quantizedTrainingData ) ){
        cout << "ERROR: Failed to train the HMM model!\n";
        return false;
    }
    
    //Save the HMM model to a file
    if( !hmm.save( "HMMModel.grt" ) ){
        cout << "ERROR: Failed to save the model to a file!\n";
        return false;
    }
    
    //Load the HMM model from a file
    if( !hmm.load( "HMMModel.grt" ) ){
        cout << "ERROR: Failed to load the model from a file!\n";
        return false;
    }
    
    //Quantize the test data
    TimeSeriesClassificationData quantizedTestData( 1 );
    
    for(UINT i=0; i<testData.getNumSamples(); i++){
        
        UINT classLabel = testData[i].getClassLabel();
        MatrixDouble quantizedSample;
        
        for(UINT j=0; j<testData[i].getLength(); j++){
            quantizer.quantize( testData[i].getData().getRowVector(j) );
            
            quantizedSample.push_back( quantizer.getFeatureVector() );
        }
        
        if( !quantizedTestData.addSample(classLabel, quantizedSample) ){
            cout << "ERROR: Failed to quantize training data!\n";
            return false;
        }
    }
//......... (remaining code omitted) .........
Developer: Amos-zq, Project: grt, Lines: 101, Source: HMMExample.cpp
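The listing is truncated before the evaluation stage. A sketch of how the quantized test data might then be scored with the trained model follows; predict(), getPredictedClassLabel() and getNumSamples() are standard GRT calls, but this particular loop is an assumption rather than part of the original HMMExample.cpp.

    // Sketch (assumption): classify each quantized test time series with the trained HMM
    double numCorrect = 0;
    for(UINT i=0; i<quantizedTestData.getNumSamples(); i++){

        UINT classLabel = quantizedTestData[i].getClassLabel();

        if( !hmm.predict( quantizedTestData[i].getData() ) ){
            cout << "ERROR: Failed to run prediction for test sample " << i << "!\n";
            return false;
        }

        if( hmm.getPredictedClassLabel() == classLabel ) numCorrect++;
    }
    cout << "Test accuracy: " << numCorrect / double( quantizedTestData.getNumSamples() ) * 100.0 << "%\n";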

Example 5: scale

bool GaussianMixtureModels::train_(MatrixDouble &data){
    
    trained = false;
    
    //Clear any previous training results
    det.clear();
    invSigma.clear();
    numTrainingIterationsToConverge = 0;
    
    if( data.getNumRows() == 0 ){
        errorLog << "train_(MatrixDouble &data) - Training Failed! Training data is empty!" << endl;
        return false;
    }
    
    //Resize the variables
    numTrainingSamples = data.getNumRows();
    numInputDimensions = data.getNumCols();
    
    //Resize mu and resp
    mu.resize(numClusters,numInputDimensions);
    resp.resize(numTrainingSamples,numClusters);
    
    //Resize sigma
    sigma.resize(numClusters);
    for(UINT k=0; k<numClusters; k++){
        sigma[k].resize(numInputDimensions,numInputDimensions);
    }
    
    //Resize frac and lndets
    frac.resize(numClusters);
    lndets.resize(numClusters);
    
    //Scale the data if needed
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<numTrainingSamples; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = scale(data[i][j],ranges[j].minValue,ranges[j].maxValue,0,1);
            }
        }
    }
    
    //Pick K random starting points for the initial guesses of Mu
    Random random;
    vector< UINT > randomIndexs(numTrainingSamples);
    for(UINT i=0; i<numTrainingSamples; i++) randomIndexs[i] = i;
    for(UINT i=0; i<numClusters; i++){
        SWAP(randomIndexs[ i ],randomIndexs[ random.getRandomNumberInt(0,numTrainingSamples) ]);
    }
    for(UINT k=0; k<numClusters; k++){
        for(UINT n=0; n<numInputDimensions; n++){
            mu[k][n] = data[ randomIndexs[k] ][n];
        }
    }
    
    //Setup sigma and the uniform prior on P(k)
    for(UINT k=0; k<numClusters; k++){
        frac[k] = 1.0/double(numClusters);
        for(UINT i=0; i<numInputDimensions; i++){
            for(UINT j=0; j<numInputDimensions; j++) sigma[k][i][j] = 0;
            sigma[k][i][i] = 1.0e-2;   //Set the diagonal to a small number
        }
    }
    
    loglike = 0;
    bool keepGoing = true;
    double change = 99.9e99;
    UINT numIterationsNoChange = 0;
    VectorDouble u(numInputDimensions);
	VectorDouble v(numInputDimensions);
    
    while( keepGoing ){
        
        //Run the estep
        if( estep( data, u, v, change ) ){
            
            //Run the mstep
            mstep( data );
        
            //Check for convergence
            if( fabs( change ) < minChange ){
                if( ++numIterationsNoChange >= minNumEpochs ){
                    keepGoing = false;
                }
            }else numIterationsNoChange = 0;
            if( ++numTrainingIterationsToConverge >= maxNumEpochs ) keepGoing = false;
            
        }else{
            errorLog << "train_(MatrixDouble &data) - Estep failed at iteration " << numTrainingIterationsToConverge << endl;
            return false;
        }
    }
    
    //Compute the inverse of sigma and the determinants for prediction
    if( !computeInvAndDet() ){
        det.clear();
        invSigma.clear();
        errorLog << "train_(MatrixDouble &data) - Failed to compute inverse and determinat!" << endl;
        return false;
    }
//......... (remaining code omitted) .........
Developer: H1115372943, Project: grt, Lines: 101, Source: GaussianMixtureModels.cpp
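estep() and mstep() implement the usual expectation-maximization updates for a Gaussian mixture. As a reference for what such steps compute (the exact GRT implementation is not shown in this excerpt and may differ in detail): the E-step evaluates the responsibilities $r_{ik} = \pi_k \,\mathcal{N}(x_i \mid \mu_k, \Sigma_k) / \sum_j \pi_j \,\mathcal{N}(x_i \mid \mu_j, \Sigma_j)$, and the M-step re-estimates $\pi_k = \frac{1}{N}\sum_i r_{ik}$, $\mu_k = \sum_i r_{ik} x_i / \sum_i r_{ik}$ and $\Sigma_k = \sum_i r_{ik}(x_i-\mu_k)(x_i-\mu_k)^{\top} / \sum_i r_{ik}$. Training stops once the change in log-likelihood stays below minChange for at least minNumEpochs iterations, or after maxNumEpochs iterations.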

Example 6: trainModel

bool KMeans::trainModel(MatrixDouble &data){
    
    if( numClusters == 0 ){
        errorLog << "trainModel(MatrixDouble &data) - Failed to train model. NumClusters is zero!" << endl;
		return false;
	}
    
    if( clusters.getNumRows() != numClusters ){
        errorLog << "trainModel(MatrixDouble &data) - Failed to train model. The number of rows in the cluster matrix does not match the number of clusters! You should need to initalize the clusters matrix first before calling this function!" << endl;
		return false;
	}
    
    if( clusters.getNumCols() != numInputDimensions ){
        errorLog << "trainModel(MatrixDouble &data) - Failed to train model. The number of columns in the cluster matrix does not match the number of input dimensions! You should need to initalize the clusters matrix first before calling this function!" << endl;
		return false;
	}

    Timer timer;
	UINT currentIter = 0;
    UINT numChanged = 0;
	bool keepTraining = true;
    double theta = 0;
    double lastTheta = 0;
    double delta = 0;
    double startTime = 0;
    thetaTracker.clear();
    finalTheta = 0;
    numTrainingIterationsToConverge = 0;
    trained = false;
    converged = false;
    
    //Scale the data if needed
    ranges = data.getRanges();
    if( useScaling ){
        data.scale(0,1);
    }

    //Init the assign and count vectors
    //Assign is set to K+1 so that the nChanged values in the eStep at the first iteration will be updated correctly
    for(UINT m=0; m<numTrainingSamples; m++) assign[m] = numClusters+1;
	for(UINT k=0; k<numClusters; k++) count[k] = 0;

    //Run the training loop
    timer.start();
	while( keepTraining ){
        startTime = timer.getMilliSeconds();

		//Compute the E step
		numChanged = estep( data );

        //Compute the M step
        mstep( data );

        //Update the iteration counter
		currentIter++;

		//Compute theta if needed
		if( computeTheta ){
            theta = calculateTheta(data);
            delta = lastTheta - theta;
            lastTheta = theta;
        }else theta = delta = 0;
        
		//Check convergence
		if( numChanged == 0 && currentIter > minNumEpochs ){ converged = true; keepTraining = false; }
		if( currentIter >= maxNumEpochs ){ keepTraining = false; }
		if( fabs( delta ) < minChange && computeTheta && currentIter > minNumEpochs ){ converged = true; keepTraining = false; }
        if( computeTheta )  thetaTracker.push_back( theta );
        
        trainingLog << "Epoch: " << currentIter << "/" << maxNumEpochs;
        trainingLog << " Epoch time: " << (timer.getMilliSeconds()-startTime)/1000.0 << " seconds";
        trainingLog << " Theta: " << theta << " Delta: " << delta << endl;
	}
    trainingLog << "Model Trained at epoch: " << currentIter << " with a theta value of: " << theta << endl;

    finalTheta = theta;
    numTrainingIterationsToConverge = currentIter;
	trained = true;
	
	return true;
}
Developer: H1115372943, Project: grt, Lines: 81, Source: KMeans.cpp
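The scaling step (data.scale(0,1) here, and the free scale() function used in other examples in this article) is a plain linear mapping of each dimension from its observed range onto a target range:

$$x' \;=\; \frac{x - \text{minValue}_j}{\text{maxValue}_j - \text{minValue}_j}\,(\text{maxTarget} - \text{minTarget}) + \text{minTarget},$$

which reduces to $(x - \text{min}_j)/(\text{max}_j - \text{min}_j)$ for the $[0,1]$ target used above.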

Example 7: scale

bool KMeansFeatures::train_(MatrixDouble &trainingData){
    
    if( !initialized ){
        errorLog << "train_(MatrixDouble &trainingData) - The quantizer has not been initialized!" << endl;
        return false;
    }
    
    //Reset any previous model
    featureDataReady = false;
    
    const UINT M = trainingData.getNumRows();
    const UINT N = trainingData.getNumCols();
    
    numInputDimensions = N;
    numOutputDimensions = numClustersPerLayer[ numClustersPerLayer.size()-1 ];
    
    //Scale the input data if needed
    ranges = trainingData.getRanges();
    if( useScaling ){
        for(UINT i=0; i<M; i++){
            for(UINT j=0; j<N; j++){
                trainingData[i][j] = scale(trainingData[i][j],ranges[j].minValue,ranges[j].maxValue,0,1.0);
            }
        }
    }
    
    //Train the KMeans model at each layer
    const UINT K = (UINT)numClustersPerLayer.size();
    for(UINT k=0; k<K; k++){
        KMeans kmeans;
        kmeans.setNumClusters( numClustersPerLayer[k] );
        kmeans.setComputeTheta( true );
        kmeans.setMinChange( minChange );
        kmeans.setMinNumEpochs( minNumEpochs );
        kmeans.setMaxNumEpochs( maxNumEpochs );
        
        trainingLog << "Layer " << k+1 << "/" << K << " NumClusters: " << numClustersPerLayer[k] << endl;
        if( !kmeans.train_( trainingData ) ){
            errorLog << "train_(MatrixDouble &trainingData) - Failed to train kmeans model at layer: " << k << endl;
            return false;
        }
        
        //Save the clusters
        clusters.push_back( kmeans.getClusters() );
        
        //Project the data through the current layer to use as training data for the next layer
        if( k+1 != K ){
            MatrixDouble data( M, numClustersPerLayer[k] );
            VectorDouble input( trainingData.getNumCols() );
            VectorDouble output( data.getNumCols() );
            
            for(UINT i=0; i<M; i++){
                
                //Copy the data into the sample
                for(UINT j=0; j<input.size(); j++){
                    input[j] = trainingData[i][j];
                }
                
                //Project the sample through the current layer
                if( !projectDataThroughLayer( input, output, k ) ){
                    errorLog << "train_(MatrixDouble &trainingData) - Failed to project sample through layer: " << k << endl;
                    return false;
                }
                
                //Copy the result into the training data for the next layer
                for(UINT j=0; j<output.size(); j++){
                    data[i][j] = output[j];
                }
            }
            
            //Swap the data for the next layer
            trainingData = data;
            
        }
        
    }
    
    //Flag that the kmeans model has been trained
    trained = true;
    featureVector.resize( numOutputDimensions, 0 );
    
    return true;
}
Developer: ios4u, Project: grt, Lines: 83, Source: KMeansFeatures.cpp
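projectDataThroughLayer() is not shown in this excerpt. Purely as an assumption about what a per-layer projection of this kind could look like (for instance, encoding an input by its distance to each cluster center of the layer), here is a self-contained sketch; the real GRT implementation may use a different encoding entirely.

// Sketch (assumption only): encode an input vector as its squared Euclidean
// distance to each cluster center of one k-means layer.
bool projectThroughLayerSketch(const MatrixDouble &layerClusters,
                               const VectorDouble &input, VectorDouble &output)
{
    const UINT K = layerClusters.getNumRows();   // clusters in this layer
    const UINT N = layerClusters.getNumCols();   // input dimensionality
    if( input.size() != N ) return false;

    output.resize( K );
    for(UINT k=0; k<K; k++){
        double dist = 0;
        for(UINT j=0; j<N; j++){
            const double d = input[j] - layerClusters[k][j];
            dist += d * d;
        }
        output[k] = dist;                        // one feature per cluster
    }
    return true;
}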

Example 8: ExceptionDimension

/*!
 * Solve linear system using Gauss-Jordan procedure
 *
 * \throw	ExceptionDimension	incompatible matrix dimensions
 * \throw ExceptionRuntime	Equation has either no solution or an infinity of solutions
 *
 * \param[in]	Coefficients	matrix of equations' coefficients
 * \param[in]	ConstantTerms	column matrix of constant terms
 *
 * \return	solutions packed in a column matrix (SMatrixDouble)
 */
MatrixDouble LinearSystem::GaussJordan(const SquareMatrixDouble &Coefficients, const MatrixDouble &ConstantTerms)
{
	size_t n = Coefficients.GetRows();
		
	if (ConstantTerms.GetRows() != n)
	{
		throw ExceptionDimension(StringUTF8("LinearEquationsSystem::GaussJordanSolver("
					"const SquareMatrixDouble *Coefficients, const MatrixDouble *ConstantTerms): ") + 
				_("invalid or incompatible matrix dimensions"));
	}
	else
	{
		USquareMatrixDouble CopyCoefficients = CloneAs<SquareMatrixDouble>(Coefficients);
		UMatrixDouble CopyConstantTerms = CloneAs<MatrixDouble>(ConstantTerms);
	
		for (size_t c = 0; c < n - 1; c++)
		{
			// Search the greatest pivot in column
				
			double Pivot = CopyCoefficients->At(c, c);
			double AbsMaxPivot = fabs(Pivot);
			size_t RowIndex = c;
				
			for (size_t r = c + 1 ; r < n; r++)
			{
				double Candidate = CopyCoefficients->At(r, c);
					
				if (fabs(Candidate) > AbsMaxPivot)
				{
					Pivot = Candidate;
					AbsMaxPivot = fabs(Pivot);
					RowIndex = r;
				}
			}
			
			// If no non-null pivot found, system may have infinite number of solutions
				
			if (Pivot == 0.0)
			{
				throw ExceptionRuntime(_("Equation has either no solution or an infinity of solutions."));
			}
			
			if (RowIndex != c)
			{
				CopyCoefficients->SwapRows(c, RowIndex);
				CopyConstantTerms->SwapRows(c, RowIndex);
			}
				// Elimination
			
			for (size_t r = c + 1; r < n; r++)
			{
				double Coeff = CopyCoefficients->At(r, c);
				
				if (Coeff != 0.0)
				{
					double Scale = - Coeff / Pivot;
					
					for (size_t k = c; k < n; k++)
					{
						CopyCoefficients->IncreaseElement(r, k, CopyCoefficients->At(c, k) * Scale);
					}
					
					CopyConstantTerms->IncreaseElement(r, 0, CopyConstantTerms->At(c, 0) * Scale);
				}
			}
		}
		// End of loop for column
			
		MatrixDouble Solutions(n, 1, 0.0);
			
		Solutions.At(n - 1, 0) = CopyConstantTerms->At(n - 1, 0) / CopyCoefficients->At(n - 1, n - 1);
			
		for (auto r = int(n) - 2; r >= 0; --r)
		{
			double Cumul = 0.0;
			
			for (auto c = int(n) - 1; c > r; --c)
			{
				Cumul += CopyCoefficients->At(r, c) * Solutions.At(c, 0);
			}
				
			Solutions.At(r, 0) = (CopyConstantTerms->At(r, 0) - Cumul) / CopyCoefficients->At(r, r);
		}
			
		return Solutions;
	}
}
Developer: Liris-Pleiad, Project: libcrn, Lines: 98, Source: CRNEquationSolver.cpp
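A small worked instance of the same procedure (partial pivoting, forward elimination, then back substitution), independent of the libcrn types: solve

$$\begin{pmatrix} 1 & 2 \\ 3 & 4 \end{pmatrix}\begin{pmatrix} x_1 \\ x_2 \end{pmatrix} = \begin{pmatrix} 5 \\ 6 \end{pmatrix}.$$

Pivoting on column 1 swaps the rows ($|3| > |1|$), giving $(3\;\;4 \mid 6)$ and $(1\;\;2 \mid 5)$. Eliminating with the scale $-1/3$ turns the second row into $(0,\; 2 - \tfrac{4}{3} \mid 5 - 2) = (0,\; \tfrac{2}{3} \mid 3)$. Back substitution then yields $x_2 = 3 / \tfrac{2}{3} = \tfrac{9}{2}$ and $x_1 = (6 - 4\cdot\tfrac{9}{2})/3 = -4$, which indeed satisfies both original equations.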

Example 9: train

bool ANBC_Model::train(UINT classLabel,MatrixDouble &trainingData,VectorDouble &weightsVector){

	//Check to make sure the column sizes match
	if( trainingData.getNumCols() != weightsVector.size() ){
		N = 0;
		return false;
	}
    
	UINT M = trainingData.getNumRows();
	N = trainingData.getNumCols();
    this->classLabel = classLabel;

	//Update the weights buffer
	weights = weightsVector;

	//Resize the buffers
	mu.resize( N );
	sigma.resize( N );

	//Calculate the mean for each dimension
	for(UINT j=0; j<N; j++){
		mu[j] = 0.0;

		for(UINT i=0; i<M; i++){
			mu[j] += trainingData[i][j];
        }

		mu[j] /= double(M);
        
        if( mu[j] == 0 ){
            return false;
        }
	}

	//Calculate the sample standard deviation
	for(UINT j=0; j<N; j++){
		sigma[j] = 0.0;

		for(UINT i=0; i<M; i++){
			sigma[j] += SQR( trainingData[i][j]-mu[j] );
        }

		sigma[j] = sqrt( sigma[j]/double(M-1) );
        
        if( sigma[j] == 0 ){
            return false;
        }
	}

	//Now compute the threshold
    double meanPrediction = 0.0;
	VectorDouble predictions(M);
	for(UINT i=0; i<M; i++){
		//Test the ith training example
		vector<double> testData(N);
		for(UINT j=0; j<N; j++) {
			testData[j] = trainingData[i][j];
        }
        
		predictions[i] = predict(testData);
        meanPrediction += predictions[i];
	}

	//Calculate the mean prediction value
	meanPrediction /= double(M);

	//Calculate the standard deviation
	double stdDev = 0.0;
	for(UINT i=0; i<M; i++) {
		stdDev += SQR( predictions[i]-meanPrediction );
    }
	stdDev = sqrt( stdDev / (double(M)-1.0) );

	threshold = meanPrediction-(stdDev*gamma);

	//Update the training mu and sigma values so the threshold value can be dynamically computed at a later stage
	trainingMu = meanPrediction;
	trainingSigma = stdDev;

	return true;
}
Developer: Mr07, Project: MA-Gesture-Recognition, Lines: 81, Source: ANBC_Model.cpp
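In the notation of the code, the per-dimension statistics and the final null-rejection threshold are

$$\mu_j = \frac{1}{M}\sum_{i=1}^{M} x_{ij}, \qquad \sigma_j = \sqrt{\frac{1}{M-1}\sum_{i=1}^{M}\bigl(x_{ij}-\mu_j\bigr)^2}, \qquad \text{threshold} = \bar{p} - \gamma\, s_p,$$

where $\bar{p}$ and $s_p$ are the mean and sample standard deviation of predict() evaluated on the $M$ training examples, and $\gamma$ is the model's gamma parameter.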

Example 10: results

bool DTW::train_NDDTW(LabelledTimeSeriesClassificationData &trainingData,DTWTemplate &dtwTemplate,UINT &bestIndex){

   UINT numExamples = trainingData.getNumSamples();
   VectorDouble results(numExamples,0.0);
   MatrixDouble distanceResults(numExamples,numExamples);
   dtwTemplate.averageTemplateLength = 0;
    
   for(UINT m=0; m<numExamples; m++){
       
	   MatrixDouble templateA; //The m'th template
	   MatrixDouble templateB; //The n'th template
	   dtwTemplate.averageTemplateLength += trainingData[m].getLength();

	   //Smooth the data if required
	   if( useSmoothing ) smoothData(trainingData[m].getData(),smoothingFactor,templateA);
	   else templateA = trainingData[m].getData();
       
       if( offsetUsingFirstSample ){
           offsetTimeseries(templateA);
       }

	   for(UINT n=0; n<numExamples; n++){
		if(m!=n){
		    //Smooth the data if required
		    if( useSmoothing ) smoothData(trainingData[n].getData(),smoothingFactor,templateB);
		    else templateB = trainingData[n].getData();
            
            if( offsetUsingFirstSample ){
                offsetTimeseries(templateB);
            }

			//Compute the distance between the two time series
            MatrixDouble distanceMatrix(templateA.getNumRows(),templateB.getNumRows());
            vector< IndexDist > warpPath;
			double dist = computeDistance(templateA,templateB,distanceMatrix,warpPath);
            
            trainingLog << "Template: " << m << " Timeseries: " << n << " Dist: " << dist << endl;

			//Update the results values
			distanceResults[m][n] = dist;
			results[m] += dist;
		}else distanceResults[m][n] = 0; //The distance is zero because the two timeseries are the same
	   }
   }

	for(UINT m=0; m<numExamples; m++) results[m]/=(numExamples-1);
	//Find the best average result, this is the result with the minimum value
	bestIndex = 0;
	double bestAverage = results[0];
	for(UINT m=1; m<numExamples; m++){
		if( results[m] < bestAverage ){
			bestAverage = results[m];
			bestIndex = m;
		}
	}

    if( numExamples > 2 ){

        //Work out the threshold value for the best template
        dtwTemplate.trainingMu = results[bestIndex];
        dtwTemplate.trainingSigma = 0.0;

        for(UINT n=0; n<numExamples; n++){
            if(n!=bestIndex){
                dtwTemplate.trainingSigma += SQR( distanceResults[ bestIndex ][n] - dtwTemplate.trainingMu );
            }
        }
        dtwTemplate.trainingSigma = sqrt( dtwTemplate.trainingSigma / double(numExamples-2) );
    }else{
        warningLog << "_train_NDDTW(LabelledTimeSeriesClassificationData &trainingData,DTWTemplate &dtwTemplate,UINT &bestIndex - There are not enough examples to compute the trainingMu and trainingSigma for the template for class " << dtwTemplate.classLabel << endl;
        dtwTemplate.trainingMu = 0.0;
        dtwTemplate.trainingSigma = 0.0;
    }

	//Set the average length of the training examples
	dtwTemplate.averageTemplateLength = (UINT) (dtwTemplate.averageTemplateLength/double(numExamples));
    
    trainingLog << "AverageTemplateLength: " << dtwTemplate.averageTemplateLength << endl;

    //Flag that the training was successful
	return true;
}
Developer: gaurav38, Project: HackDuke13, Lines: 82, Source: DTW.cpp
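In short, the template selected for the class is the training example with the smallest average DTW distance to all other examples, and its rejection statistics come from its row of the distance matrix:

$$\text{bestIndex} = \arg\min_{m} \frac{1}{N_e - 1}\sum_{n \neq m} d(m,n), \qquad \sigma = \sqrt{\frac{1}{N_e - 2}\sum_{n \neq \text{bestIndex}} \bigl(d(\text{bestIndex}, n) - \mu\bigr)^2},$$

with $\mu = $ results[bestIndex] and $N_e$ the number of training examples (hence the warning when $N_e \le 2$).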

Example 11: computeDistance

double DTW::computeDistance(MatrixDouble &timeSeriesA,MatrixDouble &timeSeriesB,MatrixDouble &distanceMatrix,vector< IndexDist > &warpPath){

	const int M = timeSeriesA.getNumRows();
	const int N = timeSeriesB.getNumRows();
	const int C = timeSeriesA.getNumCols();
	int i,j,k,index = 0;
	double totalDist,v,normFactor = 0.;
    
    warpPath.clear();
    if( int(distanceMatrix.getNumRows()) != M || int(distanceMatrix.getNumCols()) != N ){
        distanceMatrix.resize(M, N);
    }

	switch (distanceMethod) {
		case (ABSOLUTE_DIST):
			for(i=0; i<M; i++){
				for(j=0; j<N; j++){
					distanceMatrix[i][j] = 0.0;
					for(k=0; k< C; k++){
					   distanceMatrix[i][j] += fabs(timeSeriesA[i][k]-timeSeriesB[j][k]);
					}
				}
			}
			break;
		case (EUCLIDEAN_DIST):
			//Calculate Euclidean Distance for all possible values
			for(i=0; i<M; i++){
				for(j=0; j<N; j++){
					distanceMatrix[i][j] = 0.0;
					for(k=0; k< C; k++){
						distanceMatrix[i][j] += SQR( timeSeriesA[i][k]-timeSeriesB[j][k] );
					}
					distanceMatrix[i][j] = sqrt( distanceMatrix[i][j] );
				}
			}
			break;
		case (NORM_ABSOLUTE_DIST):
			for(i=0; i<M; i++){
				for(j=0; j<N; j++){
					distanceMatrix[i][j] = 0.0;
					for(k=0; k< C; k++){
					   distanceMatrix[i][j] += fabs(timeSeriesA[i][k]-timeSeriesB[j][k]);
					}
					distanceMatrix[i][j]/=N;
				}
			}
			break;
		default:
			errorLog<<"ERROR: Unknown distance method: "<<distanceMethod<<endl;
			return -1;
			break;
	}

    //Run the recursive search function to build the cost matrix
    double distance = sqrt( d(M-1,N-1,distanceMatrix,M,N) );

    if( isinf(distance) || isnan(distance) ){
        warningLog << "DTW computeDistance(...) - Distance Matrix Values are INF!" << endl;
        return INFINITY;
    }
    
    //cout << "DIST: " << distance << endl;

    //The distMatrix values are negative so make them positive
    for(i=0; i<M; i++){
        for(j=0; j<N; j++){
            distanceMatrix[i][j] = fabs( distanceMatrix[i][j] );
        }
    }

	//Now Create the Warp Path through the cost matrix, starting at the end
    i=M-1;
	j=N-1;
	totalDist = distanceMatrix[i][j];
    warpPath.push_back( IndexDist(i,j,distanceMatrix[i][j]) );
    
	//Use dynamic programming to navigate through the cost matrix until [0][0] has been reached
    normFactor = 1;
	while( true ) {
        if( i==0 && j==0 ) break;
		if( i==0 ){ j--; }
        else{ 
            if( j==0 ) i--;
            else{
                //Find the minimum cell to move to
                v = numeric_limits<double>::max();
                index = 0;
                if( distanceMatrix[i-1][j] < v ){ v = distanceMatrix[i-1][j]; index = 1; }
                if( distanceMatrix[i][j-1] < v ){ v = distanceMatrix[i][j-1]; index = 2; }
                if( distanceMatrix[i-1][j-1] <= v ){ index = 3; }
                switch(index){
                    case(1):
                        i--;
                        break;
                    case(2):
                        j--;
                        break;
                    case(3):
                        i--;
                        j--;
//......... (remaining code omitted) .........
Developer: gaurav38, Project: HackDuke13, Lines: 101, Source: DTW.cpp
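The call d(M-1, N-1, distanceMatrix, M, N) fills the accumulated-cost matrix with the classic dynamic time warping recurrence; the quantity it builds up is

$$D(i,j) = \text{dist}(i,j) + \min\bigl\{ D(i-1,j),\; D(i,j-1),\; D(i-1,j-1) \bigr\}, \qquad D(0,0) = \text{dist}(0,0),$$

after which the warp path is read back from $(M-1, N-1)$ to $(0,0)$ by repeatedly stepping to the cheapest of the three predecessor cells, exactly what the while loop above does.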

Example 12: matrix

bool HMM::predict_continuous(MatrixDouble &timeseries){
    
    if( !trained ){
        errorLog << "predict_continuous(MatrixDouble &timeseries) - The HMM classifier has not been trained!" << endl;
        return false;
    }
    
	if( timeseries.getNumCols() != numInputDimensions ){
        errorLog << "predict_continuous(MatrixDouble &timeseries) - The number of columns in the input matrix (" << timeseries.getNumCols() << ") does not match the num features in the model (" << numInputDimensions << endl;
		return false;
	}
    
    //Scale the input vector if needed
    if( useScaling ){
        const UINT timeseriesLength = timeseries.getNumRows();
        for(UINT j=0; j<numInputDimensions; j++){
            for(UINT i=0; i<timeseriesLength; i++){
                timeseries[i][j] = scale(timeseries[i][j], ranges[j].minValue, ranges[j].maxValue, 0, 1);
            }
        }
    }
    
    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
    
    std::fill(classLikelihoods.begin(),classLikelihoods.end(),0);
    std::fill(classDistances.begin(),classDistances.end(),0);
    
    bestDistance = -1000;
    UINT bestIndex = 0;
    double minValue = -1000;
    
    const UINT numModels = (UINT)continuousModels.size();
    vector< IndexedDouble > results(numModels);
    for(UINT i=0; i<numModels; i++){
        
        //Run the prediction for this model
        if( continuousModels[i].predict_( timeseries ) ){
            results[i].value = continuousModels[i].getLoglikelihood();
            results[i].index = continuousModels[i].getClassLabel();
        }else{
            errorLog << "predict_(VectorDouble &inputVector) - Prediction failed for model: " << i << endl;
            return false;
        }
        
        if( results[i].value < minValue ){
            minValue = results[i].value;
        }
        
        if( results[i].value > bestDistance ){
            bestDistance = results[i].value;
            bestIndex = i;
        }
    }
    
    //Store the phase from the best model
    phase = continuousModels[ bestIndex ].getPhase();
    
    //Sort the results
    std::sort(results.begin(),results.end(),IndexedDouble::sortIndexedDoubleByValueDescending);
    
    //Run the majority vote
    const double committeeWeight = 1.0 / committeeSize;
    for(UINT i=0; i<committeeSize; i++){
        classDistances[ getClassLabelIndexValue( results[i].index ) ] += Util::scale(results[i].value, -1000, 0, 0, committeeWeight, true);
    }
    
    //Turn the class distances into likelihoods
    double sum = Util::sum(classDistances);
    if( sum > 0 ){
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] = classDistances[k] / sum;
        }
        
        //Find the maximum label
        for(UINT k=0; k<numClasses; k++){
            if( classDistances[k] > bestDistance ){
                bestDistance = classDistances[k];
                bestIndex = k;
            }
        }
        
        maxLikelihood = classLikelihoods[ bestIndex ];
        predictedClassLabel = classLabels[ bestIndex ];
    }else{
        //If the sum is not greater than 1, then no class is close to any model
        maxLikelihood = 0;
        predictedClassLabel = 0;
    }
    return true;
}
Developer: eboix, Project: Myo-Gesture, Lines: 91, Source: HMM.cpp
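The committee vote maps each model's log-likelihood, clamped to $[-1000, 0]$, onto an equal share of the vote before summing per class. With the constrain flag set, Util::scale acts as a clamped linear map, so each committee member effectively contributes

$$w_i = \frac{\min(\max(\ell_i, -1000),\, 0) + 1000}{1000} \cdot \frac{1}{\text{committeeSize}},$$

i.e. a model stuck at the floor adds nothing, while a model with log-likelihood near 0 adds its full $1/\text{committeeSize}$ share; the per-class sums are then normalized into classLikelihoods by dividing by their total.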

Example 13: predict_discrete

bool HMM::predict_discrete(MatrixDouble &timeseries){
    
    if( !trained ){
        errorLog << "predict_continuous(MatrixDouble &timeseries) - The HMM classifier has not been trained!" << endl;
        return false;
    }
    
    if( timeseries.getNumCols() != 1 ){
        errorLog << "predict_discrete(MatrixDouble &timeseries) The number of columns in the input matrix must be 1. It is: " << timeseries.getNumCols() << endl;
        return false;
    }
    
    //Convert the matrix double to observations
    const UINT M = timeseries.getNumRows();
    vector<UINT> observationSequence( M );
    
    for(UINT i=0; i<M; i++){
        observationSequence[i] = (UINT)timeseries[i][0];
        
        if( observationSequence[i] >= numSymbols ){
            errorLog << "predict_discrete(VectorDouble &inputVector) - The new observation is not a valid symbol! It should be in the range [0 numSymbols-1]" << endl;
            return false;
        }
    }
    
    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
    
    bestDistance = -99e+99;
    UINT bestIndex = 0;
    double sum = 0;
	for(UINT k=0; k<numClasses; k++){
		classDistances[k] = discreteModels[k].predict( observationSequence );
        
        //Set the class likelihood as the antilog of the class distances
        classLikelihoods[k] = antilog( classDistances[k] );
        
        //The loglikelihood values are negative so we want the values closest to 0
		if( classDistances[k] > bestDistance ){
			bestDistance = classDistances[k];
			bestIndex = k;
		}
        
        sum += classLikelihoods[k];
    }
    
    //Turn the class distances into proper likelihoods
    for(UINT k=0; k<numClasses; k++){
		classLikelihoods[k] /= sum;
    }
    
    maxLikelihood = classLikelihoods[ bestIndex ];
    predictedClassLabel = classLabels[ bestIndex ];
    
    if( useNullRejection ){
        if( maxLikelihood > nullRejectionThresholds[ bestIndex ] ){
            predictedClassLabel = classLabels[ bestIndex ];
        }else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
    }
    
    return true;
}
Developer: eboix, Project: Myo-Gesture, Lines: 62, Source: HMM.cpp

Example 14: main

int main() {
    vector<string> gestures(0,"");
    GetFilesInDirectory(gestures, "rawdata");
    CreateDirectory("processed", NULL);
    sort(gestures.begin(), gestures.end());
    data = vector<vector<vector<double > > >(gestures.size(), vector<vector<double > >(0,vector<double>(0,0)));
    for(size_t i = 0; i < gestures.size(); i++) {
        ifstream fin(gestures[i]);
        int n; fin >> n;
       // cerr << gestures[i] << endl;
       // cerr << n << endl;
        data[i] = vector<vector<double> >(n, vector<double>(NUMPARAM, 0));
        for(int j = 0; j < n; j++) {
            for(int k = 0; k < NUMPARAM; k++) {
                fin >> data[i][j][k];
            }
        }
        fin.close();
    }


    //Create a new instance of the TimeSeriesClassificationDataStream
    TimeSeriesClassificationData trainingData;

    // ax, ay, az
    trainingData.setNumDimensions(3);
    trainingData.setDatasetName("processed\\GestureTrainingData.txt");
    ofstream labelfile("processed\\GestureTrainingDataLabels.txt");
    UINT currLabel = 1;
    Random random;
    map<string, int> gesturenames;
    for(size_t overall = 0; overall < gestures.size(); overall++) {

        string nam = gestures[overall].substr(8,gestures[overall].find_first_of('_')-8);
        if(gesturenames.count(nam)) currLabel = gesturenames[nam];
        else {
            currLabel = gesturenames.size()+1;
            gesturenames[nam] = currLabel;
            labelfile << currLabel << " " << nam << endl;
        }
        MatrixDouble trainingSample;
        VectorDouble currVec( trainingData.getNumDimensions() );
        for(size_t k = 1; k < data[overall].size(); k++) {
            for(UINT j=0; j<currVec.size(); j++){
                currVec[j] = data[overall][k][j];
            }
            trainingSample.push_back(currVec);
        }
        trainingData.addSample(currLabel, trainingSample);

    }
    for(size_t i = 0; i < gestures.size(); i++) {
        MatrixDouble trainingSample;
        VectorDouble currVec(trainingData.getNumDimensions());
        for(UINT j = 0; j < currVec.size(); j++) {
            currVec[j] = random.getRandomNumberUniform(-1.0, 1.0);
        }
        for(size_t k = 0; k < 100; k++) {
            trainingSample.push_back(currVec);
        }
        trainingData.addSample(0, trainingSample);
    }

    //After recording your training data you can then save it to a file
    if( !trainingData.save( "processed\\TrainingData.grt" ) ){
        cout << "ERROR: Failed to save dataset to file!\n";
        return EXIT_FAILURE;
    }

    //This can then be loaded later
    if( !trainingData.load( "processed\\TrainingData.grt" ) ){
        cout << "ERROR: Failed to load dataset from file!\n";
        return EXIT_FAILURE;
    }

    //This is how you can get some stats from the training data
    string datasetName = trainingData.getDatasetName();
    string infoText = trainingData.getInfoText();
    UINT numSamples = trainingData.getNumSamples();
    UINT numDimensions = trainingData.getNumDimensions();
    UINT numClasses = trainingData.getNumClasses();

    cout << "Dataset Name: " << datasetName << endl;
    cout << "InfoText: " << infoText << endl;
    cout << "NumberOfSamples: " << numSamples << endl;
    cout << "NumberOfDimensions: " << numDimensions << endl;
    cout << "NumberOfClasses: " << numClasses << endl;

    //You can also get the minimum and maximum ranges of the data
    vector< MinMax > ranges = trainingData.getRanges();

    cout << "The ranges of the dataset are: \n";
    for(UINT j=0; j<ranges.size(); j++){
        cout << "Dimension: " << j << " Min: " << ranges[j].minValue << " Max: " << ranges[j].maxValue << endl;
    }

    DTW dtw;

    if( !dtw.train( trainingData ) ){
        cerr << "Failed to train classifier!\n";
//......... (remaining code omitted) .........
Developer: eboix, Project: Myo-Gesture, Lines: 101, Source: processraw.cpp

Example 15: main

int main (int argc, const char * argv[])
{
    //Create a new instance of the TimeSeriesClassificationData
    TimeSeriesClassificationData trainingData;
    
    //Set the dimensionality of the data (you need to do this before you can add any samples)
    trainingData.setNumDimensions( 3 );
    
    //You can also give the dataset a name (the name should have no spaces)
    trainingData.setDatasetName("DummyData");
    
    //You can also add some info text about the data
    trainingData.setInfoText("This data contains some dummy timeseries data");
    
    //Here you would record a time series, when you have finished recording the time series then add the training sample to the training data
    UINT gestureLabel = 1;
    MatrixDouble trainingSample;
    
    //For now we will just add 10 x 20 random walk data timeseries
    Random random;
    for(UINT k=0; k<10; k++){//For the number of classes
        gestureLabel = k+1;
        
        //Get the init random walk position for this gesture
        VectorDouble startPos( trainingData.getNumDimensions() );
        for(UINT j=0; j<startPos.size(); j++){
            startPos[j] = random.getRandomNumberUniform(-1.0,1.0);
        }
                
        //Generate the 20 time series
        for(UINT x=0; x<20; x++){
            
            //Clear any previous timeseries
            trainingSample.clear();
            
            //Generate the random walk
            UINT randomWalkLength = random.getRandomNumberInt(90, 110);
            VectorDouble sample = startPos;
            for(UINT i=0; i<randomWalkLength; i++){
                for(UINT j=0; j<startPos.size(); j++){
                    sample[j] += random.getRandomNumberUniform(-0.1,0.1);
                }
                
                //Add the sample to the training sample
                trainingSample.push_back( sample );
            }
            
            //Add the training sample to the dataset
            trainingData.addSample( gestureLabel, trainingSample );
            
        }
    }
    
    //After recording your training data you can then save it to a file
    if( !trainingData.saveDatasetToFile( "TrainingData.txt" ) ){
	    cout << "Failed to save dataset to file!\n";
	    return EXIT_FAILURE;
	}
    
    //This can then be loaded later
    if( !trainingData.loadDatasetFromFile( "TrainingData.txt" ) ){
		cout << "Failed to load dataset from file!\n";
		return EXIT_FAILURE;
	}
    
    //This is how you can get some stats from the training data
    string datasetName = trainingData.getDatasetName();
    string infoText = trainingData.getInfoText();
    UINT numSamples = trainingData.getNumSamples();
    UINT numDimensions = trainingData.getNumDimensions();
    UINT numClasses = trainingData.getNumClasses();
    
    cout << "Dataset Name: " << datasetName << endl;
    cout << "InfoText: " << infoText << endl;
    cout << "NumberOfSamples: " << numSamples << endl;
    cout << "NumberOfDimensions: " << numDimensions << endl;
    cout << "NumberOfClasses: " << numClasses << endl;
    
    //You can also get the minimum and maximum ranges of the data
    vector< MinMax > ranges = trainingData.getRanges();
    
    cout << "The ranges of the dataset are: \n";
    for(UINT j=0; j<ranges.size(); j++){
        cout << "Dimension: " << j << " Min: " << ranges[j].minValue << " Max: " << ranges[j].maxValue << endl;
    }
    
    //If you want to partition the dataset into a training dataset and a test dataset then you can use the partition function
    //A value of 80 means that 80% of the original data will remain in the training dataset and 20% will be returned as the test dataset
    TimeSeriesClassificationData testData = trainingData.partition( 80 );
    
    //If you have multiple datasets that you want to merge together then use the merge function
    if( !trainingData.merge( testData ) ){
		cout << "Failed to merge datasets!\n";
		return EXIT_FAILURE;
	}
    
    //If you want to run K-Fold cross validation using the dataset then you should first split the dataset into K-Folds
    //A value of 10 splits the dataset into 10 folds and the true parameter signals that stratified sampling should be used
    if( !trainingData.spiltDataIntoKFolds( 10, true ) ){
		cout << "Failed to spiltDataIntoKFolds!\n";
//......... (remaining code omitted) .........
Developer: GaoXiaojian, Project: grt, Lines: 101, Source: TimeSeriesClassificationDataExample.cpp


Note: The MatrixDouble class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code, and do not reproduce this compilation without permission.