

C++ VectorFloat::getSize Method Code Examples

This article collects typical usage examples of the C++ VectorFloat::getSize method. If you have been wondering what VectorFloat::getSize does in practice, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the VectorFloat class that the method belongs to.


The following presents 15 code examples of the VectorFloat::getSize method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better C++ code examples.
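Before the examples, a minimal sketch of the method itself may help: VectorFloat is GRT's Vector&lt;Float&gt; type, and getSize() returns the number of elements. The snippet below is illustrative only and assumes nothing beyond the standard GRT header.

#include <GRT/GRT.h>
#include <cstdlib>
#include <iostream>
using namespace GRT;

int main(){
    VectorFloat v(5);            //A vector holding 5 float elements
    UINT n = v.getSize();        //getSize() returns the number of elements, 5 here
    std::cout << "size: " << n << std::endl;
    return EXIT_SUCCESS;
}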

Example 1: filter

VectorFloat DoubleMovingAverageFilter::filter(const VectorFloat &x){
    
    //If the filter has not been initialised then return 0, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - The filter has not been initialized!" << std::endl;
        return VectorFloat();
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The size of the input vector (" << x.getSize() << ") does not match that of the number of dimensions of the filter (" << numInputDimensions << ")!" << std::endl;
        return VectorFloat();
    }
    
    //Perform the first filter
    VectorFloat y = filter1.filter( x );
    
    if( y.size() == 0 ) return y;
    
    //Perform the second filter
    VectorFloat yy = filter2.filter( y );
    
    if( yy.size() == 0 ) return y;
    
    //Account for the filter lag
    const UINT N = y.getSize();
    for(UINT i=0; i<N; i++){
        yy[i] = y[i] + (y[i] - yy[i]); 
        processedData[i] = yy[i];
    }
    
    return yy;
}
Developer: sboettcher, Project: grt, Lines: 32, Source: DoubleMovingAverageFilter.cpp
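A minimal usage sketch for the filter above is given below. It is not taken from the project sources: the constructor arguments (filter size, number of input dimensions) are assumptions about the DoubleMovingAverageFilter API, and the input signal is synthetic.

#include <GRT/GRT.h>
#include <cmath>
#include <cstdlib>
#include <iostream>
using namespace GRT;

int main(){
    //Assumed constructor: a window of 5 samples over a 1-dimensional signal
    DoubleMovingAverageFilter filter( 5, 1 );

    VectorFloat x(1);
    for(UINT i=0; i<100; i++){
        x[0] = sin( i * 0.1 );                        //Synthetic input sample
        VectorFloat y = filter.filter( x );           //Calls the filter method shown above
        if( y.getSize() == 0 ) return EXIT_FAILURE;   //An empty result means the filter was not initialized
        std::cout << x[0] << " -> " << y[0] << std::endl;
    }
    return EXIT_SUCCESS;
}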

Example 2: predict_

bool Softmax::predict_(VectorFloat &inputVector){
    
    if( !trained ){
        errorLog << __GRT_LOG__ << " Model Not Trained!" << std::endl;
        return false;
    }
    
    predictedClassLabel = 0;
    maxLikelihood = -10000;
    
    if( inputVector.getSize() != numInputDimensions ){
        errorLog << __GRT_LOG__ << " The size of the input vector (" << inputVector.getSize() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }
    
    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
    
    //Loop over each class and compute the likelihood of the input data coming from class k. Pick the class with the highest likelihood
    Float sum = 0;
    Float bestEstimate = -grt_numeric_limits< Float >::max();
    UINT bestIndex = 0;
    for(UINT k=0; k<numClasses; k++){
        Float estimate = models[k].compute( inputVector );
        
        if( estimate > bestEstimate ){
            bestEstimate = estimate;
            bestIndex = k;
        }
        
        classDistances[k] = estimate;
        classLikelihoods[k] = estimate;
        sum += estimate;
    }
    
    if( sum > 1.0e-5 ){
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] /= sum;
        }
    }else{
        //If the sum is less than the value above then none of the models found a positive class
        maxLikelihood = bestEstimate;
        predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
        return true;
    }
    maxLikelihood = classLikelihoods[bestIndex];
    predictedClassLabel = classLabels[bestIndex];
    
    return true;
}
Developer: nickgillian, Project: grt, Lines: 58, Source: Softmax.cpp
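For context, here is a hedged sketch of how predict_ is normally reached through the public classifier interface (train, then predict). The dataset is synthetic and only serves to make the snippet self-contained; the call sequence is the generic GRT classifier API, assumed to apply to Softmax as well.

#include <GRT/GRT.h>
#include <cstdlib>
#include <iostream>
using namespace GRT;

int main(){
    //Build a tiny two-class dataset with 2 input dimensions (synthetic, for illustration only)
    ClassificationData trainingData;
    trainingData.setNumDimensions( 2 );
    VectorFloat sample(2);
    for(UINT i=0; i<50; i++){
        sample[0] = 0.1 + (rand()/(Float)RAND_MAX)*0.1; sample[1] = 0.2 + (rand()/(Float)RAND_MAX)*0.1;
        trainingData.addSample( 1, sample );
        sample[0] = 0.8 + (rand()/(Float)RAND_MAX)*0.1; sample[1] = 0.9 + (rand()/(Float)RAND_MAX)*0.1;
        trainingData.addSample( 2, sample );
    }

    Softmax softmax;
    if( !softmax.train( trainingData ) ) return EXIT_FAILURE;

    //predict(...) delegates to the predict_ method shown above
    sample[0] = 0.15; sample[1] = 0.25;
    if( softmax.predict( sample ) ){
        std::cout << "label: " << softmax.getPredictedClassLabel()
                  << " likelihood: " << softmax.getMaximumLikelihood() << std::endl;
    }
    return EXIT_SUCCESS;
}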

Example 3: addSample

bool ClassificationData::addSample(const UINT classLabel,const VectorFloat &sample){
    
	if( sample.getSize() != numDimensions ){
        if( totalNumSamples == 0 ){
            warningLog << "addSample(const UINT classLabel, VectorFloat &sample) - the size of the new sample (" << sample.getSize() << ") does not match the number of dimensions of the dataset (" << numDimensions << "), setting dimensionality to: " << numDimensions << std::endl;
            numDimensions = sample.getSize();
        }else{
            errorLog << "addSample(const UINT classLabel, VectorFloat &sample) - the size of the new sample (" << sample.getSize() << ") does not match the number of dimensions of the dataset (" << numDimensions << ")" << std::endl;
            return false;
        }
    }

    //The class label must be greater than zero (as zero is used for the null rejection class label)
    if( classLabel == GRT_DEFAULT_NULL_CLASS_LABEL && !allowNullGestureClass ){
        errorLog << "addSample(const UINT classLabel, VectorFloat &sample) - the class label can not be 0!" << std::endl;
        return false;
    }

    //The dataset has changed so flag that any previous cross validation setup will now not work
    crossValidationSetup = false;
    crossValidationIndexs.clear();

	ClassificationSample newSample(classLabel,sample);
	data.push_back( newSample );
	totalNumSamples++;

	if( classTracker.getSize() == 0 ){
		ClassTracker tracker(classLabel,1);
		classTracker.push_back(tracker);
	}else{
		bool labelFound = false;
		for(UINT i=0; i<classTracker.getSize(); i++){
			if( classLabel == classTracker[i].classLabel ){
				classTracker[i].counter++;
				labelFound = true;
				break;
			}
		}
		if( !labelFound ){
			ClassTracker tracker(classLabel,1);
			classTracker.push_back(tracker);
		}
	}

    //Update the class labels
    sortClassLabels();

	return true;
}
Developer: sgrignard, Project: grt, Lines: 49, Source: ClassificationData.cpp
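A short self-contained sketch of addSample follows. It only uses addSample plus setNumDimensions and getNumSamples, and demonstrates the dimensionality check driven by VectorFloat::getSize().

#include <GRT/GRT.h>
#include <cstdlib>
#include <iostream>
using namespace GRT;

int main(){
    ClassificationData data;
    data.setNumDimensions( 3 );

    //A sample whose getSize() matches the dataset dimensionality is accepted
    VectorFloat sample(3);
    sample[0] = 1.0; sample[1] = 2.0; sample[2] = 3.0;
    bool accepted = data.addSample( 1, sample );

    //Once the dataset is non-empty, a wrongly sized sample is rejected
    VectorFloat badSample(2);
    bool rejected = !data.addSample( 1, badSample );

    std::cout << "accepted: " << accepted << " rejected: " << rejected
              << " total samples: " << data.getNumSamples() << std::endl;
    return EXIT_SUCCESS;
}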

Example 4: update

VectorFloat TimeseriesBuffer::update(const VectorFloat &x){
    
    if( !initialized ){
        errorLog << "update(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "update(const VectorFloat &x)- The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.getSize() << ")!" << std::endl;
        return VectorFloat();
    }
    
    //Add the new data to the buffer
    dataBuffer.push_back( x );
    
    //Copy the buffered data into the feature vector, dimension by dimension
    UINT colIndex = 0;
    for(UINT j=0; j<numInputDimensions; j++){
        for(UINT i=0; i<dataBuffer.getSize(); i++){
            featureVector[ colIndex++ ] = dataBuffer[i][j];
        }
    }
    
    //Flag that the feature data has been computed
    if( dataBuffer.getBufferFilled() ){
        featureDataReady = true;
    }else featureDataReady = false;
    
    return featureVector;
}
Developer: nickgillian, Project: grt, Lines: 30, Source: TimeseriesBuffer.cpp
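The sketch below shows one plausible way to drive the update method above; the constructor arguments (buffer size, number of dimensions) are assumed rather than verified against the TimeseriesBuffer headers.

#include <GRT/GRT.h>
#include <cstdlib>
#include <iostream>
using namespace GRT;

int main(){
    //Assumed constructor: buffer the last 10 samples of a 2-dimensional signal
    TimeseriesBuffer buffer( 10, 2 );

    VectorFloat x(2);
    for(UINT i=0; i<20; i++){
        x[0] = i * 0.1; x[1] = i * 0.2;
        VectorFloat features = buffer.update( x ); //Concatenates the buffered samples, dimension by dimension
        std::cout << "feature vector size: " << features.getSize() << std::endl;
    }
    return EXIT_SUCCESS;
}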

Example 5: quantize

UINT RBMQuantizer::quantize(const VectorFloat &inputVector){
    
    if( !trained ){
        errorLog << "quantize(const VectorFloat &inputVector) - The quantizer model has not been trained!" << std::endl;
        return 0;
    }
    
    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "quantize(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return 0;
    }
    
    if( !rbm.predict( inputVector ) ){
        errorLog << "quantize(const VectorFloat &inputVector) - Failed to quantize input!" << std::endl;
        return 0;
    }
    
    quantizationDistances = rbm.getOutputData();
    
    //Search for the neuron with the maximum output
    UINT quantizedValue = 0;
    Float maxValue = 0;
    for(UINT k=0; k<numClusters; k++){
        if( quantizationDistances[k] > maxValue ){
            maxValue = quantizationDistances[k];
            quantizedValue = k;
        }
    }
    
    featureVector[0] = quantizedValue;
    featureDataReady = true;
    
    return quantizedValue;
}
Developer: sgrignard, Project: grt, Lines: 34, Source: RBMQuantizer.cpp

Example 6: predict_

bool MultidimensionalRegression::predict_(VectorFloat &inputVector){
    
    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }
    
    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << inputVector.getSize() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = grt_scale(inputVector[n], inputVectorRanges[n].minValue, inputVectorRanges[n].maxValue, 0.0, 1.0);
        }
    }
    
    for(UINT n=0; n<numOutputDimensions; n++){
        if( !regressionModules[ n ]->predict( inputVector ) ){
            errorLog << "predict_(VectorFloat &inputVector) - Failed to predict for regression module " << n << std::endl;
        }
        regressionData[ n ] = regressionModules[ n ]->getRegressionData()[0];
    }
    
    if( useScaling ){
        for(UINT n=0; n<numOutputDimensions; n++){
            regressionData[n] = grt_scale(regressionData[n], 0.0, 1.0, targetVectorRanges[n].minValue, targetVectorRanges[n].maxValue);
        }
    }
    
    return true;
}
Developer: BryanBo-Cao, Project: grt, Lines: 35, Source: MultidimensionalRegression.cpp

Example 7: quantize

UINT KMeansQuantizer::quantize(const VectorFloat &inputVector){
	
    if( !trained ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - The quantizer has not been trained!" << std::endl;
        return 0;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return 0;
    }

	//Find the minimum cluster
    Float minDist = grt_numeric_limits< Float >::max();
    UINT quantizedValue = 0;
    
    for(UINT k=0; k<numClusters; k++){
        //Compute the squared Euclidean distance
        quantizationDistances[k] = 0;
        for(UINT i=0; i<numInputDimensions; i++){
            quantizationDistances[k] += grt_sqr( inputVector[i]-clusters[k][i] );
        }
        if( quantizationDistances[k] < minDist ){
            minDist = quantizationDistances[k];
            quantizedValue = k;
        }
    }
    
    featureVector[0] = quantizedValue;
    featureDataReady = true;
	
	return quantizedValue;
}
Developer: pscholl, Project: grt, Lines: 33, Source: KMeansQuantizer.cpp
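quantize only works after the quantizer has been trained. The sketch below assumes the constructor takes the number of clusters and that the quantizer can be trained directly on a MatrixFloat of samples; both are assumptions about the KMeansQuantizer API rather than verified signatures.

#include <GRT/GRT.h>
#include <cstdlib>
#include <iostream>
using namespace GRT;

int main(){
    //Synthetic 1-dimensional training data with two obvious groups
    MatrixFloat trainingData( 100, 1 );
    for(UINT i=0; i<100; i++){
        trainingData[i][0] = (i < 50 ? 0.2 : 0.8) + (rand()/(Float)RAND_MAX)*0.05;
    }

    KMeansQuantizer quantizer( 2 );                              //Assumed constructor argument: number of clusters
    if( !quantizer.train_( trainingData ) ) return EXIT_FAILURE; //Assumed MatrixFloat training overload

    VectorFloat x(1);
    x[0] = 0.21;
    UINT quantizedValue = quantizer.quantize( x );               //Calls the method shown above
    std::cout << "quantized value: " << quantizedValue << std::endl;
    return EXIT_SUCCESS;
}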

Example 8: predict_

bool KMeans::predict_(VectorFloat &inputVector){
    
    if( !trained ){
        return false;
	}
	
	if( inputVector.getSize() != numInputDimensions ){
		return false;
	}
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = grt_scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0.0, 1.0);
        }
    }
	
    const Float sigma = 1.0;
    const Float gamma = 1.0 / (2.0*grt_sqr(sigma));
    Float sum = 0;
    Float dist = 0;
	UINT minIndex = 0;
	bestDistance = grt_numeric_limits< Float >::max();
	predictedClusterLabel = 0;
	maxLikelihood = 0;
	if( clusterLikelihoods.getSize() != numClusters )
        clusterLikelihoods.resize( numClusters );
    if( clusterDistances.getSize() != numClusters )
        clusterDistances.resize( numClusters );
	
	for(UINT i=0; i<numClusters; i++){
		
        //We don't need to compute the sqrt as it works without it and is faster
		dist = 0;
		for(UINT j=0; j<numInputDimensions; j++){
			dist += grt_sqr( inputVector[j]-clusters[i][j] );
		}
    
        clusterDistances[i] = dist;
        clusterLikelihoods[i] = exp( - grt_sqr(gamma * dist) ); //1.0/(1.0+dist); //This will give us a value close to 1 for a dist of 0, and a value closer to 0 when the dist is large
        
		sum += clusterLikelihoods[i];
        
		if( dist < bestDistance ){
			bestDistance = dist;
			minIndex = i;
		}
	}
	
	//Normalize the likelihood
	for(UINT i=0; i<numClusters; i++){
		clusterLikelihoods[i] /= sum;
	}
	
	predictedClusterLabel = clusterLabels[ minIndex ];
	maxLikelihood = clusterLikelihoods[ minIndex ];
    
    return true;
}
Developer: BryanBo-Cao, Project: grt, Lines: 58, Source: KMeans.cpp
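The predict_ method above is normally used after clustering some data. The sketch below assumes KMeans takes the number of clusters in its constructor and can be trained on a MatrixFloat, which matches the usual GRT clusterer interface but should be treated as an assumption.

#include <GRT/GRT.h>
#include <cstdlib>
#include <iostream>
using namespace GRT;

int main(){
    //Synthetic 2-dimensional data forming two loose groups
    MatrixFloat data( 100, 2 );
    for(UINT i=0; i<100; i++){
        data[i][0] = (i < 50 ? 0.2 : 0.8) + (rand()/(Float)RAND_MAX)*0.05;
        data[i][1] = (i < 50 ? 0.2 : 0.8) + (rand()/(Float)RAND_MAX)*0.05;
    }

    KMeans kmeans( 2 );                               //Assumed constructor argument: number of clusters
    if( !kmeans.train_( data ) ) return EXIT_FAILURE;

    VectorFloat sample(2);
    sample[0] = 0.21; sample[1] = 0.19;
    if( kmeans.predict_( sample ) ){                  //Calls the method shown above
        std::cout << "cluster label: " << kmeans.getPredictedClusterLabel() << std::endl;
    }
    return EXIT_SUCCESS;
}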

Example 9: VectorFloatTest

// Tests the VectorFloat type
TEST(DynamicType, VectorFloatTest) {
  DynamicType type;
  VectorFloat a(3);
  a[0] = 1.1; a[1] = 1.2; a[2] = 1.3;
  EXPECT_TRUE( type.set( a ) );
  VectorFloat b = type.get< VectorFloat >();
  EXPECT_EQ( a.getSize(), b.getSize() );
  for(unsigned int i=0; i<a.getSize(); i++){
    EXPECT_EQ( a[i], b[i] );
  }
}
Developer: sgrignard, Project: grt, Lines: 12, Source: DynamicTypeTest.cpp

Example 10: predict_

bool GaussianMixtureModels::predict_(VectorFloat &x){
    
    if( !trained ){
        return false;
    }
    
    if( x.getSize() != numInputDimensions ){
        return false;
    }
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            x[n] = grt_scale(x[n], ranges[n].minValue, ranges[n].maxValue, 0.0, 1.0);
        }
    }
    
    Float sum = 0;
    Float dist = 0;
    UINT minIndex = 0;
    bestDistance = 0;
    predictedClusterLabel = 0;
    maxLikelihood = 0;
    if( clusterLikelihoods.size() != numClusters )
        clusterLikelihoods.resize( numClusters );
    if( clusterDistances.size() != numClusters )
        clusterDistances.resize( numClusters );
    
    for(UINT i=0; i<numClusters; i++){
        
        dist = gauss(x,i,det,mu,invSigma);
        
        clusterDistances[i] = dist;
        clusterLikelihoods[i] = dist;
        
        sum += clusterLikelihoods[i];
        
        if( dist > bestDistance ){
            bestDistance = dist;
            minIndex = i;
        }
    }
    
    //Normalize the likelihood
    for(UINT i=0; i<numClusters; i++){
        clusterLikelihoods[i] /= sum;
    }
    
    predictedClusterLabel = clusterLabels[ minIndex ];
    maxLikelihood = clusterLikelihoods[ minIndex ];
    
    return true;
}
Developer: BryanBo-Cao, Project: grt, Lines: 52, Source: GaussianMixtureModels.cpp
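A similar hedged sketch for the Gaussian mixture model clusterer: setNumClusters, train_, predict_, and the getters used below are the generic GRT clusterer interface, assumed here to apply to GaussianMixtureModels as well.

#include <GRT/GRT.h>
#include <cstdlib>
#include <iostream>
using namespace GRT;

int main(){
    //Synthetic 2-dimensional data with two loose clusters
    MatrixFloat data( 200, 2 );
    for(UINT i=0; i<200; i++){
        data[i][0] = (i < 100 ? 0.2 : 0.8) + (rand()/(Float)RAND_MAX)*0.05;
        data[i][1] = (i < 100 ? 0.3 : 0.7) + (rand()/(Float)RAND_MAX)*0.05;
    }

    GaussianMixtureModels gmm;
    gmm.setNumClusters( 2 );
    if( !gmm.train_( data ) ) return EXIT_FAILURE;

    VectorFloat x(2);
    x[0] = 0.22; x[1] = 0.31;
    if( gmm.predict_( x ) ){                          //Calls the method shown above
        std::cout << "cluster: " << gmm.getPredictedClusterLabel()
                  << " likelihood: " << gmm.getMaximumLikelihood() << std::endl;
    }
    return EXIT_SUCCESS;
}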

Example 11: update

VectorFloat ZeroCrossingCounter::update(const VectorFloat &x){
    
    if( !initialized ){
        errorLog << "update(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "update(const VectorFloat &x)- The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.getSize() << ")!" << std::endl;
        return VectorFloat();
    }
    
    //Clear the feature vector
    std::fill(featureVector.begin(),featureVector.end(),0);
    
    //Update the derivative data
    derivative.computeDerivative( x );
    
    //Dead zone the derivative data
    deadZone.filter( derivative.getProcessedData() );
    
    //Add the deadzone data to the buffer
    dataBuffer.push_back( deadZone.getProcessedData() );
    
    //Search the buffer for the zero crossing features
    for(UINT j=0; j<numInputDimensions; j++){
        UINT colIndex = (featureMode == INDEPENDANT_FEATURE_MODE ? (TOTAL_NUM_ZERO_CROSSING_FEATURES*j) : 0);
        for(UINT i=1; i<dataBuffer.getSize(); i++){
            //Search for a zero crossing
            if( (dataBuffer[i][j] > 0 && dataBuffer[i-1][j] <= 0) || (dataBuffer[i][j] < 0 && dataBuffer[i-1][j] >= 0) ){
                //Update the zero crossing count
                featureVector[ NUM_ZERO_CROSSINGS_COUNTED + colIndex ]++;
                
                //Update the magnitude, search the last 5 values around the zero crossing to make sure we get the maxima of the peak
                Float maxValue = 0;
                UINT searchSize = i > 5 ? 5 : i;
                for(UINT n=0; n<searchSize; n++){
                    Float value = fabs( dataBuffer[ i-n ][j] );
                    if( value > maxValue ) maxValue = value;
                }
                featureVector[ ZERO_CROSSING_MAGNITUDE + colIndex ] += maxValue;
            }
        }
    }
    
    //Flag that the feature data has been computed
    featureDataReady = true;

    return featureVector;
}
Developer: BryanBo-Cao, Project: grt, Lines: 50, Source: ZeroCrossingCounter.cpp

Example 12: predict_

bool ContinuousHiddenMarkovModel::predict_(VectorFloat &x){
    
    if( !trained ){
        errorLog << "predict_(VectorFloat &x) - The model is not trained!" << std::endl;
        return false;
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &x) - The input vector size (" << x.getSize() << ") does not match the number of input dimensions (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    
    //Add the new sample to the circular buffer
    observationSequence.push_back( x );
    
    //Convert the circular buffer to MatrixFloat
    for(unsigned int i=0; i<observationSequence.getSize(); i++){
        for(unsigned int j=0; j<numInputDimensions; j++){
            obsSequence[i][j] = observationSequence[i][j];
        }
    }
    
    return predict_( obsSequence );
}
Developer: sgrignard, Project: grt, Lines: 24, Source: ContinuousHiddenMarkovModel.cpp

Example 13: process

bool Derivative::process(const VectorFloat &inputVector) {

    if( !initialized ) {
        errorLog << "process(const VectorFloat &inputVector) - Not initialized!" << std::endl;
        return false;
    }

    if( inputVector.getSize() != numInputDimensions ) {
        errorLog << "process(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.size() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return false;
    }

    computeDerivative( inputVector );

    if( processedData.size() == numOutputDimensions ) return true;
    return false;
}
Developer: codeflakes0, Project: grt, Lines: 17, Source: Derivative.cpp

Example 14: process

bool MovingAverageFilter::process(const VectorFloat &inputVector){
    
    if( !initialized ){
        errorLog << "process(const VectorFloat &inputVector) - The filter has not been initialized!" << std::endl;
        return false;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "process(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return false;
    }
    
    filter( inputVector );
    
    if( processedData.getSize() == numOutputDimensions ) return true;

    return false;
}
Developer: CV-IP, Project: grt, Lines: 18, Source: MovingAverageFilter.cpp
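A minimal sketch of driving process above. The constructor arguments (window size, number of dimensions) are assumptions about the MovingAverageFilter API; getProcessedData is the standard pre-processing accessor for the filtered result.

#include <GRT/GRT.h>
#include <cmath>
#include <cstdlib>
#include <iostream>
using namespace GRT;

int main(){
    //Assumed constructor: window of 5 samples over a 1-dimensional signal
    MovingAverageFilter filter( 5, 1 );

    VectorFloat x(1);
    for(UINT i=0; i<20; i++){
        x[0] = sin( i * 0.3 );                          //Synthetic input sample
        if( !filter.process( x ) ) return EXIT_FAILURE; //Calls the method shown above
        VectorFloat y = filter.getProcessedData();
        std::cout << x[0] << " -> " << y[0] << std::endl;
    }
    return EXIT_SUCCESS;
}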

Example 15: main

int main (int argc, const char * argv[])
{
    //Create a new instance of an FFT with a window size of 256 and a hop size of 1
    FFT fft(256,1);
    
    //Create some variables to help generate the signal data
    const UINT numSeconds = 10;                         //The number of seconds of data we want to generate
    double t = 0;                                       //This keeps track of the time
    double tStep = 1.0/1000.0;                          //This is how much the time will be updated at each iteration in the for loop
    double freq = 100;                                  //Stores the frequency
    
    //Generate the signal and filter the data
    for(UINT i=0; i<numSeconds*1000; i++){
        
        //Generate the signal
        double signal = sin( t * TWO_PI*freq );
        
        //Compute the FFT of the input signal (and the previous buffer data)
        fft.update( signal );
        
        //Update the t
        t += tStep;
    }
    
    //Take the output of the last FFT
    Vector<FastFourierTransform> fftResults = fft.getFFTResults();
    
    //The input signal is a 1 dimensional signal, so get the magnitude data for dimension 1 (which is at element 0)
    VectorFloat magnitudeData = fftResults[0].getMagnitudeData();
    
    //Print the magnitude data to standard output
    cout << "Magnitude Data:\n";
    for(UINT i=0; i<magnitudeData.getSize(); i++){
        cout << magnitudeData[i] << endl;
    }
    
    return EXIT_SUCCESS;
    
}
Developer: BryanBo-Cao, Project: grt, Lines: 39, Source: FFTExample.cpp


Note: The VectorFloat::getSize method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not republish without permission.