本文整理汇总了C++中VectorFloat::size方法的典型用法代码示例。如果您正苦于以下问题:C++ VectorFloat::size方法的具体用法?C++ VectorFloat::size怎么用?C++ VectorFloat::size使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VectorFloat
的用法示例。
在下文中一共展示了VectorFloat::size方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: multiple
VectorFloat MatrixFloat::multiple(const VectorFloat &b) const{
    //Computes the matrix-vector product c = A*b, where A is this matrix.
    //Returns an empty VectorFloat if the size of b does not match the column count.
    const unsigned int numRows = rows;
    const unsigned int numCols = cols;
    if( numCols != (unsigned int)b.size() ){
        warningLog << "multiple(vector b) - The size of b (" << b.size() << ") does not match the number of columns in this matrix (" << numCols << ")" << std::endl;
        return VectorFloat();
    }
    VectorFloat c(numRows);
    const Float *srcB = &b[0];
    Float *dstC = &c[0];
    for(unsigned int row=0; row<numRows; row++){
        //Accumulate the dot product of matrix row `row` with b
        Float sum = 0;
        for(unsigned int col=0; col<numCols; col++){
            sum += dataPtr[row*numCols+col] * srcB[col];
        }
        dstC[row] = sum;
    }
    return c;
}
示例2: filter
VectorFloat DoubleMovingAverageFilter::filter(const VectorFloat &x){
    //Runs x through two cascaded moving-average filters and compensates for the
    //lag introduced by the second pass. Returns an empty vector on error.
    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - The filter has not been initialized!" << std::endl;
        return VectorFloat();
    }
    if( x.getSize() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The size of the input vector (" << x.getSize() << ") does not match that of the number of dimensions of the filter (" << numInputDimensions << ")!" << std::endl;
        return VectorFloat();
    }
    //First pass; bail out (returning the empty result) if it failed
    VectorFloat firstPass = filter1.filter( x );
    if( firstPass.size() == 0 ) return firstPass;
    //Second pass; if it failed, fall back to the first-pass output
    VectorFloat secondPass = filter2.filter( firstPass );
    if( secondPass.size() == 0 ) return firstPass;
    //Lag compensation: y + (y - yy), stored both in the result and processedData
    const UINT numDims = firstPass.getSize();
    for(UINT d=0; d<numDims; d++){
        secondPass[d] = firstPass[d] + (firstPass[d] - secondPass[d]);
        processedData[d] = secondPass[d];
    }
    return secondPass;
}
示例3: predict_
bool BernoulliRBM::predict_(VectorFloat &inputData,VectorFloat &outputData){
    //Propagates inputData through the trained RBM, writing the hidden-unit
    //activations (sigmoid of the weighted visible-unit sum plus bias) into outputData.
    //Returns false if the model is untrained or the input size is wrong.
    if( !trained ){
        errorLog << "predict_(VectorFloat &inputData,VectorFloat &outputData) - Failed to run prediction - the model has not been trained." << std::endl;
        return false;
    }
    if( inputData.size() != numVisibleUnits ){
        errorLog << "predict_(VectorFloat &inputData,VectorFloat &outputData) - Failed to run prediction - the input data size (" << inputData.size() << ")";
        errorLog << " does not match the number of visible units (" << numVisibleUnits << "). " << std::endl;
        return false;
    }
    //Make sure the output vector has one element per hidden unit
    if( outputData.size() != numHiddenUnits ){
        outputData.resize( numHiddenUnits );
    }
    //Scale the data if needed (note: scales inputData in place)
    if( useScaling ){
        for(UINT i=0; i<numVisibleUnits; i++){
            inputData[i] = grt_scale(inputData[i],ranges[i].minValue,ranges[i].maxValue,0.0,1.0);
        }
    }
    //Propagate the data up through the RBM
    for(UINT i=0; i<numHiddenUnits; i++){
        //Bug fix: the accumulator must be reset for every hidden unit; previously it
        //was declared once outside this loop, so sums leaked across hidden units
        Float x = 0.0;
        for(UINT j=0; j<numVisibleUnits; j++) {
            x += weightsMatrix[i][j] * inputData[j];
        }
        outputData[i] = grt_sigmoid( x + hiddenLayerBias[i] );
    }
    return true;
}
示例4: update
//Adds a new sample x to the circular data buffer and, once hopSize new samples
//have accumulated, recomputes the FFT for every input dimension and rebuilds the
//feature vector (magnitude and/or phase, depending on the computeMagnitude /
//computePhase flags). Returns false if the instance is not initialized, the
//input size is wrong, or an FFT computation fails; true otherwise.
bool FFT::update(const VectorFloat &x){
if( !initialized ){
errorLog << "update(const VectorFloat &x) - Not initialized!" << std::endl;
return false;
}
if( x.size() != numInputDimensions ){
errorLog << "update(const VectorFloat &x) - The size of the input (" << x.size() << ") does not match that of the FeatureExtraction (" << numInputDimensions << ")!" << std::endl;
return false;
}
//Add the current input to the data buffers
dataBuffer.push_back( x );
//featureDataReady stays false unless the FFT is recomputed below
featureDataReady = false;
//Only recompute the FFT once every hopSize samples
if( ++hopCounter == hopSize ){
hopCounter = 0;
//Compute the FFT for each dimension
for(UINT j=0; j<numInputDimensions; j++){
//Copy the input data for this dimension into the temp buffer
for(UINT i=0; i<dataBufferSize; i++){
tempBuffer[i] = dataBuffer[i][j];
}
//Compute the FFT
if( !fft[j].computeFFT( tempBuffer ) ){
errorLog << "update(const VectorFloat &x) - Failed to compute FFT!" << std::endl;
return false;
}
}
//Flag that the fft was computed during this update
featureDataReady = true;
//Copy the FFT data to the feature vector; features are laid out per-dimension,
//magnitude first (if enabled) then phase (if enabled)
UINT index = 0;
for(UINT j=0; j<numInputDimensions; j++){
if( computeMagnitude ){
//NOTE(review): only the first half of each FFT output is copied —
//presumably because the spectrum of a real input is symmetric; confirm
Float *mag = fft[j].getMagnitudeDataPtr();
for(UINT i=0; i<fft[j].getFFTSize()/2; i++){
featureVector[index++] = *mag++;
}
}
if( computePhase ){
Float *phase = fft[j].getPhaseDataPtr();
for(UINT i=0; i<fft[j].getFFTSize()/2; i++){
featureVector[index++] = *phase++;
}
}
}
}
return true;
}
示例5: main
int main (int argc, const char * argv[])
{
//Load the example data
ClassificationData data;
if( !data.load("WiiAccShakeData.grt") ){
cout << "ERROR: Failed to load data from file!\n";
return EXIT_FAILURE;
}
//The variables used to initialize the MovementIndex feature extraction
UINT windowSize = 10;
UINT numDimensions = data.getNumDimensions();
//Create a new instance of the MovementIndex feature extraction
MovementIndex movementIndex(windowSize,numDimensions);
//Loop over the accelerometer data, at each time sample (i) compute the features using the new sample and then write the results to a file
for(UINT i=0; i<data.getNumSamples(); i++){
//Compute the features using this new sample
movementIndex.computeFeatures( data[i].getSample() );
//Write the data
cout << "InputVector: ";
for(UINT j=0; j<data.getNumDimensions(); j++){
cout << data[i].getSample()[j] << "\t";
}
//Get the latest feature vector
VectorFloat featureVector = movementIndex.getFeatureVector();
//Write the features
cout << "FeatureVector: ";
for(UINT j=0; j<featureVector.size(); j++){
cout << featureVector[j];
if( j != featureVector.size()-1 ) cout << "\t";
}
cout << endl;
}
//Save the MovementIndex settings to a file
movementIndex.save("MovementIndexSettings.grt");
//You can then load the settings again if you need them
movementIndex.load("MovementIndexSettings.grt");
return EXIT_SUCCESS;
}
示例6: predict_
bool LinearRegression::predict_(VectorFloat &inputVector){
    //Computes regressionData[0] = w0 + w · inputVector for the trained linear model.
    //Scales the input (in place) and rescales the output if scaling was used in training.
    //Returns false if the model is untrained or the input size is wrong.
    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }
    //(Removed a duplicated, unreachable `if( !trained ) return false;` that followed the guard above)
    if( inputVector.size() != numInputDimensions ){
        //Bug fix: the message previously omitted the closing ")" after the dimension count
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << int( inputVector.size() ) << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    //Scale the input into [0 1] if scaling was used during training
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], inputVectorRanges[n].minValue, inputVectorRanges[n].maxValue, 0, 1);
        }
    }
    //Weighted sum: bias plus dot product of the weights with the input
    regressionData[0] = w0;
    for(UINT j=0; j<numInputDimensions; j++){
        regressionData[0] += inputVector[j] * w[j];
    }
    //Map the result back into the target range if scaling was used
    if( useScaling ){
        for(UINT n=0; n<numOutputDimensions; n++){
            regressionData[n] = scale(regressionData[n], 0, 1, targetVectorRanges[n].minValue, targetVectorRanges[n].maxValue);
        }
    }
    return true;
}
示例7: predict_
bool RegressionTree::predict_(VectorFloat &inputVector){
    //Runs inputVector through the trained regression tree, writing the result into
    //regressionData. Scales the input in place if scaling was used during training.
    //Returns false if the model is untrained, the tree is missing, the input size is
    //wrong, or the tree prediction fails.
    if( !trained ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }
    if( tree == NULL ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - Tree pointer is null!" << std::endl;
        return false;
    }
    if( inputVector.size() != numInputDimensions ){
        //Bug fix: the message previously omitted the closing ")" after the dimension count
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    //Scale the input into [0 1] if scaling was used during training
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], inputVectorRanges[n].minValue, inputVectorRanges[n].maxValue, 0, 1);
        }
    }
    //Delegate the actual prediction to the tree
    if( !tree->predict( inputVector, regressionData ) ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - Failed to predict!" << std::endl;
        return false;
    }
    return true;
}
示例8: filter
VectorFloat MovingAverageFilter::filter(const VectorFloat &x){
    //Pushes x into the circular buffer and returns the per-dimension average of
    //the samples currently held. Returns an empty vector on error.
    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - The filter has not been initialized!" << std::endl;
        return VectorFloat();
    }
    if( x.size() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The size of the input vector (" << x.getSize() << ") does not match that of the number of dimensions of the filter (" << numInputDimensions << ")!" << std::endl;
        return VectorFloat();
    }
    //Count samples seen so far, saturating at the window size
    if( ++inputSampleCounter > filterSize ) inputSampleCounter = filterSize;
    //Add the newest sample to the buffer
    dataBuffer.push_back( x );
    //Average each dimension over the samples currently in the buffer
    for(unsigned int dim=0; dim<numInputDimensions; dim++){
        Float sum = 0;
        for(unsigned int s=0; s<inputSampleCounter; s++) {
            sum += dataBuffer[s][dim];
        }
        processedData[dim] = sum / Float(inputSampleCounter);
    }
    return processedData;
}
示例9: computeDerivative
VectorFloat Derivative::computeDerivative(const VectorFloat &x) {
    //Computes the first (and optionally second) finite-difference derivative of x
    //with respect to the previous call, storing the result in processedData.
    if( !initialized ) {
        errorLog << "computeDerivative(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }
    if( x.size() != numInputDimensions ) {
        errorLog << "computeDerivative(const VectorFloat &x) - The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.size() << ")!" << std::endl;
        return VectorFloat();
    }
    //Optionally low-pass filter the input before differentiating
    VectorFloat input = filterData ? filter.filter( x ) : x;
    //First derivative: difference against the previous sample, scaled by delta
    for(UINT d=0; d<numInputDimensions; d++) {
        processedData[d] = (input[d]-yy[d])/delta;
        yy[d] = input[d];
    }
    //Second derivative: difference the first derivative against its previous value
    if( derivativeOrder == SECOND_DERIVATIVE ) {
        for(UINT d=0; d<numInputDimensions; d++) {
            const Float firstDeriv = processedData[d];
            processedData[d] = (firstDeriv-yyy[d])/delta;
            yyy[d] = firstDeriv;
        }
    }
    return processedData;
}
示例10: setWeights
bool BAG::setWeights(const VectorFloat &weights){
    //Replaces the ensemble weights; rejects the update if the size differs
    //from the current weights vector.
    const bool sizeMatches = ( this->weights.size() == weights.size() );
    if( !sizeMatches ) return false;
    this->weights = weights;
    return true;
}
示例11: predict_
bool Softmax::predict_(VectorFloat &inputVector){
    //Predicts the class of inputVector using the trained one-vs-all softmax models.
    //Fills classLikelihoods/classDistances and sets predictedClassLabel and
    //maxLikelihood. Returns false if the model is untrained or the input size is wrong.
    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }
    //(Removed a duplicated, redundant `if( !trained ) return false;` that followed the guard above)
    predictedClassLabel = 0;
    maxLikelihood = -10000;
    if( inputVector.size() != numInputDimensions ){
        //Bug fix: the message previously omitted the closing ")" after the dimension count
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    //Scale the input into [0 1] if scaling was used during training
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }
    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
    //Loop over each class and compute the likelihood of the input data coming from class k. Pick the class with the highest likelihood
    Float sum = 0;
    Float bestEstimate = -grt_numeric_limits< Float >::max();
    UINT bestIndex = 0;
    for(UINT k=0; k<numClasses; k++){
        Float estimate = models[k].compute( inputVector );
        if( estimate > bestEstimate ){
            bestEstimate = estimate;
            bestIndex = k;
        }
        classDistances[k] = estimate;
        classLikelihoods[k] = estimate;
        sum += estimate;
    }
    //Normalize the likelihoods; a near-zero sum means no model fired positively
    if( sum > 1.0e-5 ){
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] /= sum;
        }
    }else{
        //If the sum is less than the value above then none of the models found a positive class
        maxLikelihood = bestEstimate;
        predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
        return true;
    }
    maxLikelihood = classLikelihoods[bestIndex];
    predictedClassLabel = classLabels[bestIndex];
    return true;
}
示例12: predict_
bool MinDist::predict_(VectorFloat &inputVector){
    //Predicts the class of inputVector as the class whose MinDist model produces the
    //smallest distance. Fills classDistances/classLikelihoods and sets
    //predictedClassLabel and maxLikelihood. Returns false on error.
    predictedClassLabel = 0;
    maxLikelihood = 0;
    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - MinDist Model Not Trained!" << std::endl;
        return false;
    }
    if( inputVector.size() != numInputDimensions ){
        //Bug fix: the message previously omitted the closing ")" after the dimension count
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    //Scale the input into [0 1] if scaling was used during training
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = grt_scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0.0, 1.0);
        }
    }
    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
    Float sum = 0;
    Float minDist = grt_numeric_limits< Float >::max();
    for(UINT k=0; k<numClasses; k++){
        //Compute the distance for class k
        classDistances[k] = models[k].predict( inputVector );
        //Keep track of the best value (predictedClassLabel holds the model index until remapped below)
        if( classDistances[k] < minDist ){
            minDist = classDistances[k];
            predictedClassLabel = k;
        }
        //Set the class likelihoods as 1.0 / dist[k], the small number is to stop divide by zero
        classLikelihoods[k] = 1.0 / (classDistances[k] + 0.0001);
        sum += classLikelihoods[k];
    }
    //Normalize the classlikelihoods (both branches of the original code assigned the
    //same maxLikelihood, so the redundant if/else has been collapsed)
    if( sum != 0 ){
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] /= sum;
        }
    }
    maxLikelihood = classLikelihoods[predictedClassLabel];
    if( useNullRejection ){
        //Check to see if the best result is greater than the models threshold
        if( minDist <= models[predictedClassLabel].getRejectionThreshold() ) predictedClassLabel = models[predictedClassLabel].getClassLabel();
        else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
    }else predictedClassLabel = models[predictedClassLabel].getClassLabel();
    return true;
}
示例13: predict_
bool MovementDetector::predict_( VectorFloat &input ){
    //Updates the running movement index with the new sample and drives the
    //movement-detection state machine, setting movementDetected / noMovementDetected.
    //Returns false if the model is untrained or the input size is wrong.
    movementDetected = false;
    noMovementDetected = false;
    if( !trained ){
        //Bug fix: the message previously said "AdaBoost Model", a copy-paste error
        errorLog << "predict_(VectorFloat &input) - MovementDetector Model Not Trained!" << std::endl;
        return false;
    }
    if( input.size() != numInputDimensions ){
        //Bug fix: the message previously omitted the closing ")" after the dimension count
        errorLog << "predict_(VectorFloat &input) - The size of the input vector (" << input.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    //Compute the movement index, unless we are in the first sample
    Float x = 0;
    if( !firstSample ){
        for(UINT n=0; n<numInputDimensions; n++){
            x += SQR( input[n] - lastSample[n] );
        }
        //Exponentially-weighted update with the euclidean distance to the last sample
        movementIndex = (movementIndex*gamma) + sqrt( x );
    }
    //Flag that this is not the first sample and store the input for the next prediction
    firstSample = false;
    lastSample = input;
    switch( state ){
        case SEARCHING_FOR_MOVEMENT:
            if( movementIndex >= upperThreshold ){
                movementDetected = true;
                state = SEARCHING_FOR_NO_MOVEMENT;
            }
            break;
        case SEARCHING_FOR_NO_MOVEMENT:
            if( movementIndex < lowerThreshold ){
                noMovementDetected = true;
                state = SEARCH_TIMEOUT;
                searchTimer.start();
            }
            break;
        case SEARCH_TIMEOUT:
            // searchTimeout is cast because of a C4018 warning on visual (signed/unsigned incompatibility)
            if( searchTimer.getMilliSeconds() >= (signed long)searchTimeout ){
                //Bug fix: the original re-assigned SEARCH_TIMEOUT to itself, leaving the
                //detector stuck in the timeout state forever; resume searching instead
                state = SEARCHING_FOR_MOVEMENT;
                searchTimer.stop();
            }
            break;
    }
    return true;
}
示例14: filter
Float SavitzkyGolayFilter::filter(const Float x){
    //Scalar convenience overload: wraps x in a 1-dimensional vector and reuses the
    //vector filter. Returns 0 if the filter is uninitialized or filtering fails.
    if( !initialized ){
        errorLog << "filter(Float x) - The filter has not been initialized!" << std::endl;
        return 0;
    }
    const VectorFloat result = filter(VectorFloat(1,x));
    return result.size() > 0 ? result[0] : 0;
}
示例15: computeFeatures
bool TimeseriesBuffer::computeFeatures(const VectorFloat &inputVector){
    //Validates inputVector and feeds it into the timeseries buffer via update().
    //Returns false if the instance is uninitialized, the input size is wrong,
    //or the buffer update fails.
    if( !initialized ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - Not initialized!" << std::endl;
        return false;
    }
    if( inputVector.size() != numInputDimensions ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.size() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return false;
    }
    //Bug fix: the return value of update() was previously discarded, so a failed
    //buffer update was reported as success; propagate it to the caller instead
    if( !update( inputVector ) ) return false;
    return true;
}