本文整理汇总了C++中MatrixDouble::getNumCols方法的典型用法代码示例。如果您正苦于以下问题:C++ MatrixDouble::getNumCols方法的具体用法?C++ MatrixDouble::getNumCols怎么用?C++ MatrixDouble::getNumCols使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MatrixDouble
的用法示例。
在下文中一共展示了MatrixDouble::getNumCols方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: subtract
// Element-wise in-place subtraction: this -= b.
// Returns false (and logs an error) if the dimensions of b do not match this matrix.
bool MatrixDouble::subtract(const MatrixDouble &b){
//Both matrices must have identical dimensions for an element-wise subtraction
if( b.getNumRows() != rows ){
errorLog << "subtract(const MatrixDouble &b) - Failed to subtract matrix! The rows do not match!" << endl;
errorLog << " rows: " << rows << " b rows: " << b.getNumRows() << endl;
return false;
}
if( b.getNumCols() != cols ){
errorLog << "subtract(const MatrixDouble &b) - Failed to subtract matrix! The columns do not match!" << endl;
errorLog << " cols: " << cols << " b cols: " << b.getNumCols() << endl;
return false;
}
unsigned int i,j;
//Using direct pointers really helps speed up the computation time
double **pb = b.getDataPointer();
for(i=0; i<rows; i++){
for(j=0; j<cols; j++){
dataPtr[i*cols+j] -= pb[i][j];
}
}
return true;
}
示例2: znormData
// Z-normalizes each column of data into normData: subtracts the column mean and,
// unless the constrained branch triggers, divides by the column's sample standard deviation.
// normData is resized to match data if its dimensions differ.
void DTW::znormData(MatrixDouble &data,MatrixDouble &normData){
const UINT R = data.getNumRows();
const UINT C = data.getNumCols();
if( normData.getNumRows() != R || normData.getNumCols() != C ){
normData.resize(R,C);
}
//Normalize one column (dimension) at a time
for(UINT j=0; j<C; j++){
double mean = 0.0;
double stdDev = 0.0;
//Calculate Mean
for(UINT i=0; i<R; i++) mean += data[i][j];
mean /= double(R);
//Calculate Std Dev
for(UINT i=0; i<R; i++)
stdDev += SQR(data[i][j]-mean);
//Sample standard deviation (divides by R-1); NOTE(review): R==1 would divide by zero — presumably timeseries always have >1 rows, confirm upstream
stdDev = sqrt( stdDev / (R - 1.0) );
//If the deviation is tiny and constrainZNorm is enabled, skip the division to avoid amplifying noise
if(constrainZNorm && stdDev < 0.01){
//Normalize the data to 0 mean
for(UINT i=0; i<R; i++)
normData[i][j] = (data[i][j] - mean);
}else{
//Normalize the data to 0 mean and standard deviation of 1
for(UINT i=0; i<R; i++)
normData[i][j] = (data[i][j] - mean) / stdDev;
}
}
}
示例3: predict_
// Propagates one row of inputData up through the trained RBM, writing the hidden-unit
// activation probabilities into the same row of outputData.
// Preconditions: model trained; inputData has numVisibleUnits columns; outputData has
// numHiddenUnits columns. Returns false (with an error log) if any precondition fails.
bool BernoulliRBM::predict_(MatrixDouble &inputData,MatrixDouble &outputData,const UINT rowIndex){
if( !trained ){
errorLog << "predict_(MatrixDouble &inputData,MatrixDouble &outputData,const UINT rowIndex) - Failed to run prediction - the model has not been trained." << endl;
return false;
}
if( inputData.getNumCols() != numVisibleUnits ){
errorLog << "predict_(MatrixDouble &inputData,MatrixDouble &outputData,const UINT rowIndex) -";
errorLog << " Failed to run prediction - the number of columns in the input matrix (" << inputData.getNumCols() << ")";
errorLog << " does not match the number of visible units (" << numVisibleUnits << ")." << endl;
return false;
}
if( outputData.getNumCols() != numHiddenUnits ){
errorLog << "predict_(MatrixDouble &inputData,MatrixDouble &outputData,const UINT rowIndex) -";
errorLog << " Failed to run prediction - the number of columns in the output matrix (" << outputData.getNumCols() << ")";
errorLog << " does not match the number of hidden units (" << numHiddenUnits << ")." << endl;
return false;
}
//Propagate the data up through the RBM
double x = 0.0;
for(UINT j=0; j<numHiddenUnits; j++){
//Weighted sum of the visible units feeding hidden unit j
x = 0;
for(UINT i=0; i<numVisibleUnits; i++) {
x += weightsMatrix[j][i] * inputData[rowIndex][i];
}
outputData[rowIndex][j] = sigmoid( x + hiddenLayerBias[j] ); //This gives P( h_j = 1 | input )
}
return true;
}
示例4: multiple
// Computes this = a * b (or a' * b when aTranspose is true), resizing this matrix to hold the result.
// Returns false (and logs an error) if the inner dimensions do not agree or the resize fails.
bool MatrixDouble::multiple(const MatrixDouble &a,const MatrixDouble &b,const bool aTranspose){
//M x N is the effective shape of a (after the optional transpose), K x L is the shape of b
const unsigned int M = !aTranspose ? a.getNumRows() : a.getNumCols();
const unsigned int N = !aTranspose ? a.getNumCols() : a.getNumRows();
const unsigned int K = b.getNumRows();
const unsigned int L = b.getNumCols();
if( N != K ) {
errorLog << "multiple(const MatrixDouble &a,const MatrixDouble &b,const bool aTranspose) - The number of columns in matrix a (" << N << ") does not match the number of rows in matrix b (" << K << ")" << std::endl;
return false;
}
if( !resize( M, L ) ){
errorLog << "multiple(const MatrixDouble &a,const MatrixDouble &b,const bool aTranspose) - Failed to resize matrix!" << endl;
return false;
}
unsigned int i, j, k = 0;
//Using direct pointers really helps speed up the computation time
double **pa = a.getDataPointer();
double **pb = b.getDataPointer();
if( aTranspose ){
//this[i][j] = sum_k a[k][i] * b[k][j]
for(j=0; j<L; j++){
for(i=0; i<M; i++){
dataPtr[i*cols+j] = 0;
for(k=0; k<K; k++){
dataPtr[i*cols+j] += pa[k][i] * pb[k][j];
}
}
}
}else{
//this[i][j] = sum_k a[i][k] * b[k][j]
for(j=0; j<L; j++){
for(i=0; i<M; i++){
dataPtr[i*cols+j] = 0;
for(k=0; k<K; k++){
dataPtr[i*cols+j] += pa[i][k] * pb[k][j];
}
}
}
}
return true;
}
示例5: scaleData
// Rescales every element of data into [0 1] using the per-column ranges stored in
// rangesBuffer, writing the result into scaledData (resized to match data if needed).
void DTW::scaleData(MatrixDouble &data,MatrixDouble &scaledData){
const UINT numRows = data.getNumRows();
const UINT numCols = data.getNumCols();
//Make sure the output matrix matches the input dimensions
if( scaledData.getNumRows() != numRows || scaledData.getNumCols() != numCols ){
scaledData.resize(numRows, numCols);
}
//Scale the data using the min and max values
for(UINT r=0; r<numRows; r++){
for(UINT c=0; c<numCols; c++){
scaledData[r][c] = scale(data[r][c],rangesBuffer[c].minValue,rangesBuffer[c].maxValue,0.0,1.0);
}
}
}
示例6: setClusters
// Installs a pre-computed set of cluster centers (one row per cluster), discarding
// any existing model first. Always returns true.
bool KMeans::setClusters(const MatrixDouble &clusters){
//Wipe the previous model before adopting the new centers
clear();
this->clusters = clusters;
numClusters = this->clusters.getNumRows();
numInputDimensions = this->clusters.getNumCols();
return true;
}
示例7:
//Init the model with a pre-trained a, b, and pi matrices
//Init the model with a pre-trained a, b, and pi matrices
// a: state transition matrix (must be square, numStates x numStates)
// b: emission matrix (numStates x numSymbols)
// pi: initial state distribution (length numStates)
DiscreteHiddenMarkovModel::DiscreteHiddenMarkovModel(const MatrixDouble &a,const MatrixDouble &b,const VectorDouble &pi,const UINT modelType,const UINT delta){
numStates = 0;
numSymbols = 0;
numRandomTrainingIterations = 5;
maxNumEpochs = 100;
cThreshold = -1000;
logLikelihood = 0.0;
minChange = 1.0e-5;
debugLog.setProceedingText("[DEBUG DiscreteHiddenMarkovModel]");
errorLog.setProceedingText("[ERROR DiscreteHiddenMarkovModel]");
warningLog.setProceedingText("[WARNING DiscreteHiddenMarkovModel]");
trainingLog.setProceedingText("[TRAINING DiscreteHiddenMarkovModel]");
//a must be square, and a, b and pi must all agree on the number of states
//(the original check compared a.getNumRows() with itself, which was always true)
if( a.getNumRows() == a.getNumCols() && a.getNumRows() == b.getNumRows() && a.getNumRows() == pi.size() ){
this->a = a;
this->b = b;
this->pi = pi;
this->modelType = modelType;
this->delta = delta;
numStates = b.getNumRows();
numSymbols = b.getNumCols();
trained = true;
}else{
errorLog << "DiscreteHiddenMarkovModel(...) - The a,b,pi sizes are invalid!" << endl;
}
}
示例8: multiple
// Returns the matrix product this * b as a new matrix.
// Returns an empty MatrixDouble (and logs a warning) if the inner dimensions do not agree.
MatrixDouble MatrixDouble::multiple(const MatrixDouble &b){
const unsigned int numRowsA = rows;
const unsigned int numColsA = cols;
const unsigned int numRowsB = (unsigned int)b.getNumRows();
const unsigned int numColsB = (unsigned int)b.getNumCols();
//The inner dimensions must agree for a matrix product
if( numColsA != numRowsB ) {
warningLog << "multiple(MatrixDouble b) - The number of rows in b (" << b.getNumRows() << ") does not match the number of columns in this matrix (" << numColsA << ")" << std::endl;
return MatrixDouble();
}
MatrixDouble result(numRowsA,numColsB);
//result[row][col] = sum over k of this[row][k] * b[k][col]
for(unsigned int row=0; row<numRowsA; row++){
for(unsigned int col=0; col<numColsB; col++){
double sum = 0;
for(unsigned int k=0; k<numRowsB; k++){
sum += dataPtr[row][k] * b[k][col];
}
result[row][col] = sum;
}
}
return result;
}
示例9: train_
// Trains the quantizer's Bernoulli RBM on trainingData and marks the feature module
// as initialized. Returns false (with an error log) if the data is empty or the RBM
// training fails.
bool RBMQuantizer::train_(MatrixDouble &trainingData){
//Reset any previously trained model
clear();
//Refuse to train on an empty dataset
if( trainingData.getNumRows() == 0 ){
errorLog << "train_(MatrixDouble &trainingData) - Failed to train quantizer, the training data is empty!" << endl;
return false;
}
//Configure the RBM with this quantizer's training parameters
rbm.setNumHiddenUnits( numClusters );
rbm.setLearningRate( learningRate );
rbm.setMinChange( minChange );
rbm.setMinNumEpochs( minNumEpochs );
rbm.setMaxNumEpochs( maxNumEpochs );
if( !rbm.train_( trainingData ) ){
errorLog << "train_(MatrixDouble &trainingData) - Failed to train quantizer!" << endl;
return false;
}
//Flag that the feature vector is now initalized
trained = true;
initialized = true;
numInputDimensions = trainingData.getNumCols();
numOutputDimensions = 1; //This is always 1 for the quantizer
featureVector.resize(numOutputDimensions,0);
quantizationDistances.resize(numClusters,0);
return true;
}
示例10: train_
// Trains the quantizer's KMeans model on trainingData and stores the resulting
// cluster centers. Returns false (with an error log) if the data is empty or the
// KMeans training fails.
bool KMeansQuantizer::train_(MatrixDouble &trainingData){
//Clear any previous model
clear();
//Guard against an empty dataset (consistent with RBMQuantizer::train_)
if( trainingData.getNumRows() == 0 ){
errorLog << "train_(MatrixDouble &trainingData) - Failed to train quantizer, the training data is empty!" << endl;
return false;
}
//Train the KMeans model
KMeans kmeans;
kmeans.setNumClusters(numClusters);
kmeans.setComputeTheta( true );
kmeans.setMinChange( minChange );
kmeans.setMinNumEpochs( minNumEpochs );
kmeans.setMaxNumEpochs( maxNumEpochs );
if( !kmeans.train_(trainingData) ){
errorLog << "train_(MatrixDouble &trainingData) - Failed to train quantizer!" << endl;
return false;
}
trained = true;
initialized = true;
numInputDimensions = trainingData.getNumCols();
numOutputDimensions = 1; //This is always 1 for the KMeansQuantizer
featureVector.resize(numOutputDimensions,0);
clusters = kmeans.getClusters();
quantizationDistances.resize(numClusters,0);
return true;
}
示例11: saveResults
bool saveResults( const GestureRecognitionPipeline &pipeline, const string &filename ){
infoLog << "Saving results to file: " << filename << endl;
fstream file( filename.c_str(), fstream::out );
if( !file.is_open() ){
errorLog << "Failed to open results file: " << filename << endl;
return false;
}
file << pipeline.getTestAccuracy() << endl;
vector< UINT > classLabels = pipeline.getClassLabels();
for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
file << pipeline.getTestPrecision( classLabels[k] );
if( k+1 < pipeline.getNumClassesInModel() ) file << "\t";
else file << endl;
}
for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
file << pipeline.getTestRecall( classLabels[k] );
if( k+1 < pipeline.getNumClassesInModel() ) file << "\t";
else file << endl;
}
for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
file << pipeline.getTestFMeasure( classLabels[k] );
if( k+1 < pipeline.getNumClassesInModel() ) file << "\t";
else file << endl;
}
MatrixDouble confusionMatrix = pipeline.getTestConfusionMatrix();
for(UINT i=0; i<confusionMatrix.getNumRows(); i++){
for(UINT j=0; j<confusionMatrix.getNumCols(); j++){
file << confusionMatrix[i][j];
if( j+1 < confusionMatrix.getNumCols() ) file << "\t";
}file << endl;
}
file.close();
infoLog << "Results saved." << endl;
return true;
}
示例12: offsetTimeseries
void DTW::offsetTimeseries(MatrixDouble ×eries){
VectorDouble firstRow = timeseries.getRowVector(0);
for(UINT i=0; i<timeseries.getNumRows(); i++){
for(UINT j=0; j<timeseries.getNumCols(); j++){
timeseries[i][j] -= firstRow[j];
}
}
}
示例13: main
int main (int argc, const char * argv[])
{
//Create some input data for the PCA algorithm - this data comes from the Matlab PCA example
MatrixDouble data(13,4);
data[0][0] = 7; data[0][1] = 26; data[0][2] = 6; data[0][3] = 60;
data[1][0] = 1; data[1][1] = 29; data[1][2] = 15; data[1][3] = 52;
data[2][0] = 11; data[2][1] = 56; data[2][2] = 8; data[2][3] = 20;
data[3][0] = 11; data[3][1] = 31; data[3][2] = 8; data[3][3] = 47;
data[4][0] = 7; data[4][1] = 52; data[4][2] = 6; data[4][3] = 33;
data[5][0] = 11; data[5][1] = 55; data[5][2] = 9; data[5][3] = 22;
data[6][0] = 3; data[6][1] = 71; data[6][2] = 17; data[6][3] = 6;
data[7][0] = 1; data[7][1] = 31; data[7][2] = 22; data[7][3] = 44;
data[8][0] = 2; data[8][1] = 54; data[8][2] = 18; data[8][3] = 22;
data[9][0] = 21; data[9][1] = 47; data[9][2] = 4; data[9][3] = 26;
data[10][0] = 1; data[10][1] = 40; data[10][2] = 23; data[10][3] = 34;
data[11][0] = 11; data[11][1] = 66; data[11][2] = 9; data[11][3] = 12;
data[12][0] = 10; data[12][1] = 68; data[12][2] = 8; data[12][3] = 12;
//Print the input data
data.print("Input Data:");
//Create a new principal component analysis instance
PrincipalComponentAnalysis pca;
//Run pca on the input data, setting the maximum variance value to 95% of the variance
if( !pca.computeFeatureVector( data, 0.95 ) ){
cout << "ERROR: Failed to compute feature vector!\n";
return EXIT_FAILURE;
}
//Get the number of principal components
UINT numPrincipalComponents = pca.getNumPrincipalComponents();
cout << "Number of Principal Components: " << numPrincipalComponents << endl;
//Project the original data onto the principal subspace
MatrixDouble prjData;
if( !pca.project( data, prjData ) ){
cout << "ERROR: Failed to project data!\n";
return EXIT_FAILURE;
}
//Print out the pca info
//Print our
pca.print("PCA Info:");
//Print the projected data
cout << "ProjectedData:\n";
for(UINT i=0; i<prjData.getNumRows(); i++){
for(UINT j=0; j<prjData.getNumCols(); j++){
cout << prjData[i][j] << "\t";
}cout << endl;
}
return EXIT_SUCCESS;
}
开发者ID:AdriannaGmz,项目名称:gesture-recognition-toolkit,代码行数:56,代码来源:PrincipalComponentAnalysisExample.cpp
示例14: computeFeatureVector
// Computes the PCA feature vector keeping exactly numPrincipalComponents components.
// Stores the settings then delegates to computeFeatureVector_ in MAX_NUM_PCS mode.
// Returns false if more components are requested than the data has columns.
bool PrincipalComponentAnalysis::computeFeatureVector(const MatrixDouble &data,UINT numPrincipalComponents,bool normData) {
//Invalidate any previous model until the new computation succeeds
trained = false;
if( numPrincipalComponents > data.getNumCols() ) {
errorLog << "computeFeatureVector(const MatrixDouble &data,UINT numPrincipalComponents,bool normData) - The number of principal components (";
errorLog << numPrincipalComponents << ") is greater than the number of columns in your data (" << data.getNumCols() << ")" << endl;
return false;
}
this->numPrincipalComponents = numPrincipalComponents;
this->normData = normData;
//MAX_NUM_PCS tells the worker to use the stored numPrincipalComponents count
return computeFeatureVector_(data,MAX_NUM_PCS);
}
示例15: project
// Projects data onto the trained principal subspace, writing the result into prjData
// (resized to data.getNumRows() x numPrincipalComponents).
// The data is mean-subtracted (and, if normData is set, divided by the per-column
// standard deviation) before being multiplied by the sorted eigenvectors.
// Returns false (with a warning log) if the module is untrained or the column count
// does not match numInputDimensions.
bool PrincipalComponentAnalysis::project(const MatrixDouble &data,MatrixDouble &prjData) {
if( !trained ) {
warningLog << "project(const MatrixDouble &data,MatrixDouble &prjData) - The PrincipalComponentAnalysis module has not been trained!" << endl;
return false;
}
if( data.getNumCols() != numInputDimensions ) {
warningLog << "project(const MatrixDouble &data,MatrixDouble &prjData) - The number of columns in the input vector (" << data.getNumCols() << ") does not match the number of input dimensions (" << numInputDimensions << ")!" << endl;
return false;
}
//Work on a copy so the caller's data is left untouched
MatrixDouble msData( data );
prjData.resize(data.getNumRows(),numPrincipalComponents);
if( normData ) {
//Mean subtract the data and scale by the training standard deviation
for(UINT i=0; i<data.getNumRows(); i++)
for(UINT j=0; j<numInputDimensions; j++)
msData[i][j] = (msData[i][j]-mean[j])/stdDev[j];
} else {
//Mean subtract the data
for(UINT i=0; i<data.getNumRows(); i++)
for(UINT j=0; j<numInputDimensions; j++)
msData[i][j] -= mean[j];
}
//Projected Data: multiply the centered data by the top eigenvectors,
//taking columns in sortedEigenvalues order (largest eigenvalue first)
for(UINT row=0; row<msData.getNumRows(); row++) { //For each row in the final data
for(UINT i=0; i<numPrincipalComponents; i++) { //For each PC
prjData[row][i]=0;
for(UINT j=0; j<data.getNumCols(); j++)//For each feature
prjData[row][i] += msData[row][j] * eigenvectors[j][sortedEigenvalues[i].index];
}
}
return true;
}