This article collects typical usage examples of the C++ GMatrix::cols method. If you are wondering what GMatrix::cols does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore other members of the GMatrix class.
The following shows 15 code examples of the GMatrix::cols method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.
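Before the examples, here is a minimal sketch of the method itself: GMatrix::cols() simply reports the number of columns in a matrix. This snippet is illustrative only and assumes the Waffles GClasses library; the header path and namespace are assumptions, not taken from the examples below.

#include <GClasses/GMatrix.h>
#include <iostream>

using namespace GClasses;

int main()
{
    GMatrix m(3, 4);                     // a 3-row, 4-column matrix of continuous values
    std::cout << m.rows() << " rows, "
              << m.cols() << " cols\n";  // cols() returns 4 here
    return 0;
}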
Example 1: trainInner

// virtual
void GLinearRegressor::trainInner(const GMatrix& features, const GMatrix& labels)
{
    if(!features.relation().areContinuous())
        throw Ex("GLinearRegressor only supports continuous features. Perhaps you should wrap it in a GAutoFilter.");
    if(!labels.relation().areContinuous())
        throw Ex("GLinearRegressor only supports continuous labels. Perhaps you should wrap it in a GAutoFilter.");
    // Use a fast, but not-very-numerically-stable technique to compute an initial approximation for beta and epsilon
    clear();
    GMatrix* pAll = GMatrix::mergeHoriz(&features, &labels);
    Holder<GMatrix> hAll(pAll);
    GPCA pca(features.cols());
    pca.train(*pAll);
    size_t inputs = features.cols();
    size_t outputs = labels.cols();
    GMatrix f(inputs, inputs);
    GMatrix l(inputs, outputs);
    for(size_t i = 0; i < inputs; i++)
    {
        GVec::copy(f[i].data(), pca.basis()->row(i).data(), inputs);
        double sqmag = f[i].squaredMagnitude();
        if(sqmag > 1e-10)
            f[i] *= 1.0 / sqmag;
        l[i].set(pca.basis()->row(i).data() + inputs, outputs);
    }
    m_pBeta = GMatrix::multiply(l, f, true, false);
    m_epsilon.resize(outputs);
    GVecWrapper vw(pca.centroid().data(), m_pBeta->cols());
    m_pBeta->multiply(vw.vec(), m_epsilon, false);
    m_epsilon *= -1.0;
    GVec::add(m_epsilon.data(), pca.centroid().data() + inputs, outputs);
    // Refine the results using gradient descent
    refine(features, labels, 0.06, 20, 0.75);
}
Example 2: loadData

void loadData(GMatrix& m, const char* szFilename)
{
    // Load the dataset by extension
    PathData pd;
    GFile::parsePath(szFilename, &pd);
    if(_stricmp(szFilename + pd.extStart, ".arff") == 0)
        m.loadArff(szFilename);
    else if(_stricmp(szFilename + pd.extStart, ".csv") == 0)
    {
        GCSVParser parser;
        parser.parse(m, szFilename);
        cerr << "\nParsing Report:\n";
        for(size_t i = 0; i < m.cols(); i++)
            cerr << to_str(i) << ") " << parser.report(i) << "\n";
    }
    else if(_stricmp(szFilename + pd.extStart, ".dat") == 0)
    {
        GCSVParser parser;
        parser.setSeparator('\0');
        parser.parse(m, szFilename);
        cerr << "\nParsing Report:\n";
        for(size_t i = 0; i < m.cols(); i++)
            cerr << to_str(i) << ") " << parser.report(i) << "\n";
    }
    else
        throw Ex("Unsupported file format: ", szFilename + pd.extStart);
}
Example 3: trainInner

// virtual
void GLinearRegressor::trainInner(GMatrix& features, GMatrix& labels)
{
    // Use a fast, but not-very-numerically-stable technique to compute an initial approximation for beta and epsilon
    clear();
    GMatrix* pAll = GMatrix::mergeHoriz(&features, &labels);
    Holder<GMatrix> hAll(pAll);
    GPCA pca(features.cols(), &m_rand);
    pca.train(*pAll);
    size_t inputs = features.cols();
    size_t outputs = labels.cols();
    GMatrix f(inputs, inputs);
    GMatrix l(inputs, outputs);
    for(size_t i = 0; i < inputs; i++)
    {
        GVec::copy(f[i], pca.basis(i), inputs);
        double sqmag = GVec::squaredMagnitude(f[i], inputs);
        if(sqmag > 1e-10)
            GVec::multiply(f[i], 1.0 / sqmag, inputs);
        GVec::copy(l[i], pca.basis(i) + inputs, outputs);
    }
    m_pBeta = GMatrix::multiply(l, f, true, false);
    m_pEpsilon = new double[outputs];
    m_pBeta->multiply(pca.mean(), m_pEpsilon, false);
    GVec::multiply(m_pEpsilon, -1.0, outputs);
    GVec::add(m_pEpsilon, pca.mean() + inputs, outputs);
    // Refine the results using gradient descent
    refine(features, labels, 0.06, 20, 0.75);
}
Example 4: matrix_not_symmetric

/***********************************************************************//**
 * @brief GMatrix to GSymMatrix storage class convertor
 *
 * @param[in] matrix General matrix (GMatrix).
 *
 * @exception GException::matrix_not_symmetric
 *            Matrix is not symmetric.
 *
 * Converts a general matrix into the symmetric storage class. If the input
 * matrix is not symmetric, an exception is thrown.
 ***************************************************************************/
GSymMatrix::GSymMatrix(const GMatrix& matrix)
{
    // Initialise class members for clean destruction
    init_members();
    // Allocate matrix memory
    alloc_members(matrix.rows(), matrix.cols());
    // Fill matrix
    for (int col = 0; col < matrix.cols(); ++col) {
        for (int row = col; row < matrix.rows(); ++row) {
            double value_ll = matrix(row,col);
            double value_ur = matrix(col,row);
            if (value_ll != value_ur) {
                throw GException::matrix_not_symmetric(G_CAST_MATRIX,
                                                       matrix.rows(),
                                                       matrix.cols());
            }
            (*this)(row, col) = matrix(row, col);
        }
    }
    // Return
    return;
}
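As the doc comment in Example 4 explains, the converter copies the lower triangle and throws GException::matrix_not_symmetric when the two triangles disagree. The short sketch below illustrates the intended call pattern; it is not taken from the library itself, and the header names and the 2x2 values are assumptions.

#include "GMatrix.hpp"
#include "GSymMatrix.hpp"

void convert_example(void)
{
    GMatrix a(2, 2);
    a(0,0) = 4.0;  a(0,1) = 1.0;   // fill a symmetric 2x2 matrix
    a(1,0) = 1.0;  a(1,1) = 3.0;
    GSymMatrix s(a);               // succeeds; a non-symmetric input would throw
}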
Example 5: train

void GPolynomialSingleLabel::train(GMatrix& features, GMatrix& labels)
{
    GAssert(labels.cols() == 1);
    init(features.cols());
    GPolynomialRegressCritic critic(this, features, labels);
    //GStochasticGreedySearch search(&critic);
    GMomentumGreedySearch search(&critic);
    search.searchUntil(100, 30, .01);
    setCoefficients(search.currentVector());
    fromBezierCoefficients();
}
Example 6: LoadData

void LoadData(GArgReader &args, std::unique_ptr<GMatrix> &hOutput)
{
    // Load the dataset by extension
    if(args.size() < 1)
        throw Ex("Expected the filename of a dataset. (Found end of arguments.)");
    const char* szFilename = args.pop_string();
    PathData pd;
    GFile::parsePath(szFilename, &pd);
    GMatrix data;
    vector<size_t> abortedCols;
    vector<size_t> ambiguousCols;
    const char *input_type;
    if (args.next_is_flag() && args.if_pop("-input_type")) {
        input_type = args.pop_string();
    } else { /* deduce it from extension (if any) */
        input_type = szFilename + pd.extStart;
        if (*input_type != '.') /* no extension - assume ARFF */
            input_type = "arff";
        else
            input_type++;
    }
    // Now load the data
    if(_stricmp(input_type, "arff") == 0)
    {
        data.loadArff(szFilename);
    }
    else if(_stricmp(input_type, "csv") == 0)
    {
        GCSVParser parser;
        parser.parse(data, szFilename);
        cerr << "\nParsing Report:\n";
        for(size_t i = 0; i < data.cols(); i++)
            cerr << to_str(i) << ") " << parser.report(i) << "\n";
    }
    else if(_stricmp(input_type, "dat") == 0)
    {
        GCSVParser parser;
        parser.setSeparator('\0');
        parser.parse(data, szFilename);
        cerr << "\nParsing Report:\n";
        for(size_t i = 0; i < data.cols(); i++)
            cerr << to_str(i) << ") " << parser.report(i) << "\n";
    }
    else
    {
        throw Ex("Unsupported file format: ", szFilename + pd.extStart);
    }
    // Copy the loaded data into the output matrix
    GMatrix* pFeatures = data.cloneSub(0, 0, data.rows(), data.cols());
    hOutput.reset(pFeatures);
}
Example 7: dropRandomValues

void dropRandomValues(GArgReader& args)
{
    GMatrix* pData = loadData(args.pop_string());
    double portion = args.pop_double();
    // Parse the options
    unsigned int seed = getpid() * (unsigned int)time(NULL);
    while(args.next_is_flag())
    {
        if(args.if_pop("-seed"))
            seed = args.pop_uint();
        else
            ThrowError("Invalid option: ", args.peek());
    }
    GRand rand(seed);
    size_t n = pData->rows() * pData->cols();
    size_t k = size_t(portion * n);
    for(size_t i = 0; i < pData->cols(); i++)
    {
        size_t vals = pData->relation()->valueCount(i);
        if(vals == 0)
        {
            for(size_t j = 0; j < pData->rows(); j++)
            {
                if(rand.next(n) < k)
                {
                    pData->row(j)[i] = UNKNOWN_REAL_VALUE;
                    k--;
                }
                n--;
            }
        }
        else
        {
            for(size_t j = 0; j < pData->rows(); j++)
            {
                if(rand.next(n) < k)
                {
                    pData->row(j)[i] = UNKNOWN_DISCRETE_VALUE;
                    k--;
                }
                n--;
            }
        }
    }
    pData->print(cout);
}
Example 8: refine

void GLinearRegressor::refine(GMatrix& features, GMatrix& labels, double learningRate, size_t epochs, double learningRateDecayFactor)
{
    size_t fDims = features.cols();
    size_t lDims = labels.cols();
    size_t* pIndexes = new size_t[features.rows()];
    ArrayHolder<size_t> hIndexes(pIndexes);
    GIndexVec::makeIndexVec(pIndexes, features.rows());
    for(size_t i = 0; i < epochs; i++)
    {
        GIndexVec::shuffle(pIndexes, features.rows(), &m_rand);
        size_t* pIndex = pIndexes;
        for(size_t j = 0; j < features.rows(); j++)
        {
            double* pFeat = features[*pIndex];
            double* pLab = labels[*pIndex];
            double* pBias = m_pEpsilon;
            for(size_t k = 0; k < lDims; k++)
            {
                double err = *pLab - (GVec::dotProduct(pFeat, m_pBeta->row(k), fDims) + *pBias);
                double* pF = pFeat;
                double lr = learningRate;
                double mag = 0.0;
                for(size_t l = 0; l < fDims; l++)
                {
                    double d = *pF * err;
                    mag += (d * d);
                    pF++;
                }
                mag += err * err;
                if(mag > 1.0)
                    lr /= mag;
                pF = pFeat;
                double* pW = m_pBeta->row(k);
                for(size_t l = 0; l < fDims; l++)
                {
                    *pW += *pF * lr * err;
                    pF++;
                    pW++;
                }
                *pBias += learningRate * err;
                pLab++;
                pBias++;
            }
            pIndex++;
        }
        learningRate *= learningRateDecayFactor;
    }
}
Example 9: autoCorrelation

void autoCorrelation(GArgReader& args)
{
    GMatrix* pData = loadData(args.pop_string());
    Holder<GMatrix> hData(pData);
    size_t lag = std::min((size_t)256, pData->rows() / 2);
    size_t dims = pData->cols();
    GTEMPBUF(double, mean, dims);
    pData->centroid(mean);
    GMatrix ac(0, dims + 1);
    for(size_t i = 1; i <= lag; i++)
    {
        double* pRow = ac.newRow();
        *(pRow++) = (double)i;
        for(size_t j = 0; j < dims; j++)
        {
            *pRow = 0;
            size_t k;
            for(k = 0; k + i < pData->rows(); k++)
            {
                double* pA = pData->row(k);
                double* pB = pData->row(k + i);
                *pRow += (pA[j] - mean[j]) * (pB[j] - mean[j]);
            }
            *pRow /= k;
            pRow++;
        }
    }
    ac.print(cout);
}
Example 10: addNoise

void addNoise(GArgReader& args)
{
    GMatrix* pData = loadData(args.pop_string());
    Holder<GMatrix> hData(pData);
    double dev = args.pop_double();
    // Parse the options
    unsigned int seed = getpid() * (unsigned int)time(NULL);
    int excludeLast = 0;
    while(args.next_is_flag())
    {
        if(args.if_pop("-seed"))
            seed = args.pop_uint();
        else if(args.if_pop("-excludelast"))
            excludeLast = args.pop_uint();
        else
            ThrowError("Invalid neighbor finder option: ", args.peek());
    }
    GRand prng(seed);
    size_t cols = pData->cols() - excludeLast;
    for(size_t r = 0; r < pData->rows(); r++)
    {
        double* pRow = pData->row(r);
        for(size_t c = 0; c < cols; c++)
            *(pRow++) += dev * prng.normal();
    }
    pData->print(cout);
}
Example 11: curviness2

void curviness2(GArgReader& args)
{
    GMatrix* pData = loadData(args.pop_string());
    Holder<GMatrix> hData(pData);
    GNormalize norm;
    GMatrix* pDataNormalized = norm.doit(*pData);
    Holder<GMatrix> hDataNormalized(pDataNormalized);
    hData.reset();
    pData = NULL;
    // Parse Options
    size_t maxEigs = 10;
    unsigned int seed = getpid() * (unsigned int)time(NULL);
    Holder<GMatrix> hControlData(NULL);
    while(args.size() > 0)
    {
        if(args.if_pop("-seed"))
            seed = args.pop_uint();
        else if(args.if_pop("-maxeigs"))
            maxEigs = args.pop_uint();
        else
            throw Ex("Invalid option: ", args.peek());
    }
    GRand rand(seed);
    size_t targetDims = std::min(maxEigs, pDataNormalized->cols());
    // Do linear PCA
    GNeuroPCA np1(targetDims, &rand);
    np1.setActivation(new GActivationIdentity());
    np1.computeEigVals();
    GMatrix* pResults1 = np1.doit(*pDataNormalized);
    Holder<GMatrix> hResults1(pResults1);
    double* pEigVals1 = np1.eigVals();
    for(size_t i = 0; i + 1 < targetDims; i++)
        pEigVals1[i] = sqrt(pEigVals1[i]) - sqrt(pEigVals1[i + 1]);
    size_t max1 = GVec::indexOfMax(pEigVals1, targetDims - 1, &rand);
    double v1 = (double)max1;
    if(max1 > 0 && max1 + 2 < targetDims)
        v1 += (pEigVals1[max1 - 1] - pEigVals1[max1 + 1]) / (2.0 * (pEigVals1[max1 - 1] + pEigVals1[max1 + 1] - 2.0 * pEigVals1[max1]));
    // Do non-linear PCA
    GNeuroPCA np2(targetDims, &rand);
    np2.setActivation(new GActivationLogistic());
    np2.computeEigVals();
    GMatrix* pResults2 = np2.doit(*pDataNormalized);
    Holder<GMatrix> hResults2(pResults2);
    double* pEigVals2 = np2.eigVals();
    for(size_t i = 0; i + 1 < targetDims; i++)
        pEigVals2[i] = sqrt(pEigVals2[i]) - sqrt(pEigVals2[i + 1]);
    size_t max2 = GVec::indexOfMax(pEigVals2, targetDims - 1, &rand);
    double v2 = (double)max2;
    if(max2 > 0 && max2 + 2 < targetDims)
        v2 += (pEigVals2[max2 - 1] - pEigVals2[max2 + 1]) / (2.0 * (pEigVals2[max2 - 1] + pEigVals2[max2 + 1] - 2.0 * pEigVals2[max2]));
    // Compute the difference in where the eigenvalues fall
    cout.precision(14);
    cout << (v1 - v2) << "\n";
}
Example 12: blendEmbeddings

void blendEmbeddings(GArgReader& args)
{
    // Load the files and params
    GMatrix* pDataOrig = loadData(args.pop_string());
    Holder<GMatrix> hDataOrig(pDataOrig);
    unsigned int seed = getpid() * (unsigned int)time(NULL);
    GRand prng(seed);
    GNeighborFinder* pNF = instantiateNeighborFinder(pDataOrig, &prng, args);
    Holder<GNeighborFinder> hNF(pNF);
    GMatrix* pDataA = loadData(args.pop_string());
    Holder<GMatrix> hDataA(pDataA);
    GMatrix* pDataB = loadData(args.pop_string());
    Holder<GMatrix> hDataB(pDataB);
    if(pDataA->rows() != pDataOrig->rows() || pDataB->rows() != pDataOrig->rows())
        throw Ex("mismatching number of rows");
    if(pDataA->cols() != pDataB->cols())
        throw Ex("mismatching number of cols");
    // Parse Options
    while(args.size() > 0)
    {
        if(args.if_pop("-seed"))
            prng.setSeed(args.pop_uint());
        else
            throw Ex("Invalid option: ", args.peek());
    }
    // Get a neighbor table
    if(!pNF->isCached())
    {
        GNeighborFinderCacheWrapper* pNF2 = new GNeighborFinderCacheWrapper(hNF.release(), true);
        hNF.reset(pNF2);
        pNF = pNF2;
    }
    ((GNeighborFinderCacheWrapper*)pNF)->fillCache();
    size_t* pNeighborTable = ((GNeighborFinderCacheWrapper*)pNF)->cache();
    // Do the blending
    size_t startPoint = (size_t)prng.next(pDataA->rows());
    double* pRatios = new double[pDataA->rows()];
    ArrayHolder<double> hRatios(pRatios);
    GVec::setAll(pRatios, 0.5, pDataA->rows());
    GMatrix* pDataC = GManifold::blendEmbeddings(pDataA, pRatios, pDataB, pNF->neighborCount(), pNeighborTable, startPoint);
    Holder<GMatrix> hDataC(pDataC);
    pDataC->print(cout);
}
Example 13: test_transform_mergevert

void test_transform_mergevert()
{
    // Make some input files
    TempFileMaker tempFile1("a.arff",
        "@RELATION test\n"
        "@ATTRIBUTE a1 continuous\n"
        "@ATTRIBUTE a2 { alice, bob }\n"
        "@ATTRIBUTE a3 { true, false }\n"
        "@DATA\n"
        "1.2, alice, true\n"
        "2.3, bob, false\n"
        );
    TempFileMaker tempFile2("b.arff",
        "@RELATION test\n"
        "@ATTRIBUTE a1 continuous\n"
        "@ATTRIBUTE a2 { charlie, bob }\n"
        "@ATTRIBUTE a3 { false, true }\n"
        "@DATA\n"
        "3.4, bob, true\n"
        "4.5, charlie, false\n"
        );
    // Execute the command
    GPipe pipeStdOut;
    if(sysExec("waffles_transform", "mergevert a.arff b.arff", &pipeStdOut) != 0)
        throw Ex("exit status indicates failure");
    char buf[512];
    size_t len = pipeStdOut.read(buf, 512);
    if(len == 512)
        throw Ex("need a bigger buffer");
    buf[len] = '\0';
    // Check the results
    GMatrix M;
    M.parseArff(buf, strlen(buf));
    if(M.rows() != 4 || M.cols() != 3)
        throw Ex("failed");
    if(M.relation().valueCount(0) != 0)
        throw Ex("failed");
    if(M.relation().valueCount(1) != 3)
        throw Ex("failed");
    if(M.relation().valueCount(2) != 2)
        throw Ex("failed");
    std::ostringstream oss;
    const GArffRelation* pRel = (const GArffRelation*)&M.relation();
    pRel->printAttrValue(oss, 1, 2.0);
    string s = oss.str();
    if(strcmp(s.c_str(), "charlie") != 0)
        throw Ex("failed");
    if(M[0][0] != 1.2 || M[1][0] != 2.3 || M[2][0] != 3.4 || M[3][0] != 4.5)
        throw Ex("failed");
    if(M[0][1] != 0 || M[1][1] != 1 || M[2][1] != 1 || M[3][1] != 2)
        throw Ex("failed");
    if(M[0][2] != 0 || M[1][2] != 1 || M[2][2] != 0 || M[3][2] != 1)
        throw Ex("failed");
}
Example 14: rotate

void rotate(GArgReader& args)
{
    GMatrix* pA = loadData(args.pop_string());
    Holder<GMatrix> hA(pA);
    sp_relation relation = pA->relation();
    unsigned colx = args.pop_uint();
    if(colx >= pA->cols()){
        ThrowError("Rotation first column index (", to_str(colx), ") "
                   "should not be greater "
                   "than the largest index, which is ", to_str(pA->cols() - 1),
                   ".");
    }
    if(!relation->areContinuous(colx, 1)){
        ThrowError("Rotation first column index (", to_str(colx), ") "
                   "should be continuous and it is not.");
    }
    unsigned coly = args.pop_uint();
    if(coly >= pA->cols()){
        ThrowError("Rotation second column index (", to_str(coly), ") "
                   "should not be greater "
                   "than the largest index, which is ", to_str(pA->cols() - 1),
                   ".");
    }
    if(!relation->areContinuous(coly, 1)){
        ThrowError("Rotation second column index (", to_str(coly), ") "
                   "should be continuous and it is not.");
    }
    double angle = args.pop_double();
    angle = angle * M_PI / 180; // Convert from degrees to radians
    double cosAngle = std::cos(angle);
    double sinAngle = std::sin(angle);
    for(std::size_t rowIdx = 0; rowIdx < pA->rows(); ++rowIdx){
        double* row = (*pA)[rowIdx];
        double x = row[colx];
        double y = row[coly];
        row[colx] = x * cosAngle - y * sinAngle;
        row[coly] = x * sinAngle + y * cosAngle;
    }
    pA->print(cout);
}
Example 15: determineWeights

// virtual
void GBayesianModelCombination::determineWeights(GMatrix& features, GMatrix& labels)
{
    double* pWeights = new double[m_models.size()];
    ArrayHolder<double> hWeights(pWeights);
    GVec::setAll(pWeights, 0.0, m_models.size());
    double sumWeight = 0.0;
    double maxLogProb = -1e38;
    GTEMPBUF(double, results, labels.cols());
    for(size_t i = 0; i < m_samples; i++)
    {
        // Set weights randomly from a Dirichlet distribution with uniform probabilities
        for(vector<GWeightedModel*>::iterator it = m_models.begin(); it != m_models.end(); it++)
            (*it)->m_weight = m_rand.exponential();
        normalizeWeights();
        // Evaluate accuracy
        accuracy(features, labels, results);
        double d = GVec::sumElements(results, labels.cols()) / labels.cols();
        double logProbEnsembleGivenData;
        if(d == 0.0)
            logProbEnsembleGivenData = -1e38;
        else if(d == 1.0)
            logProbEnsembleGivenData = 0.0;
        else
            logProbEnsembleGivenData = features.rows() * (d * log(d) + (1.0 - d) * log(1.0 - d));
        // Update the weights
        if(logProbEnsembleGivenData > maxLogProb)
        {
            GVec::multiply(pWeights, exp(maxLogProb - logProbEnsembleGivenData), m_models.size());
            maxLogProb = logProbEnsembleGivenData;
        }
        double w = exp(logProbEnsembleGivenData - maxLogProb);
        GVec::multiply(pWeights, sumWeight / (sumWeight + w), m_models.size());
        double* pW = pWeights;
        for(vector<GWeightedModel*>::iterator it = m_models.begin(); it != m_models.end(); it++)
            *(pW++) += w * (*it)->m_weight;
        sumWeight += w;
    }
    double* pW = pWeights;
    for(vector<GWeightedModel*>::iterator it = m_models.begin(); it != m_models.end(); it++)
        (*it)->m_weight = *(pW++);
}