This article collects typical usage examples of the C++ method GMatrix::columnMax. If you have been wondering how GMatrix::columnMax is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of its containing class, GMatrix.
Four code examples of GMatrix::columnMax are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better C++ code examples.
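Before the full examples, here is a minimal sketch of the basic pattern (not taken from the examples below; it uses only the GMatrix calls that also appear in them, while the header path and file name are assumptions):
// Minimal sketch: print the min/max of every column of an ARFF dataset.
// Assumes the Waffles GClasses API; the header path and input file are hypothetical.
#include <iostream>
#include <GClasses/GMatrix.h>
using namespace GClasses;
int main()
{
	GMatrix data;
	data.loadArff("mydata.arff");            // hypothetical input file
	for(size_t col = 0; col < data.cols(); col++)
	{
		double lo = data.columnMin(col);     // smallest value in the column
		double hi = data.columnMax(col);     // largest value in the column
		std::cout << "col " << col << ": [" << lo << ", " << hi << "]\n";
	}
	return 0;
}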
Example 1: loadSparseData
GSparseMatrix* GRecommenderLib::loadSparseData(const char* szFilename)
{
	// Load the dataset by extension
	PathData pd;
	GFile::parsePath(szFilename, &pd);
	if(_stricmp(szFilename + pd.extStart, ".arff") == 0)
	{
		// Convert a 3-column dense ARFF file to a sparse matrix
		GMatrix data;
		data.loadArff(szFilename);
		if(data.cols() != 3)
			throw Ex("Expected 3 columns: 0) user or row-index, 1) item or col-index, 2) value or rating");
		double m0 = data.columnMin(0);
		double r0 = data.columnMax(0) - m0;
		double m1 = data.columnMin(1);
		double r1 = data.columnMax(1) - m1;
		if(m0 < 0 || m0 > 1e10 || r0 < 2 || r0 > 1e10)
			throw Ex("Invalid row indexes");
		if(m1 < 0 || m1 > 1e10 || r1 < 2 || r1 > 1e10)
			throw Ex("Invalid col indexes");
		GSparseMatrix* pMatrix = new GSparseMatrix(size_t(m0 + r0) + 1, size_t(m1 + r1) + 1, UNKNOWN_REAL_VALUE);
		std::unique_ptr<GSparseMatrix> hMatrix(pMatrix);
		for(size_t i = 0; i < data.rows(); i++)
		{
			GVec& row = data.row(i);
			pMatrix->set(size_t(row[0]), size_t(row[1]), row[2]);
		}
		return hMatrix.release();
	}
	else if(_stricmp(szFilename + pd.extStart, ".sparse") == 0)
	{
		GDom doc;
		doc.loadJson(szFilename);
		return new GSparseMatrix(doc.root());
	}
	throw Ex("Unsupported file format: ", szFilename + pd.extStart);
	return NULL;
}
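Here, columnMin and columnMax of columns 0 and 1 give the smallest index and the index range, so the resulting sparse matrix is sized to hold the largest row and column index plus one. A hedged usage sketch follows (not from the original source; the file name is hypothetical, and it assumes loadSparseData is reachable as a static member and that GSparseMatrix exposes rows()/cols() accessors):
// Hedged usage sketch: load a 3-column ARFF of (user, item, rating) triples
// as a sparse matrix and report its dimensions.
GSparseMatrix* pRatings = GRecommenderLib::loadSparseData("ratings.arff"); // hypothetical file
std::unique_ptr<GSparseMatrix> hRatings(pRatings);
std::cout << pRatings->rows() << " rows x " << pRatings->cols() << " cols\n";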
Example 2: sqrt
GHistogram::GHistogram(GMatrix& data, size_t col, double xMin, double xMax, size_t maxBuckets)
{
	// Gather summary statistics for the specified column
	double dataMin = data.columnMin(col);
	double dataRange = data.columnMax(col) - dataMin;
	double mean = data.columnMean(col);
	double median = data.columnMedian(col);
	double dev = sqrt(data.columnVariance(col, mean));

	// If no explicit bound was given, clamp the range to four standard
	// deviations around the median (but never wider than the actual data range)
	if(xMin == UNKNOWN_REAL_VALUE)
		m_min = std::max(dataMin, median - 4 * dev);
	else
		m_min = xMin;
	if(xMax == UNKNOWN_REAL_VALUE)
		m_max = std::min(dataMin + dataRange, median + 4 * dev);
	else
		m_max = xMax;

	// Use about sqrt(n) bins, but at least one and no more than maxBuckets
	m_binCount = std::max((size_t)1, std::min(maxBuckets, (size_t)floor(sqrt((double)data.rows()))));
	m_bins = new double[m_binCount];
	GVec::setAll(m_bins, 0.0, m_binCount);
	m_sum = 0.0;

	// Drop every sample in the column into its bin
	for(size_t i = 0; i < data.rows(); i++)
		addSample(data[i][col], 1.0);
}
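For reference, a hedged usage sketch of this constructor (the file name and bucket cap are made up; passing UNKNOWN_REAL_VALUE for both bounds lets the constructor derive the range from columnMin and columnMax as shown above):
GMatrix data;
data.loadArff("samples.arff");   // hypothetical input file
// Histogram of column 0 with an auto-detected range and at most 50 bins
GHistogram hist(data, 0, UNKNOWN_REAL_VALUE, UNKNOWN_REAL_VALUE, 50);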
Example 3: doit
void doit()
{
	// Load the data
	GMatrix trainLab;
	GMatrix testLab;
	if(chdir("../bin") != 0)
	{
	}
	trainLab.loadArff("train.arff");
	testLab.loadArff("test.arff");
	double dataMin = trainLab.columnMin(0);
	double dataMax = trainLab.columnMax(0);
	trainLab.normalizeColumn(0, dataMin, dataMax, -5.0, 5.0);
	testLab.normalizeColumn(0, dataMin, dataMax, -5.0, 5.0);
	GMatrix trainFeat(trainLab.rows(), 1);
	for(size_t i = 0; i < trainLab.rows(); i++)
		trainFeat[i][0] = (double)i / trainLab.rows() - 0.5;
	GMatrix testFeat(testLab.rows(), 1);
	for(size_t i = 0; i < testLab.rows(); i++)
		testFeat[i][0] = (double)(i + trainLab.rows()) / trainLab.rows() - 0.5;

	// Make a neural network
	GNeuralNet nn;
	GUniformRelation relOne(1);
	nn.beginIncrementalLearning(relOne, relOne);

	// Initialize the weights of the sine units to match the frequencies used by the Fourier transform.
	GLayerClassic* pSine2 = new GLayerClassic(1, 64, new GActivationSin());
	GMatrix& wSin = pSine2->weights();
	GVec& bSin = pSine2->bias();
	for(size_t i = 0; i < pSine2->outputs() / 2; i++)
	{
		wSin[0][2 * i] = 2.0 * M_PI * (i + 1);
		bSin[2 * i] = 0.5 * M_PI;
		wSin[0][2 * i + 1] = 2.0 * M_PI * (i + 1);
		bSin[2 * i + 1] = M_PI;
	}

	// Make the hidden layer
	GLayerMixed* pMix2 = new GLayerMixed();
	pSine2->resize(1, pSine2->outputs(), &nn.rand(), PERTURBATION);
	pMix2->addComponent(pSine2);
	GLayerClassic* pSoftPlus2 = new GLayerClassic(1, SOFTPLUS_NODES, new GActivationSoftPlus());
	pMix2->addComponent(pSoftPlus2);
	GLayerClassic* pIdentity2 = new GLayerClassic(1, IDENTITY_NODES, new GActivationIdentity());
	pMix2->addComponent(pIdentity2);
	nn.addLayer(pMix2);

	// Make the output layer
	GLayerClassic* pIdentity3 = new GLayerClassic(FLEXIBLE_SIZE, trainLab.cols(), new GActivationIdentity());
	pIdentity3->resize(pMix2->outputs(), pIdentity3->outputs(), &nn.rand(), PERTURBATION);
	nn.addLayer(pIdentity3);

	// Initialize all the non-periodic nodes to approximate the identity function, then perturb a little bit
	pSoftPlus2->setWeightsToIdentity();
	for(size_t i = 0; i < SOFTPLUS_NODES; i++)
	{
		pSoftPlus2->bias()[i] += SOFTPLUS_SHIFT;
		pIdentity3->renormalizeInput(pSine2->outputs() + i, 0.0, 1.0, SOFTPLUS_SHIFT, SOFTPLUS_SHIFT + 1.0);
	}
	pIdentity2->setWeightsToIdentity();
	pSoftPlus2->perturbWeights(nn.rand(), PERTURBATION);
	pIdentity2->perturbWeights(nn.rand(), PERTURBATION);

	// Randomly initialize the weights on the output layer
	pIdentity3->weights().setAll(0.0);
	pIdentity3->perturbWeights(nn.rand(), PERTURBATION);

	// Open Firefox to view the progress
	GApp::systemCall("firefox ./view.html#progress.svg", false, true);

	// Do some training
	GRandomIndexIterator ii(trainLab.rows(), nn.rand());
	nn.setLearningRate(LEARNING_RATE);
	for(size_t epoch = 0; epoch < TRAINING_EPOCHS; epoch++)
	{
		// Visit each sample in random order
		ii.reset();
		size_t i;
		while(ii.next(i))
		{
			// Regularize
			pIdentity3->scaleWeights(1.0 - nn.learningRate() * REGULARIZATION_TERM, true);
			pIdentity3->diminishWeights(nn.learningRate() * REGULARIZATION_TERM, true);

			// Train
			nn.trainIncremental(trainFeat[i], trainLab[i]); // One iteration of stochastic gradient descent
		}

		// Report progress
		double rmse = sqrt(nn.sumSquaredError(trainFeat, trainLab) / trainLab.rows());
		if(epoch % (TRAINING_EPOCHS / 100) == 0)
		{
			double val = sqrt(nn.sumSquaredError(testFeat, testLab) / testLab.rows());
			cout << "prog=" << to_str((double)epoch * 100.0 / TRAINING_EPOCHS) << "% rmse=" << to_str(rmse) << " val=" << to_str(val) << "\n";
			plot_it("progress.svg", nn, trainFeat, trainLab, testFeat, testLab);
		}
	}
}
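Note that both the training and test labels are normalized with the columnMin and columnMax of the training set, so the two splits stay on the same scale.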
Example 4: plot_it
void plot_it(const char* filename, GNeuralNet& nn, GMatrix& trainFeat, GMatrix& trainLab, GMatrix& testFeat, GMatrix& testLab)
{
	GSVG svg(1000, 500);

	// Size the chart to span all the feature values and the full range of both label sets
	double xmin = trainFeat[0][0];
	double xmax = testFeat[testFeat.rows() - 1][0];
	svg.newChart(xmin, std::min(trainLab.columnMin(0), testLab.columnMin(0)), xmax, std::max(trainLab.columnMax(0), testLab.columnMax(0)));
	svg.horizMarks(20);
	svg.vertMarks(20);

	// Draw the model's predictions as a curve
	double prevx = xmin;
	double prevy = 0.0;
	double step = (xmax - xmin) / 500.0;
	GVec x(1);
	GVec y(1);
	for(x[0] = prevx; x[0] < xmax; x[0] += step)
	{
		nn.predict(x, y);
		if(prevx != x[0])
			svg.line(prevx, prevy, x[0], y[0], 0.3);
		prevx = x[0];
		prevy = y[0];
	}

	// Draw the training and test samples as dots
	for(size_t i = 0; i < trainLab.rows(); i++)
		svg.dot(trainFeat[i][0], trainLab[i][0], 0.4, 0xff000080);
	for(size_t i = 0; i < testLab.rows(); i++)
		svg.dot(testFeat[i][0], testLab[i][0], 0.4, 0xff800000);

	// Write the SVG to a file
	std::ofstream ofs;
	ofs.open(filename);
	svg.print(ofs);
}