This article collects typical usage examples of the C++ xvec_t::at method. If you are wondering what exactly C++ xvec_t::at does, how to use it, or where to find examples of it, the curated code examples below may help. You can also explore further usage examples of the containing class xvec_t.
The following presents 9 code examples of the xvec_t::at method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: clone
/// Evaluate a candidate initial learning rate on a cloned model
double
SvmSgd::my_evaluateEta(int imin, int imax, const xvec_t &xp, const yvec_t &yp, double eta00)
{
  SvmSgd clone(*this); // take a copy of the current state
  cout << "[my_evaluateEta: clone.wDivisor: ]" << setprecision(12) << clone.wDivisor
       << " clone.t: " << clone.t << " clone.eta0: " << clone.eta0 << endl;
  cout << "Trying eta=" << eta00;
  assert(imin <= imax);
  double _t = 0;
  double eta = 0;
  for (int i=imin; i<=imax; i++)
    {
      // decay the rate as if training had started from t = 0
      eta = eta00 / (1 + lambda * eta00 * _t);
      //cout << "[my_evaluateEta:] Eta: " << eta << endl;
      clone.trainOne(xp.at(i), yp.at(i), eta);
      _t++;
    }
  double loss = 0;
  double cost = 0;
  for (int i=imin; i<=imax; i++)
    clone.testOne(xp.at(i), yp.at(i), &loss, 0);
  loss = loss / (imax - imin + 1);
  cost = loss + 0.5 * lambda * clone.wnorm();
  cout << " yields loss " << loss << endl;
  // cout << "Trying eta=" << eta << " yields cost " << cost << endl;
  return cost;
}
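The schedule eta = eta00 / (1 + lambda * eta00 * t) in the loop above is the usual SGD decay for an l2-regularized objective; the candidate rate is replayed from t = 0 on a throwaway clone so the live model stays untouched. A minimal, self-contained sketch of how that rate decays (the eta0 and lambda values below are made-up for illustration):

#include <iostream>

int main()
{
  double eta0 = 0.1;      // hypothetical candidate rate
  double lambda = 1e-5;   // hypothetical regularization strength
  for (long t = 0; t <= 500000; t += 100000)
    std::cout << "t=" << t
              << "  eta=" << eta0 / (1 + lambda * eta0 * t) << std::endl;
  return 0;
}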
Example 2: assert
/// Perform a test pass
void
SvmSgd::test(int imin, int imax,
             const xvec_t &xp, const yvec_t &yp,
             const char *prefix)
{
  cout << prefix << "Testing on [" << imin << ", " << imax << "]." << endl;
  assert(imin <= imax);
  int nerr = 0;
  double cost = 0;
  for (int i=imin; i<=imax; i++)
    {
      const SVector &x = xp.at(i);
      double y = yp.at(i);
      double wx = dot(w,x);
      double z = y * (wx + bias);
      if (z <= 0)
        nerr += 1;
#if LOSS < LOGLOSS
      if (z < 1)
#endif
        cost += loss(z);
    }
  int n = imax - imin + 1;
  double loss = cost / n;
  cost = loss + 0.5 * lambda * dot(w,w);
  cout << prefix << setprecision(4)
       << "Misclassification: " << (double)nerr * 100.0 / n << "%." << endl;
  cout << prefix << setprecision(12)
       << "Cost: " << cost << "." << endl;
  cout << prefix << setprecision(12)
       << "Loss: " << loss << "." << endl;
}
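The #if LOSS < LOGLOSS guard above exploits a property of margin losses: hinge-style losses are exactly zero once z >= 1, so the call can be skipped past the margin, whereas log loss is positive everywhere and must always be accumulated. A self-contained sketch of that property, assuming LOSS expands to the plain hinge loss (an assumption; the codebase supports several losses):

#include <algorithm>
#include <iostream>

// Plain hinge loss: zero at or beyond the margin, linear below it.
double hinge(double z) { return std::max(0.0, 1 - z); }

int main()
{
  const double zs[] = {-1.0, 0.0, 0.5, 1.0, 2.0};
  for (double z : zs)
    std::cout << "z=" << z << "  hinge=" << hinge(z) << std::endl;
  return 0;
}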
Example 3: clone
/// Evaluate a candidate learning rate on a cloned model
double
SvmAisgd::evaluateEta(int imin, int imax, const xvec_t &xp, const yvec_t &yp, double eta)
{
  SvmAisgd clone(*this); // take a copy of the current state
  assert(imin <= imax);
  for (int i=imin; i<=imax; i++)
    clone.trainOne(xp.at(i), yp.at(i), eta, 1.0);
  double loss = 0;
  double cost = 0;
  for (int i=imin; i<=imax; i++)
    clone.testOne(xp.at(i), yp.at(i), &loss, 0);
  loss = loss / (imax - imin + 1);
  cost = loss + 0.5 * lambda * clone.wnorm();
  // cout << "Trying eta=" << eta << " yields cost " << cost << endl;
  return cost;
}
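Compared with the SvmSgd version in Example 1, trainOne here takes a fourth argument (1.0), which presumably weights the iterate averaging that gives averaged SGD its name; that is an assumption based on the class name SvmAisgd. The averaging idea itself, reduced to a scalar, is just an incremental mean of the SGD iterates; a toy sketch with fabricated updates:

#include <iostream>

int main()
{
  double w = 0.0, a = 0.0;   // SGD iterate and its running average
  for (int t = 1; t <= 5; t++)
    {
      w += 0.1;              // stand-in for one SGD update
      a += (w - a) / t;      // incremental mean of the iterates so far
    }
  std::cout << "w=" << w << "  averaged=" << a << std::endl;
  return 0;
}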
Example 4: assert
/// Perform a test pass
void
SvmAisgd::test(int imin, int imax, const xvec_t &xp, const yvec_t &yp, const char *prefix)
{
  cout << prefix << "Testing on [" << imin << ", " << imax << "]." << endl;
  assert(imin <= imax);
  double nerr = 0;
  double loss = 0;
  for (int i=imin; i<=imax; i++)
    testOne(xp.at(i), yp.at(i), &loss, &nerr);
  nerr = nerr / (imax - imin + 1);
  loss = loss / (imax - imin + 1);
  double cost = loss + 0.5 * lambda * anorm();
  cout << prefix
       << "Loss=" << setprecision(12) << loss
       << " Cost=" << setprecision(12) << cost
       << " Misclassification=" << setprecision(4) << 100 * nerr << "%."
       << endl;
}
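Unlike Example 2, this version delegates per-example scoring to testOne and reports the regularizer through anorm() rather than a dot product with w; presumably anorm() returns the squared norm of the averaged weight vector (an assumption from the name), so the reported cost reflects the averaged iterate that ASGD actually predicts with.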
Example 5: generator
/// Perform a SAG training epoch
void
SvmSag::trainSag(int imin, int imax, const xvec_t &xp, const yvec_t &yp, const char *prefix)
{
  cout << prefix << "Training on [" << imin << ", " << imax << "]." << endl;
  assert(imin <= imax);
  assert(imin >= sdimin);
  assert(imax <= sdimax);
  assert(eta > 0);
  uniform_int_generator generator(imin, imax);
  for (int i=imin; i<=imax; i++)
    {
      int ii = generator();
      trainOne(xp.at(ii), yp.at(ii), eta, ii);
      t += 1;
    }
  cout << prefix << setprecision(6) << "wNorm=" << wnorm();
#if BIAS
  cout << " wBias=" << wBias;
#endif
  cout << endl;
}
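uniform_int_generator comes from the surrounding codebase; the point is that SAG samples examples uniformly at random with replacement instead of sweeping them in order, while still performing imax - imin + 1 updates per epoch. A hypothetical stand-in built on the standard library (a sketch, not the project's actual definition):

#include <random>

// Draws integers uniformly from [lo, hi], inclusive on both ends.
class uniform_int_generator
{
public:
  uniform_int_generator(int lo, int hi)
    : rng(std::random_device{}()), dist(lo, hi) {}
  int operator()() { return dist(rng); }
private:
  std::mt19937 rng;
  std::uniform_int_distribution<int> dist;
};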
Example 6: assert
/// Perform initial training epoch
void
SvmSag::trainInit(int imin, int imax, const xvec_t &xp, const yvec_t &yp, const char *prefix)
{
  cout << prefix << "Training on [" << imin << ", " << imax << "]." << endl;
  assert(imin <= imax);
  assert(eta > 0);
  assert(m == 0);
  sd.resize(imax - imin + 1);
  sdimin = imin;
  sdimax = imax;
  for (int i=imin; i<=imax; i++)
    {
      m += 1;
      trainOne(xp.at(i), yp.at(i), eta, i);
      t += 1;
    }
  cout << prefix << setprecision(6) << "wNorm=" << wnorm();
#if BIAS
  cout << " wBias=" << wBias;
#endif
  cout << endl;
}
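The sd table resized here presumably holds SAG's per-example gradient memory (an assumption from the name and from the example index forwarded to trainOne): SAG stores the last gradient computed for each example and steps along the average of all stored gradients, which is why this ordered first pass must fill every slot before the random sampling of Example 5 begins. The bookkeeping, reduced to scalars with fabricated gradients:

#include <iostream>

int main()
{
  double g[3] = {0, 0, 0};            // per-example gradient memory (cf. sd)
  double sum = 0, w = 0, eta = 0.1;
  double fresh[3] = {1.0, -2.0, 0.5}; // stand-in gradients at the current w
  for (int i = 0; i < 3; i++)
    {
      sum += fresh[i] - g[i];         // replace example i's stored gradient
      g[i] = fresh[i];
      w -= eta * sum / 3;             // step along the average stored gradient
    }
  std::cout << "w=" << w << std::endl;
  return 0;
}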
Example 7: assert
/// Perform a training epoch
void
SvmSgd::train(int imin, int imax, const xvec_t &xp, const yvec_t &yp, const char *prefix)
{
#if VERBOSE
  cout << prefix << "Training on [" << imin << ", " << imax << "]." << endl;
#endif
  assert(imin <= imax);
  assert(eta0 > 0);
  for (int i=imin; i<=imax; i++)
    {
      double eta = eta0 / (1 + lambda * eta0 * t);
      trainOne(xp.at(i), yp.at(i), eta);
      t += 1;
    }
#if VERBOSE
  cout << prefix << setprecision(6) << "wNorm=" << wnorm();
#if BIAS
  cout << " wBias=" << wBias;
#endif
  cout << endl;
#endif
}
Example 8: setprecision
/// Perform a training epoch
void
SvmSgd::train(int imin, int imax, const xvec_t &xp, const yvec_t &yp, const char *prefix)
{
  cout << prefix << "Training on [" << imin << ", " << imax << "]." << endl;
  assert(imin <= imax);
  assert(eta0 > 0);
  //cout << "wDivisor: " << wDivisor << " wBias: " << wBias << endl;
  for (int i=imin; i<=imax; i++)
    {
      double eta = eta0 / (1 + lambda * eta0 * t);
      //cout << "[my_evaluateEta:] Eta: " << eta << endl;
      trainOne(xp.at(i), yp.at(i), eta);
      t += 1;
    }
  //cout << "\nAfter training: \n wDivisor: " << wDivisor << " wBias: " << wBias << endl;
  cout << prefix << setprecision(6) << "wNorm=" << wnorm();
#if BIAS
  cout << " wBias=" << wBias;
#endif
  cout << endl;
}
Example 9: c
/// Estimate update sparsity and the bias update scale
void
SvmSgd::calibrate(int imin, int imax,
                  const xvec_t &xp, const yvec_t &yp)
{
  cout << "Estimating sparsity and bscale." << endl;
  int j;
  // compute average gradient size
  double n = 0;
  double m = 0;
  double r = 0;
  FVector c(w.size());
  for (j=imin; j<=imax && m<=1000; j++)
    {
      const SVector &x = xp.at(j);
      n += 1;             // count each example once
      r += x.npairs();    // total number of nonzero coordinates seen
      const SVector::Pair *p = x;
      while (p->i >= 0 && p->i < c.size())
        {
          double z = c.get(p->i) + fabs(p->v);
          c.set(p->i, z);
          m = max(m, z);
          p += 1;
        }
    }
  // bias update scaling
  bscale = m/n;
  // compute weight decay skip
  skip = (int) ((8 * n * w.size()) / r);
  cout << " using " << n << " examples." << endl;
  cout << " skip: " << skip
       << " bscale: " << setprecision(6) << bscale << endl;
}
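The two statistics estimated above tune the sparse update loop: bscale divides the largest accumulated feature magnitude m by the number of examples n, and skip spaces out the dense weight-decay updates so their amortized cost stays proportional to the data's sparsity. With n examples, d = w.size() dimensions, and r total nonzero pairs, decay runs once every 8*n*d/r sparse steps; a toy computation with made-up numbers:

#include <iostream>

int main()
{
  // Hypothetical values: 1000 examples, 50000-dimensional weights,
  // 100000 nonzero features in total (about 100 nonzeros per example).
  double n = 1000, d = 50000, r = 100000;
  int skip = (int)((8 * n * d) / r);
  std::cout << "skip=" << skip << std::endl;  // prints skip=4000
  return 0;
}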