This article collects typical usage examples of the C++ method TheMatrix::Set. If you have been wondering how TheMatrix::Set is used in practice and what it is for, the curated code examples below may help. You can also browse further usage examples of the containing class, TheMatrix.
A total of 6 code examples of TheMatrix::Set are shown below, sorted by popularity by default.
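Before the individual examples, here is a minimal sketch of the read-modify-write pattern that TheMatrix::Set typically appears in. The helper ClampInPlace and its signature are invented for illustration (assuming a TheMatrix declaration is in scope; it is not part of the library these snippets come from), and it relies only on Length, Get and Set, all of which appear in the examples below.

#include <algorithm>   // std::min, std::max

// Hypothetical helper (not part of the snippets' library): clamp every
// entry of v into [lo, hi] using TheMatrix element access only.
void ClampInPlace(TheMatrix& v, double lo, double hi)
{
    for(int i = 0; i < v.Length(); i++)
    {
        double val = 0.0;
        v.Get(i, val);                               // read element i into val
        v.Set(i, std::min(hi, std::max(lo, val)));   // write the clamped value back
    }
}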
Example 1: LossAndGrad
/**
 * Compute loss and gradient of the Huber hinge loss.
 * CAUTION: f is passed by reference and is changed within this
 * function. This is done for efficiency reasons; otherwise we would
 * have had to create a new copy of f.
 *
 * @param loss [write] loss value computed.
 * @param f [read/write] prediction vector.
 * @param l [write] partial derivative of the loss function w.r.t. f
 */
void CHuberHingeLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    f.ElementWiseMult(_data->labels());   // f := y .* f, so f now holds the margins y*f
    double* yf = f.Data();
    double* Y = _data->labels().Data();
    int len = f.Length();
    loss = 0.0;
    l.Zero();
    for(int i=0; i < len; i++)
    {
        double v = 1 - yf[i];
        if(h < v)                // linear region: violation exceeds the Huber parameter h
        {
            loss += v;
            l.Set(i, -Y[i]);
        }
        else if(-h > v) {}       // flat region: no loss, zero gradient
        else                     // quadratic region: |v| <= h, smooth transition
        {
            loss += (v+h)*(v+h)/4/h;
            l.Set(i, -Y[i]*(v+h)/2/h);
        }
    }
}
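Writing v = 1 - y f for the margin residual and h for the Huber parameter, the three branches above implement the piecewise loss and derivative

\[
\ell(f,y) =
\begin{cases}
v, & v > h \\[2pt]
\dfrac{(v+h)^2}{4h}, & |v| \le h \\[2pt]
0, & v < -h
\end{cases}
\qquad
\frac{\partial \ell}{\partial f} =
\begin{cases}
-y, & v > h \\[2pt]
-\dfrac{y\,(v+h)}{2h}, & |v| \le h \\[2pt]
0, & v < -h.
\end{cases}
\]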
Example 2: LossAndGrad
/**
 * Compute loss and partial derivative of the logistic loss w.r.t. f
 *
 * @param loss [write] loss value computed.
 * @param f [read/write] = X*w
 * @param l [write] partial derivative of the loss w.r.t. f
 */
void CLogisticLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    loss = 0.0;                  // reset the accumulator before the loop
    l.Zero();                    // for gradient computation, i.e. grad := l'*X
    f.ElementWiseMult(_data->labels());   // f now holds y*f
    double* f_array = f.Data();  // pointer to memory location of f (faster element access)
    int len = f.Length();
    double exp_yf = 0.0;
    for(int i=0; i < len; i++)
    {
        if(fabs(f_array[i]) == 0.0)    // y*f == 0: loss is exactly ln(2)
        {
            loss += LN2;
            l.Set(i, -0.5);
        }
        else if(f_array[i] > 0.0)      // y*f > 0: exp(-y*f) cannot overflow
        {
            exp_yf = exp(-f_array[i]);
            loss += log(1+exp_yf);
            l.Set(i, -exp_yf/(1+exp_yf));
        }
        else                           // y*f < 0: rewrite to avoid overflowing exp
        {
            exp_yf = exp(f_array[i]);
            loss += log(1+exp_yf) - f_array[i];
            l.Set(i, -1.0/(1+exp_yf));
        }
    }
    l.ElementWiseMult(_data->labels());   // chain rule: multiply by y to get d(loss)/d(f)
}
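The branch on the sign of y f is the standard numerically stable way to evaluate the logistic loss: exponentials of large positive arguments would overflow, so for y f < 0 the code applies the identity below with t = y f. At t = 0 the loss is exactly ln 2, which is the LN2 constant used in the first branch.

\[
\ell(f,y) = \log\left(1 + e^{-yf}\right),
\qquad
\log\left(1 + e^{-t}\right) = \log\left(1 + e^{t}\right) - t,
\qquad
\frac{\partial \ell}{\partial f} = \frac{-y}{1 + e^{yf}}.
\]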
Example 3: ComputeRegAndGradient
/** The subgradient is chosen as sgn(w).
 */
void CL1N1::ComputeRegAndGradient(CModel& model, double& reg, TheMatrix& grad)
{
    reg = 0;
    TheMatrix& w = model.GetW();
    w.Norm1(reg);                    // reg := ||w||_1
    grad.Zero();
    for(int i=0; i < w.Length(); i++)
    {
        double val = 0;
        w.Get(i, val);
        grad.Set(i, SML::sgn(val));  // subgradient: sign of each coordinate
    }
}
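In formula form, the regularizer and the chosen subgradient are given below. Assuming SML::sgn maps 0 to 0 (the usual convention, though the snippet itself does not show it), the chosen value is a valid element of the subdifferential at w_i = 0:

\[
R(w) = \|w\|_1 = \sum_i |w_i|,
\qquad
\operatorname{sgn}(w_i) =
\begin{cases}
+1, & w_i > 0 \\
0, & w_i = 0 \\
-1, & w_i < 0
\end{cases}
\;\in\; \partial_{w_i}\,\|w\|_1 .
\]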
Example 4: LossAndGrad
/**
 * Compute loss and gradient of the Least Absolute Deviation loss w.r.t. f
 *
 * @param loss [write] loss value computed.
 * @param f [read/write] = X*w
 * @param l [write] partial derivative of the loss w.r.t. f
 */
void CLeastAbsDevLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    loss = 0;
    l.Zero();
    double* Y_array = _data->labels().Data();
    double* f_array = f.Data();
    int len = f.Length();
    for(int i=0; i < len; i++)
    {
        double f_minus_y = f_array[i] - Y_array[i];
        loss += fabs(f_minus_y);
        l.Set(i, SML::sgn(f_minus_y));
    }
}
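Written out, the loop computes the L1 (least absolute deviation) regression loss and its subgradient:

\[
\ell(f,y) = \sum_i |f_i - y_i|,
\qquad
\frac{\partial \ell}{\partial f_i} = \operatorname{sgn}(f_i - y_i).
\]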
Example 5: LossAndGrad
/**
 * Compute loss and gradient of the novelty detection loss.
 *
 * @param loss [write] loss value computed.
 * @param f [read] prediction vector.
 * @param l [write] partial derivative of the loss function w.r.t. f
 */
void CNoveltyLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    loss = 0.0;                  // reset the accumulator before the loop
    double* f_array = f.Data();  // pointer to memory location of f (faster element access)
    int len = f.Length();
    l.Zero();                    // grad := l'*X
    for(int i=0; i < len; i++)
    {
        if(rho > f_array[i])     // only entries below the margin rho contribute
        {
            loss += rho - f_array[i];
            l.Set(i, -1.0);
        }
    }
}
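This is the one-class (novelty detection) hinge: only predictions below the margin \rho contribute to the loss,

\[
\ell(f) = \sum_i \max(0,\, \rho - f_i),
\qquad
\frac{\partial \ell}{\partial f_i} =
\begin{cases}
-1, & f_i < \rho \\
0, & \text{otherwise.}
\end{cases}
\]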
Example 6: add
/** Accumulate value into entry (offset + current_ideal_pi[i]) of l. */
void CNDCGRankLoss::add(TheMatrix& l, int offset, int i, double value)
{
    Scalar temp;                                        // element type used by TheMatrix
    l.Get(offset + current_ideal_pi[i], temp);          // read the current entry
    l.Set(offset + current_ideal_pi[i], temp + value);  // write back the accumulated sum
}
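A hedged usage sketch: assuming current_ideal_pi holds, for the current query, a permutation of document indices and offset is the query's starting position within the flat gradient vector l, repeated calls scatter-add per-document contributions. The loop bound n and the contribution array c are illustrative assumptions, not code from CNDCGRankLoss.

// Illustrative only: scatter-add contribution c[i] for each of n documents
// into l, at the position prescribed by the ideal permutation.
for(int i = 0; i < n; i++)
    add(l, offset, i, c[i]);   // l[offset + current_ideal_pi[i]] += c[i]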