This article collects typical usage examples of the C++ method TheMatrix::Minus. If you have been wondering how TheMatrix::Minus is used in C++, or what it looks like in practice, the curated code examples here may help. You can also browse further usage examples of the containing class, TheMatrix.
This page shows 1 code example of TheMatrix::Minus, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
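Before the full example, here is a minimal sketch of the apparent semantics of TheMatrix::Minus, inferred from how the example below uses it: an in-place, element-wise subtraction of the argument from the receiver. The header name and the helper are assumptions for illustration; only methods that actually appear in the example (Zero, Add, Minus) are called.

#include "TheMatrix.hpp"   // assumed header name for the BMRM matrix class

// Hypothetical helper for illustration: given the feature vector of the
// true labelling (featY) and of the predicted labelling (featYbar), leave
// their difference phi(ybar) - phi(y) in grad.
void FeatureDifference(TheMatrix &grad, const TheMatrix &featY, const TheMatrix &featYbar)
{
    grad.Zero();            // grad := 0
    grad.Add(featYbar);     // grad := grad + phi(ybar)
    grad.Minus(featY);      // grad := grad - phi(y), element-wise, in place
}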
Example 1: ComputeLossAndGradient
//......... part of the code omitted here .........
            find_best_label_grammer(Y[i].pos, Y[i].type, X[i], w, ybar, ybarlabel, marginloss, labelloss, 0, _data->getNumOfClass());
        else
            find_best_label(Y[i].pos, Y[i].type, X[i], w, ybar, ybarlabel, marginloss, labelloss, 0, _data->getNumOfClass());

        double labelloss_y = 0;
        double marginloss_y = 0;
        double labelloss_ybar = 0;
        double marginloss_ybar = 0;

        // recompute the losses for ybar; used as a consistency check below and,
        // when lastDuration > 0, as replacements for the decoded values
        ComputeLoss(Y[i].pos, Y[i].type, ybar, ybarlabel, X[i], w, marginloss_ybar, labelloss_ybar, 1);
        if(lossw[0] != 0)
            labelloss += lossw[0];   // add the configured constant label-loss offset
        if(lastDuration > 0)
        {
            marginloss = marginloss_ybar;
            labelloss = labelloss_ybar;
        }
        if(verbosity >= 3)
        {
            ComputeLoss(Y[i].pos, Y[i].type, Y[i].pos, Y[i].type, X[i], w, marginloss_y, labelloss_y, 1);
            printf("dp------marginloss:%2.4f---labelloss:%2.4f------\n", marginloss, labelloss);
            printf("ybar----marginloss:%2.4f---labelloss:%2.4f------\n", marginloss_ybar, labelloss_ybar);
            printf("y-------marginloss:%2.4f---labelloss:%2.4f------\n", marginloss_y, labelloss_y);
            // fabs, not abs: the integer abs overload would truncate the difference
            if(fabs(labelloss_ybar - labelloss) > 1e-5)
            {
                printf("labelloss doesn't match!\n");
                //exit(0);
            }
            if(fabs(marginloss_ybar - marginloss) > 1e-5)
            {
                printf("marginloss_ybar_dp:%2.4f != marginloss_ybar_computeLoss:%2.4f\n", marginloss, marginloss_ybar);
                printf("marginloss doesn't match!\n");
            }
        }
        // construct the feature vector g = phi(x_i, y) for the true labelling y
        const vector<unsigned int> &y = Y[i].pos;
        const vector<unsigned int> &ylabel = Y[i].type;
        g.Zero();
        for(unsigned int j = 0; j < y.size(); j++)
        {
            // unary feature at segment boundary y[j], tensored with its class label
            _data->TensorPhi1(X[i].phi_1[y[j]], ylabel[j], 0, tphi_1);
            g.Add(*tphi_1);
            if(j > 0)
            {
                // pairwise feature between consecutive boundaries y[j-1] and y[j]
                _data->TensorPhi2(X[i].phi_2[y[j-1]][y[j]-y[j-1]-1], ylabel[j-1], ylabel[j], 0, 0, tphi_2);
                g.Add(*tphi_2);
            }
        }
        if(y.size() > 0)
        {
            // closing pairwise feature from the last boundary to the end of the sequence
            _data->TensorPhi2(X[i].phi_2[y[y.size()-1]][X[i].len - y[y.size()-1] - 1], ylabel[y.size()-1], 0, 0, 0, tphi_2);
            g.Add(*tphi_2);
        }
        // accumulate the same features for the predicted labelling ybar into grad
        for(unsigned int j = 0; j < ybar.size(); j++)
        {
            _data->TensorPhi1(X[i].phi_1[ybar[j]], ybarlabel[j], 0, tphi_1);
            grad.Add(*tphi_1);
            if(j > 0)
            {
                _data->TensorPhi2(X[i].phi_2[ybar[j-1]][ybar[j]-ybar[j-1]-1], ybarlabel[j-1], ybarlabel[j], 0, 0, tphi_2);
                grad.Add(*tphi_2);
            }
        }
        if(ybar.size() > 0)
        {
            _data->TensorPhi2(X[i].phi_2[ybar[ybar.size()-1]][X[i].len - ybar[ybar.size()-1] - 1], ybarlabel[ybar.size()-1], 0, 0, 0, tphi_2);
            grad.Add(*tphi_2);
        }
        // grad accumulates phi(x_i, ybar) - phi(x_i, y) over all examples
        grad.Minus(g);

        // accumulate the loss: margin and label loss minus the score of the true y
        w.Dot(g, w_dot_g);
        loss = loss - w_dot_g + marginloss + labelloss;
    }

    // average the loss and gradient over the training set
    scalingFactor = 1.0 / trainExNum;
    grad.Scale(scalingFactor);
    loss *= scalingFactor;

    if(verbosity)
    {
        double gnorm = 0.0;
        grad.Norm2(gnorm);
        cout << "gradient norm=" << gnorm << endl;
    }
    //Evaluate(_model);
}
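For context, the role of Minus in this example: the function appears to compute the subgradient of a margin-rescaling structured hinge loss. Per example, g holds the feature vector \(\phi(x_i, y_i)\) of the true segmentation and grad accumulates \(\phi(x_i, \bar{y}_i)\) of the most violated segmentation returned by find_best_label, so after grad.Minus(g) the gradient accumulates

\[ \partial_w \ell_i = \phi(x_i, \bar{y}_i) - \phi(x_i, y_i), \]

matching the accumulated loss \( \ell_i(w) = \Delta(y_i, \bar{y}_i) + \langle w, \phi(x_i, \bar{y}_i) \rangle - \langle w, \phi(x_i, y_i) \rangle \), i.e. marginloss + labelloss - w_dot_g in the code; both loss and gradient are then averaged over trainExNum.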