本文整理汇总了C++中PBowDocBs::GetDocWId方法的典型用法代码示例。如果您正苦于以下问题：C++ PBowDocBs::GetDocWId方法的具体用法？C++ PBowDocBs::GetDocWId怎么用？C++ PBowDocBs::GetDocWId使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类PBowDocBs的用法示例。
在下文中一共展示了PBowDocBs::GetDocWId方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: New
// Builds and trains a Winnow-style binary classifier for category CatNm
// over all documents in BowDocBs, returning the trained model.
//   BowDocBs - bag-of-words document base providing documents, words and labels
//   CatNm    - name of the target category; must exist in BowDocBs
//   Beta     - demotion factor applied to the weights of experts backing the
//              wrong class for a document (values < 1 shrink those weights)
// Throws via TExcept::Throw when CatNm is not a known category.
PBowMd TBowWinnowMd::New(
const PBowDocBs& BowDocBs, const TStr& CatNm, const double& Beta){
// create model (WinnowMd is owned by the smart pointer BowMd from here on)
TBowWinnowMd* WinnowMd=new TBowWinnowMd(BowDocBs); PBowMd BowMd(WinnowMd);
WinnowMd->CatNm=CatNm;
WinnowMd->Beta=Beta;
WinnowMd->VoteTsh=0.5; // predict positive when positive-weight share exceeds 0.5
// prepare Winnow parameters
const double MnExpertWgtSum=1e-15; // guards the renormalization against division by ~0
// get cat-id (-1 signals an unknown category name)
int CId=BowDocBs->GetCId(CatNm);
if (CId==-1){
TExcept::Throw(TStr::GetStr(CatNm, "Invalid Category Name ('%s')!"));}
// get training documents (every document in the base is used)
TIntV TrainDIdV; BowDocBs->GetAllDIdV(TrainDIdV);
int TrainDocs=TrainDIdV.Len();
// prepare mini-experts: one positive and one negative expert per word,
// all weights starting at 1
int Words=BowDocBs->GetWords();
WinnowMd->PosExpertWgtV.Gen(Words); WinnowMd->PosExpertWgtV.PutAll(1);
WinnowMd->NegExpertWgtV.Gen(Words); WinnowMd->NegExpertWgtV.PutAll(1);
// winnow loop: stop after MxIters iterations, or after MxWorseIters
// consecutive iterations whose quality dropped by more than |MxDiff|
double PrevAcc=0; double PrevPrec=0; double PrevRec=0; double PrevF1=0;
const double MxDiff=-0.005; const int MxWorseIters=3; int WorseIters=0;
const int MxIters=50; int IterN=0;
while ((IterN<MxIters)&&(WorseIters<MxWorseIters)){
IterN++;
int FalsePos=0; int FalseNeg=0; int TruePos=0; int TrueNeg=0;
for (int DIdN=0; DIdN<TrainDocs; DIdN++){
int DId=TrainDIdV[DIdN];
bool ClassVal=BowDocBs->IsCatInDoc(DId, CId); // true label for this document
double PosWgt=0; double NegWgt=0;
double OldSum=0; double NewSum=0; // expert-weight mass over this doc's words, before/after demotion
int WIds=BowDocBs->GetDocWIds(DId); // number of word slots in the document
// change only experts of words that occur in the document
for (int WIdN=0; WIdN<WIds; WIdN++){
int WId=BowDocBs->GetDocWId(DId, WIdN);
OldSum+=WinnowMd->PosExpertWgtV[WId]+WinnowMd->NegExpertWgtV[WId];
// penalize expert giving wrong class prediction
// NOTE(review): experts are demoted for EVERY document, not only on
// prediction mistakes as in classic Winnow — presumably intentional,
// since the renormalization below turns this into a relative reweighting
if (ClassVal){
WinnowMd->NegExpertWgtV[WId]*=Beta;
} else {
WinnowMd->PosExpertWgtV[WId]*=Beta;
}
NewSum+=WinnowMd->PosExpertWgtV[WId]+WinnowMd->NegExpertWgtV[WId];
PosWgt+=WinnowMd->PosExpertWgtV[WId];
NegWgt+=WinnowMd->NegExpertWgtV[WId];
}
// normalize all experts touched above so their combined weight mass is
// restored to its pre-demotion value (skipped when NewSum is ~0)
if (NewSum>MnExpertWgtSum){
for (int WIdN=0; WIdN<WIds; WIdN++){
int WId=BowDocBs->GetDocWId(DId, WIdN);
WinnowMd->PosExpertWgtV[WId]*=OldSum/NewSum;
WinnowMd->NegExpertWgtV[WId]*=OldSum/NewSum;
}
}
// predict from the (post-demotion, pre-normalization) positive-weight
// share; break the tie randomly when the document carried no weight
bool PredClassVal;
if (PosWgt+NegWgt==0){PredClassVal=TBool::GetRnd();}
else {PredClassVal=(PosWgt/(PosWgt+NegWgt))>WinnowMd->VoteTsh;}
if (PredClassVal==ClassVal){
if (PredClassVal){TruePos++;} else {TrueNeg++;}
} else {
if (PredClassVal){FalsePos++;} else {FalseNeg++;}
}
}
// calculate temporary results (nothing to measure without training docs,
// so leave the loop immediately)
if (TrainDocs==0){break;}
double Acc=0; double Prec=0; double Rec=0; double F1=0;
if (TrainDocs>0){
Acc=100*(TruePos+TrueNeg)/double(TrainDocs); // accuracy, in percent
if (TruePos+FalsePos>0){
// NOTE(review): Rec is only computed when there is at least one positive
// prediction, so recall reads 0 on an all-negative iteration even when
// positive documents exist — verify this is the intended reporting
Prec=(TruePos/double(TruePos+FalsePos));
Rec=(TruePos/double(TruePos+FalseNeg));
if (Prec+Rec>0){
F1=(2*Prec*Rec/(Prec+Rec)); // harmonic mean of precision and recall
}
}
}
// check if the current iteration gave worse results then the previous
// (worse = a drop of more than 0.005 in accuracy, in F1, or in both
// precision and recall at once)
if (((Acc-PrevAcc)<MxDiff)||((F1-PrevF1)<MxDiff)||(((Prec-PrevPrec)<MxDiff)&&
((Rec-PrevRec)<MxDiff))){WorseIters++;}
else {WorseIters=0;}
PrevAcc=Acc; PrevPrec=Prec; PrevRec=Rec; PrevF1=F1;
printf("%d. Precision:%0.3f Recall:%0.3f F1:%0.3f Accuracy:%0.3f%%\n",
IterN, Prec, Rec, F1, Acc);
}
// return model
return BowMd;
}