This article collects typical usage examples of the C++ method TIntFltH::Defrag. If you are wondering how TIntFltH::Defrag is used in C++, or are looking for concrete examples of calling TIntFltH::Defrag, the curated code examples here may help. You can also explore further usage examples of the containing class, TIntFltH.
One code example of the TIntFltH::Defrag method is shown below. Examples are sorted by popularity by default; you can upvote the ones you like or find useful, and your ratings help the system recommend better C++ code examples.
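For context, TIntFltH is SNAP's hash table from integer keys to floating-point values (a THash&lt;TInt, TFlt&gt;), and Defrag compacts the table after keys have been deleted. The short program below is a minimal, hypothetical sketch (not taken from the example on this page) of a typical Defrag call; it assumes SNAP's Snap.h header and library are available on the include path.

#include "Snap.h"  // SNAP headers; assumed to be available in this sketch

// Minimal sketch: populate a TIntFltH, delete a few keys, then call Defrag()
// to rebuild the table without the deleted entries.
int main() {
  TIntFltH H;
  for (int k = 0; k < 10; k++) {
    H.AddDat(k, k * 0.5);          // key -> value
  }
  H.DelKey(3);                     // deletions leave gaps behind in the table
  H.DelKey(7);
  H.Defrag();                      // compact: drop the deleted slots
  for (TIntFltH::TIter It = H.BegI(); It < H.EndI(); It++) {
    printf("%d -> %g\n", It.GetKey().Val, It.GetDat().Val);
  }
  return 0;
}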
Example 1: MLEGradAscentParallel
int TAGMFast::MLEGradAscentParallel(const double& Thres, const int& MaxIter, const int ChunkNum, const int ChunkSize, const TStr PlotNm, const double StepAlpha, const double StepBeta) {
  //parallel
  time_t InitTime = time(NULL);
  uint64 StartTm = TSecTm::GetCurTm().GetAbsSecs();
  TExeTm ExeTm, CheckTm;
  double PrevL = Likelihood(true);
  TIntFltPrV IterLV;
  int PrevIter = 0;
  int iter = 0;
  TIntV NIdxV(F.Len(), 0);
  for (int i = 0; i < F.Len(); i++) { NIdxV.Add(i); }
  TIntV NIDOPTV(F.Len()); // per-node flag: 1 = node no longer requires optimization
  NIDOPTV.PutAll(0);
  TVec<TIntFltH> NewF(ChunkNum * ChunkSize);
  TIntV NewNIDV(ChunkNum * ChunkSize);
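  // main loop: each iteration re-collects the nodes that still need optimization,
  // shuffles them, and updates ChunkNum chunks of ChunkSize nodes in parallel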
  for (iter = 0; iter < MaxIter; iter++) {
    NIdxV.Clr(false);
    for (int i = 0; i < F.Len(); i++) {
      if (NIDOPTV[i] == 0) { NIdxV.Add(i); }
    }
    IAssert(NIdxV.Len() <= F.Len());
    NIdxV.Shuffle(Rnd);
    // compute gradient for chunk of nodes
    #pragma omp parallel for schedule(static, 1)
    for (int TIdx = 0; TIdx < ChunkNum; TIdx++) {
      TIntFltH GradV;
      for (int ui = TIdx * ChunkSize; ui < (TIdx + 1) * ChunkSize; ui++) {
        NewNIDV[ui] = -1;
        if (ui >= NIdxV.Len()) { continue; } // skip slots beyond the number of nodes left to optimize
        int u = NIdxV[ui];
        // find the set of candidate communities c (we only need to consider communities to which a neighbor of u belongs)
        TUNGraph::TNodeI UI = G->GetNI(u);
        TIntSet CIDSet(5 * UI.GetDeg());
        TIntFltH CurFU = F[u];
        for (int e = 0; e < UI.GetDeg(); e++) {
          if (HOVIDSV[u].IsKey(UI.GetNbrNId(e))) { continue; }
          TIntFltH& NbhCIDH = F[UI.GetNbrNId(e)];
          for (TIntFltH::TIter CI = NbhCIDH.BegI(); CI < NbhCIDH.EndI(); CI++) {
            CIDSet.AddKey(CI.GetKey());
          }
        }
        if (CIDSet.Empty()) {
          CurFU.Clr();
        }
        else {
          for (TIntFltH::TIter CI = CurFU.BegI(); CI < CurFU.EndI(); CI++) { // remove community memberships that u does not share with any neighbor
            if (! CIDSet.IsKey(CI.GetKey())) {
              CurFU.DelIfKey(CI.GetKey());
            }
          }
          GradientForRow(u, GradV, CIDSet);
          if (Norm2(GradV) < 1e-4) { NIDOPTV[u] = 1; continue; }
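          // choose a step size by line search along the gradient direction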
          double LearnRate = GetStepSizeByLineSearch(u, GradV, GradV, StepAlpha, StepBeta, 5);
          if (LearnRate <= 1e-5) { NewNIDV[ui] = -2; continue; }
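          // take the gradient step; memberships that drop to zero or below are removed entirely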
          for (int ci = 0; ci < GradV.Len(); ci++) {
            int CID = GradV.GetKey(ci);
            double Change = LearnRate * GradV.GetDat(CID);
            double NewFuc = CurFU.IsKey(CID) ? CurFU.GetDat(CID) + Change : Change;
            if (NewFuc <= 0.0) {
              CurFU.DelIfKey(CID);
            } else {
              CurFU.AddDat(CID) = NewFuc;
            }
          }
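          // compact CurFU after the deletions above: Defrag() rebuilds the hash table
          // without the deleted slots, so the key ids are contiguous again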
          CurFU.Defrag();
        }
        // store changes
        NewF[ui] = CurFU;
        NewNIDV[ui] = u;
      }
    }
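    // merge the per-chunk results serially: first subtract each updated node's old
    // membership weights from the per-community sums (SumFV)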
    int NumNoChangeGrad = 0;
    int NumNoChangeStepSize = 0;
    for (int ui = 0; ui < NewNIDV.Len(); ui++) {
      int NewNID = NewNIDV[ui];
      if (NewNID == -1) { NumNoChangeGrad++; continue; }
      if (NewNID == -2) { NumNoChangeStepSize++; continue; }
      for (TIntFltH::TIter CI = F[NewNID].BegI(); CI < F[NewNID].EndI(); CI++) {
        SumFV[CI.GetKey()] -= CI.GetDat();
      }
    }
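    // swap the new membership vectors into F in parallel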
    #pragma omp parallel for
    for (int ui = 0; ui < NewNIDV.Len(); ui++) {
      int NewNID = NewNIDV[ui];
      if (NewNID < 0) { continue; }
      F[NewNID] = NewF[ui];
    }
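    // then add the updated membership weights back into SumFV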
    for (int ui = 0; ui < NewNIDV.Len(); ui++) {
      int NewNID = NewNIDV[ui];
      if (NewNID < 0) { continue; }
      for (TIntFltH::TIter CI = F[NewNID].BegI(); CI < F[NewNID].EndI(); CI++) {
        SumFV[CI.GetKey()] += CI.GetDat();
      }
    }
    // update the nodes that are optimal
    for (int ui = 0; ui < NewNIDV.Len(); ui++) {
      int NewNID = NewNIDV[ui];
      if (NewNID < 0) { continue; }
      TUNGraph::TNodeI UI = G->GetNI(NewNID);
      NIDOPTV[NewNID] = 0;
      //......... the rest of the code is omitted here .........