本文整理汇总了C++中CFactor::UpdateStatisticsML方法的典型用法代码示例。如果您正苦于以下问题:C++ CFactor::UpdateStatisticsML方法的具体用法?C++ CFactor::UpdateStatisticsML怎么用?C++ CFactor::UpdateStatisticsML使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类CFactor的用法示例。
在下文中一共展示了CFactor::UpdateStatisticsML方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: Learn
// Parallel (MPI) EM parameter learning for a static graphical model.
// Each MPI process handles a contiguous slice of the evidence vector:
// evidences whose factor domains are fully observed contribute plain
// maximum-likelihood statistics, while partially observed ones go through
// a junction-tree inference engine and an EM statistics update.
// NOTE(review): this listing is truncated ("code omitted" below), so the
// cross-process aggregation of statistics and the convergence test of the
// do/while loop are not visible here.
void CParEMLearningEngine::Learn()
{
CStaticGraphicalModel *pGrModel = this->GetStaticModel();
PNL_CHECK_IS_NULL_POINTER(pGrModel);
// Require at least one not-yet-processed evidence.
PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);
CJtreeInfEngine *pCurrentInfEng = NULL;
CFactor *parameter = NULL;
int exit = 0;
int numberOfParameters = pGrModel->GetNumberOfParameters();
int domainNodes;
int infIsNeed = 0;
int itsML = 0;
// Convergence bookkeeping: log-likelihoods start at -FLT_MAX so the
// first iteration of the do/while loop always runs.
float loglik = -FLT_MAX;
float loglikOld = -FLT_MAX;
float epsilon = GetPrecisionEM();
float stopExpression = epsilon + 1.0f;
int iteration = 0;
int currentEvidNumber;
// EnterEvidence flags: no maximization, no summing over mixture nodes.
int bMaximize = 0;
int bSumOnMixtureNode = 0;
const CEvidence* pCurrentEvid;
int start_mpi, finish_mpi;
int NumberOfProcesses, MyRank;
int numSelfEvidences;
MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcesses);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
int d = 0;
do
{
iteration++;
// Split the unprocessed evidences evenly across MPI ranks.
numSelfEvidences = (GetNumEv() - GetNumberProcEv()) / NumberOfProcesses;
start_mpi = GetNumberProcEv() + numSelfEvidences * MyRank; // first evidence index of this rank's slice
if (MyRank < NumberOfProcesses - 1)
finish_mpi = start_mpi + numSelfEvidences; // exclusive upper bound of the slice
else
finish_mpi = GetNumEv(); // last rank absorbs the division remainder
for(int ev = start_mpi; ev < finish_mpi; ev++)
{
infIsNeed = 0;
currentEvidNumber = ev; // index into m_Vector_pEvidences
pCurrentEvid = m_Vector_pEvidences[currentEvidNumber];
if( !pCurrentEvid)
{
PNL_THROW(CNULLPointer, "evidence")
}
// Inference is needed when the evidence carries observation flags,
// i.e. is not fully observed -- TODO confirm GetObsFlags semantics.
infIsNeed = !GetObsFlags(ev)->empty();
if(infIsNeed)
{
// create inference engine
// (created lazily once, then reused for every evidence of this rank)
if(!pCurrentInfEng)
{
pCurrentInfEng = CJtreeInfEngine::Create(pGrModel);
}
pCurrentInfEng->EnterEvidence(pCurrentEvid, bMaximize,
bSumOnMixtureNode);
}
// Accumulate sufficient statistics of every factor for this evidence.
for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++)
{
parameter = pGrModel->GetFactor(domainNodes);
if(infIsNeed)
{
int DomainSize;
const int *domain;
parameter->GetDomain(&DomainSize, &domain);
if (IsDomainObserved(DomainSize, domain, currentEvidNumber))
{
// The factor's whole domain is observed: plain ML update.
const CEvidence *pEvidences[] = { pCurrentEvid };
parameter->UpdateStatisticsML(pEvidences, 1);
}
else
{
// Hidden nodes in the domain: marginalize onto the domain
// and do an EM update from the resulting joint distribution.
pCurrentInfEng->MarginalNodes(domain, DomainSize, 1);
const CPotential * pMargPot = pCurrentInfEng->GetQueryJPD();
parameter ->UpdateStatisticsEM(pMargPot, pCurrentEvid);
}
}
else
{
// Fully observed evidence: ML update, no inference needed.
const CEvidence *pEvidences[] = { pCurrentEvid };
parameter->UpdateStatisticsML(pEvidences, 1);
}
}
// Remember whether at least one evidence was handled purely by ML.
itsML = itsML || !infIsNeed;
}
for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++ )
{
parameter = pGrModel->GetFactor(domainNodes);
//......... part of the code is omitted in this listing .........
示例2: LearnOMP
//......... part of the code is omitted in this listing .........
// NOTE(review): fragment of LearnOMP -- the OpenMP-parallel body of the
// per-evidence loop. Each thread (Num_thread) owns its own inference
// engine, its own slice of ppAllFactors (offset Num_thread *
// numberOfParameters), and its own was_updated / itsML entries, so the
// parallel region accumulates statistics without sharing mutable state.
// Declarations of ev, pGrModel, ppAllFactors, was_updated, itsML,
// bMaximize, numberOfThreads etc. are outside this fragment.
int bSumOnMixtureNode = 0;
int infIsNeed = 0;
int currentEvidNumber = ev; // index into m_Vector_pEvidences
const CEvidence* pCurrentEvid = m_Vector_pEvidences[currentEvidNumber];
// Inference is needed when the evidence carries observation flags,
// i.e. is not fully observed -- TODO confirm GetObsFlags semantics.
infIsNeed = !GetObsFlags(ev)->empty();
int Num_thread = omp_get_thread_num();
if (infIsNeed)
{
// Lazily create one junction-tree engine per thread and reuse it.
if (!pCurrentInfEng[Num_thread])
{
pCurrentInfEng[Num_thread] = CJtreeInfEngine::Create(
(const CStaticGraphicalModel *)pGrModel);
}
pCurrentInfEng[Num_thread]->EnterEvidence(pCurrentEvid, bMaximize,
bSumOnMixtureNode);
}
// Update this thread's private copy of every factor's statistics.
for (DomainNodes_new = 0; DomainNodes_new < numberOfParameters;
DomainNodes_new++)
{
parameter = ppAllFactors[DomainNodes_new +
Num_thread * numberOfParameters];
if (infIsNeed)
{
int DomainSize;
const int *domain;
parameter->GetDomain(&DomainSize, &domain);
if (IsDomainObserved(DomainSize, domain, currentEvidNumber))
{
// Whole domain observed: plain ML statistics update.
const CEvidence *pEvidences[] = { pCurrentEvid };
parameter->UpdateStatisticsML(pEvidences, 1);
was_updated[DomainNodes_new+Num_thread*numberOfParameters]= true;
}
else
{
// Hidden nodes: marginalize and do an EM statistics update.
pCurrentInfEng[Num_thread]->MarginalNodes(domain, DomainSize, 1);
const CPotential * pMargPot =
pCurrentInfEng[Num_thread]->GetQueryJPD();
parameter ->UpdateStatisticsEM(pMargPot, pCurrentEvid);
was_updated[DomainNodes_new+Num_thread*numberOfParameters]= true;
}
}
else
{
// Fully observed evidence: ML update without inference.
const CEvidence *pEvidences[] = { pCurrentEvid };
parameter->UpdateStatisticsML(pEvidences, 1);
was_updated[DomainNodes_new+Num_thread*numberOfParameters]= true;
}
}
// Per-thread flag: true if at least one evidence needed no inference.
itsML[Num_thread] = itsML[Num_thread] || !infIsNeed;
} // end of parallel for
// Reduce the per-thread itsML flags into slot 0.
for (int delta = 1; delta < numberOfThreads; delta++)
{
itsML[0] = itsML[0] || itsML[delta];
};
//to join factors
// Merge each thread's factor statistics into thread 0's copy (truncated).
#pragma omp parallel for private(factor) default(shared)
for (factor = 0; factor < numberOfParameters; factor++)
{
for (int proc = 1; proc < numberOfThreads; proc++)
{
示例3: LearnExtraCPDs
// Learns the model's factors and an extra set of user-supplied CPDs from
// the same evidence, recording a per-family log-likelihood for the model's
// factors (m_vFamilyLogLik) and for each additional CPD (additionalLLs).
// NOTE(review): this listing is truncated -- the partially-observed branch
// (the trailing "else") is cut off, and nMaxFamily is not used in the
// visible portion.
// NOTE(review): additionalLLs->resize(numberOfAddFactors) is immediately
// followed by additionalLLs->clear(), which empties the vector; the later
// write (*additionalLLs)[i] = ll then indexes an empty vector -- this
// looks like out-of-bounds access. The clear() call should probably be
// removed (or the resize moved after it) -- confirm against the full file.
void CEMLearningEngine::LearnExtraCPDs(int nMaxFamily, pCPDVector* additionalCPDs, floatVector* additionalLLs)
{
CStaticGraphicalModel *pGrModel = this->GetStaticModel();
PNL_CHECK_IS_NULL_POINTER(pGrModel);
PNL_CHECK_LEFT_BORDER(GetNumEv(), 1);
int numberOfFactors = pGrModel->GetNumberOfFactors();
int numberOfAddFactors = additionalCPDs->size();
additionalLLs->resize(numberOfAddFactors);
additionalLLs->clear();
m_vFamilyLogLik.resize(numberOfFactors);
float loglik = 0.0f, ll;
int i, ev;
int iteration = 0;
const CEvidence* pEv;
CFactor *factor = NULL;
int nnodes;
const int * domain;
bool bInfIsNeed;
CInfEngine *pInfEng = m_pInfEngine;
if (IsAllObserved())
{
// Fully observed data: single-pass ML statistics for model factors...
for (i = 0; i < numberOfFactors; i++)
{
factor = pGrModel->GetFactor(i);
factor->UpdateStatisticsML(&m_Vector_pEvidences[GetNumberProcEv()],
GetNumEv() - GetNumberProcEv());
}
// ...and per-evidence ML statistics for each additional CPD.
for( ev = 0; ev < GetNumEv() ; ev++)
{
pEv = m_Vector_pEvidences[ev];
for( i = 0; i < numberOfAddFactors; i++ )
{
factor = static_cast<CFactor*>((*additionalCPDs)[i]);
factor->UpdateStatisticsML( &pEv, 1 );
}
}
switch (pGrModel->GetModelType())
{
case mtBNet:
{
// Bayesian network: convert statistics to parameters and collect
// per-family log-likelihoods plus the total.
for( i = 0; i<numberOfFactors; i++ )
{
factor = pGrModel->GetFactor(i);
ll = factor->ProcessingStatisticalData( GetNumEv());
m_vFamilyLogLik[i] = ll;
loglik += ll;
}
for( i = 0; i < numberOfAddFactors; i++ )
{
factor = static_cast<CFactor*>((*additionalCPDs)[i]);
ll = factor->ProcessingStatisticalData( GetNumEv());
(*additionalLLs)[i] = ll;
}
break;
}
case mtMRF2:
case mtMNet:
{
// Markov-net model types: no per-family processing in this branch.
break;
}
default:
{
PNL_THROW(CBadConst, "model type" )
break;
}
}
m_critValue.push_back(loglik);
}
else
{
示例4: LearnContMPI
// MPI-parallel EM learning for models with continuous distributions.
// Fully observed data takes a single ML pass; otherwise each rank runs
// EM over its slice of the evidences, after which per-factor mean and
// covariance matrices are exchanged between processes.
// NOTE(review): this listing is truncated -- the MPI send/receive of
// matMeanForSending / matCovForSending and the loop's convergence test
// are not visible here.
// NOTE(review): the outer-scope "int i" and "int ev" are shadowed by the
// redeclarations inside both branches, and "evid"/"dt" in the observed
// branch are never used in the visible code.
void CParEMLearningEngine::LearnContMPI()
{
CStaticGraphicalModel *pGrModel = this->GetStaticModel();
PNL_CHECK_IS_NULL_POINTER(pGrModel);
// Require at least one not-yet-processed evidence.
PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);
CInfEngine *pInfEng = NULL;
pInfEng = CJtreeInfEngine::Create(pGrModel);
float loglik = 0.0f;
int domainNodes;
CFactor *parameter = NULL;
int numberOfParameters = pGrModel->GetNumberOfParameters();
int nFactors = pGrModel->GetNumberOfFactors();
const CEvidence *pEv;
CFactor *pFactor;
int iteration = 0;
int ev;
int i,numSelfEvidences,NumberOfProcesses, MyRank;
int start_mpi, finish_mpi;
MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcesses);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
if (IsAllObserved())
{
// Fully observed data: one ML statistics pass, then rebuild the model.
int i;
float **evid = NULL;
EDistributionType dt;
CFactor *factor = NULL;
for (i = 0; i < nFactors; i++)
{
factor = pGrModel->GetFactor(i);
factor->UpdateStatisticsML(&m_Vector_pEvidences[GetNumberProcEv()],
GetNumEv() - GetNumberProcEv());
}
m_critValue.push_back(UpdateModel());
}
else
{
bool bContinue;
const CPotential * pot;
do
{
// Fresh statistics each EM iteration.
ClearStatisticData();
iteration++;
// Split the unprocessed evidences evenly across MPI ranks; the
// last rank absorbs the division remainder.
numSelfEvidences = (GetNumEv() - GetNumberProcEv()) / NumberOfProcesses;
start_mpi = GetNumberProcEv() + numSelfEvidences * MyRank;
if (MyRank < NumberOfProcesses - 1)
finish_mpi = start_mpi + numSelfEvidences;
else
finish_mpi = GetNumEv();
for(int ev = start_mpi; ev < finish_mpi; ev++)
{
// Inference is needed when the evidence carries observation
// flags, i.e. is not fully observed -- TODO confirm semantics.
bool bInfIsNeed = !GetObsFlags(ev)->empty();
pEv = m_Vector_pEvidences[ev];
if( bInfIsNeed )
{
pInfEng->EnterEvidence(pEv, 0, 0);
}
int i;
for( i = 0; i < nFactors; i++ )
{
pFactor = pGrModel->GetFactor(i);
int nnodes;
const int * domain;
pFactor->GetDomain( &nnodes, &domain );
if( bInfIsNeed && !IsDomainObserved(nnodes, domain, ev ) )
{
// Hidden nodes in this factor's domain: marginalize and
// do an EM statistics update.
pInfEng->MarginalNodes( domain, nnodes, 1 );
pot = pInfEng->GetQueryJPD();
pFactor->UpdateStatisticsEM( /*pInfEng->GetQueryJPD */ pot, pEv );
}
else
{
// Fully observed domain: plain ML update.
pFactor->UpdateStatisticsML( &pEv, 1 );
}
}
}
// Exchange accumulated statistics between ranks (truncated below).
for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++ )
{
parameter = pGrModel->GetFactor(domainNodes);
C2DNumericDenseMatrix<float> *matMeanForSending;
C2DNumericDenseMatrix<float> *matCovForSending;
int dataLengthM,dataLengthC;
//......... part of the code is omitted in this listing .........
示例5: Learn
// Sequential EM parameter learning. SoftMax-distributed factors get a
// dedicated gradient-based maximum-likelihood path driven by a full
// evidence matrix; all other factors use the standard ML/EM statistics
// machinery. The junction-tree engine is created lazily and cached in
// m_pInfEngine for reuse across calls.
// NOTE(review): this listing is truncated -- the partially-observed
// branch ends mid-way, and the visible code never frees full_evid.
void CEMLearningEngine::Learn()
{
CStaticGraphicalModel *pGrModel = this->GetStaticModel();
PNL_CHECK_IS_NULL_POINTER(pGrModel);
// Require at least one not-yet-processed evidence.
PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);
CInfEngine *pInfEng = NULL;
if (m_pInfEngine)
{
// Reuse the cached inference engine from a previous call.
pInfEng = m_pInfEngine;
}
else
{
// Only build an engine if inference will actually be needed.
if (!m_bAllObserved)
{
pInfEng = CJtreeInfEngine::Create(pGrModel);
m_pInfEngine = pInfEng;
}
}
float loglik = 0.0f;
int nFactors = pGrModel->GetNumberOfFactors();
const CEvidence *pEv;
CFactor *pFactor;
int iteration = 0;
int ev;
// Detect whether any factor is SoftMax-distributed; those need the
// full evidence matrix built below.
bool IsCastNeed = false;
int i;
for( i = 0; i < nFactors; i++ )
{
pFactor = pGrModel->GetFactor(i);
EDistributionType dt = pFactor->GetDistributionType();
if ( dt == dtSoftMax ) IsCastNeed = true;
}
float ** full_evid = NULL;
if (IsCastNeed)
{
BuildFullEvidenceMatrix(&full_evid);
}
if (IsAllObserved())
{
int i;
float **evid = NULL;
EDistributionType dt;
CFactor *factor = NULL;
for (i = 0; i < nFactors; i++)
{
factor = pGrModel->GetFactor(i);
dt = factor->GetDistributionType();
if (dt != dtSoftMax)
{
// Standard factor: one-shot ML statistics over all evidences.
factor->UpdateStatisticsML(&m_Vector_pEvidences[GetNumberProcEv()],
GetNumEv() - GetNumberProcEv());
}
else
{
// SoftMax factor: assemble its family (parents + node i),
// slice the evidence matrix for that family, and run the
// configured gradient maximization of the likelihood.
intVector family;
family.resize(0);
pGrModel->GetGraph()->GetParents(i, &family);
family.push_back(i);
CSoftMaxCPD* SoftMaxFactor = static_cast<CSoftMaxCPD*>(factor);
SoftMaxFactor->BuildCurrentEvidenceMatrix(&full_evid,
&evid,family,m_Vector_pEvidences.size());
SoftMaxFactor->InitLearnData();
SoftMaxFactor->SetMaximizingMethod(m_MaximizingMethod);
// Tolerance and learning-rate constants for the maximization --
// presumably (eps, learning rate); verify against CSoftMaxCPD.
SoftMaxFactor->MaximumLikelihood(evid, m_Vector_pEvidences.size(),
0.00001f, 0.01f);
SoftMaxFactor->CopyLearnDataToDistrib();
// Release the per-family evidence rows allocated above.
for (int k = 0; k < factor->GetDomainSize(); k++)
{
delete [] evid[k];
}
delete [] evid;
}
}
m_critValue.push_back(UpdateModel());
}
else
{
bool bContinue;
const CPotential * pot;
// NOTE(review): the following is a commented-out remnant duplicating
// the SoftMax detection above; kept verbatim from the original.
/* bool IsCastNeed = false;
int i;
for( i = 0; i < nFactors; i++ )
{
pFactor = pGrModel->GetFactor(i);
EDistributionType dt = pFactor->GetDistributionType();
if ( dt == dtSoftMax ) IsCastNeed = true;
}
float ** full_evid;
if (IsCastNeed)
//......... part of the code is omitted in this listing .........