本文整理汇总了C++中CFactor::GetDistribFun方法的典型用法代码示例。如果您正苦于以下问题：C++ CFactor::GetDistribFun方法的具体用法？C++ CFactor::GetDistribFun怎么用？C++ CFactor::GetDistribFun使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类CFactor的用法示例。
在下文中一共展示了CFactor::GetDistribFun方法的5个代码示例，这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞，您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: Learn
// Bayesian parameter learning over the evidences that have not been
// processed yet.  For tabular models every factor's distribution gets a
// Bayesian update per evidence; a second branch handles other types.
// NOTE(review): this excerpt is truncated by the example site -- the body
// of the else-branch and the function's closing braces are not shown.
void CBayesLearningEngine::Learn()
{
// A graphical model must have been attached before learning can run.
if(!m_pGrModel)
{
PNL_THROW( CNULLPointer, "no graphical model")
}
CStaticGraphicalModel *grmodel = this->GetStaticModel();
CFactor *factor = NULL;
int numberOfFactors = grmodel->GetNumberOfFactors();
int domainNodes;
// Nothing left to learn: every stored evidence was already consumed.
if(m_numberOfLearnedEvidences == m_numberOfAllEvidences)
{
PNL_THROW(COutOfRange, "number of unlearned evidences must be positive")
}
int currentEvidNumber;
const CEvidence* pCurrentEvid;
//below code is intended to work on tabular CPD and gaussian CPD
//later we will generalize it for other distribution types
// NOTE(review): only factor 0 is inspected -- this assumes every factor in
// the model shares the same distribution type; confirm against callers.
if ((grmodel->GetFactor(0))->GetDistributionType() == dtTabular)
{
// Process each evidence record that has not been learned from yet.
for( int ev = m_numberOfLearnedEvidences; ev < m_numberOfAllEvidences; ev++)
{
currentEvidNumber = ev;
pCurrentEvid = m_Vector_pEvidences[currentEvidNumber];
if( !pCurrentEvid)
{
PNL_THROW(CNULLPointer, "evidence")
}
// Bayes-update every factor of the model with this single evidence.
for( domainNodes = 0; domainNodes < numberOfFactors; domainNodes++ )
{
factor = grmodel->GetFactor( domainNodes );
int DomainSize;
const int *domain;
factor->GetDomain( &DomainSize, &domain );
const CEvidence *pEvidences[] = { pCurrentEvid };
// C-style downcast is guarded by the dtTabular check above.
CTabularDistribFun* pDistribFun = (CTabularDistribFun*)(factor->GetDistribFun());
pDistribFun->BayesUpdateFactor(pEvidences, 1, domain);
}
}
}
else
{
// Non-tabular (presumably Gaussian) branch -- body omitted in this excerpt.
for( domainNodes = 0; domainNodes < numberOfFactors; domainNodes++ )
示例2: Learn
//......... part of the code is omitted here .........
// NOTE(review): this excerpt starts mid-function; the visible part is the
// tail of one EM iteration of a distributed (MPI) learning loop.
const int *domain;
parameter->GetDomain(&DomainSize, &domain);
// E-step contribution of the current evidence to this factor's statistics:
// a fully observed domain takes a maximum-likelihood update, otherwise the
// inference engine supplies the marginal over the hidden part.
if (IsDomainObserved(DomainSize, domain, currentEvidNumber))
{
const CEvidence *pEvidences[] = { pCurrentEvid };
parameter->UpdateStatisticsML(pEvidences, 1);
}
else
{
pCurrentInfEng->MarginalNodes(domain, DomainSize, 1);
const CPotential * pMargPot = pCurrentInfEng->GetQueryJPD();
parameter ->UpdateStatisticsEM(pMargPot, pCurrentEvid);
}
}
else
{
// No inference needed for this evidence: plain ML statistics update.
const CEvidence *pEvidences[] = { pCurrentEvid };
parameter->UpdateStatisticsML(pEvidences, 1);
}
}
// Remember whether every evidence so far was handled purely by ML updates.
itsML = itsML || !infIsNeed;
}
// Combine the sufficient statistics gathered by all MPI processes:
// element-wise sum of each factor's statistical table across ranks.
for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++ )
{
parameter = pGrModel->GetFactor(domainNodes);
CNumericDenseMatrix<float> *matForSending;
int matDim;
const int *pMatRanges;
int dataLength;
const float *pDataForSending;
matForSending = static_cast<CNumericDenseMatrix<float>*>
((parameter->GetDistribFun())->GetStatisticalMatrix(stMatTable));
// NOTE(review): matDim/pMatRanges are fetched but never used below.
matForSending->GetRanges(&matDim, &pMatRanges);
matForSending->GetRawData(&dataLength, &pDataForSending);
// NOTE(review): pDataRecv and pDataRecv_copy are new[]-ed on every loop
// iteration and never delete[]-d in the visible code (pDataRecv_copy is
// not even used) -- this leaks; `status` is also unused.
float *pDataRecv = new float[dataLength];
float *pDataRecv_copy = new float[dataLength];
MPI_Status status;
MPI_Allreduce((void*)pDataForSending, pDataRecv, dataLength, MPI_FLOAT, MPI_SUM,
MPI_COMM_WORLD);
// Write the reduced sums back into the factor's own statistics matrix.
CNumericDenseMatrix<float> *RecvMatrix =
static_cast<CNumericDenseMatrix<float>*>
(parameter->GetDistribFun()->GetStatisticalMatrix(stMatTable));
int dataLength_new;
float *pData_new;
// GetRawData hands out a const view; the cast below lets the copy loop
// mutate the matrix storage in place.
RecvMatrix->GetRawData(&dataLength_new, (const float**)(&pData_new));
for(int t=0;t<dataLength_new;t++)
pData_new[t]=pDataRecv[t];
}
// M-step: recompute factors from the accumulated statistics and compute the
// data log-likelihood under the current model.
switch (pGrModel->GetModelType())
{
case mtBNet:
{
loglikOld = loglik;
loglik = 0.0f;
for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++)
{
parameter = pGrModel->GetFactor(domainNodes);
loglik += parameter->ProcessingStatisticalData(m_numberOfAllEvidences);
}
break;
}
case mtMRF2:
case mtMNet:
{
loglikOld = loglik;
loglik = _LearnPotentials();
break;
}
default:
{
PNL_THROW(CBadConst, "model type")
break;
}
}
// Relative change of the log-likelihood between two consecutive iterations.
stopExpression =
float(fabs(2 * (loglikOld - loglik) / (loglikOld + loglik)));
// Keep iterating while not converged, the iteration cap is not hit, and at
// least one evidence actually required inference (pure-ML runs stop early).
// NOTE(review): the local name `exit` shadows ::exit from <cstdlib>.
exit = ((stopExpression > epsilon) && (iteration <= GetMaxIterEM())) && !itsML;
if(exit)
{
ClearStatisticData();
}
delete pCurrentInfEng;
pCurrentInfEng = NULL;
}while(exit);
if(iteration > GetMaxIterEM())
{
PNL_THROW(CNotConverged, "maximum number of iterations")
}
// Mark every evidence as processed.
SetNumProcEv( GetNumEv() );
}
示例3: LearnContMPI
//......... part of the code is omitted here .........
// NOTE(review): this excerpt starts mid-function (LearnContMPI); the visible
// part is the EM loop of an MPI-parallel learner for continuous (Gaussian)
// distributions.
const CPotential * pot;
do
{
ClearStatisticData();
iteration++;
// Split the unprocessed evidences evenly across the MPI ranks; the last
// rank also takes the division remainder.
numSelfEvidences = (GetNumEv() - GetNumberProcEv()) / NumberOfProcesses;
start_mpi = GetNumberProcEv() + numSelfEvidences * MyRank;
if (MyRank < NumberOfProcesses - 1)
finish_mpi = start_mpi + numSelfEvidences;
else
finish_mpi = GetNumEv();
// E-step over this rank's share of the evidences.
for(int ev = start_mpi; ev < finish_mpi; ev++)
{
// Non-empty observation flags apparently signal hidden nodes, hence the
// need for inference -- TODO(review): confirm the flag semantics.
bool bInfIsNeed = !GetObsFlags(ev)->empty();
pEv = m_Vector_pEvidences[ev];
if( bInfIsNeed )
{
pInfEng->EnterEvidence(pEv, 0, 0);
}
int i;
for( i = 0; i < nFactors; i++ )
{
pFactor = pGrModel->GetFactor(i);
int nnodes;
const int * domain;
pFactor->GetDomain( &nnodes, &domain );
// Hidden nodes in the domain: use the inferred marginal (EM update);
// fully observed domain: plain maximum-likelihood update.
if( bInfIsNeed && !IsDomainObserved(nnodes, domain, ev ) )
{
pInfEng->MarginalNodes( domain, nnodes, 1 );
pot = pInfEng->GetQueryJPD();
pFactor->UpdateStatisticsEM( /*pInfEng->GetQueryJPD */ pot, pEv );
}
else
{
pFactor->UpdateStatisticsML( &pEv, 1 );
}
}
}
// Sum the per-rank mean and covariance statistics across all processes.
for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++ )
{
parameter = pGrModel->GetFactor(domainNodes);
C2DNumericDenseMatrix<float> *matMeanForSending;
C2DNumericDenseMatrix<float> *matCovForSending;
int dataLengthM,dataLengthC;
const float *pMeanDataForSending;
const float *pCovDataForSending;
matMeanForSending = static_cast<C2DNumericDenseMatrix<float>*>
((parameter->GetDistribFun())->GetStatisticalMatrix(stMatMu));
matMeanForSending->GetRawData(&dataLengthM, &pMeanDataForSending);
matCovForSending = static_cast<C2DNumericDenseMatrix<float>*>
((parameter->GetDistribFun())->GetStatisticalMatrix(stMatSigma));
matCovForSending->GetRawData(&dataLengthC, &pCovDataForSending);
// NOTE(review): pMeanDataRecv/pCovDataRecv are new[]-ed every iteration
// and never delete[]-d in the visible code; `status` is unused.
float *pMeanDataRecv = new float[dataLengthM];
float *pCovDataRecv = new float[dataLengthC];
MPI_Status status;
MPI_Allreduce((void*)pMeanDataForSending, pMeanDataRecv, dataLengthM, MPI_FLOAT, MPI_SUM,
MPI_COMM_WORLD);
MPI_Allreduce((void*)pCovDataForSending, pCovDataRecv, dataLengthC, MPI_FLOAT, MPI_SUM,
MPI_COMM_WORLD);
// Copy the reduced sums back into the matrices' storage; the casts throw
// away the const that GetRawData imposed on the raw pointers.
memcpy((void*)pMeanDataForSending,pMeanDataRecv,dataLengthM*sizeof(float));
memcpy((void*)pCovDataForSending,pCovDataRecv,dataLengthC*sizeof(float));
}
// M-step plus convergence test on the relative log-likelihood change.
loglik = UpdateModel();
if( GetMaxIterEM() != 1)
{
bool flag = iteration == 1 ? true :
(fabs(2*(m_critValue.back()-loglik)/(m_critValue.back() + loglik)) > GetPrecisionEM() );
bContinue = GetMaxIterEM() > iteration && flag;
}
else
{
bContinue = false;
}
m_critValue.push_back(loglik);
}while(bContinue);
}
// Mark every evidence as processed.
SetNumProcEv( GetNumEv() );
}
示例4: CInfEngine
// Constructor of the compound ("Ex") inference engine: splits the model's
// graph into connectivity components, builds a submodel plus node index maps
// for each component, and re-attaches every factor to its component's model
// with a component-local domain.
// NOTE(review): this excerpt is truncated -- the template<...> header that
// must precede this definition and the constructor's tail are not shown.
CExInfEngine< INF_ENGINE, MODEL, FLAV, FALLBACK_ENGINE1, FALLBACK_ENGINE2 >::CExInfEngine( CStaticGraphicalModel const *gm )
: CInfEngine( itEx, gm ), evidence_mine( false ),
maximize( 0 ), MPE_ev( 0 ), query_JPD( 0 ), graphical_model( gm )
{
int i, j, k;
intVector dom;
intVector conv;
CFactor *fac;
PNL_MAKE_LOCAL( CGraph *, gr, gm, GetGraph() );
PNL_MAKE_LOCAL( int, sz, gr, GetNumberOfNodes() );
// Partition the graph's nodes into connected components and keep every
// component's node list sorted.
gr->GetConnectivityComponents( &decomposition );
for ( i = decomposition.size(); i--; )
{
std::sort( decomposition[i].begin(), decomposition[i].end() );
}
if ( PNL_IS_EXINFENGINEFLAVOUR_UNSORTED( FLAV ) )
{
gr->GetTopologicalOrder( &conv );
}
// orig2comp maps an original node id to its component index; orig2idx maps
// it to its position inside that component.
orig2comp.resize( sz );
orig2idx.resize( sz );
// At most two passes: the first builds the maps; for "unsorted" flavours
// the components are then renumbered through the topological order `conv`
// and the maps are rebuilt in a second pass.
for ( k = 2; k--; )
{
for ( i = decomposition.size(); i--; )
{
for ( j = decomposition[i].size(); j--; )
{
orig2comp[decomposition[i][j]] = i;
orig2idx[decomposition[i][j]] = j;
}
}
if ( PNL_IS_EXINFENGINEFLAVOUR_UNSORTED( FLAV ) && k )
{
for ( i = sz; i--; )
{
decomposition[orig2comp[conv[i]]][orig2idx[conv[i]]] = i;
}
}
else
{
break;
}
}
// One subgraph, submodel and (later) sub-engine per component.
graphs.resize( decomposition.size() );
models.resize( decomposition.size() );
engines.resize( decomposition.size() );
for ( i = decomposition.size(); i--; )
{
graphs[i] = gr->ExtractSubgraph( decomposition[i] );
#if 0
std::cout << "graph " << i << std::endl;
graphs[i]->Dump();
#endif
}
// Copy each component's node types; node associations are the identity.
node_types.resize( decomposition.size() );
node_assoc.resize( decomposition.size() );
for ( i = 0, k = 0; i < decomposition.size(); ++i )
{
node_types[i].resize( decomposition[i].size() );
node_assoc[i].resize( decomposition[i].size() );
for ( j = 0; j < decomposition[i].size(); ++j )
{
node_types[i][j] = *gm->GetNodeType( decomposition[i][j] );
node_assoc[i][j] = j;
}
}
for ( i = decomposition.size(); i--; )
{
models[i] = MODEL::Create( decomposition[i].size(), node_types[i], node_assoc[i], graphs[i] );
}
// Re-home every factor: translate its domain into component-local indices
// and attach a copy to the owning component's model.  The component of the
// domain's first node determines the target model (a factor's domain lies
// within one connected component by construction).
for ( i = 0; i < gm->GetNumberOfFactors(); ++i )
{
fac = gm->GetFactor( i );
fac->GetDomain( &dom );
#if 0
std::cout << "Ex received orig factor" << std::endl;
fac->GetDistribFun()->Dump();
#endif
k = orig2comp[dom[0]];
for ( j = dom.size(); j--; )
{
dom[j] = orig2idx[dom[j]];
}
fac = CFactor::CopyWithNewDomain( fac, dom, models[k]->GetModelDomain() );
#if 0
std::cout << "Ex mangled it to" << std::endl;
fac->GetDistribFun()->Dump();
#endif
models[k]->AttachFactor( fac );
}
for ( i = decomposition.size(); i--; )
{
//......... part of the code is omitted here .........
示例5: _LearnPotentials
// IPF (iterative proportional fitting) learning of the model's potentials:
// builds the full joint distribution with a naive inference engine on empty
// evidence, initializes a per-clique factor from the collected (normalized)
// statistics, then repeatedly rescales the joint until its table converges.
// NOTE(review): this excerpt is truncated -- the convergence loop's tail and
// the function's return value are not shown.
float CMlLearningEngine::_LearnPotentials()
{
int iteration = 1;
float log_lik = 0.0f;
CStaticGraphicalModel *grmodel = this->GetStaticModel();
CFactor *parameter = NULL;
float epsilon = m_precisionIPF;
const CPotential *joint_prob = NULL;
CPotential *clique_jpd = NULL;
CMatrix<float> *itogMatrix;
// The joint distribution is obtained by naive inference on empty evidence.
CInfEngine *m_pInfEngine =
CNaiveInfEngine::Create(grmodel);
intVector obsNodes(0);
valueVector obsValues(0);
CEvidence *emptyEvidence = CEvidence::Create(grmodel->GetModelDomain(), obsNodes, obsValues);
m_pInfEngine -> EnterEvidence( emptyEvidence );
// Query every node of the model to obtain the full joint.
int querySize = grmodel->GetNumberOfNodes();
int *query;
query = new int [querySize];
int i;
for( i = 0; i < querySize; i++ )
{
query[i] = i;
}
m_pInfEngine -> MarginalNodes( query, querySize );
joint_prob = m_pInfEngine->GetQueryJPD();
CPotential *itog_joint_prob =
static_cast<CPotential *>(joint_prob ->Marginalize(query, querySize));
int DomainSize;
const int *domain;
// Build one learn-factor per clique from the factor's statistical table,
// normalized to a distribution.
potsPVector learn_factors;
CPotential *tmp_factor;
for (i = 0; i < grmodel -> GetNumberOfFactors(); i++)
{
// NOTE(review): `factor` is not declared in this excerpt (only
// `parameter` is) -- presumably a member or an omitted local; verify.
factor = grmodel -> GetFactor(i);
factor -> GetDomain( &DomainSize, &domain );
CDistribFun *correspData= factor -> GetDistribFun();
CMatrix<float> *learnMatrix = correspData ->
GetStatisticalMatrix(stMatTable);
// NOTE(review): `parameter` is still NULL at this point, so
// parameter->GetModelDomain() dereferences a null pointer; this local
// CPotential* also shadows the `factor` used just above.  Looks like a
// defect carried over from the original source -- confirm upstream.
CPotential *factor = CTabularPotential::Create( domain, DomainSize,
parameter->GetModelDomain());
learn_factors.push_back(factor);
learn_factors[i] -> AttachMatrix(learnMatrix->NormalizeAll(), matTable);
}
int data_length;
float *old_itog_data = NULL;
const float *itog_data;
delete [] query;
// IPF loop: multiply the joint by each clique's learn-factor and divide by
// the joint's current clique marginal, until the table stops changing.
int convergence = 0;
while( !convergence && (iteration <= m_maxIterIPF))
{
iteration++;
// Snapshot the current joint table so the change can be measured below.
itogMatrix = (itog_joint_prob->GetDistribFun())
-> GetMatrix(matTable);
static_cast<CNumericDenseMatrix<float>*>(itogMatrix)->
GetRawData(&data_length, &itog_data);
// NOTE(review): old_itog_data is new[]-ed every iteration with no
// delete[] visible in this excerpt -- leaks one buffer per iteration.
old_itog_data = new float[data_length];
for( i = 0; i < data_length; i++)
{
old_itog_data[i] = itog_data[i];
}
for( int clique = 0; clique < grmodel->GetNumberOfFactors(); clique++)
{
factor = grmodel -> GetFactor(clique);
factor -> GetDomain( &DomainSize, &domain );
clique_jpd = static_cast<CPotential *>
(itog_joint_prob -> Marginalize( domain, DomainSize ));
// joint <- joint * learn_factor / clique_marginal
tmp_factor = itog_joint_prob -> Multiply(learn_factors[clique]);
delete (itog_joint_prob);
itog_joint_prob = tmp_factor;
tmp_factor = itog_joint_prob -> Divide(clique_jpd);
delete (itog_joint_prob);
delete (clique_jpd);
itog_joint_prob = tmp_factor;
}
// Compare the updated joint table element-wise with the snapshot.
itogMatrix = (itog_joint_prob->GetDistribFun())
-> GetMatrix(matTable);
static_cast<CNumericDenseMatrix<float>*>(itogMatrix)->
GetRawData(&data_length, &itog_data);
convergence = true;
for (int j = 0; j < data_length; j++)
{
if( fabs( itog_data[j] - old_itog_data[j] ) > epsilon)
{
//......... part of the code is omitted here .........