This article collects typical usage examples of the C++ method CFactor::GetDomain. If you are asking yourself how CFactor::GetDomain works, how to call it, or what real-world uses look like, the hand-picked code examples here should help. You can also browse further usage examples of the containing class CFactor.
The following shows 7 code examples of the CFactor::GetDomain method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
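Before the full examples, here is a minimal sketch of the two call patterns that recur below. It is an illustrative assumption reconstructed only from the call sites in these examples (the PNL headers, a valid CFactor* named pFactor, and the pnl::intVector typedef are assumed), not authoritative API documentation:
// Hedged usage sketch -- pFactor is assumed to be a valid CFactor* from a PNL model.
int domainSize = 0;
const int *domain = NULL;
// Overload seen in the learning engines below: returns the domain size and a
// pointer to factor-owned storage (do not free or modify it).
pFactor->GetDomain(&domainSize, &domain);
// Overload seen in CExInfEngine and CStaticStructLearnSEM: fills an intVector
// with the node indices of the factor's domain.
intVector dom;
pFactor->GetDomain(&dom);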
Example 1: Learn
void CBayesLearningEngine::Learn()
{
if(!m_pGrModel)
{
PNL_THROW( CNULLPointer, "no graphical model")
}
CStaticGraphicalModel *grmodel = this->GetStaticModel();
CFactor *factor = NULL;
int numberOfFactors = grmodel->GetNumberOfFactors();
int domainNodes;
if(m_numberOfLearnedEvidences == m_numberOfAllEvidences)
{
PNL_THROW(COutOfRange, "number of unlearned evidences must be positive")
}
int currentEvidNumber;
const CEvidence* pCurrentEvid;
//below code is intended to work on tabular CPD and gaussian CPD
//later we will generalize it for other distribution types
if ((grmodel->GetFactor(0))->GetDistributionType() == dtTabular)
{
for( int ev = m_numberOfLearnedEvidences; ev < m_numberOfAllEvidences; ev++)
{
currentEvidNumber = ev;
pCurrentEvid = m_Vector_pEvidences[currentEvidNumber];
if( !pCurrentEvid)
{
PNL_THROW(CNULLPointer, "evidence")
}
for( domainNodes = 0; domainNodes < numberOfFactors; domainNodes++ )
{
factor = grmodel->GetFactor( domainNodes );
int DomainSize;
const int *domain;
factor->GetDomain( &DomainSize, &domain );
const CEvidence *pEvidences[] = { pCurrentEvid };
CTabularDistribFun* pDistribFun = (CTabularDistribFun*)(factor->GetDistribFun());
pDistribFun->BayesUpdateFactor(pEvidences, 1, domain);
}
}
}
else
{
for( domainNodes = 0; domainNodes < numberOfFactors; domainNodes++ )
//......... part of the code is omitted here .........
Example 2: Learn
void CParEMLearningEngine::Learn()
{
CStaticGraphicalModel *pGrModel = this->GetStaticModel();
PNL_CHECK_IS_NULL_POINTER(pGrModel);
PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);
CJtreeInfEngine *pCurrentInfEng = NULL;
CFactor *parameter = NULL;
int exit = 0;
int numberOfParameters = pGrModel->GetNumberOfParameters();
int domainNodes;
int infIsNeed = 0;
int itsML = 0;
// !!!
float loglik = -FLT_MAX;
float loglikOld = -FLT_MAX;
float epsilon = GetPrecisionEM();
float stopExpression = epsilon + 1.0f;
int iteration = 0;
int currentEvidNumber;
int bMaximize = 0;
int bSumOnMixtureNode = 0;
const CEvidence* pCurrentEvid;
int start_mpi, finish_mpi;
int NumberOfProcesses, MyRank;
int numSelfEvidences;
MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcesses);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
int d = 0;
do
{
iteration++;
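// split the evidences evenly across the MPI processes; the last rank also takes the remainder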
numSelfEvidences = (GetNumEv() - GetNumberProcEv()) / NumberOfProcesses;
start_mpi = GetNumberProcEv() + numSelfEvidences * MyRank; // !!!
if (MyRank < NumberOfProcesses - 1)
finish_mpi = start_mpi + numSelfEvidences; // !!!
else
finish_mpi = GetNumEv(); // !!!
for(int ev = start_mpi; ev < finish_mpi; ev++)
{
infIsNeed = 0;
currentEvidNumber = ev; // !!!
pCurrentEvid = m_Vector_pEvidences[currentEvidNumber];
if( !pCurrentEvid)
{
PNL_THROW(CNULLPointer, "evidence")
}
infIsNeed = !GetObsFlags(ev)->empty(); // !!!
if(infIsNeed)
{
// create inference engine
if(!pCurrentInfEng)
{
pCurrentInfEng = CJtreeInfEngine::Create(pGrModel);
}
pCurrentInfEng->EnterEvidence(pCurrentEvid, bMaximize,
bSumOnMixtureNode);
}
for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++)
{
parameter = pGrModel->GetFactor(domainNodes);
if(infIsNeed)
{
int DomainSize;
const int *domain;
parameter->GetDomain(&DomainSize, &domain);
if (IsDomainObserved(DomainSize, domain, currentEvidNumber))
{
const CEvidence *pEvidences[] = { pCurrentEvid };
parameter->UpdateStatisticsML(pEvidences, 1);
}
else
{
pCurrentInfEng->MarginalNodes(domain, DomainSize, 1);
const CPotential * pMargPot = pCurrentInfEng->GetQueryJPD();
parameter ->UpdateStatisticsEM(pMargPot, pCurrentEvid);
}
}
else
{
const CEvidence *pEvidences[] = { pCurrentEvid };
parameter->UpdateStatisticsML(pEvidences, 1);
}
}
itsML = itsML || !infIsNeed;
}
for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++ )
{
parameter = pGrModel->GetFactor(domainNodes);
//......... part of the code is omitted here .........
Example 3: LearnOMP
void CParEMLearningEngine::LearnOMP()
{
CStaticGraphicalModel *pGrModel = this->GetStaticModel();
PNL_CHECK_IS_NULL_POINTER(pGrModel);
PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);
//omp_set_num_threads(2);
int numberOfThreads = omp_get_num_procs();
//CParPearlInfEngine **pCurrentInfEng = new CParPearlInfEngine*[numberOfThreads];
CJtreeInfEngine **pCurrentInfEng = new CJtreeInfEngine*[numberOfThreads];
for (int i = 0; i < numberOfThreads; i++)
pCurrentInfEng[i] = NULL;
CFactor *parameter1 = NULL;
int exit = 0;
int numberOfParameters = pGrModel->GetNumberOfParameters();
int domainNodes;
//int itsML = 0;
// !!!
float loglik = -FLT_MAX;
float loglikOld = -FLT_MAX;
float epsilon = GetPrecisionEM();
float stopExpression = epsilon + 1.0f;
int iteration = 0;
int ev;
// to create additional factors
CFactor **ppAllFactors = new CFactor*[numberOfParameters*numberOfThreads];
bool *was_updated = new bool[numberOfParameters*numberOfThreads];
int factor;
#pragma omp parallel for private(factor) default(shared)
for (factor = 0; factor < numberOfParameters; factor++)
{
ppAllFactors[factor] = pGrModel->GetFactor(factor);
ppAllFactors[factor]->GetDistribFun()->ClearStatisticalData();
was_updated[factor] = false;
for (int proc = 1; proc < numberOfThreads; proc++)
{
ppAllFactors[factor + proc * numberOfParameters] =
ppAllFactors[factor]->Clone();
ppAllFactors[factor + proc * numberOfParameters]->GetDistribFun()->
ClearStatisticalData();
was_updated[factor + proc * numberOfParameters]= false;
};
};
int* itsML = new int[numberOfThreads];
for (int delta = 0; delta < numberOfThreads; delta++)
{
itsML[delta] = 0;
};
int start_ev, end_ev;
do
{
iteration++;
start_ev = GetNumberProcEv();
end_ev = GetNumEv();
#pragma omp parallel for schedule(dynamic) private(ev)
for (ev = start_ev; ev < end_ev ; ev++)
{
CFactor *parameter = NULL;
int DomainNodes_new;
int bMaximize = 0;
int bSumOnMixtureNode = 0;
int infIsNeed = 0;
int currentEvidNumber = ev; // !!!
const CEvidence* pCurrentEvid = m_Vector_pEvidences[currentEvidNumber];
infIsNeed = !GetObsFlags(ev)->empty(); // !!!
int Num_thread = omp_get_thread_num();
if (infIsNeed)
{
if (!pCurrentInfEng[Num_thread])
{
pCurrentInfEng[Num_thread] = CJtreeInfEngine::Create(
(const CStaticGraphicalModel *)pGrModel);
}
pCurrentInfEng[Num_thread]->EnterEvidence(pCurrentEvid, bMaximize,
bSumOnMixtureNode);
}
for (DomainNodes_new = 0; DomainNodes_new < numberOfParameters;
DomainNodes_new++)
{
parameter = ppAllFactors[DomainNodes_new +
Num_thread * numberOfParameters];
if (infIsNeed)
{
int DomainSize;
const int *domain;
parameter->GetDomain(&DomainSize, &domain);
if (IsDomainObserved(DomainSize, domain, currentEvidNumber))
//......... part of the code is omitted here .........
Example 4: LearnContMPI
void CParEMLearningEngine::LearnContMPI()
{
CStaticGraphicalModel *pGrModel = this->GetStaticModel();
PNL_CHECK_IS_NULL_POINTER(pGrModel);
PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);
CInfEngine *pInfEng = NULL;
pInfEng = CJtreeInfEngine::Create(pGrModel);
float loglik = 0.0f;
int domainNodes;
CFactor *parameter = NULL;
int numberOfParameters = pGrModel->GetNumberOfParameters();
int nFactors = pGrModel->GetNumberOfFactors();
const CEvidence *pEv;
CFactor *pFactor;
int iteration = 0;
int ev;
int i,numSelfEvidences,NumberOfProcesses, MyRank;
int start_mpi, finish_mpi;
MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcesses);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
if (IsAllObserved())
{
int i;
float **evid = NULL;
EDistributionType dt;
CFactor *factor = NULL;
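// fully observed data: one batch maximum-likelihood update per factor, no inference needed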
for (i = 0; i < nFactors; i++)
{
factor = pGrModel->GetFactor(i);
factor->UpdateStatisticsML(&m_Vector_pEvidences[GetNumberProcEv()],
GetNumEv() - GetNumberProcEv());
}
m_critValue.push_back(UpdateModel());
}
else
{
bool bContinue;
const CPotential * pot;
do
{
ClearStatisticData();
iteration++;
numSelfEvidences = (GetNumEv() - GetNumberProcEv()) / NumberOfProcesses;
start_mpi = GetNumberProcEv() + numSelfEvidences * MyRank;
if (MyRank < NumberOfProcesses - 1)
finish_mpi = start_mpi + numSelfEvidences;
else
finish_mpi = GetNumEv();
for(int ev = start_mpi; ev < finish_mpi; ev++)
{
bool bInfIsNeed = !GetObsFlags(ev)->empty();
pEv = m_Vector_pEvidences[ev];
if( bInfIsNeed )
{
pInfEng->EnterEvidence(pEv, 0, 0);
}
int i;
for( i = 0; i < nFactors; i++ )
{
pFactor = pGrModel->GetFactor(i);
int nnodes;
const int * domain;
pFactor->GetDomain( &nnodes, &domain );
if( bInfIsNeed && !IsDomainObserved(nnodes, domain, ev ) )
{
pInfEng->MarginalNodes( domain, nnodes, 1 );
pot = pInfEng->GetQueryJPD();
pFactor->UpdateStatisticsEM( /*pInfEng->GetQueryJPD */ pot, pEv );
}
else
{
pFactor->UpdateStatisticsML( &pEv, 1 );
}
}
}
for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++ )
{
parameter = pGrModel->GetFactor(domainNodes);
C2DNumericDenseMatrix<float> *matMeanForSending;
C2DNumericDenseMatrix<float> *matCovForSending;
int dataLengthM,dataLengthC;
//......... part of the code is omitted here .........
Example 5: CInfEngine
CExInfEngine< INF_ENGINE, MODEL, FLAV, FALLBACK_ENGINE1, FALLBACK_ENGINE2 >::CExInfEngine( CStaticGraphicalModel const *gm )
: CInfEngine( itEx, gm ), evidence_mine( false ),
maximize( 0 ), MPE_ev( 0 ), query_JPD( 0 ), graphical_model( gm )
{
int i, j, k;
intVector dom;
intVector conv;
CFactor *fac;
PNL_MAKE_LOCAL( CGraph *, gr, gm, GetGraph() );
PNL_MAKE_LOCAL( int, sz, gr, GetNumberOfNodes() );
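// decompose the graph into connected components; each component is wrapped in its own submodel and engine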
gr->GetConnectivityComponents( &decomposition );
for ( i = decomposition.size(); i--; )
{
std::sort( decomposition[i].begin(), decomposition[i].end() );
}
if ( PNL_IS_EXINFENGINEFLAVOUR_UNSORTED( FLAV ) )
{
gr->GetTopologicalOrder( &conv );
}
orig2comp.resize( sz );
orig2idx.resize( sz );
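// orig2comp[n] = component that node n belongs to; orig2idx[n] = node's index within that component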
for ( k = 2; k--; )
{
for ( i = decomposition.size(); i--; )
{
for ( j = decomposition[i].size(); j--; )
{
orig2comp[decomposition[i][j]] = i;
orig2idx[decomposition[i][j]] = j;
}
}
if ( PNL_IS_EXINFENGINEFLAVOUR_UNSORTED( FLAV ) && k )
{
for ( i = sz; i--; )
{
decomposition[orig2comp[conv[i]]][orig2idx[conv[i]]] = i;
}
}
else
{
break;
}
}
graphs.resize( decomposition.size() );
models.resize( decomposition.size() );
engines.resize( decomposition.size() );
for ( i = decomposition.size(); i--; )
{
graphs[i] = gr->ExtractSubgraph( decomposition[i] );
#if 0
std::cout << "graph " << i << std::endl;
graphs[i]->Dump();
#endif
}
node_types.resize( decomposition.size() );
node_assoc.resize( decomposition.size() );
for ( i = 0, k = 0; i < decomposition.size(); ++i )
{
node_types[i].resize( decomposition[i].size() );
node_assoc[i].resize( decomposition[i].size() );
for ( j = 0; j < decomposition[i].size(); ++j )
{
node_types[i][j] = *gm->GetNodeType( decomposition[i][j] );
node_assoc[i][j] = j;
}
}
for ( i = decomposition.size(); i--; )
{
models[i] = MODEL::Create( decomposition[i].size(), node_types[i], node_assoc[i], graphs[i] );
}
for ( i = 0; i < gm->GetNumberOfFactors(); ++i )
{
fac = gm->GetFactor( i );
fac->GetDomain( &dom );
#if 0
std::cout << "Ex received orig factor" << std::endl;
fac->GetDistribFun()->Dump();
#endif
k = orig2comp[dom[0]];
for ( j = dom.size(); j--; )
{
dom[j] = orig2idx[dom[j]];
}
fac = CFactor::CopyWithNewDomain( fac, dom, models[k]->GetModelDomain() );
#if 0
std::cout << "Ex mangled it to" << std::endl;
fac->GetDistribFun()->Dump();
#endif
models[k]->AttachFactor( fac );
}
for ( i = decomposition.size(); i--; )
{
//......... part of the code is omitted here .........
Example 6: Learn
void CEMLearningEngine::Learn()
{
CStaticGraphicalModel *pGrModel = this->GetStaticModel();
PNL_CHECK_IS_NULL_POINTER(pGrModel);
PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);
CInfEngine *pInfEng = NULL;
if (m_pInfEngine)
{
pInfEng = m_pInfEngine;
}
else
{
if (!m_bAllObserved)
{
pInfEng = CJtreeInfEngine::Create(pGrModel);
m_pInfEngine = pInfEng;
}
}
float loglik = 0.0f;
int nFactors = pGrModel->GetNumberOfFactors();
const CEvidence *pEv;
CFactor *pFactor;
int iteration = 0;
int ev;
bool IsCastNeed = false;
int i;
for( i = 0; i < nFactors; i++ )
{
pFactor = pGrModel->GetFactor(i);
EDistributionType dt = pFactor->GetDistributionType();
if ( dt == dtSoftMax ) IsCastNeed = true;
}
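// SoftMax CPDs are fitted by direct likelihood maximization and need the full evidence matrix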
float ** full_evid = NULL;
if (IsCastNeed)
{
BuildFullEvidenceMatrix(&full_evid);
}
if (IsAllObserved())
{
int i;
float **evid = NULL;
EDistributionType dt;
CFactor *factor = NULL;
for (i = 0; i < nFactors; i++)
{
factor = pGrModel->GetFactor(i);
dt = factor->GetDistributionType();
if (dt != dtSoftMax)
{
factor->UpdateStatisticsML(&m_Vector_pEvidences[GetNumberProcEv()],
GetNumEv() - GetNumberProcEv());
}
else
{
intVector family;
family.resize(0);
pGrModel->GetGraph()->GetParents(i, &family);
family.push_back(i);
CSoftMaxCPD* SoftMaxFactor = static_cast<CSoftMaxCPD*>(factor);
SoftMaxFactor->BuildCurrentEvidenceMatrix(&full_evid,
&evid,family,m_Vector_pEvidences.size());
SoftMaxFactor->InitLearnData();
SoftMaxFactor->SetMaximizingMethod(m_MaximizingMethod);
SoftMaxFactor->MaximumLikelihood(evid, m_Vector_pEvidences.size(),
0.00001f, 0.01f);
SoftMaxFactor->CopyLearnDataToDistrib();
for (int k = 0; k < factor->GetDomainSize(); k++)
{
delete [] evid[k];
}
delete [] evid;
}
}
m_critValue.push_back(UpdateModel());
}
else
{
bool bContinue;
const CPotential * pot;
/* bool IsCastNeed = false;
int i;
for( i = 0; i < nFactors; i++ )
{
pFactor = pGrModel->GetFactor(i);
EDistributionType dt = pFactor->GetDistributionType();
if ( dt == dtSoftMax ) IsCastNeed = true;
}
float ** full_evid;
if (IsCastNeed)
//......... part of the code is omitted here .........
Example 7: CreateNeighborCPDs
void CStaticStructLearnSEM::CreateNeighborCPDs(CBNet* pBNet,
pCPDVector* vNeighborCPDs, EDGEOPVECTOR* vValidMoves, intVector* RevCorrespDel)
{
CGraph* pGraph = pBNet->GetGraph();
CDAG* pDAG = CDAG::Create(*pGraph);
CModelDomain* pMD = pBNet->GetModelDomain();
intVector vDiscrete, vContinuous;
intVector vAncestor, vDescent;
intVector vMixture, vMix;
const CNodeType* nt;
CFactor* factor;
int i, j, position;
vAncestor.assign(m_vAncestor.begin(), m_vAncestor.end());
vDescent.assign(m_vDescent.begin(), m_vDescent.end());
pBNet->FindMixtureNodes(&vMix);
for(i=0; i<vMix.size(); i++)
{
factor = pBNet->GetFactor(vMix[i]);
j = static_cast<CMixtureGaussianCPD*>(factor) -> GetNumberOfMixtureNode();
vMixture.push_back(j);
}
for(i=0; i<m_nNodes; i++)
{
nt = pMD->GetVariableType(i);
if( nt->IsDiscrete() )
{
vDiscrete.push_back(i);
}
else
vContinuous.push_back(i);
}
vValidMoves->clear();
vNeighborCPDs->clear();
RevCorrespDel->clear();
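// enumerate all valid single-edge moves (add/delete/reverse), honoring mixture nodes, the max fan-in, and the ancestor/descendant constraints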
pDAG->GetAllValidMove(vValidMoves, &vMixture.front(), vMixture.size(), m_nMaxFanIn,
&vDiscrete, &vContinuous, &vDescent, &vAncestor );
int nMoves = vValidMoves->size();
intVector domain;
EDGEOP curMove;
int start, end;
for(i=0; i<nMoves; i++)
{
domain.clear();
curMove = (*vValidMoves)[i];
switch (curMove.DAGChangeType)
{
case DAG_DEL :
start = curMove.originalEdge.startNode;
end = curMove.originalEdge.endNode;
factor = pBNet->GetFactor(end);
factor->GetDomain(&domain);
position = std::find(domain.begin(), domain.end(), start)
- domain.begin();
domain.erase(domain.begin()+position);
vNeighborCPDs->push_back(CreateRandomCPD(domain.size(), &domain.front(), pBNet));
break;
case DAG_ADD :
start = curMove.originalEdge.startNode;
end = curMove.originalEdge.endNode;
factor = pBNet->GetFactor(end);
factor->GetDomain(&domain);
domain.insert(domain.begin(), start);
vNeighborCPDs->push_back(CreateRandomCPD(domain.size(), &domain.front(), pBNet));
break;
case DAG_REV :
end = curMove.originalEdge.startNode;
start = curMove.originalEdge.endNode;
factor = pBNet->GetFactor(end);
factor->GetDomain(&domain);
domain.insert(domain.begin(), start);
vNeighborCPDs->push_back(CreateRandomCPD(domain.size(), &domain.front(), pBNet));
break;
}
}
RevCorrespDel->assign(nMoves, -1);
EDGEOP pre_move;
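// for every edge-reversal move, record the index of the matching edge-deletion move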
for(i=0; i<nMoves; i++)
{
curMove = (*vValidMoves)[i];
if(curMove.DAGChangeType == DAG_REV)
{
start = curMove.originalEdge.startNode;
end = curMove.originalEdge.endNode;
for(j=0; j<nMoves; j++)
{
pre_move = (*vValidMoves)[j];
if( (start == pre_move.originalEdge.startNode) &&
(end == pre_move.originalEdge.endNode) &&
(pre_move.DAGChangeType == DAG_DEL) )
{
(*RevCorrespDel)[i] = j;
break;
//......... part of the code is omitted here .........