This article collects typical usage examples of the C++ Hypothesis class. If you are wondering what the Hypothesis class does, how to use it, or what real code that uses it looks like, the curated examples below should help.
Fifteen code examples of the Hypothesis class are shown below, ordered by popularity by default.
Example 1: OutputSurface
/***
 * print surface factor only for the given phrase
 */
void OutputSurface(std::ostream &out, const Hypothesis &edge, const std::vector<FactorType> &outputFactorOrder,
                   bool reportSegmentation, bool reportAllFactors)
{
  CHECK(outputFactorOrder.size() > 0);
  const Phrase& phrase = edge.GetCurrTargetPhrase();
  if (reportAllFactors == true) {
    out << phrase;
  } else {
    size_t size = phrase.GetSize();
    for (size_t pos = 0 ; pos < size ; pos++) {
      const Factor *factor = phrase.GetFactor(pos, outputFactorOrder[0]);
      CHECK(factor);
      out << *factor;
      for (size_t i = 1 ; i < outputFactorOrder.size() ; i++) {
        const Factor *factor = phrase.GetFactor(pos, outputFactorOrder[i]);
        CHECK(factor);
        out << "|" << *factor;
      }
      out << " ";
    }
  }

  // trace option "-t"
  if (reportSegmentation == true && phrase.GetSize() > 0) {
    out << "|" << edge.GetCurrSourceWordsRange().GetStartPos()
        << "-" << edge.GetCurrSourceWordsRange().GetEndPos() << "| ";
  }
}
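The snippet below is a self-contained illustration, not taken from Moses: it reproduces the output format that Example 1 emits, with the factors of each word joined by '|' and an optional '|start-end|' source-segmentation trace appended when the "-t" option is active. The container types are plain standard-library stand-ins for the Moses Phrase and Factor classes.

// Self-contained sketch (not Moses code): illustrates the output format that
// OutputSurface above produces -- factored tokens joined by '|', followed by
// an optional "|start-end|" source-segmentation trace.
#include <iostream>
#include <string>
#include <vector>

int main() {
  // each target word carries several factors, e.g. {surface, POS}
  std::vector<std::vector<std::string> > phrase = {
    {"house", "NN"}, {"prices", "NNS"}
  };
  const std::size_t srcStart = 3, srcEnd = 4;   // covered source span
  const bool reportSegmentation = true;

  for (const std::vector<std::string> &word : phrase) {
    std::cout << word[0];
    for (std::size_t i = 1; i < word.size(); ++i)
      std::cout << "|" << word[i];
    std::cout << " ";
  }
  if (reportSegmentation)
    std::cout << "|" << srcStart << "-" << srcEnd << "| ";
  std::cout << std::endl;   // prints: house|NN prices|NNS |3-4|
  return 0;
}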
Example 2: OutputSurface
/***
 * print surface factor only for the given phrase
 */
void OutputSurface(std::ostream &out, const Hypothesis &edge, const std::vector<FactorType> &outputFactorOrder,
                   char reportSegmentation, bool reportAllFactors)
{
  CHECK(outputFactorOrder.size() > 0);
  const Phrase& phrase = edge.GetCurrTargetPhrase();
  bool markUnknown = StaticData::Instance().GetMarkUnknown();
  if (reportAllFactors == true) {
    out << phrase;
  } else {
    FactorType placeholderFactor = StaticData::Instance().GetPlaceholderFactor().second;
    size_t size = phrase.GetSize();
    for (size_t pos = 0 ; pos < size ; pos++) {
      const Factor *factor = phrase.GetFactor(pos, outputFactorOrder[0]);
      if (placeholderFactor != NOT_FOUND) {
        const Factor *origFactor = phrase.GetFactor(pos, placeholderFactor);
        if (origFactor) {
          factor = origFactor;
        }
      }
      CHECK(factor);

      // preface surface form with UNK if marking unknowns
      const Word &word = phrase.GetWord(pos);
      if (markUnknown && word.IsOOV()) {
        out << "UNK" << *factor;
      } else {
        out << *factor;
      }

      for (size_t i = 1 ; i < outputFactorOrder.size() ; i++) {
        const Factor *factor = phrase.GetFactor(pos, outputFactorOrder[i]);
        CHECK(factor);
        out << "|" << *factor;
      }
      out << " ";
    }
  }

  // trace option "-t" / "-tt"
  if (reportSegmentation > 0 && phrase.GetSize() > 0) {
    const WordsRange &sourceRange = edge.GetCurrSourceWordsRange();
    const int sourceStart = sourceRange.GetStartPos();
    const int sourceEnd = sourceRange.GetEndPos();
    out << "|" << sourceStart << "-" << sourceEnd;
    // enriched "-tt"
    if (reportSegmentation == 2) {
      out << ",0, ";
      const AlignmentInfo &ai = edge.GetCurrTargetPhrase().GetAlignTerm();
      OutputAlignment(out, ai, 0, 0);
    }
    out << "| ";
  }
}
Example 3: EmptyHypothesisState
void LexicalReordering::EmptyHypothesisState(FFState &state,
    const ManagerBase &mgr, const InputType &input,
    const Hypothesis &hypo) const
{
  BidirectionalReorderingState &stateCast =
      static_cast<BidirectionalReorderingState&>(state);
  stateCast.Init(NULL, hypo.GetTargetPhrase(), hypo.GetInputPath(), true,
      &hypo.GetBitmap());
}
Example 4: add_phrase_aln_info
/// add phrase alignment information from a Hypothesis
void
TranslationRequest::
add_phrase_aln_info(Hypothesis const& h, vector<xmlrpc_c::value>& aInfo) const
{
  if (!m_withAlignInfo) return;
  WordsRange const& trg = h.GetCurrTargetWordsRange();
  WordsRange const& src = h.GetCurrSourceWordsRange();

  std::map<std::string, xmlrpc_c::value> pAlnInfo;
  pAlnInfo["tgt-start"] = xmlrpc_c::value_int(trg.GetStartPos());
  pAlnInfo["src-start"] = xmlrpc_c::value_int(src.GetStartPos());
  pAlnInfo["src-end"]   = xmlrpc_c::value_int(src.GetEndPos());
  aInfo.push_back(xmlrpc_c::value_struct(pAlnInfo));
}
Example 5: EvaluateWhenApplied
void LanguageModel::EvaluateWhenApplied(const ManagerBase &mgr,
    const Hypothesis &hypo, const FFState &prevState, Scores &scores,
    FFState &state) const
{
  const LMState &prevLMState = static_cast<const LMState &>(prevState);
  size_t numWords = prevLMState.numWords;

  // context is held backwards
  vector<const Factor*> context(numWords);
  for (size_t i = 0; i < numWords; ++i) {
    context[i] = prevLMState.lastWords[i];
  }
  //DebugContext(context);

  SCORE score = 0;
  std::pair<SCORE, void*> fromScoring;
  const TargetPhrase<Moses2::Word> &tp = hypo.GetTargetPhrase();
  for (size_t i = 0; i < tp.GetSize(); ++i) {
    const Word &word = tp[i];
    const Factor *factor = word[m_factorType];
    ShiftOrPush(context, factor);
    fromScoring = Score(context);
    score += fromScoring.first;
  }

  const Bitmap &bm = hypo.GetBitmap();
  if (bm.IsComplete()) {
    // everything translated
    ShiftOrPush(context, m_eos);
    fromScoring = Score(context);
    score += fromScoring.first;
    fromScoring.second = NULL;
    context.clear();
  } else {
    assert(context.size());
    if (context.size() == m_order) {
      context.resize(context.size() - 1);
    }
  }

  scores.PlusEquals(mgr.system, *this, score);

  // return state
  //DebugContext(context);
  LMState &stateCast = static_cast<LMState&>(state);
  MemPool &pool = mgr.GetPool();
  stateCast.Set(pool, fromScoring.second, context);
}
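Example 5 relies on ShiftOrPush and on the comment that the "context is held backwards". The sketch below is an assumption-laden stand-in, not the Moses2 implementation: it shows one plausible reading in which index 0 holds the most recent word and the window is capped at the LM order; the names shift_or_push and kOrder are invented for this illustration.

// Sketch only (not the Moses2 implementation): a plausible reading of the
// "context is held backwards" comment -- index 0 holds the most recent word
// and the window never grows beyond the LM order.
#include <iostream>
#include <string>
#include <vector>

static const std::size_t kOrder = 3;   // assumed trigram LM

void shift_or_push(std::vector<std::string> &context, const std::string &word) {
  if (context.size() < kOrder)
    context.resize(context.size() + 1);    // grow until the window is full
  for (std::size_t i = context.size() - 1; i > 0; --i)
    context[i] = context[i - 1];           // shift older words towards the back
  context[0] = word;                       // newest word lives at index 0
}

int main() {
  std::vector<std::string> context;        // empty history at sentence start
  const char *words[] = {"the", "cat", "sat", "down"};
  for (const char *w : words) {
    shift_or_push(context, w);
    for (std::size_t i = 0; i < context.size(); ++i)
      std::cout << context[i] << (i + 1 < context.size() ? " " : "\n");
  }
  // last line printed: "down sat cat" -- a 3-word window, newest first
  return 0;
}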
Example 6: GetFromCacheOrScorePhrase
void GlobalLexicalModel::Evaluate
(const Hypothesis& hypo,
 ScoreComponentCollection* accumulator) const
{
  accumulator->PlusEquals( this,
                           GetFromCacheOrScorePhrase(hypo.GetCurrTargetPhrase()) );
}
Example 7: generate_hypotheses
void generate_hypotheses(const int order, const Hypothesis & h,
                         const vector<ME_Model> & vme,
                         list<Hypothesis> & vh)
{
  int n = h.vt.size();
  int pred_position = -1;
  double min_ent = 999999;
  string pred = "";
  double pred_prob = 0;
  for (int j = 0; j < n; j++) {
    if (h.vt[j].cprd != "") continue;

    double ent = h.vent[j];
    if (ent < min_ent) {
      // pred = h.vvp[j].begin()->first;
      // pred_prob = h.vvp[j].begin()->second;
      min_ent = ent;
      pred_position = j;
    }
  }
  assert(pred_position >= 0 && pred_position < n);

  for (vector<pair<string, double> >::const_iterator k = h.vvp[pred_position].begin();
       k != h.vvp[pred_position].end(); k++) {
    Hypothesis newh = h;

    newh.vt[pred_position].cprd = k->first;
    newh.order[pred_position] = order + 1;
    newh.prob = h.prob * k->second;

    // if (newh.IsErroneous()) {
    //   cout << "*erroneous" << endl;
    //   newh.Print();
    //   continue;
    // }

    // update the neighboring predictions
    for (int j = pred_position - TAG_WINDOW_SIZE; j <= pred_position + TAG_WINDOW_SIZE; j++) {
      if (j < 0 || j > n-1) continue;
      if (newh.vt[j].cprd == "") newh.Update(j, vme);
    }

    vh.push_back(newh);
  }
}
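The core of generate_hypotheses is a greedy choice: among positions whose tag (cprd) is still empty, expand the one with the lowest entropy, i.e. the position the model is most certain about. The toy program below, which is purely illustrative and not from the original source, isolates just that selection step.

// Toy illustration of the selection step in generate_hypotheses: pick the
// untagged position with the lowest prediction entropy.
#include <cassert>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> tags    = {"DT", "", "", "NN"};   // "" = undecided
  std::vector<double>      entropy = {0.0, 0.9, 0.2, 0.0};

  int best = -1;
  double min_ent = 999999;                   // same sentinel as Example 7
  for (std::size_t j = 0; j < tags.size(); ++j) {
    if (!tags[j].empty()) continue;          // skip already-tagged positions
    if (entropy[j] < min_ent) {
      min_ent = entropy[j];
      best = static_cast<int>(j);
    }
  }
  assert(best >= 0);
  std::cout << "expand position " << best << std::endl;   // prints 2
  return 0;
}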
Example 8: CalculateDistortionScore
float
DistortionScoreProducer::
CalculateDistortionScore(const Hypothesis& hypo,
                         const Range &prev, const Range &curr, const int FirstGap)
{
  // if(!StaticData::Instance().UseEarlyDistortionCost()) {
  if(!hypo.GetManager().options()->reordering.use_early_distortion_cost) {
    return - (float) hypo.GetInput().ComputeDistortionDistance(prev, curr);
  } // else {

  /* Pay distortion score as soon as possible, from Moore and Quirk MT Summit 2007
     Definitions:
     S   : current source range
     S'  : last translated source phrase range
     S'' : longest fully-translated initial segment
  */

  int prefixEndPos = (int)FirstGap-1;
  if((int)FirstGap==-1)
    prefixEndPos = -1;

  // case1: S is adjacent to S'' => return 0
  if ((int) curr.GetStartPos() == prefixEndPos+1) {
    IFVERBOSE(4) std::cerr<< "MQ07disto:case1" << std::endl;
    return 0;
  }

  // case2: S is to the left of S' => return 2(length(S))
  if ((int) curr.GetEndPos() < (int) prev.GetEndPos()) {
    IFVERBOSE(4) std::cerr<< "MQ07disto:case2" << std::endl;
    return (float) -2*(int)curr.GetNumWordsCovered();
  }

  // case3: S' is a subsequence of S'' => return 2(nbWordBetween(S,S'')+length(S))
  if ((int) prev.GetEndPos() <= prefixEndPos) {
    IFVERBOSE(4) std::cerr<< "MQ07disto:case3" << std::endl;
    int z = (int)curr.GetStartPos()-prefixEndPos - 1;
    return (float) -2*(z + (int)curr.GetNumWordsCovered());
  }

  // case4: otherwise => return 2(nbWordBetween(S,S')+length(S))
  IFVERBOSE(4) std::cerr<< "MQ07disto:case4" << std::endl;
  return (float) -2*((int)curr.GetNumWordsBetween(prev) + (int)curr.GetNumWordsCovered());
}
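To make the four cases above easy to check by hand, here is a self-contained toy restatement of the Moore and Quirk early distortion cost using bare (start, end) index pairs instead of Moses Range objects. The negative, factor-of-two penalty mirrors the code above; the simplified case-4 arithmetic assumes curr follows prev, and everything else is illustrative only.

// Toy restatement of the four early-distortion cases, for hand-checking.
#include <iostream>

struct Span { int start, end; int len() const { return end - start + 1; } };

// firstGap = start of the first uncovered source word; prev = last translated span
float earlyDistortion(const Span &prev, const Span &curr, int firstGap) {
  const int prefixEnd = firstGap - 1;                    // end of S'', the covered prefix
  if (curr.start == prefixEnd + 1) return 0.0f;          // case 1: adjacent to S''
  if (curr.end < prev.end) return -2.0f * curr.len();    // case 2: left of S'
  if (prev.end <= prefixEnd) {                           // case 3: S' inside S''
    int gap = curr.start - prefixEnd - 1;
    return -2.0f * (gap + curr.len());
  }
  int between = curr.start - prev.end - 1;               // case 4 (assumes curr follows prev)
  return -2.0f * (between + curr.len());
}

int main() {
  Span prev = {0, 1};            // words 0-1 already translated
  Span curr = {4, 5};            // now jumping ahead to words 4-5
  std::cout << earlyDistortion(prev, curr, /*firstGap=*/2) << std::endl;  // case 3: -2*(2+2) = -8
  return 0;
}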
Example 9: combine
double combine(const Hypothesis & a, const Hypothesis & b, Hypothesis & ret) const {
  ret.hook = a.hook;
  ret.right_side = b.right_side;
  for (int i = 0; i < a.prev_hyp.size(); i++) {
    ret.prev_hyp.push_back(a.prev_hyp[i]);
  }
  ret.prev_hyp.push_back(b.id());
  return 0.0;
}
Example 10: GetPlaceholders
std::map<size_t, const Factor*> GetPlaceholders(const Hypothesis &hypo, FactorType placeholderFactor)
{
  const InputPath &inputPath = hypo.GetTranslationOption().GetInputPath();
  const Phrase &inputPhrase = inputPath.GetPhrase();

  std::map<size_t, const Factor*> ret;

  for (size_t sourcePos = 0; sourcePos < inputPhrase.GetSize(); ++sourcePos) {
    const Factor *factor = inputPhrase.GetFactor(sourcePos, placeholderFactor);
    if (factor) {
      std::set<size_t> targetPos = hypo.GetTranslationOption().GetTargetPhrase().GetAlignTerm().GetAlignmentsForSource(sourcePos);
      CHECK(targetPos.size() == 1);
      ret[*targetPos.begin()] = factor;
    }
  }

  return ret;
}
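GetPlaceholders builds a map from target position to the original source factor, and the CHECK enforces that every placeholder-bearing source word is aligned to exactly one target word. The sketch below, which uses simplified standard-library stand-ins rather than Moses types, shows that mapping on a tiny example.

// Illustrative sketch (not Moses code) of the placeholder mapping built above:
// target position -> original source factor, under a one-to-one alignment.
#include <cassert>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

int main() {
  // source side: position -> placeholder content, if any ("" = no placeholder)
  std::vector<std::string> placeholder = {"", "2015", ""};
  // word alignment: source position -> aligned target positions
  std::map<std::size_t, std::set<std::size_t> > align = {
    {0, {0}}, {1, {2}}, {2, {1}}
  };

  std::map<std::size_t, std::string> ret;   // target position -> original factor
  for (std::size_t src = 0; src < placeholder.size(); ++src) {
    if (placeholder[src].empty()) continue;
    const std::set<std::size_t> &tgt = align[src];
    assert(tgt.size() == 1);                // mirrors the CHECK in Example 10
    ret[*tgt.begin()] = placeholder[src];
  }
  std::cout << "target position 2 <- " << ret[2] << std::endl;  // prints 2015
  return 0;
}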
Example 11: ControlRecombinationState
ControlRecombinationState::ControlRecombinationState(const Hypothesis &hypo, const ControlRecombination &ff)
  :m_ff(ff)
{
  if (ff.GetType() == SameOutput) {
    //UTIL_THROW(util::Exception, "Implemented not yet completed for phrase-based model. Need to take into account the coverage");
    hypo.GetOutputPhrase(m_outputPhrase);
  } else {
    m_hypo = &hypo;
  }
}
Example 12: Expand
void PhraseBasedReorderingState::Expand(const ManagerBase &mgr,
    const LexicalReordering &ff, const Hypothesis &hypo, size_t phraseTableInd,
    Scores &scores, FFState &state) const
{
  if ((m_direction != LRModel::Forward) || !m_first) {
    LRModel const& lrmodel = m_configuration;
    Range const &cur = hypo.GetInputPath().range;
    LRModel::ReorderingType reoType = (
        m_first ?
            lrmodel.GetOrientation(cur) :
            lrmodel.GetOrientation(prevPath->range, cur));
    CopyScores(mgr.system, scores, hypo.GetTargetPhrase(), reoType);
  }

  PhraseBasedReorderingState &stateCast =
      static_cast<PhraseBasedReorderingState&>(state);
  stateCast.Init(this, hypo.GetTargetPhrase(), hypo.GetInputPath(), false,
      NULL);
}
Example 13: GetPlaceholders
std::map<size_t, const Factor*>
Hypothesis::
GetPlaceholders(const Hypothesis &hypo, FactorType placeholderFactor) const
{
  const InputPath &inputPath = hypo.GetTranslationOption().GetInputPath();
  const Phrase &inputPhrase = inputPath.GetPhrase();

  std::map<size_t, const Factor*> ret;

  for (size_t sourcePos = 0; sourcePos < inputPhrase.GetSize(); ++sourcePos) {
    const Factor *factor = inputPhrase.GetFactor(sourcePos, placeholderFactor);
    if (factor) {
      std::set<size_t> targetPos = hypo.GetTranslationOption().GetTargetPhrase().GetAlignTerm().GetAlignmentsForSource(sourcePos);
      UTIL_THROW_IF2(targetPos.size() != 1,
                     "Placeholder should be aligned to 1, and only 1, word");
      ret[*targetPos.begin()] = factor;
    }
  }

  return ret;
}
Example 14: hypothesisToMsg
bool WorldModelROS::hypothesisToMsg(const Hypothesis& hyp, wire_msgs::WorldState& msg) const {
  ros::Time time = ros::Time::now();

  msg.header.frame_id = world_model_frame_id_;
  msg.header.stamp = time;

  for(list<SemanticObject*>::const_iterator it = hyp.getObjects().begin(); it != hyp.getObjects().end(); ++it) {
    SemanticObject* obj_clone = (*it)->clone();
    obj_clone->propagate(time.toSec());

    wire_msgs::ObjectState obj_msg;
    if (objectToMsg(*obj_clone, obj_msg)) {
      msg.objects.push_back(obj_msg);
    }
    delete obj_clone;
  }
  return true;
}
Example 15: Evaluate
size_t LM::Evaluate(
  const Hypothesis& hypo,
  size_t prevState,
  Scores &scores) const
{
  if (m_order <= 1) {
    return 0; // not sure if returning NULL is correct
  }

  if (hypo.targetPhrase.GetSize() == 0) {
    return 0; // not sure if returning NULL is correct
  }

  PhraseVec m_phraseVec(m_order);

  const size_t currEndPos = hypo.targetRange.endPos;
  const size_t startPos = hypo.targetRange.startPos;

  size_t index = 0;
  for (int currPos = (int) startPos - (int) m_order + 1 ; currPos <= (int) startPos ; currPos++) {
    if (currPos >= 0)
      m_phraseVec[index++] = &hypo.GetWord(currPos);
    else {
      m_phraseVec[index++] = &m_bos;
    }
  }
  SCORE lmScore = GetValueCache(m_phraseVec);

  // main loop
  size_t endPos = std::min(startPos + m_order - 2
                           , currEndPos);
  for (size_t currPos = startPos + 1 ; currPos <= endPos ; currPos++) {
    // shift all args down 1 place
    for (size_t i = 0 ; i < m_order - 1 ; i++)
      m_phraseVec[i] = m_phraseVec[i + 1];

    // add last factor
    m_phraseVec.back() = &hypo.GetWord(currPos);

    lmScore += GetValueCache(m_phraseVec);
  }

  // end of sentence
  if (hypo.GetCoverage().IsComplete()) {
    const size_t size = hypo.GetSize();
    m_phraseVec.back() = &m_eos;

    for (size_t i = 0 ; i < m_order - 1 ; i ++) {
      int currPos = (int)(size - m_order + i + 1);
      if (currPos < 0)
        m_phraseVec[i] = &m_bos;
      else
        m_phraseVec[i] = &hypo.GetWord((size_t)currPos);
    }
    lmScore += GetValueCache(m_phraseVec);
  } else {
    if (endPos < currEndPos) {
      // need to get the LM state (otherwise the last LM state is fine)
      for (size_t currPos = endPos+1; currPos <= currEndPos; currPos++) {
        for (size_t i = 0 ; i < m_order - 1 ; i++)
          m_phraseVec[i] = m_phraseVec[i + 1];
        m_phraseVec.back() = &hypo.GetWord(currPos);
      }
    }
  }

  size_t state = GetLastState();
  return state;
}