This article collects typical usage examples of the C++ method Hypothesis::GetTargetPhrase. If you are wondering how Hypothesis::GetTargetPhrase is used in C++, or are looking for working examples of it, the hand-picked code examples here may help. You can also browse further usage examples of the Hypothesis class that the method belongs to.
Below are 4 code examples of the Hypothesis::GetTargetPhrase method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps recommend better C++ code examples.
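All four examples below come from the Moses2 decoder (note the TargetPhrase&lt;Moses2::Word&gt; return type in Example 3): GetTargetPhrase() is called on a Hypothesis inside a feature function to read the words of the phrase that was just applied. A minimal sketch of that pattern, assuming the Moses2 types shown in the examples; the helper name and the factorType parameter are illustrative, not part of the API:

// Illustrative sketch only (Moses2 headers assumed): iterate over the target
// phrase attached to a hypothesis and pick one factor per word.
void InspectTargetPhrase(const Hypothesis &hypo, FactorType factorType)
{
  const TargetPhrase<Moses2::Word> &tp = hypo.GetTargetPhrase();
  for (size_t i = 0; i < tp.GetSize(); ++i) {
    const Word &word = tp[i];                 // one target word of the applied phrase
    const Factor *factor = word[factorType];  // select a factor (e.g. the surface form)
    // ... use the factor, e.g. to update a language-model context
    (void) factor;
  }
}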
Example 1: EmptyHypothesisState
void LexicalReordering::EmptyHypothesisState(FFState &state,
    const ManagerBase &mgr, const InputType &input,
    const Hypothesis &hypo) const
{
  BidirectionalReorderingState &stateCast =
      static_cast<BidirectionalReorderingState&>(state);
  stateCast.Init(NULL, hypo.GetTargetPhrase(), hypo.GetInputPath(), true,
      &hypo.GetBitmap());
}
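The Init call above seeds the lexical-reordering state for the empty (initial) hypothesis: it records the hypothesis's target phrase, its input path, and its coverage bitmap so that the next expansion can compute an orientation against them. As a purely illustrative sketch of that bookkeeping, with hypothetical field names rather than Moses2's actual state layout:

// Illustrative only: a stripped-down state that just remembers what Init()
// receives in the example above (the names here are hypothetical).
struct SimpleReorderingState {
  const TargetPhrase<Moses2::Word> *prevTP;  // target phrase of the previous hypothesis
  const InputPath *prevPath;                 // source span that phrase covered
  bool first;                                // true only for the empty hypothesis
  const Bitmap *coverage;                    // source-coverage bitmap so far

  void Init(const SimpleReorderingState *prev,
      const TargetPhrase<Moses2::Word> &tp, const InputPath &path,
      bool isFirst, const Bitmap *bm) {
    prevTP = &tp;
    prevPath = &path;
    first = isFirst;
    coverage = bm;
    (void) prev;  // a real state would also keep the previous state for chaining
  }
};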
Example 2: Expand
void PhraseBasedReorderingState::Expand(const ManagerBase &mgr,
    const LexicalReordering &ff, const Hypothesis &hypo, size_t phraseTableInd,
    Scores &scores, FFState &state) const
{
  if ((m_direction != LRModel::Forward) || !m_first) {
    LRModel const &lrmodel = m_configuration;
    Range const &cur = hypo.GetInputPath().range;
    LRModel::ReorderingType reoType =
        (m_first ? lrmodel.GetOrientation(cur)
                 : lrmodel.GetOrientation(prevPath->range, cur));
    CopyScores(mgr.system, scores, hypo.GetTargetPhrase(), reoType);
  }

  PhraseBasedReorderingState &stateCast =
      static_cast<PhraseBasedReorderingState&>(state);
  stateCast.Init(this, hypo.GetTargetPhrase(), hypo.GetInputPath(), false,
      NULL);
}
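GetOrientation in this example classifies how the current source range relates to the previously translated one (monotone, swap, or discontinuous in the word-based lexicalized reordering model). A self-contained sketch of that classification on plain span indices; ClassifyOrientation is a hypothetical helper, not the LRModel API:

// Hypothetical simplified orientation check over inclusive [start, end] source spans.
enum SimpleOrientation { Monotone, Swap, Discontinuous };

SimpleOrientation ClassifyOrientation(size_t prevStart, size_t prevEnd,
    size_t curStart, size_t curEnd)
{
  if (curStart == prevEnd + 1) return Monotone;  // continues directly to the right
  if (curEnd + 1 == prevStart) return Swap;      // jumps back directly to the left
  return Discontinuous;                          // anything else
}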
Example 3: EvaluateWhenApplied
void LanguageModel::EvaluateWhenApplied(const ManagerBase &mgr,
    const Hypothesis &hypo, const FFState &prevState, Scores &scores,
    FFState &state) const
{
  const LMState &prevLMState = static_cast<const LMState &>(prevState);
  size_t numWords = prevLMState.numWords;

  // context is held backwards
  vector<const Factor*> context(numWords);
  for (size_t i = 0; i < numWords; ++i) {
    context[i] = prevLMState.lastWords[i];
  }
  //DebugContext(context);

  SCORE score = 0;
  std::pair<SCORE, void*> fromScoring;
  const TargetPhrase<Moses2::Word> &tp = hypo.GetTargetPhrase();
  for (size_t i = 0; i < tp.GetSize(); ++i) {
    const Word &word = tp[i];
    const Factor *factor = word[m_factorType];
    ShiftOrPush(context, factor);
    fromScoring = Score(context);
    score += fromScoring.first;
  }

  const Bitmap &bm = hypo.GetBitmap();
  if (bm.IsComplete()) {
    // everything translated
    ShiftOrPush(context, m_eos);
    fromScoring = Score(context);
    score += fromScoring.first;
    fromScoring.second = NULL;
    context.clear();
  } else {
    assert(context.size());
    if (context.size() == m_order) {
      context.resize(context.size() - 1);
    }
  }

  scores.PlusEquals(mgr.system, *this, score);

  // return state
  //DebugContext(context);
  LMState &stateCast = static_cast<LMState&>(state);
  MemPool &pool = mgr.GetPool();
  stateCast.Set(pool, fromScoring.second, context);
}
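The loop in Example 3 maintains a bounded n-gram context: each target word's factor is pushed into the context and the context is scored, and before the context is stored in the outgoing state it is trimmed back to at most order-1 history words ("context is held backwards", i.e. most recent word first). A minimal, self-contained sketch of that bookkeeping; ShiftOrPushSketch is a hypothetical stand-in, not Moses2's ShiftOrPush:

#include <deque>
#include <string>

// Hypothetical sketch: keep a backwards-held context (newest word first) of at
// most `order` words, i.e. the word being scored plus up to order-1 history words.
void ShiftOrPushSketch(std::deque<std::string> &context,
    const std::string &word, size_t order)
{
  context.push_front(word);        // newest word goes to the front
  if (context.size() > order) {
    context.pop_back();            // drop the oldest word beyond the n-gram window
  }
}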
Example 4: TranslateID
template<class Model>
void KENLM<Model>::EvaluateWhenApplied(const ManagerBase &mgr,
    const Hypothesis &hypo, const FFState &prevState, Scores &scores,
    FFState &state) const
{
  KenLMState &stateCast = static_cast<KenLMState&>(state);
  const System &system = mgr.system;

  const lm::ngram::State &in_state =
      static_cast<const KenLMState&>(prevState).state;

  if (!hypo.GetTargetPhrase().GetSize()) {
    stateCast.state = in_state;
    return;
  }

  const std::size_t begin = hypo.GetCurrTargetWordsRange().GetStartPos();
  //[begin, end) in STL-like fashion.
  const std::size_t end = hypo.GetCurrTargetWordsRange().GetEndPos() + 1;
  const std::size_t adjust_end = std::min(end, begin + m_ngram->Order() - 1);

  std::size_t position = begin;
  typename Model::State aux_state;
  typename Model::State *state0 = &stateCast.state, *state1 = &aux_state;

  float score = m_ngram->Score(in_state, TranslateID(hypo.GetWord(position)),
      *state0);
  ++position;
  for (; position < adjust_end; ++position) {
    score += m_ngram->Score(*state0, TranslateID(hypo.GetWord(position)),
        *state1);
    std::swap(state0, state1);
  }

  if (hypo.GetBitmap().IsComplete()) {
    // Score end of sentence.
    std::vector<lm::WordIndex> indices(m_ngram->Order() - 1);
    const lm::WordIndex *last = LastIDs(hypo, &indices.front());
    score += m_ngram->FullScoreForgotState(&indices.front(), last,
        m_ngram->GetVocabulary().EndSentence(), stateCast.state).prob;
  } else if (adjust_end < end) {
    // Get state after adding a long phrase.
    std::vector<lm::WordIndex> indices(m_ngram->Order() - 1);
    const lm::WordIndex *last = LastIDs(hypo, &indices.front());
    m_ngram->GetState(&indices.front(), last, stateCast.state);
  } else if (state0 != &stateCast.state) {
    // Short enough phrase that we can just reuse the state.
    stateCast.state = *state0;
  }

  score = TransformLMScore(score);

  bool OOVFeatureEnabled = false;
  if (OOVFeatureEnabled) {
    std::vector<float> scoresVec(2);
    scoresVec[0] = score;
    scoresVec[1] = 0.0;
    scores.PlusEquals(system, *this, scoresVec);
  } else {
    scores.PlusEquals(system, *this, score);
  }
}
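The Score calls above, with an input state and an output state swapped on each iteration, follow KenLM's standard query pattern. For readers who have not used KenLM directly, a minimal standalone sketch of that pattern outside Moses2 (the ARPA file name is a placeholder):

#include "lm/model.hh"
#include <iostream>
#include <string>

int main()
{
  using namespace lm::ngram;
  Model model("file.arpa");  // placeholder path to an ARPA or binary LM
  State state(model.BeginSentenceState()), out_state;
  const Vocabulary &vocab = model.GetVocabulary();

  std::string word;
  while (std::cin >> word) {
    // Score() returns a log10 probability and writes the next state to out_state.
    std::cout << model.Score(state, vocab.Index(word), out_state) << '\n';
    state = out_state;  // chain states, as the example does with state0/state1
  }
  return 0;
}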