This article collects typical usage examples of the C++ method TargetPhrase::SetAlignTerm. If you are wondering what TargetPhrase::SetAlignTerm does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also look further into the containing class, TargetPhrase, for more context.
The following presents 2 code examples of TargetPhrase::SetAlignTerm, sorted by popularity by default.
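Before the full examples, here is a minimal usage sketch of SetAlignTerm. The examples below appear to come from the Moses SMT toolkit; this sketch is not taken from either of them and only mirrors the calling pattern they show, namely collecting (source word index, target word index) pairs into a set and passing that set to TargetPhrase::SetAlignTerm. The include paths and the helper function AttachTermAlignment are illustrative assumptions, not part of the original examples.

// Minimal usage sketch (illustrative only, not taken from the examples below).
// Assumes a Moses-based build; the header path is an assumption.
#include <cstddef>
#include <set>
#include <utility>
#include "moses/TargetPhrase.h"

// Hypothetical helper: attach a word-level (terminal) alignment to an
// already constructed target phrase.
void AttachTermAlignment(Moses::TargetPhrase &targetPhrase)
{
  // Alignment points are (source word index, target word index) pairs,
  // e.g. source word 0 aligns to target word 0 and source word 1 to target word 2.
  std::set<std::pair<size_t, size_t> > alignment;
  alignment.insert(std::pair<size_t, size_t>(0, 0));
  alignment.insert(std::pair<size_t, size_t>(1, 2));

  // Overload taking a set of index pairs, as used in Example 1 below;
  // Example 2 uses the overload taking a pointer to an existing AlignmentInfo.
  targetPhrase.SetAlignTerm(alignment);
}

Example 1 shows SetAlignTerm inside the compact phrase-table decoder, where the alignment set is built up while decoding; Example 2 passes a pointer to the alignment of an existing phrase when building an interpolated phrase table.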
Example 1: DecodeCollection
//......... part of the code omitted here .........
{
Phrase subPhrase = sourcePhrase.GetSubString(WordsRange(srcStart, srcEnd));
subTpv = CreateTargetPhraseCollection(subPhrase, false);
}
else {
// false positive consistency check
if(rank >= tpv->size()-1)
return TargetPhraseVectorPtr();
}
// false positive consistency check
if(subTpv != NULL && rank < subTpv->size())
{
// insert the subphrase into the main target phrase
TargetPhrase& subTp = subTpv->at(rank);
if(m_phraseDictionary.m_useAlignmentInfo)
{
// reconstruct the alignment data based on the alignment of the subphrase
for(AlignmentInfo::const_iterator it = subTp.GetAlignTerm().begin();
it != subTp.GetAlignTerm().end(); it++)
{
alignment.insert(AlignPointSizeT(srcStart + it->first,
targetPhrase->GetSize() + it->second));
}
}
targetPhrase->Append(subTp);
}
else
return TargetPhraseVectorPtr();
}
}
else
{
Word word;
word.CreateFromString(Output, *m_output,
GetTargetSymbol(symbol), false);
targetPhrase->AddWord(word);
}
}
}
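// state == Score: decode the next encoded score component; once all
// m_numScoreComponent scores have been read, set the phrase score and
// move on to the Alignment (or Add) state.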
else if(state == Score)
{
size_t idx = m_multipleScoreTrees ? scores.size() : 0;
float score = m_scoreTrees[idx]->Read(encodedBitStream);
scores.push_back(score);
if(scores.size() == m_numScoreComponent)
{
targetPhrase->SetScore(m_feature, scores, ScoreComponentCollection() /*sparse*/,
*m_weight, m_weightWP, *m_languageModels);
if(m_containsAlignmentInfo)
state = Alignment;
else
state = Add;
}
}
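// state == Alignment: decode (source, target) alignment points until the
// stop symbol is reached.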
else if(state == Alignment)
{
AlignPoint alignPoint = m_alignTree->Read(encodedBitStream);
if(alignPoint == alignStopSymbol)
{
state = Add;
}
else
{
if(m_phraseDictionary.m_useAlignmentInfo)
alignment.insert(AlignPointSizeT(alignPoint));
}
}
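// state == Add: the target phrase is complete; attach the collected word
// alignment via SetAlignTerm and decide whether decoding continues.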
if(state == Add)
{
if(m_phraseDictionary.m_useAlignmentInfo) {
targetPhrase->SetAlignTerm(alignment);
}
if(m_coding == PREnc)
{
if(!m_maxRank || tpv->size() <= m_maxRank)
bitsLeft = encodedBitStream.TellFromEnd();
if(!topLevel && m_maxRank && tpv->size() >= m_maxRank)
break;
}
if(encodedBitStream.TellFromEnd() <= 8)
break;
state = New;
}
}
if(m_coding == PREnc && !extending)
{
bitsLeft = bitsLeft > 8 ? bitsLeft : 0;
m_decodingCache.Cache(sourcePhrase, tpv, bitsLeft, m_maxRank);
}
return tpv;
}
Example 2: combinedScores
const TargetPhraseCollection*
PhraseDictionaryInterpolated::GetTargetPhraseCollection(const Phrase& src) const {
delete m_targetPhrases;
m_targetPhrases = new TargetPhraseCollection();
PhraseSet allPhrases;
vector<PhraseSet> phrasesByTable(m_dictionaries.size());
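// Collect candidate target phrases for the source phrase from every
// component phrase table.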
for (size_t i = 0; i < m_dictionaries.size(); ++i) {
const TargetPhraseCollection* phrases = m_dictionaries[i]->GetTargetPhraseCollection(src);
if (phrases) {
for (TargetPhraseCollection::const_iterator j = phrases->begin();
j != phrases->end(); ++j) {
allPhrases.insert(*j);
phrasesByTable[i].insert(*j);
}
}
}
ScoreComponentCollection sparseVector;
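// For each distinct target phrase, copy its source phrase and alignment
// (via SetAlignTerm), then interpolate the per-table scores in probability
// space using the table weights.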
for (PhraseSet::const_iterator i = allPhrases.begin(); i != allPhrases.end(); ++i) {
TargetPhrase* combinedPhrase = new TargetPhrase((Phrase)**i);
//combinedPhrase->ResetScore();
//cerr << *combinedPhrase << " " << combinedPhrase->GetScoreBreakdown() << endl;
combinedPhrase->SetSourcePhrase((*i)->GetSourcePhrase());
combinedPhrase->SetAlignTerm(&((*i)->GetAlignTerm()));
combinedPhrase->SetAlignNonTerm(&((*i)->GetAlignTerm()));
Scores combinedScores(GetFeature()->GetNumScoreComponents());
for (size_t j = 0; j < phrasesByTable.size(); ++j) {
PhraseSet::const_iterator tablePhrase = phrasesByTable[j].find(combinedPhrase);
if (tablePhrase != phrasesByTable[j].end()) {
Scores tableScores = (*tablePhrase)->GetScoreBreakdown()
.GetScoresForProducer(GetFeature());
//cerr << "Scores from " << j << " table: ";
for (size_t k = 0; k < tableScores.size()-1; ++k) {
//cerr << tableScores[k] << "(" << exp(tableScores[k]) << ") ";
combinedScores[k] += m_weights[k][j] * exp(tableScores[k]);
//cerr << m_weights[k][j] * exp(tableScores[k]) << " ";
}
//cerr << endl;
}
}
//map back to log space
//cerr << "Combined ";
for (size_t k = 0; k < combinedScores.size()-1; ++k) {
//cerr << combinedScores[k] << " ";
combinedScores[k] = log(combinedScores[k]);
//cerr << combinedScores[k] << " ";
}
//cerr << endl;
combinedScores.back() = 1; //assume last is penalty
combinedPhrase->SetScore(
GetFeature(),
combinedScores,
sparseVector,
m_weightT,
m_weightWP,
*m_languageModels);
//cerr << *combinedPhrase << " " << combinedPhrase->GetScoreBreakdown() << endl;
m_targetPhrases->Add(combinedPhrase);
}
m_targetPhrases->Prune(true, m_tableLimit);
return m_targetPhrases;
}