本文整理汇总了C++中Token::GetPrecToken方法的典型用法代码示例。如果您正苦于以下问题:C++ Token::GetPrecToken方法的具体用法?C++ Token::GetPrecToken怎么用?C++ Token::GetPrecToken使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Token的用法示例。
在下文中一共展示了Token::GetPrecToken方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: TestPrecNextGraph
/**
 * Builds a small diamond-shaped token graph and verifies that the
 * forward (next) and backward (prec) links are symmetric:
 *
 *        B
 *       / \
 *   --A     D--
 *       \ /
 *        C
 */
void TestToken::TestPrecNextGraph()
{
    SpeechSet* speechSet = new SpeechSet();
    Speech* speech = new Speech(speechSet);
    Segment* segment = Segment::CreateWithDuration(0, 10000, speech);

    // Create the four nodes of the diamond.
    Token* tokenA = Token::CreateWithDuration(0, 0, segment);
    tokenA->SetSourceText("A");
    Token* tokenB = Token::CreateWithDuration(0, 0, segment);
    tokenB->SetSourceText("B");
    Token* tokenC = Token::CreateWithDuration(0, 0, segment);
    tokenC->SetSourceText("C");
    Token* tokenD = Token::CreateWithDuration(0, 0, segment);
    tokenD->SetSourceText("D");

    // Wire both directions explicitly: A -> {B,C} -> D.
    tokenA->AddNextToken(tokenB);
    tokenA->AddNextToken(tokenC);
    tokenB->AddPrecToken(tokenA);
    tokenB->AddNextToken(tokenD);
    tokenC->AddPrecToken(tokenA);
    tokenC->AddNextToken(tokenD);
    tokenD->AddPrecToken(tokenB);
    tokenD->AddPrecToken(tokenC);

    // Both forward paths A->B->D and A->C->D must reach the same end token.
    assert(tokenA->GetNextToken(0)->GetNextToken(0) == tokenA->GetNextToken(1)->GetNextToken(0));
    // BUGFIX: the second operand previously read tokenA->GetPrecToken(1),
    // but tokenA has no preceding tokens in this graph. The symmetric
    // backward check is that both paths D->B->A and D->C->A reach the
    // same start token.
    assert(tokenD->GetPrecToken(0)->GetPrecToken(0) == tokenD->GetPrecToken(1)->GetPrecToken(0));
    //TODO need to be more complex

    delete tokenA;
    delete tokenB;
    delete tokenC;
    delete tokenD;
    delete segment;
    delete speech;
    // NOTE(review): speechSet is never freed here — it leaks unless
    // Speech's destructor takes ownership of it; confirm ownership
    // before adding a delete (a blind delete could double-free).
}
示例2: RemoveSegment
/**
 * Removes 'currentSegment' from this Speech and destroys it, while
 * keeping the token lattice connected: every token that preceded the
 * segment's first tokens is re-linked to every token that followed the
 * segment's last tokens.
 *
 * Exits with E_INVALID if the segment is not found in m_segments.
 */
void Speech::RemoveSegment(Segment* currentSegment)
{
    list<Token*> predecessors;  // tokens preceding the segment's first tokens
    list<Token*> successors;    // tokens following the segment's last tokens

    // Detach the segment on the incoming side: remember each predecessor
    // and cut its forward link into the segment.
    for(size_t f=0; f<currentSegment->GetNumberOfFirstToken(); ++f)
    {
        Token* firstToken = currentSegment->GetFirstToken(f);
        if(firstToken)
        {
            for(size_t p=0; p<firstToken->GetNbOfPrecTokens(); ++p)
            {
                Token* predecessor = firstToken->GetPrecToken(p);
                predecessors.push_back(predecessor);
                predecessor->UnlinkNextToken(firstToken);
            }
        }
    }

    // Detach the segment on the outgoing side: remember each successor
    // and cut its backward link into the segment.
    for(size_t l=0; l<currentSegment->GetNumberOfLastToken(); ++l)
    {
        Token* lastToken = currentSegment->GetLastToken(l);
        if(lastToken)
        {
            for(size_t n=0; n<lastToken->GetNbOfNextTokens(); ++n)
            {
                Token* successor = lastToken->GetNextToken(n);
                successors.push_back(successor);
                successor->UnlinkPrevToken(lastToken);
            }
        }
    }

    // Re-attach the lattice: link EVERY predecessor to EVERY successor.
    // BUGFIX: the original code advanced a single 'next' iterator across
    // the whole outer loop, so only the first predecessor was re-linked
    // and all later predecessors were left dangling. The inner iteration
    // must restart from the beginning for each predecessor.
    for(list<Token*>::iterator prev = predecessors.begin(); prev != predecessors.end(); ++prev)
    {
        for(list<Token*>::iterator next = successors.begin(); next != successors.end(); ++next)
        {
            (*prev)->AddNextToken(*next);
            (*next)->AddPrecToken(*prev);
        }
    }
    predecessors.clear();
    successors.clear();

    // Remove the segment from the vector; a missing segment is a fatal
    // internal inconsistency.
    vector<Segment*>::iterator SegIter = m_segments.begin();
    while (SegIter != m_segments.end() && (*SegIter) != currentSegment)
        ++SegIter;
    if (SegIter == m_segments.end())
    {
        LOG_FATAL(m_pLogger, "Speech::RemoveSegment(), the segment is not at the right spot!!");
        exit(E_INVALID);
    }
    m_segments.erase(SegIter);

    // Destroy the segment now that nothing references it.
    delete currentSegment;
}
示例3: PreviousIndexes
/** returns the list of previous indexes */
// Fills 'listPrev' with the indexes that directly precede position 'index'
// in dimension 'dim'. Results are memoized per (dim, index) in
// m_TabCacheDimPreviousIndex; index 0 is a boundary whose only
// predecessor is 0 itself.
void Graph::PreviousIndexes(list<size_t>& listPrev, const size_t& dim, const size_t& index)
{
listPrev.clear();
// Asking for the previous tokens of the last
// Boundary case: position 0 reports itself (0) and nothing else.
if(index == 0)
{
listPrev.push_front(0);
return;
}
// Cache hit: reuse the predecessor list computed on a previous call.
list<size_t>* listprevious = m_TabCacheDimPreviousIndex[dim][index];
if(listprevious)
{
listPrev = *listprevious;
return;
}
// Cache miss: allocate the cache entry and fill it below.
// NOTE(review): this heap-allocated list appears to be owned by the
// cache table — confirm it is freed when the cache is torn down.
m_TabCacheDimPreviousIndex[dim][index] = new list<size_t>;
list<Token*>::iterator i, ei;
bool is0added = false; // guarantees the sentinel 0 is inserted at most once
// Asking for the first tokens to work on
// Deepest position of the dimension: its predecessors are the
// dimension's last tokens (a NULL last token maps to sentinel 0).
if(index == GetDimensionDeep(dim)-1)
{
i = m_TabLastTokens[dim].begin();
ei = m_TabLastTokens[dim].end();
while(i != ei)
{
if( (*i == NULL) && (!is0added) )
{
is0added = true;
//listPrev.push_front(0);
m_TabCacheDimPreviousIndex[dim][index]->push_front(0);
}
else
{
// Map each last token back to its index in this dimension.
//listPrev.push_front(m_TabMapTokenIndex[dim][*i]);
m_TabCacheDimPreviousIndex[dim][index]->push_front(m_TabMapTokenIndex[dim][*i]);
}
++i;
}
}
else
{
// Interior position: scan the dimension's first tokens. If the token
// at 'index' is itself a first token, its predecessor is the sentinel
// 0; otherwise its predecessors come from the token's own prec links.
i = m_TabFirstTokens[dim].begin();
ei = m_TabFirstTokens[dim].end();
while(i != ei)
{
if( (*i == m_TabVecHypRef[dim][index]) && (!is0added) )
{
is0added = true;
//listPrev.push_front(0);
m_TabCacheDimPreviousIndex[dim][index]->push_front(0);
}
else
{
Token* tokenIndex = m_TabVecHypRef[dim][index];
size_t nbprevtokens = tokenIndex->GetNbOfPrecTokens();
// A token with no preceding tokens also falls back to sentinel 0.
if(nbprevtokens == 0)
{
//listPrev.push_front(0);
m_TabCacheDimPreviousIndex[dim][index]->push_front(0);
}
else
{
// Translate each preceding token into its dimension index.
// NOTE(review): this branch runs once per remaining first token,
// so the same predecessor indexes may be pushed repeatedly —
// confirm whether duplicates in the cached list are intended.
for(size_t j=0; j<nbprevtokens; ++j)
{
//listPrev.push_front(m_TabMapTokenIndex[dim][tokenIndex->GetPrecToken(j)]);
m_TabCacheDimPreviousIndex[dim][index]->push_front(m_TabMapTokenIndex[dim][tokenIndex->GetPrecToken(j)]);
}
}
}
++i;
}
}
// Publish the freshly built cache entry to the caller.
listPrev = *(m_TabCacheDimPreviousIndex[dim][index]);
}