本文整理汇总了C++中Tokenizer::nextToken方法的典型用法代码示例。如果您正苦于以下问题:C++ Tokenizer::nextToken方法的具体用法?C++ Tokenizer::nextToken怎么用?C++ Tokenizer::nextToken使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Tokenizer的用法示例。
在下文中一共展示了Tokenizer::nextToken方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: tokenize_inv_spec
// Tokenizes the literal input "INV(1,2,3);" and verifies that the stream
// yields an INV marker followed by the numbers 1, 2 and 3.
// Returns non-zero on success.
static int tokenize_inv_spec(OE oe) {
    Tokenizer tk = FunCallTokenizer_New(oe);
    tk->start((byte*)"INV(1,2,3);",11);
    // The first token was previously named "xor" — a reserved alternative
    // operator token in C++ (so this did not compile as C++) and misleading,
    // since the expected token type is INV.  Renamed to "inv".
    Token inv = {0};   tk->nextToken(&inv);
    Token one = {0};   tk->nextToken(&one);
    Token two = {0};   tk->nextToken(&two);
    Token three = {0}; tk->nextToken(&three);
    FunCallTokenizer_Destroy(&tk);
    return inv.type == INV
        && one.type == NUM && one.value.num == 1
        && two.type == NUM && two.value.num == 2
        && three.type == NUM && three.value.num == 3;
}
示例2: setMatchMode
// Splits ElementId on '-'.  The first piece becomes the new ElementId and
// the next piece (if any) becomes the SemanticsId.  MatchMode is set to
// true only when the text immediately following the first '-' parses as a
// complete integer.
void Semantics::setMatchMode(){
    Tokenizer tknzr = Tokenizer(this->ElementId,"-");
    this->ElementId = tknzr.nextToken();
    string str = tknzr.currToken();
    if (str.empty() || ((!isdigit(str[0])) && (str[0] != '-') && (str[0] != '+'))) {
        // Not even a plausible number.  BUG FIX: this assignment used to be
        // dead code — it was unconditionally overwritten by the strtol check
        // below, so an empty token ended up reporting MatchMode == true
        // (strtol's end pointer immediately hits the terminator).
        this->MatchMode = false;
    } else {
        char * p;
        strtol(str.c_str(), &p, 10);
        // Whole token consumed by strtol => the token is a valid integer.
        this->MatchMode = (*p == 0);
    }
    // Take the next token (if any) as the SemanticsId.
    while (tknzr.hasMoreTokens()) {
        this->SemanticsId = tknzr.nextToken();
        break;
    }
}
示例3: tokenize_file
// Reads the AES circuit file and counts how many tokens of each kind the
// tokenizer produces; the expected totals are hard-coded for that file.
// Returns non-zero when every count matches.
static int tokenize_file(OE oe) {
    uint lbuffer = 1060365;
    uint n_and = 0, n_xor = 0, n_inv = 0, n_num = 0, n_tok = 0;
    Tokenizer tk = 0;
    Token cur = {0};
    byte * buffer = oe->getmem(lbuffer);
    uint fp = 0;
    // Load the whole test file into memory.
    oe->open("file ../test/AES",&fp);
    oe->read(fp,buffer,&lbuffer);
    oe->close(fp);
    tk = FunCallTokenizer_New(oe);
    tk->start(buffer,lbuffer);
    // Consume tokens until DONE; note the DONE token itself is counted too.
    do {
        tk->nextToken(&cur);
        switch (cur.type) {
        case INV: n_inv += 1; break;
        case AND: n_and += 1; break;
        case XOR: n_xor += 1; break;
        case NUM: n_num += 1; break;
        default: break;
        }
        n_tok += 1;
    } while (cur.type != DONE);
    DBG_P(oe,"\nANDS: %u\nXORS: %u\nINVS: %u\nNUMS: %u\nTOKENS: %u\n",n_and,n_xor,n_inv,n_num,n_tok);
    oe->putmem(buffer);
    return n_and == 6800 && n_xor == 24448 && n_num == 139136 && n_tok == 172076 && n_inv == 1691;
}
示例4: addTermsToDocument
// Adds every term produced by `tokens` to the Xapian document `doc`.
// Depending on `mode`, each term is stored raw (STORE_UNSTEM), stemmed
// (STORE_STEM), or both at the same position (STORE_BOTH).  Terms that
// start with a capital letter are additionally indexed with an "R" prefix
// at the current position.  `termPos` is advanced once per accepted term.
void XapianIndex::addTermsToDocument(Tokenizer &tokens, Xapian::Document &doc,
const string &prefix, Xapian::termcount &termPos, StemmingMode mode) const
{
Xapian::Stem *pStemmer = NULL;
string term;
// Do we know what language to use for stemming ?
if (m_stemLanguage.empty() == false)
{
// NOTE(review): raw new/delete — pStemmer would leak if add_posting
// threw; consider a smart pointer or a stack object.
pStemmer = new Xapian::Stem(StringManip::toLowerCase(m_stemLanguage));
}
// Get the terms
while (tokens.nextToken(term) == true)
{
if (term.empty() == true)
{
continue;
}
// Does it start with a capital letter ?
if (isupper((int)term[0]) != 0)
{
// R-prefix the raw term (posted at termPos without advancing it)
doc.add_posting(string("R") + term, termPos);
}
// Lower case the term
term = StringManip::toLowerCase(term);
// Stem the term ?
if ((mode == STORE_UNSTEM) ||
(pStemmer == NULL))
{
doc.add_posting(limitTermLength(prefix + term), termPos++);
}
else if (mode == STORE_STEM)
{
string stemmedTerm = pStemmer->stem_word(term);
doc.add_posting(limitTermLength(prefix + stemmedTerm), termPos++);
}
else if (mode == STORE_BOTH)
{
string stemmedTerm = pStemmer->stem_word(term);
// Add both
doc.add_posting(limitTermLength(prefix + term), termPos);
// ...at the same position
doc.add_posting(limitTermLength(prefix + stemmedTerm), termPos++);
}
}
#ifdef DEBUG
cout << "XapianIndex::addTermsToDocument: added " << termPos << " terms" << endl;
#endif
if (pStemmer != NULL)
{
delete pStemmer;
}
}
示例5: tokenize_inv
// Tokenizes the bare input "INV" and checks that a single INV token
// is produced.  Returns non-zero on success.
static int tokenize_inv(OE oe) {
    Token token = {0};
    Tokenizer tokenizer = FunCallTokenizer_New(oe);
    tokenizer->start((byte*)"INV",3);
    tokenizer->nextToken(&token);
    FunCallTokenizer_Destroy(&tokenizer);
    return token.type == INV;
}
示例6: infToPost
// Converts the infix token stream into postfix order, dispatching each
// token to the matching process_* handler and holding pending operators on
// a stack.  Parsing stops at a SEMICOLON or OPENBRACE.  Errors are
// appended to `errs` and abort processing as soon as it becomes non-empty.
void Line::infToPost(Tokenizer& tokens, string& errs)
/*****************************************************************************/
{
stack<Operator*> ops;
//loop through all tokens and handle them
for ( ; !tokens.eol(); tokens.nextToken()) {
if (tokens.token().type() == FUNC) {
process_function(tokens, errs);
++_tempSize; //for the return value
}
else if (tokens.token().type() == DECL) {
process_newvar(tokens, errs);
}
else if (tokens.token().type() == CONSTANT) {
process_number(tokens);
}
else if (tokens.token().type() == VAR) {
process_existing_var(tokens, errs);
}
else if (tokens.token().type() == OPERATOR) {
process_operator(tokens, ops);
}
else if (tokens.token().type() == SEMICOLON ||
tokens.token().type() == OPENBRACE) {
// End of statement: consume the terminator and verify nothing follows.
tokens.nextToken();
assert(tokens.eol());
break;
}
else {
// Anything else is a syntax error for this grammar.
CHECKERR(true, syntax_err(tokens.token().value()))
}
if (errs != "") return;
}
//put remaining opps at end of postfixLine
while (!ops.empty()) {
addNewObject(ops.top());
ops.pop();
}
compile(errs, tokens);
performNumericOps();
}
示例7: setGmshCommand
// Parses a Gmsh command of the form "name{arg1, arg2, ...}" and builds two
// strings: GmshCommand, a normalized copy with cleaned argument text, and
// an essiTag of the form "name{ , , }<N>" where N is the number of
// non-empty arguments found.  Arguments that match entries in VarList are
// rewritten in VarList to the literal "variable".
// NOTE(review): the last argument is handled by a duplicated copy of the
// loop body below — presumably because it gets "}" instead of "," as its
// terminator; confirm before refactoring.
void Semantics::setGmshCommand(const string& Command){
int nofTokens = 0;
string Gcommand = "", essiTag="";
// Delimiters cover whitespace-free splitting on braces, parens and commas.
Tokenizer inpString = Tokenizer(Command," {,;}()");
nofTokens = inpString.countTokens()-1;
this->NofGmshVariables = nofTokens-1;
// First token is the command name.
Gcommand = Gcommand + inpString.nextToken() + "{ ";
essiTag = essiTag + inpString.currToken() + "{";
// All arguments except the last one.
for( int i=0 ;i<nofTokens-1; i++){
string variable= this->delSpaces(inpString.nextToken());
vector<string>::iterator it;
it = find(this->VarList.begin(),this->VarList.end(),variable);
if (it != this->VarList.end())
*it = "variable";
Gcommand = Gcommand +variable+" ,";
essiTag = essiTag + " ,";
}
// The last argument; only counted when non-empty.
string variable= this->delSpaces(inpString.nextToken());
if(variable.compare("")){
this->NofGmshVariables++;
}
vector<string>::iterator it;
it = find(this->VarList.begin(),this->VarList.end(),variable);
if (it != this->VarList.end())
*it = "variable";
Gcommand = Gcommand +variable + " }";
essiTag = essiTag + " }"+to_string(this->NofGmshVariables);
// cout << Gcommand << endl;
// cout << essiTag << endl;
this->GmshCommand= Gcommand;
this->setEssiTag(essiTag);
}
示例8: removeFirstPostingsFromDocument
// Mirror of addTermsToDocument: re-tokenizes the same content and removes
// the first posting of each generated term from `doc`, so the terms of one
// previously-indexed section can be retracted.  The stemming `mode` must
// match the one used when the terms were added.
void XapianIndex::removeFirstPostingsFromDocument(Tokenizer &tokens, Xapian::Document &doc,
const string &prefix, const string &language, StemmingMode mode) const
{
Xapian::TermIterator termListIter = doc.termlist_begin();
Xapian::Stem *pStemmer = NULL;
string term;
// Do we know what language to use for stemming ?
if (language.empty() == false)
{
// NOTE(review): raw new/delete — would leak if an exception escapes
// the loop; consider a smart pointer or a stack object.
pStemmer = new Xapian::Stem(StringManip::toLowerCase(language));
}
// Get the terms and remove the first posting for each
while (tokens.nextToken(term) == true)
{
if (term.empty() == true)
{
continue;
}
// Does it start with a capital letter ?
if (isupper((int)term[0]) != 0)
{
// R-prefix the raw term
removeFirstPosting(doc, termListIter, string("R") + term);
}
// Lower case the term
term = StringManip::toLowerCase(term);
// Stem the term ?
if ((mode == STORE_UNSTEM) ||
(pStemmer == NULL))
{
removeFirstPosting(doc, termListIter, limitTermLength(prefix + term));
}
else if (mode == STORE_STEM)
{
removeFirstPosting(doc, termListIter, limitTermLength(prefix + pStemmer->stem_word(term)));
}
else if (mode == STORE_BOTH)
{
string stemmedTerm = pStemmer->stem_word(term);
removeFirstPosting(doc, termListIter, limitTermLength(prefix + term));
// Only remove the stemmed form when it actually differs from the raw
// term, otherwise the same posting would be removed twice.
if (stemmedTerm != term)
{
removeFirstPosting(doc, termListIter, limitTermLength(prefix + stemmedTerm));
}
}
}
if (pStemmer != NULL)
{
delete pStemmer;
}
}
示例9: tokenize_num
// Tokenizes the bare number "145" and checks that a single NUM token with
// value 145 is produced.  Returns non-zero on success.
// (Removed an unused local buffer `char b[64]` that served no purpose.)
static int tokenize_num(OE oe) {
    Tokenizer tk = FunCallTokenizer_New(oe);
    tk->start((byte*)"145",3);
    Token t = {0};
    tk->nextToken(&t);
    FunCallTokenizer_Destroy(&tk);
    return t.type == NUM && t.value.num == 145;
}
示例10: makeFunction
// Splits the '|'-separated Gmsh command list and registers one Semantics
// entry per command in the Function map, keyed by its ESSI tag.
void Mapping::makeFunction(string Id, string GmshCommandList, string EssiCommand) {
    Tokenizer commandList = Tokenizer(GmshCommandList,"|");
    while (commandList.hasMoreTokens()) {
        Semantics entry = Semantics(commandList.nextToken(), EssiCommand);
        entry.setElementId(Id);
        this->Function.insert(pair<string,Semantics>(entry.getEssiTag(), entry));
    }
}
示例11: process_newvar
// Handles a DECL token: reads the type and variable name, detects an
// optional "[size-expr]" suffix marking an array declaration (the size
// expression is compiled into its own Line), and registers the variable
// with the parent block.  Redeclaration is reported through CHECKERR.
void Line::process_newvar(Tokenizer& tokens, string& errs)
/*****************************************************************************/
{
string type = tokens.token().value();
tokens.nextToken();
CHECKERR(tokens.eol() || tokens.token().type() != VAR, var_err())
string name = tokens.token().value();
tokens.nextToken();
bool isArray = false;
Line* sizePtr = NULL;
//if the following token is an open index, we know that our new variable
//is an array
if (!tokens.eol() && tokens.token().type() == OPENINDEX) {
vector<Token> size_expr;
tokens.nextToken(); //move past openindex
//get all the tokens that are part of the array's size expression
while (!tokens.eol() && tokens.token().type() != CLOSEINDEX) {
size_expr.push_back(tokens.token());
tokens.nextToken();
}
CHECKERR ((size_expr.size() == 0), ara_err(name))
CHECKERR ((tokens.token().type() != CLOSEINDEX), ara_err(name))
isArray = true;
// Compile the size expression as a standalone sub-line.
Tokenizer tempToken(size_expr);
sizePtr = new Line(tempToken, _parent, errs, true);
}
else
// Not an array: step back so the caller's loop advance lands correctly.
tokens.previousToken();
if (_parent->getVar(name) == NULL)
add_newvar(type, name, sizePtr, isArray);
else
{ CHECKERR(true, dec_err(name)) }
}
示例12: setContents
// Parses one physical-group descriptor: a numeric type, a numeric id, the
// physical tag, then (after switching the tokenizer to "rest of input"
// mode) the remaining text as a gmESSI command whose trailing character is
// stripped.  Every bracketed "[...]" sub-command matched by the regex is
// processed individually with its surrounding brackets removed.
void PhysicalGroup::setContents(const string& PhysicDesc){
this->PhysicDes = PhysicDesc;
Tokenizer tknzr = Tokenizer(PhysicDesc," \t\v\n\r\f\"$");
this->Type = stoi(tknzr.nextToken());
this->Id = stoi(tknzr.nextToken());
this->PhysicTag = tknzr.nextToken();
// Empty delimiter set: the next nextToken() returns the remainder.
tknzr.setDelimiter("");
boost::sregex_iterator end;string gmESSI_Command = trim(tknzr.nextToken());
gmESSI_Command = gmESSI_Command.substr(0,gmESSI_Command.length()-1);
// Matches balanced-ish "[...]" groups, tolerating nested brackets/parens.
boost::regex CheckRegex("\\[([^(\\[\\])]|\\(*\\)*)*(\\[([^(\\[\\])]|\\(*\\)*)*\\])*([^(\\[\\])]|\\(*\\)*)*\\]");
boost::sregex_iterator its(gmESSI_Command.begin(), gmESSI_Command.end(), CheckRegex);
for (; its != end; ++its){
string SubgmESSI_Command = its->str();
if(SubgmESSI_Command.compare(""))
this->Process(SubgmESSI_Command.substr(1,SubgmESSI_Command.length()-2));
}
}
示例13: process_existing_var
// Handles a VAR token referring to an already-declared variable.  Scalars
// (and array variables passed as function arguments) are added directly;
// an array use otherwise requires an "[index-expr]" suffix, which is
// compiled into its own Line and wrapped in an ArrayIndex object.
void Line::process_existing_var(Tokenizer& tokens, string& errs)
/*****************************************************************************/
{
string temp = tokens.token().value();
Variable* v = _parent->getVar(temp);
CHECKERR ((v == NULL), und_err(temp))
//Note: we must allow for arrays to be passed to RTBoundFuncs without
//having to use braces [].
if (tokens.isArg()) {
addNewObject(v);
return;
}
//When we see an array variable, it must be followed by an index
if (v->getObjectType() == ArrayVarOT) {
tokens.nextToken();
CHECKERR((tokens.eol() || tokens.token().type()!=OPENINDEX),ara_err(temp))
tokens.nextToken(); //move past OPENINDEX
vector<Token> index_list;
//get all the tokens that are part of the array's index expression
while (!tokens.eol() && tokens.token().type() != CLOSEINDEX) {
index_list.push_back(tokens.token());
tokens.nextToken();
}
CHECKERR ((index_list.size() == 0), ara_err(temp))
CHECKERR ((tokens.eol()||tokens.token().type()!=CLOSEINDEX), ara_err(temp))
// Compile the index expression as a standalone sub-line.
Tokenizer tempToken(index_list);
Line* indexPtr = new Line(tempToken, _parent, errs, true);
ArrayIndex* ai = new ArrayIndex(v, indexPtr);
addNewObject(ai);
}
else {
addNewObject(v);
}
}
示例14: Block
// Builds a conditional ("if") block: consumes the "if" token, parses the
// condition expression into a Line, then parses the block's substatements.
// Returns early — leaving the object partially constructed — if `errs`
// already holds an error or one occurs while parsing the condition.
ConditionalBlock::
ConditionalBlock(map<string, Variable*> vars, Tokenizer& lines, string& errs)
: Block(vars)
/*****************************************************************************/
{
if (errs != "") return;
_executed = false;
_condition = NULL;
lines.nextToken(); //move past "if"
//create the conditional statement
_condition = new Line(lines, this, errs, true);
if (errs != "") return;
//create this block's substatements
createSubStatements(lines, errs);
}
示例15: main
int main()
{
// todo: add a welcome message with instructions
// todo: initialize your linked list and stack
string printCommand = "?";
string quitCommand = "quit";
string input;
Tokenizer tokenizer;
string token;
while(true)
{
getline(cin, input);
if(input == printCommand)
{
// todo: print all variables in the linked list
}
else if(input == quitCommand)
{
break;
}
else
{
tokenizer.tokenize(input);
while(tokenizer.nextToken(token))
{
// todo: use token and evaluate the expression
// cout << "string token" << token << endl; // prints full string over the loop no spaces
}
// todo: check the result of the expression and either print it out or store it in the linked list
}
}
return 0;
}