This page collects typical usage examples of the C++ token::type method. If you are wondering what exactly C++ token::type does, how to call it, or what real-world uses look like, the hand-picked code examples below may help. You can also explore further usage examples of the token class that this method belongs to.
A total of 9 code examples of the token::type method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
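For orientation, here is a minimal sketch of what a token class exposing a type() accessor might look like. This is an illustrative assumption only: each project below defines its own token type and its own set of type constants, and the member names used here (type_, text_) are invented for the sketch. It simply mirrors the getter/setter pattern (tok.type() to read, tok.type(value) to set) that recurs in the examples.

#include <string>

// Hypothetical token class: stores a kind code and the matched text.
class token {
public:
    token() = default;
    token(int type, std::string text) : type_(type), text_(std::move(text)) {}

    int type() const { return type_; }         // read the token kind
    void type(int t) { type_ = t; }            // set the token kind
    const std::string& text() const { return text_; }

private:
    int type_ = 0;       // one of a project-specific set of token-kind constants
    std::string text_;   // the lexeme this token was built from
};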
Example 1: parse
virtual parse_result parse( string::const_iterator& i,
                            string::const_iterator e,
                            token& tok ) const
{
    // Consume everything up to the end of the line.
    string::const_iterator j = i;
    while ( j < e && *j != '\n' ) ++j;

    // Emit the consumed text as a comment token and advance the input.
    tok = string(i, j);
    tok.type( tok_types::comment );
    i = j;
    return done;
}
Example 2: operator==
bool token::operator==(token compareToken)
{
    return tokenType == compareToken.type();
}
Example 3: format_node
void dotfile_visitor::format_node(abstract_node *node, const char *name, const token &tok)
{
    if (!m_connectTo)
    {
        if (tok.type() == token_types::STRING_LITERAL)
            fprintf(m_file, "\tptr%p [label=\"[%s]\\n[%s]\\n\\\"%.*s\\\"\"];\n", node, name, token_types::names[tok.type()], tok.length() - 2, tok.text() + 1);
        else if (tok.text())
            fprintf(m_file, "\tptr%p [label=\"[%s]\\n[%s]\\n%.*s\"];\n", node, name, token_types::names[tok.type()], tok.length(), tok.text());
        else
            format_node(node, name);
    }
}
Example 4: validate
virtual void validate( const token& t ) const
{
    using namespace tok_types;

    switch ( t.type() ) {
    case name: case num_lit: case open_paren: case close_paren:
    case comma: case times: case assignment: case new_line: case semicolon:
    case comment: case string_lit: case transp_lit: case pn_lit:
    case def_assign: case logic_and: case logic_or:
        return;

    case regex_lit: case open_brace: case close_brace: case colon:
        // These can only work when msiril comments are disabled
        if ( !args.msiril_comments ) return;
        break;
    }

    throw runtime_error( make_string() << "Unknown token in input: " << t );
}
Example 5: insert_to_array
bool json_tree_builder::insert_to_array(const token &token_)
{
    auto arr = std::static_pointer_cast< json_array >(_current);
    switch (token_.type())
    {
    case T_COMMA:
        if (_accept_comma)
        {
            _accept_comma = false;
            _accept_end = false;
            _accept_value = true;
            return true;
        }
        else
        {
            return false;
        }
        break;
    case T_RBRACKET:
        if (_accept_end)
        {
            if (_parents.empty())
            { // parsing successfully finished
                _finished = true;
                return true;
            }
            _current = _parents.top();
            _parents.pop();
            _accept_colon = false;
            _accept_comma = true;
            _accept_end = true;
            _accept_name = false;
            _accept_value = _current->jtype() == json_type::J_ARRAY;
            return true;
        }
        else
        {
            return false;
        }
        break;
    case T_LBRACE:
    case T_LBRACKET:
        if (_accept_value)
        {
            auto new_val = to_value(token_);
            _parents.push(_current);
            _current = new_val;
            arr->insert(new_val);
            _accept_colon = false;
            _accept_comma = false;
            _accept_end = true;
            _accept_name = true;
            _accept_value = new_val->jtype() == json_type::J_ARRAY;
            return true;
        }
        else
        {
            return false;
        }
        break;
    case T_STR:
    case T_NUM:
    case T_TRUE:
    case T_FALSE:
    case T_NULL:
        if (_accept_value)
        {
            arr->insert(to_value(token_));
            _accept_comma = true;
            _accept_end = true;
            _accept_value = false;
            return true;
        }
        else
        {
            return false;
        }
        break;
    default:
        return false; // unexpected token
    }
    return false; // should not get here
}
Example 6: insert_to_object
bool json_tree_builder::insert_to_object(const token &token_)
{
    auto obj = std::static_pointer_cast< json_object >(_current);
    switch (token_.type())
    {
    case T_STR:
        if (_accept_name)
        {
            if (obj->contains(token_.text()))
                throw std::logic_error("duplicate key: " + token_.text()); // duplicate key name
            obj->insert(token_.text());
            _accept_colon = true;
            _accept_comma = false;
            _accept_name = false;
            _accept_value = false;
            _accept_end = false;
            return true;
        }
        else if (_accept_value)
        {
            auto str = to_value(token_);
            obj->insert(str);
            _accept_colon = false;
            _accept_comma = true;
            _accept_name = false;
            _accept_value = false;
            _accept_end = true;
            return true;
        }
        else
        { // unexpected token
            return false;
        }
        break;
    case T_COLON:
        if (_accept_colon)
        {
            _accept_colon = false;
            _accept_comma = false;
            _accept_end = false;
            _accept_name = false;
            _accept_value = true;
            return true;
        }
        else
        {
            return false;
        }
        break;
    case T_LBRACE: // new object
        if (_accept_value)
        {
            auto new_obj = to_value(token_);
            _parents.push(_current);
            _current = new_obj;
            obj->insert(new_obj);
            _accept_colon = false;
            _accept_comma = false;
            _accept_end = true;
            _accept_name = true;
            _accept_value = false;
            return true;
        }
        else
        {
            return false;
        }
        break;
    case T_COMMA:
        if (_accept_comma)
        {
            _accept_colon = false;
            _accept_comma = false;
            _accept_end = true; // not valid JSON
            _accept_name = true;
            _accept_value = false;
            return true;
        }
        else
        {
            return false;
        }
        break;
    case T_LBRACKET: // new array
        if (_accept_value)
        {
            auto new_arr = to_value(token_);
            _parents.push(_current);
            _current = new_arr;
            obj->insert(new_arr);
            //... the rest of this method is omitted here ...
Example 7: calcLex
//this function finds the next token and returns it
token calcLex()
{
    curToken.clearData();
    char currentChar;
    while(1) //infinite loop, breaks through a return statement
    {
        //get the next char!
        currentChar = filestream.get();
        //used to ignore whitespace as a token
        while( currentChar == ' ' || currentChar == '\t' || currentChar == '\n')
        {
            currentChar = filestream.get();
        }
        //if the current char is the end of a file, it returns the end of file symbol
        if(currentChar == EOF)
        {
            curToken.type(endOfFileSym);
            return curToken;
        }
        //this is the comment logic: if the next chars are /*...
        if(followingChars("/*", currentChar, curToken))
        {
            //then while it isn't ending the line, or finding the end of comment sentinel...
            while(currentChar != '\n' && !followingChars("*/", currentChar, curToken))
            {
                //move onto the next character
                currentChar = filestream.get();
            }
            //move to the next character after the comment or line has ended
            //hop back up to the top of the while loop
            continue;
        }
        //if the chars ":=" are found, return the assignment symbol
        if(followingChars(":=", currentChar, curToken))
        {
            curToken.type(assignSym);
            curToken.data(":=");
            return curToken;
        }
        //if the chars "read" are found, return the read symbol
        if(followingChars("read", currentChar, curToken))
        {
            curToken.type(readSym);
            curToken.data("read");
            return curToken;
        }
        //if the chars "write" are found, return the write symbol
        if(followingChars("write", currentChar, curToken))
        {
            curToken.type(writeSym);
            curToken.data("write");
            return curToken;
        }
        //if this is put above the followingChars, it adds a duplicate letter.
        curToken.appendChar(currentChar);
        //checks for identifier strings only starting with letters and underscores
        if( isalpha(currentChar) || (currentChar == '_') )
        {
            while( isalnum(currentChar = filestream.get()) || (currentChar == '_'))
            {
                curToken.appendChar(currentChar);
            }
            //once the currentChar falls off the end of the identifier string, it must hop back to read the next one
            filestream.unget();
            //returns the identifier int
            curToken.type(identifier);
            return curToken;
        }
        //if the current char is a number
        if(isdigit(currentChar))
        {
            while ( isdigit(currentChar = filestream.get()) || currentChar == '.')
            {
                if (currentChar == '.')
                {
                    curToken.appendChar(currentChar);
                    currentChar = filestream.get();
                    if (!isdigit(currentChar))
                    {
                        curToken.type(numConstError);
                        return curToken;
                    }
                    while (isdigit(currentChar))
                    {
                        curToken.appendChar(currentChar);
                        currentChar = filestream.get();
                    }
                    filestream.unget();
                    curToken.type(numConst);
                    return curToken;
                }
                curToken.appendChar(currentChar);
                //... the rest of this function is omitted here ...
Example 8: to_string
std::string to_string(const token &t) {
    switch(t.type()) {
    case token::AND: return "&";
    case token::AND_EQ: return "&=";
    case token::ASSIGN: return "=";
    case token::AUTO: return "auto";
    case token::BREAK: return "break";
    case token::CASE: return "case";
    case token::CHAR: return "char";
    case token::CMP: return "~";
    case token::COLON: return ":";
    case token::COMMA: return ",";
    case token::CONTINUE: return "continue";
    case token::DIV: return "/";
    case token::DIV_EQ: return "/=";
    case token::DO: return "do";
    case token::DOT: return ".";
    case token::DOUBLECOLON: return "::";
    case token::ELSE: return "else";
    case token::EQ: return "==";
    case token::FOR: return "for";
    case token::FUNCTION: return "function";
    case token::GE: return ">=";
    case token::GT: return ">";
    case token::IF: return "if";
    case token::INT: return "int";
    case token::LBRACE: return "{";
    case token::LBRACKET: return "[";
    case token::LE: return "<=";
    case token::LOGICAL_AND: return "&&";
    case token::LOGICAL_OR: return "||";
    case token::LPAREN: return "(";
    case token::LSHIFT: return "<<";
    case token::LSHIFT_EQ: return "<<=";
    case token::LT: return "<";
    case token::MINUS: return "-";
    case token::MINUS_EQ: return "-=";
    case token::MOD: return "%";
    case token::MOD_EQ: return "%=";
    case token::MUL: return "*";
    case token::MUL_EQ: return "*=";
    case token::NE: return "!=";
    case token::NOT: return "!";
    case token::OR: return "|";
    case token::OR_EQ: return "|=";
    case token::PLUS: return "+";
    case token::PLUS_EQ: return "+=";
    case token::RBRACE: return "}";
    case token::RBRACKET: return "]";
    case token::RETURN: return "return ";
    case token::RPAREN: return ")";
    case token::RSHIFT: return ">>";
    case token::RSHIFT_EQ: return ">>=";
    case token::SEMICOLON: return ";";
    case token::SINGLEQUOTE: return "'";
    case token::STRING: return "string";
    case token::SWITCH: return "switch";
    case token::WHILE: return "while";
    case token::XOR: return "^";
    case token::XOR_EQ: return "^=";
    case token::FINISHED: return "<EOF>";
    case token::IDENTIFIER:
    case token::STRING_LITERAL:
    case token::CHARACTER:
    case token::INTEGER:
        return t.string_;
    default:
        return "";
    }
}
Example 9: equalToken
bool Foam::functionEntries::ifeqEntry::equalToken
(
    const token& t1,
    const token& t2
)
{
    const bool eqType = (t1.type() == t2.type());

    switch (t1.type())
    {
        case token::UNDEFINED:
            return eqType;

        case token::PUNCTUATION:
            return (eqType && t1.pToken() == t2.pToken());

        case token::WORD:
            if (eqType)
            {
                return t1.wordToken() == t2.wordToken();
            }
            else if (t2.isString())
            {
                return t1.wordToken() == t2.stringToken();
            }
            else
            {
                return false;
            }

        case token::STRING:
        case token::VARIABLE:
        case token::VERBATIMSTRING:
            if (eqType)
            {
                return t1.stringToken() == t2.stringToken();
            }
            else if (t2.isWord())
            {
                return t1.stringToken() == t2.wordToken();
            }
            else
            {
                return false;
            }

        case token::LABEL:
            if (eqType)
            {
                return t1.labelToken() == t2.labelToken();
            }
            else if (t2.isScalar())
            {
                return t1.labelToken() == t2.scalarToken();
            }
            else
            {
                return false;
            }

        case token::FLOAT_SCALAR:
            if (eqType)
            {
                return equal(t1.floatScalarToken(), t2.floatScalarToken());
            }
            else if (t2.isScalar())
            {
                return t1.scalarToken() == t2.scalarToken();
            }
            else
            {
                return false;
            }

        case token::DOUBLE_SCALAR:
            if (eqType)
            {
                return equal(t1.doubleScalarToken(), t2.doubleScalarToken());
            }
            else if (t2.isScalar())
            {
                return t1.scalarToken() == t2.scalarToken();
            }
            else
            {
                return false;
            }

        case token::LONG_DOUBLE_SCALAR:
            if (eqType)
            {
                return equal
                (
                    t1.longDoubleScalarToken(),
                    t2.longDoubleScalarToken()
                );
            }
            else if (t2.isScalar())
            {
                return t1.scalarToken() == t2.scalarToken();
                //... the rest of this method is omitted here ...