本文整理汇总了C++中Token类的典型用法代码示例。如果您正苦于以下问题:C++ Token类的具体用法?C++ Token怎么用?C++ Token使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Token类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: switch
nsresult
txExprParser::createLocationStep(txExprLexer& lexer, txIParseContext* aContext,
Expr** aExpr)
{
*aExpr = nullptr;
//-- child axis is default
LocationStep::LocationStepType axisIdentifier = LocationStep::CHILD_AXIS;
nsAutoPtr<txNodeTest> nodeTest;
//-- get Axis Identifier or AbbreviatedStep, if present
Token* tok = lexer.peek();
switch (tok->mType) {
case Token::AXIS_IDENTIFIER:
{
//-- eat token
lexer.nextToken();
nsCOMPtr<nsIAtom> axis = do_GetAtom(tok->Value());
if (axis == nsGkAtoms::ancestor) {
axisIdentifier = LocationStep::ANCESTOR_AXIS;
}
else if (axis == nsGkAtoms::ancestorOrSelf) {
axisIdentifier = LocationStep::ANCESTOR_OR_SELF_AXIS;
}
else if (axis == nsGkAtoms::attribute) {
axisIdentifier = LocationStep::ATTRIBUTE_AXIS;
}
else if (axis == nsGkAtoms::child) {
axisIdentifier = LocationStep::CHILD_AXIS;
}
else if (axis == nsGkAtoms::descendant) {
axisIdentifier = LocationStep::DESCENDANT_AXIS;
}
else if (axis == nsGkAtoms::descendantOrSelf) {
axisIdentifier = LocationStep::DESCENDANT_OR_SELF_AXIS;
}
else if (axis == nsGkAtoms::following) {
axisIdentifier = LocationStep::FOLLOWING_AXIS;
}
else if (axis == nsGkAtoms::followingSibling) {
axisIdentifier = LocationStep::FOLLOWING_SIBLING_AXIS;
}
else if (axis == nsGkAtoms::_namespace) {
axisIdentifier = LocationStep::NAMESPACE_AXIS;
}
else if (axis == nsGkAtoms::parent) {
axisIdentifier = LocationStep::PARENT_AXIS;
}
else if (axis == nsGkAtoms::preceding) {
axisIdentifier = LocationStep::PRECEDING_AXIS;
}
else if (axis == nsGkAtoms::precedingSibling) {
axisIdentifier = LocationStep::PRECEDING_SIBLING_AXIS;
}
else if (axis == nsGkAtoms::self) {
axisIdentifier = LocationStep::SELF_AXIS;
}
else {
return NS_ERROR_XPATH_INVALID_AXIS;
}
break;
}
case Token::AT_SIGN:
//-- eat token
lexer.nextToken();
axisIdentifier = LocationStep::ATTRIBUTE_AXIS;
break;
case Token::PARENT_NODE :
//-- eat token
lexer.nextToken();
axisIdentifier = LocationStep::PARENT_AXIS;
nodeTest = new txNodeTypeTest(txNodeTypeTest::NODE_TYPE);
break;
case Token::SELF_NODE :
//-- eat token
lexer.nextToken();
axisIdentifier = LocationStep::SELF_AXIS;
nodeTest = new txNodeTypeTest(txNodeTypeTest::NODE_TYPE);
break;
default:
break;
}
//-- get NodeTest unless an AbbreviatedStep was found
nsresult rv = NS_OK;
if (!nodeTest) {
tok = lexer.peek();
if (tok->mType == Token::CNAME) {
lexer.nextToken();
// resolve QName
nsCOMPtr<nsIAtom> prefix, lName;
PRInt32 nspace;
rv = resolveQName(tok->Value(), getter_AddRefs(prefix),
aContext, getter_AddRefs(lName),
nspace, true);
NS_ENSURE_SUCCESS(rv, rv);
nodeTest =
new txNameTest(prefix, lName, nspace,
//.........这里部分代码省略.........
示例2: Lex
void Preprocessor::HandlePragmaIncludeAlias(Token &Tok) {
// We will either get a quoted filename or a bracketed filename, and we
// have to track which we got. The first filename is the source name,
// and the second name is the mapped filename. If the first is quoted,
// the second must be as well (cannot mix and match quotes and brackets).
// Get the open paren
Lex(Tok);
if (Tok.isNot(tok::l_paren)) {
Diag(Tok, diag::warn_pragma_include_alias_expected) << "(";
return;
}
// We expect either a quoted string literal, or a bracketed name
Token SourceFilenameTok;
CurPPLexer->LexIncludeFilename(SourceFilenameTok);
if (SourceFilenameTok.is(tok::eod)) {
// The diagnostic has already been handled
return;
}
StringRef SourceFileName;
SmallString<128> FileNameBuffer;
if (SourceFilenameTok.is(tok::string_literal) ||
SourceFilenameTok.is(tok::angle_string_literal)) {
SourceFileName = getSpelling(SourceFilenameTok, FileNameBuffer);
} else if (SourceFilenameTok.is(tok::less)) {
// This could be a path instead of just a name
FileNameBuffer.push_back('<');
SourceLocation End;
if (ConcatenateIncludeName(FileNameBuffer, End))
return; // Diagnostic already emitted
SourceFileName = FileNameBuffer.str();
} else {
Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
return;
}
FileNameBuffer.clear();
// Now we expect a comma, followed by another include name
Lex(Tok);
if (Tok.isNot(tok::comma)) {
Diag(Tok, diag::warn_pragma_include_alias_expected) << ",";
return;
}
Token ReplaceFilenameTok;
CurPPLexer->LexIncludeFilename(ReplaceFilenameTok);
if (ReplaceFilenameTok.is(tok::eod)) {
// The diagnostic has already been handled
return;
}
StringRef ReplaceFileName;
if (ReplaceFilenameTok.is(tok::string_literal) ||
ReplaceFilenameTok.is(tok::angle_string_literal)) {
ReplaceFileName = getSpelling(ReplaceFilenameTok, FileNameBuffer);
} else if (ReplaceFilenameTok.is(tok::less)) {
// This could be a path instead of just a name
FileNameBuffer.push_back('<');
SourceLocation End;
if (ConcatenateIncludeName(FileNameBuffer, End))
return; // Diagnostic already emitted
ReplaceFileName = FileNameBuffer.str();
} else {
Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
return;
}
// Finally, we expect the closing paren
Lex(Tok);
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::warn_pragma_include_alias_expected) << ")";
return;
}
// Now that we have the source and target filenames, we need to make sure
// they're both of the same type (angled vs non-angled)
StringRef OriginalSource = SourceFileName;
bool SourceIsAngled =
GetIncludeFilenameSpelling(SourceFilenameTok.getLocation(),
SourceFileName);
bool ReplaceIsAngled =
GetIncludeFilenameSpelling(ReplaceFilenameTok.getLocation(),
ReplaceFileName);
if (!SourceFileName.empty() && !ReplaceFileName.empty() &&
(SourceIsAngled != ReplaceIsAngled)) {
unsigned int DiagID;
if (SourceIsAngled)
DiagID = diag::warn_pragma_include_alias_mismatch_angle;
else
DiagID = diag::warn_pragma_include_alias_mismatch_quote;
Diag(SourceFilenameTok.getLocation(), DiagID)
<< SourceFileName
<< ReplaceFileName;
return;
}
//.........这里部分代码省略.........
示例3: SCALED
void StereotypeDefinitionParser::parseIconCommands(StereotypeIcon *stereotypeIcon)
{
Token token;
bool loop = true;
IconShape iconShape;
QList<ShapeValueF> parameters;
typedef QList<IconCommandParameter> Parameters;
static const IconCommandParameter SCALED(ShapeValueF::UnitScaled);
static const IconCommandParameter FIX(ShapeValueF::UnitRelative);
static const IconCommandParameter ABSOLUTE(ShapeValueF::UnitAbsolute);
while (loop) {
token = readNextToken();
if (token.type() != Token::TokenKeyword) {
loop = false;
} else {
switch (token.subtype()) {
case KEYWORD_CIRCLE:
parameters = parseIconCommandParameters(Parameters() << SCALED << SCALED << SCALED);
iconShape.addCircle(ShapePointF(parameters.at(0), parameters.at(1)), parameters.at(2));
expectSemicolonOrEndOfLine();
break;
case KEYWORD_ELLIPSE:
parameters = parseIconCommandParameters(Parameters() << SCALED << SCALED << SCALED << SCALED);
iconShape.addEllipse(ShapePointF(parameters.at(0), parameters.at(1)),
ShapeSizeF(parameters.at(2), parameters.at(3)));
expectSemicolonOrEndOfLine();
break;
case KEYWORD_LINE:
parameters = parseIconCommandParameters(Parameters() << SCALED << SCALED << SCALED << SCALED);
iconShape.addLine(ShapePointF(parameters.at(0), parameters.at(1)),
ShapePointF(parameters.at(2), parameters.at(3)));
expectSemicolonOrEndOfLine();
break;
case KEYWORD_RECT:
parameters = parseIconCommandParameters(Parameters() << SCALED << SCALED << SCALED << SCALED);
iconShape.addRect(ShapePointF(parameters.at(0), parameters.at(1)),
ShapeSizeF(parameters.at(2), parameters.at(3)));
expectSemicolonOrEndOfLine();
break;
case KEYWORD_ROUNDEDRECT:
parameters = parseIconCommandParameters(Parameters() << SCALED << SCALED << SCALED << SCALED << FIX);
iconShape.addRoundedRect(ShapePointF(parameters.at(0), parameters.at(1)),
ShapeSizeF(parameters.at(2), parameters.at(3)), parameters.at(4));
expectSemicolonOrEndOfLine();
break;
case KEYWORD_ARC:
{
parameters = parseIconCommandParameters(
Parameters() << SCALED << SCALED << SCALED << SCALED << ABSOLUTE << ABSOLUTE);
qreal startAngle = expectAbsoluteValue(parameters.at(4), d->m_scanner->sourcePos());
qreal spanAngle = expectAbsoluteValue(parameters.at(5), d->m_scanner->sourcePos());
iconShape.addArc(ShapePointF(parameters.at(0), parameters.at(1)),
ShapeSizeF(parameters.at(2), parameters.at(3)), startAngle, spanAngle);
expectSemicolonOrEndOfLine();
break;
}
case KEYWORD_MOVETO:
parameters = parseIconCommandParameters(Parameters() << SCALED << SCALED);
iconShape.moveTo(ShapePointF(parameters.at(0), parameters.at(1)));
expectSemicolonOrEndOfLine();
break;
case KEYWORD_LINETO:
parameters = parseIconCommandParameters(Parameters() << SCALED << SCALED);
iconShape.lineTo(ShapePointF(parameters.at(0), parameters.at(1)));
expectSemicolonOrEndOfLine();
break;
case KEYWORD_ARCMOVETO:
{
parameters = parseIconCommandParameters(
Parameters() << SCALED << SCALED << SCALED << SCALED << ABSOLUTE);
qreal angle = expectAbsoluteValue(parameters.at(4), d->m_scanner->sourcePos());
iconShape.arcMoveTo(ShapePointF(parameters.at(0), parameters.at(1)),
ShapeSizeF(parameters.at(2), parameters.at(3)), angle);
expectSemicolonOrEndOfLine();
break;
}
case KEYWORD_ARCTO:
{
parameters = parseIconCommandParameters(
Parameters() << SCALED << SCALED << SCALED << SCALED << ABSOLUTE << ABSOLUTE);
qreal startAngle = expectAbsoluteValue(parameters.at(4), d->m_scanner->sourcePos());
qreal sweepLength = expectAbsoluteValue(parameters.at(5), d->m_scanner->sourcePos());
iconShape.arcTo(ShapePointF(parameters.at(0), parameters.at(1)),
ShapeSizeF(parameters.at(2), parameters.at(3)), startAngle, sweepLength);
expectSemicolonOrEndOfLine();
break;
}
case KEYWORD_CLOSE:
iconShape.closePath();
expectSemicolonOrEndOfLine();
break;
default:
loop = false;
break;
}
}
}
stereotypeIcon->setIconShape(iconShape);
//.........这里部分代码省略.........
示例4: switch
// ---------------------------------------------------------------------------
// Token: Helper methods
// ---------------------------------------------------------------------------
int Token::analyzeFirstCharacter(RangeToken* const rangeTok,
const int options,
TokenFactory* const tokFactory)
{
switch(fTokenType) {
case T_CONCAT:
{
int ret = FC_CONTINUE;
for (int i=0; i<size(); i++) {
Token* tok = getChild(i);
if (tok
&& (ret=tok->analyzeFirstCharacter(rangeTok,
options, tokFactory))!= FC_CONTINUE)
break;
}
return ret;
}
case T_UNION:
{
unsigned int childSize = size();
if (childSize == 0)
return FC_CONTINUE;
int ret = FC_CONTINUE;
bool hasEmpty = false;
for (unsigned int i=0; i < childSize; i++) {
ret = getChild(i)->analyzeFirstCharacter(rangeTok, options, tokFactory);
if (ret == FC_ANY)
break;
else
hasEmpty = true;
}
return hasEmpty ? FC_CONTINUE : ret;
}
case T_CONDITION:
{
int ret1 = getChild(0)->analyzeFirstCharacter(rangeTok, options, tokFactory);
if (size() == 1)
return FC_CONTINUE;
int ret2;
if (ret1 != FC_ANY) {
ret2 = getChild(1)->analyzeFirstCharacter(rangeTok, options, tokFactory);
}
if (ret1 == FC_ANY || ret2 == FC_ANY)
return FC_ANY;
if (ret1 == FC_CONTINUE || ret2 == FC_CONTINUE)
return FC_CONTINUE;
return FC_TERMINAL;
}
case T_CLOSURE:
case T_NONGREEDYCLOSURE:
{
Token* tok = getChild(0);
if (tok)
tok->analyzeFirstCharacter(rangeTok, options, tokFactory);
return FC_CONTINUE;
}
case T_DOT:
return FC_ANY;
case T_EMPTY:
case T_ANCHOR:
return FC_CONTINUE;
case T_CHAR:
{
XMLInt32 ch = getChar();
rangeTok->addRange(ch, ch);
if (ch < 0x1000 && isSet(options,RegularExpression::IGNORE_CASE)) {
//REVISIT
}
}
return FC_TERMINAL;
case T_RANGE:
{
if (isSet(options, RegularExpression::IGNORE_CASE)) {
rangeTok->mergeRanges(((RangeToken*)
this)->getCaseInsensitiveToken(tokFactory));
}
else {
rangeTok->mergeRanges(this);
}
return FC_TERMINAL;
}
case T_NRANGE:
{
if (isSet(options, RegularExpression::IGNORE_CASE)) {
RangeToken* caseITok = (((RangeToken*)
this)->getCaseInsensitiveToken(tokFactory));
//.........这里部分代码省略.........
示例5: LexAfterModuleImport
/// Lex a token following the 'import' contextual keyword.
///
/// \param Result [out] on return holds either the next path component, the
/// '.' separator, or the first token that is no longer part of the
/// module-import directive.
///
/// Side effects: accumulates each dotted path component in ModuleImportPath,
/// keeps CurLexerKind parked at CLK_LexAfterModuleImport while more of the
/// path is expected, and once the path is complete asks TheModuleLoader to
/// load the named module and makes it visible.
void Preprocessor::LexAfterModuleImport(Token &Result) {
// Figure out what kind of lexer we actually have.
recomputeCurLexerKind();
// Lex the next token.
Lex(Result);
// The token sequence
//
// import identifier (. identifier)*
//
// indicates a module import directive. We already saw the 'import'
// contextual keyword, so now we're looking for the identifiers.
if (ModuleImportExpectsIdentifier && Result.getKind() == tok::identifier) {
// We expected to see an identifier here, and we did; continue handling
// identifiers.
ModuleImportPath.push_back(std::make_pair(Result.getIdentifierInfo(),
Result.getLocation()));
// After an identifier the next legal token is '.' (more path) or the
// directive terminator.
ModuleImportExpectsIdentifier = false;
CurLexerKind = CLK_LexAfterModuleImport;
return;
}
// If we're expecting a '.' or a ';', and we got a '.', then wait until we
// see the next identifier. (We can also see a '[[' that begins an
// attribute-specifier-seq here under the C++ Modules TS.)
if (!ModuleImportExpectsIdentifier && Result.getKind() == tok::period) {
ModuleImportExpectsIdentifier = true;
CurLexerKind = CLK_LexAfterModuleImport;
return;
}
// Any other token ends the path. If we have a non-empty module path, load
// the named module.
if (!ModuleImportPath.empty()) {
// Under the Modules TS, the dot is just part of the module name, and not
// a real hierarchy separator. Flatten such module names now.
//
// FIXME: Is this the right level to be performing this transformation?
std::string FlatModuleName;
if (getLangOpts().ModulesTS) {
for (auto &Piece : ModuleImportPath) {
if (!FlatModuleName.empty())
FlatModuleName += ".";
FlatModuleName += Piece.first->getName();
}
// Replace the dotted path with a single flattened component anchored at
// the first component's source location.
SourceLocation FirstPathLoc = ModuleImportPath[0].second;
ModuleImportPath.clear();
ModuleImportPath.push_back(
std::make_pair(getIdentifierInfo(FlatModuleName), FirstPathLoc));
}
Module *Imported = nullptr;
if (getLangOpts().Modules) {
Imported = TheModuleLoader.loadModule(ModuleImportLoc,
ModuleImportPath,
Module::Hidden,
/*IsIncludeDirective=*/false);
// Loading can fail; only a successfully loaded module is made visible.
if (Imported)
makeModuleVisible(Imported, ModuleImportLoc);
}
// Notify callbacks even when the load failed (Imported may be null).
if (Callbacks && (getLangOpts().Modules || getLangOpts().DebuggerSupport))
Callbacks->moduleImport(ModuleImportLoc, ModuleImportPath, Imported);
}
}
示例6: EvaluateHasIncludeCommon
/// EvaluateHasIncludeCommon - Process a '__has_include("path")'
/// or '__has_include_next("path")' expression.
///
/// \param Tok on entry, the token after the builtin's identifier; on exit,
/// the ')' token on success or the offending token on failure.
/// \param II the identifier of the builtin being evaluated (used only in
/// diagnostics).
/// \param PP the preprocessor used for lexing, spelling, and file lookup.
/// \param LookupFrom directory to resume the search from (for
/// __has_include_next), or null to search from the start (__has_include).
/// \returns true if the named file was found; false if it was not found or
/// the expression was malformed (a diagnostic has already been emitted in
/// the malformed case).
static bool EvaluateHasIncludeCommon(Token &Tok,
                                     IdentifierInfo *II, Preprocessor &PP,
                                     const DirectoryLookup *LookupFrom) {
  SourceLocation LParenLoc;

  // Get '('.
  PP.LexNonComment(Tok);

  // Ensure we have a '('.
  if (Tok.isNot(tok::l_paren)) {
    PP.Diag(Tok.getLocation(), diag::err_pp_missing_lparen) << II->getName();
    return false;
  }

  // Save '(' location for possible missing ')' message.
  LParenLoc = Tok.getLocation();

  // Get the file name. LexIncludeFilename lexes in a mode where <...> is a
  // single angle_string_literal token.
  PP.getCurrentLexer()->LexIncludeFilename(Tok);

  // Reserve a buffer to get the spelling.
  SmallString<128> FilenameBuffer;
  StringRef Filename;
  SourceLocation EndLoc;

  switch (Tok.getKind()) {
  case tok::eod:
    // If the token kind is EOD, the error has already been diagnosed.
    return false;

  case tok::angle_string_literal:
  case tok::string_literal: {
    bool Invalid = false;
    Filename = PP.getSpelling(Tok, FilenameBuffer, &Invalid);
    if (Invalid)
      return false;
    break;
  }

  case tok::less:
    // This could be a <foo/bar.h> file coming from a macro expansion. In this
    // case, glue the tokens together into FilenameBuffer and interpret those.
    FilenameBuffer.push_back('<');
    if (PP.ConcatenateIncludeName(FilenameBuffer, EndLoc))
      return false; // Found <eod> but no ">"? Diagnostic already emitted.
    Filename = FilenameBuffer.str();
    break;

  default:
    PP.Diag(Tok.getLocation(), diag::err_pp_expects_filename);
    return false;
  }

  // Get ')'.
  PP.LexNonComment(Tok);

  // Ensure we have a trailing ), pointing back at the matching ( otherwise.
  if (Tok.isNot(tok::r_paren)) {
    PP.Diag(Tok.getLocation(), diag::err_pp_missing_rparen) << II->getName();
    PP.Diag(LParenLoc, diag::note_matching) << "(";
    return false;
  }

  bool isAngled = PP.GetIncludeFilenameSpelling(Tok.getLocation(), Filename);
  // If GetIncludeFilenameSpelling set the start ptr to null, there was an
  // error.
  if (Filename.empty())
    return false;

  // Search include directories. Use nullptr (not NULL) for the unused out
  // parameters, matching the rest of the file's C++11 usage.
  const DirectoryLookup *CurDir;
  const FileEntry *File =
      PP.LookupFile(Filename, isAngled, LookupFrom, CurDir,
                    nullptr, nullptr, nullptr);

  // Get the result value. A result of true means the file exists.
  return File != nullptr;
}
示例7: LexEndOfFile
/// Turn Tok into an end-of-file token when the lexer has reached the
/// terminating NUL character. Any other character leaves Tok untouched.
void MetaLexer::LexEndOfFile(char C, Token& Tok) {
  if (C != '\0')
    return;

  Tok.setKind(tok::eof);
  Tok.setLength(1);
}
示例8: EvaluateDirectiveSubExpr
/// EvaluateDirectiveSubExpr - Evaluate the subexpression whose first token is
/// PeekTok, and whose precedence is PeekPrec. This returns the result in LHS.
///
/// If ValueLive is false, then this value is being evaluated in a context where
/// the result is not used. As such, avoid diagnostics that relate to
/// evaluation, such as division by zero warnings.
static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
Token &PeekTok, bool ValueLive,
Preprocessor &PP) {
unsigned PeekPrec = getPrecedence(PeekTok.getKind());
// If this token isn't valid, report the error.
if (PeekPrec == ~0U) {
PP.Diag(PeekTok.getLocation(), diag::err_pp_expr_bad_token_binop)
<< LHS.getRange();
return true;
}
while (1) {
// If this token has a lower precedence than we are allowed to parse, return
// it so that higher levels of the recursion can parse it.
if (PeekPrec < MinPrec)
return false;
tok::TokenKind Operator = PeekTok.getKind();
// If this is a short-circuiting operator, see if the RHS of the operator is
// dead. Note that this cannot just clobber ValueLive. Consider
// "0 && 1 ? 4 : 1 / 0", which is parsed as "(0 && 1) ? 4 : (1 / 0)". In
// this example, the RHS of the && being dead does not make the rest of the
// expr dead.
bool RHSIsLive;
if (Operator == tok::ampamp && LHS.Val == 0)
RHSIsLive = false; // RHS of "0 && x" is dead.
else if (Operator == tok::pipepipe && LHS.Val != 0)
RHSIsLive = false; // RHS of "1 || x" is dead.
else if (Operator == tok::question && LHS.Val == 0)
RHSIsLive = false; // RHS (x) of "0 ? x : y" is dead.
else
RHSIsLive = ValueLive;
// Consume the operator, remembering the operator's location for reporting.
SourceLocation OpLoc = PeekTok.getLocation();
PP.LexNonComment(PeekTok);
PPValue RHS(LHS.getBitWidth());
// Parse the RHS of the operator.
DefinedTracker DT;
if (EvaluateValue(RHS, PeekTok, DT, RHSIsLive, PP)) return true;
// Remember the precedence of this operator and get the precedence of the
// operator immediately to the right of the RHS.
unsigned ThisPrec = PeekPrec;
PeekPrec = getPrecedence(PeekTok.getKind());
// If this token isn't valid, report the error.
if (PeekPrec == ~0U) {
PP.Diag(PeekTok.getLocation(), diag::err_pp_expr_bad_token_binop)
<< RHS.getRange();
return true;
}
// Decide whether to include the next binop in this subexpression. For
// example, when parsing x+y*z and looking at '*', we want to recursively
// handle y*z as a single subexpression. We do this because the precedence
// of * is higher than that of +. The only strange case we have to handle
// here is for the ?: operator, where the precedence is actually lower than
// the LHS of the '?'. The grammar rule is:
//
// conditional-expression ::=
// logical-OR-expression ? expression : conditional-expression
// where 'expression' is actually comma-expression.
unsigned RHSPrec;
if (Operator == tok::question)
// The RHS of "?" should be maximally consumed as an expression.
RHSPrec = getPrecedence(tok::comma);
else // All others should munch while higher precedence.
RHSPrec = ThisPrec+1;
if (PeekPrec >= RHSPrec) {
if (EvaluateDirectiveSubExpr(RHS, RHSPrec, PeekTok, RHSIsLive, PP))
return true;
PeekPrec = getPrecedence(PeekTok.getKind());
}
assert(PeekPrec <= ThisPrec && "Recursion didn't work!");
// Usual arithmetic conversions (C99 6.3.1.8p1): result is unsigned if
// either operand is unsigned.
llvm::APSInt Res(LHS.getBitWidth());
switch (Operator) {
case tok::question: // No UAC for x and y in "x ? y : z".
case tok::lessless: // Shift amount doesn't UAC with shift value.
case tok::greatergreater: // Shift amount doesn't UAC with shift value.
case tok::comma: // Comma operands are not subject to UACs.
case tok::pipepipe: // Logical || does not do UACs.
case tok::ampamp: // Logical && does not do UACs.
break; // No UAC
default:
Res.setIsUnsigned(LHS.isUnsigned()|RHS.isUnsigned());
// If this just promoted something from signed to unsigned, and if the
// value was negative, warn about it.
//.........这里部分代码省略.........
示例9: EvaluateDirectiveExpression
/// EvaluateDirectiveExpression - Evaluate an integer constant expression that
/// may occur after a #if or #elif directive. If the expression is equivalent
/// to "!defined(X)" return X in IfNDefMacro.
///
/// \param IfNDefMacro [out] set to the macro identifier when the whole
/// expression had the form !defined(X); otherwise left untouched.
/// \returns true if the expression evaluated to a non-zero value, false on
/// a zero value or a parse error (errors are diagnosed here).
bool Preprocessor::
EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
  // Save the current state of 'DisableMacroExpansion' and reset it to false. If
  // 'DisableMacroExpansion' is true, then we must be in a macro argument list
  // in which case a directive is undefined behavior. We want macros to be able
  // to recursively expand in order to get more gcc-like behavior, so we force
  // DisableMacroExpansion to false and restore it when we're done parsing the
  // expression.
  //
  // Use a scope guard so every exit path (parse errors included) restores
  // the flag exactly once; previously each return statement repeated the
  // restore by hand, which is easy to miss when adding a new early return.
  struct FlagRestorer {
    bool &Flag;
    bool SavedValue;
    FlagRestorer(bool &F, bool NewValue) : Flag(F), SavedValue(F) {
      F = NewValue;
    }
    ~FlagRestorer() { Flag = SavedValue; }
  } RestoreMacroExpansion(DisableMacroExpansion, false);

  // Peek ahead one token.
  Token Tok;
  LexNonComment(Tok);

  // C99 6.10.1p3 - All expressions are evaluated as intmax_t or uintmax_t.
  unsigned BitWidth = getTargetInfo().getIntMaxTWidth();

  PPValue ResVal(BitWidth);
  DefinedTracker DT;
  if (EvaluateValue(ResVal, Tok, DT, true, *this)) {
    // Parse error, skip the rest of the macro line.
    if (Tok.isNot(tok::eod))
      DiscardUntilEndOfDirective();
    return false;
  }

  // If we are at the end of the expression after just parsing a value, there
  // must be no (unparenthesized) binary operators involved, so we can exit
  // directly.
  if (Tok.is(tok::eod)) {
    // If the expression we parsed was of the form !defined(macro), return the
    // macro in IfNDefMacro.
    if (DT.State == DefinedTracker::NotDefinedMacro)
      IfNDefMacro = DT.TheMacro;

    return ResVal.Val != 0;
  }

  // Otherwise, we must have a binary operator (e.g. "#if 1 < 2"), so parse the
  // operator and the stuff after it.
  if (EvaluateDirectiveSubExpr(ResVal, getPrecedence(tok::question),
                               Tok, true, *this)) {
    // Parse error, skip the rest of the macro line.
    if (Tok.isNot(tok::eod))
      DiscardUntilEndOfDirective();
    return false;
  }

  // If we aren't at the tok::eod token, something bad happened, like an extra
  // ')' token.
  if (Tok.isNot(tok::eod)) {
    Diag(Tok, diag::err_pp_expected_eol);
    DiscardUntilEndOfDirective();
  }

  return ResVal.Val != 0;
}
示例10: EvaluateValue
/// EvaluateValue - Evaluate the token PeekTok (and any others needed) and
/// return the computed value in Result. Return true if there was an error
/// parsing. This function also returns information about the form of the
/// expression in DT. See above for information on what DT means.
///
/// If ValueLive is false, then this value is being evaluated in a context where
/// the result is not used. As such, avoid diagnostics that relate to
/// evaluation.
static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
bool ValueLive, Preprocessor &PP) {
DT.State = DefinedTracker::Unknown;
if (PeekTok.is(tok::code_completion)) {
if (PP.getCodeCompletionHandler())
PP.getCodeCompletionHandler()->CodeCompletePreprocessorExpression();
PP.setCodeCompletionReached();
PP.LexNonComment(PeekTok);
}
// If this token's spelling is a pp-identifier, check to see if it is
// 'defined' or if it is a macro. Note that we check here because many
// keywords are pp-identifiers, so we can't check the kind.
if (IdentifierInfo *II = PeekTok.getIdentifierInfo()) {
// Handle "defined X" and "defined(X)".
if (II->isStr("defined"))
return(EvaluateDefined(Result, PeekTok, DT, ValueLive, PP));
// If this identifier isn't 'defined' or one of the special
// preprocessor keywords and it wasn't macro expanded, it turns
// into a simple 0, unless it is the C++ keyword "true", in which case it
// turns into "1".
if (ValueLive &&
II->getTokenID() != tok::kw_true &&
II->getTokenID() != tok::kw_false)
PP.Diag(PeekTok, diag::warn_pp_undef_identifier) << II;
Result.Val = II->getTokenID() == tok::kw_true;
Result.Val.setIsUnsigned(false); // "0" is signed intmax_t 0.
Result.setRange(PeekTok.getLocation());
PP.LexNonComment(PeekTok);
return false;
}
switch (PeekTok.getKind()) {
default: // Non-value token.
PP.Diag(PeekTok, diag::err_pp_expr_bad_token_start_expr);
return true;
case tok::eod:
case tok::r_paren:
// If there is no expression, report and exit.
PP.Diag(PeekTok, diag::err_pp_expected_value_in_expr);
return true;
case tok::numeric_constant: {
SmallString<64> IntegerBuffer;
bool NumberInvalid = false;
StringRef Spelling = PP.getSpelling(PeekTok, IntegerBuffer,
&NumberInvalid);
if (NumberInvalid)
return true; // a diagnostic was already reported
NumericLiteralParser Literal(Spelling, PeekTok.getLocation(), PP);
if (Literal.hadError)
return true; // a diagnostic was already reported.
if (Literal.isFloatingLiteral() || Literal.isImaginary) {
PP.Diag(PeekTok, diag::err_pp_illegal_floating_literal);
return true;
}
assert(Literal.isIntegerLiteral() && "Unknown ppnumber");
// Complain about, and drop, any ud-suffix.
if (Literal.hasUDSuffix())
PP.Diag(PeekTok, diag::err_pp_invalid_udl) << /*integer*/1;
// 'long long' is a C99 or C++11 feature.
if (!PP.getLangOpts().C99 && Literal.isLongLong) {
if (PP.getLangOpts().CPlusPlus)
PP.Diag(PeekTok,
PP.getLangOpts().CPlusPlus0x ?
diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
else
PP.Diag(PeekTok, diag::ext_c99_longlong);
}
// Parse the integer literal into Result.
if (Literal.GetIntegerValue(Result.Val)) {
// Overflow parsing integer literal.
if (ValueLive) PP.Diag(PeekTok, diag::warn_integer_too_large);
Result.Val.setIsUnsigned(true);
} else {
// Set the signedness of the result to match whether there was a U suffix
// or not.
Result.Val.setIsUnsigned(Literal.isUnsigned);
// Detect overflow based on whether the value is signed. If signed
// and if the value is too large, emit a warning "integer constant is so
// large that it is unsigned" e.g. on 12345678901234567890 where intmax_t
// is 64-bits.
if (!Literal.isUnsigned && Result.Val.isNegative()) {
// Don't warn for a hex literal: 0x8000..0 shouldn't warn.
if (ValueLive && Literal.getRadix() != 16)
//.........这里部分代码省略.........
示例11: to_string
// Equality is defined structurally: two tokens compare equal when their
// textual renderings match, regardless of any other token state.
// NOTE(review): the parameter is taken by value, copying the Token on every
// comparison; a const-reference parameter (and a const member function)
// would avoid the copy, but requires to_string() to be const — confirm
// before tightening.
bool operator==(Token t) { return to_string() == t.to_string(); }
示例12: getCurrentChar
/// LexTokenInternal - This implements a simple Fortran family lexer. It is an
/// extremely performance critical piece of code. This assumes that the buffer
/// has a null character at the end of the file. It assumes that the Flags of
/// Result have been cleared before calling this.
void Lexer::LexTokenInternal(Token &Result) {
// Check to see if there is still more of the line to lex.
if (Text.empty() || Text.AtEndOfLine()) {
Text.Reset();
Text.GetNextLine();
}
// Check to see if we're at the start of a line.
if (getLineBegin() == getCurrentPtr())
// The returned token is at the start of the line.
Result.setFlag(Token::StartOfStatement);
// If we saw a semicolon, then we're at the start of a new statement.
if (LastTokenWasSemicolon) {
LastTokenWasSemicolon = false;
Result.setFlag(Token::StartOfStatement);
}
// Small amounts of horizontal whitespace is very common between tokens.
char Char = getCurrentChar();
while (isHorizontalWhitespace(Char))
Char = getNextChar();
TokStart = getCurrentPtr();
tok::TokenKind Kind;
switch (Char) {
case 0: // Null.
// Found end of file?
if (getCurrentPtr() >= CurBuf->getBufferEnd()) {
Kind = tok::eof;
break;
}
getNextChar();
return LexTokenInternal(Result);
case '\n':
case '\r':
case ' ':
case '\t':
case '\f':
case '\v':
do {
Char = getNextChar();
} while (isHorizontalWhitespace(Char));
return LexTokenInternal(Result);
case '.':
Char = getNextChar();
if (isLetter(Char)) {
// Match [A-Za-z]*, we have already matched '.'.
while (isLetter(Char))
Char = getNextChar();
if (Char != '.') {
// [TODO]: error.
Diags.ReportError(SMLoc::getFromPointer(TokStart),
"invalid defined operator missing end '.'");
FormTokenWithChars(Result, tok::unknown);
return;
}
Char = getNextChar();
if (Char == '_') {
// Parse the kind.
do {
Char = getNextChar();
} while (isIdentifierBody(Char) || isDecimalNumberBody(Char));
}
return FormDefinedOperatorTokenWithChars(Result);
}
// FALLTHROUGH
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
// [TODO]: Kinds on literals.
if (Result.isAtStartOfStatement())
return LexStatementLabel(Result);
return LexNumericConstant(Result);
case '"':
case '\'':
// [TODO]: Kinds.
return LexCharacterLiteralConstant(Result, Char == '"');
// [TODO]: BOZ literals.
case 'B': case 'b':
if (Char == '"' || Char == '\'') { // No whitespace between B and quote.
// Possible binary constant: B'...', B"..."
const char *BOZBegin = getCurrentPtr();
bool DoubleQuote = (Char == '"');
do {
Char = getNextChar();
} while (isBinaryNumberBody(Char));
//.........这里部分代码省略.........
示例13: assert
/// getSpelling() - Return the 'spelling' of this token. The spelling of a
/// token are the characters used to represent the token in the source file.
/// A token that spans continuation lines is returned in several pieces: one
/// StringRef per contiguous run of source text between the '&' continuation
/// markers. The '&' markers themselves, blank lines, and '!' comment lines
/// between fragments are not part of the spelling.
///
/// \param Tok the token whose spelling is wanted.
/// \param Spelling [out] receives the fragment(s); a token that needs no
/// cleaning yields exactly one fragment.
void Lexer::getSpelling(const Token &Tok,
llvm::SmallVectorImpl<llvm::StringRef> &Spelling) const{
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
// Literal tokens carry their own data pointer; others point into the
// source buffer via their location.
const char *TokStart = Tok.isLiteral() ?
Tok.getLiteralData() : Tok.getLocation().getPointer();
unsigned TokLen = Tok.getLength();
// If this token contains nothing interesting, return it directly.
if (!Tok.needsCleaning())
return Spelling.push_back(llvm::StringRef(TokStart, TokLen));
const char *CurPtr = TokStart;
const char *Start = TokStart;
unsigned Len = 0;
while (true) {
// Scan to the end of the current fragment: the next '&' acting as a
// continuation marker, or the end of the token.
while (Len != TokLen) {
if (*CurPtr != '&') {
++CurPtr, ++Len;
continue;
}
// Outside character literals, every '&' is a continuation marker.
if (Tok.isNot(tok::char_literal_constant))
break;
// Inside a character literal, a '&' only continues the line when it is
// the last non-blank character on the line; otherwise it is literal
// text and stays in the fragment.
const char *TmpPtr = CurPtr + 1;
unsigned TmpLen = Len + 1;
while (TmpLen != TokLen && isHorizontalWhitespace(*TmpPtr))
++TmpPtr, ++TmpLen;
if (*TmpPtr == '\n' || *TmpPtr == '\r')
break;
CurPtr = TmpPtr;
Len = TmpLen;
}
// Emit the fragment gathered so far (possibly empty).
Spelling.push_back(llvm::StringRef(Start, CurPtr - Start));
// Done unless we stopped on a continuation '&' with token text remaining.
if (*CurPtr != '&' || Len >= TokLen)
break;
// Step over the trailing '&' of the continued line.
Start = ++CurPtr; ++Len;
if (Len >= TokLen)
break;
while (true) {
// Skip blank lines...
while (Len != TokLen && isWhitespace(*CurPtr))
++CurPtr, ++Len;
if (*CurPtr != '!')
break;
// ...and lines with only comments.
while (Len != TokLen && *CurPtr != '\n' && *CurPtr != '\r')
++CurPtr, ++Len;
}
// Resume only when the continued text begins with the expected leading
// '&'; consume it and start the next fragment after it.
if (*CurPtr != '&' || Len >= TokLen)
break;
Start = ++CurPtr; ++Len;
}
}
示例14: ExpandBuiltinMacro
/// HandleMacroExpandedIdentifier - If an identifier token is read that is to be
/// expanded as a macro, handle it and return the next token as 'Identifier'.
bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
MacroInfo *MI) {
// If this is a macro expansion in the "#if !defined(x)" line for the file,
// then the macro could expand to different things in other contexts, we need
// to disable the optimization in this case.
if (CurPPLexer) CurPPLexer->MIOpt.ExpandedMacro();
// If this is a builtin macro, like __LINE__ or _Pragma, handle it specially.
if (MI->isBuiltinMacro()) {
if (Callbacks) Callbacks->MacroExpands(Identifier, MI,
Identifier.getLocation());
ExpandBuiltinMacro(Identifier);
return false;
}
/// Args - If this is a function-like macro expansion, this contains,
/// for each macro argument, the list of tokens that were provided to the
/// invocation.
MacroArgs *Args = 0;
// Remember where the end of the expansion occurred. For an object-like
// macro, this is the identifier. For a function-like macro, this is the ')'.
SourceLocation ExpansionEnd = Identifier.getLocation();
// If this is a function-like macro, read the arguments.
if (MI->isFunctionLike()) {
// C99 6.10.3p10: If the preprocessing token immediately after the the macro
// name isn't a '(', this macro should not be expanded.
if (!isNextPPTokenLParen())
return true;
// Remember that we are now parsing the arguments to a macro invocation.
// Preprocessor directives used inside macro arguments are not portable, and
// this enables the warning.
InMacroArgs = true;
Args = ReadFunctionLikeMacroArgs(Identifier, MI, ExpansionEnd);
// Finished parsing args.
InMacroArgs = false;
// If there was an error parsing the arguments, bail out.
if (Args == 0) return false;
++NumFnMacroExpanded;
} else {
++NumMacroExpanded;
}
// Notice that this macro has been used.
markMacroAsUsed(MI);
// Remember where the token is expanded.
SourceLocation ExpandLoc = Identifier.getLocation();
SourceRange ExpansionRange(ExpandLoc, ExpansionEnd);
if (Callbacks) {
if (InMacroArgs) {
// We can have macro expansion inside a conditional directive while
// reading the function macro arguments. To ensure, in that case, that
// MacroExpands callbacks still happen in source order, queue this
// callback to have it happen after the function macro callback.
DelayedMacroExpandsCallbacks.push_back(
MacroExpandsInfo(Identifier, MI, ExpansionRange));
} else {
Callbacks->MacroExpands(Identifier, MI, ExpansionRange);
if (!DelayedMacroExpandsCallbacks.empty()) {
for (unsigned i=0, e = DelayedMacroExpandsCallbacks.size(); i!=e; ++i) {
MacroExpandsInfo &Info = DelayedMacroExpandsCallbacks[i];
Callbacks->MacroExpands(Info.Tok, Info.MI, Info.Range);
}
DelayedMacroExpandsCallbacks.clear();
}
}
}
// If we started lexing a macro, enter the macro expansion body.
// If this macro expands to no tokens, don't bother to push it onto the
// expansion stack, only to take it right back off.
if (MI->getNumTokens() == 0) {
// No need for arg info.
if (Args) Args->destroy(*this);
// Ignore this macro use, just return the next token in the current
// buffer.
bool HadLeadingSpace = Identifier.hasLeadingSpace();
bool IsAtStartOfLine = Identifier.isAtStartOfLine();
Lex(Identifier);
// If the identifier isn't on some OTHER line, inherit the leading
// whitespace/first-on-a-line property of this token. This handles
// stuff like "! XX," -> "! ," and " XX," -> " ,", when XX is
// empty.
if (!Identifier.isAtStartOfLine()) {
if (IsAtStartOfLine) Identifier.setFlag(Token::StartOfLine);
if (HadLeadingSpace) Identifier.setFlag(Token::LeadingSpace);
}
//.........这里部分代码省略.........
Example 15: EvaluateDefined
/// EvaluateDefined - Evaluate a 'defined X' or 'defined(X)' subexpression of
/// a preprocessor conditional.
///
/// On entry, PeekTok is the 'defined' token; on exit it holds the token that
/// follows the whole expression. Fills in Result with the begin/end locations
/// and a signed 0/1 value, and records the queried macro in DT. Returns true
/// after emitting a diagnostic on a parse error, false on success.
static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
                            bool ValueLive, Preprocessor &PP) {
  Result.setBegin(PeekTok.getLocation());

  // Read the operand of 'defined' without macro-expanding it.
  PP.LexUnexpandedNonComment(PeekTok);

  // The operand is either a bare pp-identifier or a parenthesized one;
  // remember the '(' location (if any) so we can require a matching ')'.
  SourceLocation LParenLoc;
  if (PeekTok.is(tok::l_paren)) {
    LParenLoc = PeekTok.getLocation();
    PP.LexUnexpandedNonComment(PeekTok);
  }

  // Let code completion suggest macro names at this point, then move on to
  // whatever token follows the completion marker.
  if (PeekTok.is(tok::code_completion)) {
    if (PP.getCodeCompletionHandler())
      PP.getCodeCompletionHandler()->CodeCompleteMacroName(false);
    PP.setCodeCompletionReached();
    PP.LexUnexpandedNonComment(PeekTok);
  }

  // Anything other than a pp-identifier here is ill-formed.
  IdentifierInfo *II = PeekTok.getIdentifierInfo();
  if (!II) {
    PP.Diag(PeekTok, diag::err_pp_defined_requires_identifier);
    return true;
  }

  // The value of 'defined' is whether the identifier names a macro; the
  // result is signed intmax_t per the preprocessor arithmetic rules.
  Result.Val = II->hasMacroDefinition();
  Result.Val.setIsUnsigned(false);

  // When this subexpression's value matters, treat the macro as used.
  if (ValueLive && Result.Val != 0)
    PP.markMacroAsUsed(PP.getMacroInfo(II));

  // Notify any registered callbacks about the 'defined' query.
  if (PPCallbacks *CB = PP.getPPCallbacks())
    CB->Defined(PeekTok);

  // The expression ends at the identifier...
  Result.setEnd(PeekTok.getLocation());
  if (LParenLoc.isValid()) {
    // ...unless we saw a '(', in which case a ')' must follow and becomes
    // the end of the expression.
    PP.LexUnexpandedNonComment(PeekTok);
    if (PeekTok.isNot(tok::r_paren)) {
      PP.Diag(PeekTok.getLocation(), diag::err_pp_missing_rparen) << "defined";
      PP.Diag(LParenLoc, diag::note_matching) << "(";
      return true;
    }
    Result.setEnd(PeekTok.getLocation());
  }
  // Consume the final token of the expression (identifier or ')').
  PP.LexNonComment(PeekTok);

  // Success: record that we evaluated defined(II).
  DT.State = DefinedTracker::DefinedMacro;
  DT.TheMacro = II;
  return false;
}