This article collects typical usage examples of the C++ method SourceManager::getDecomposedLoc. If you are wondering what SourceManager::getDecomposedLoc does, how to use it, or where to find realistic examples of it, the hand-picked code samples below may help. You can also browse further usage examples of its containing class, SourceManager.
The following 15 code examples of SourceManager::getDecomposedLoc are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
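Before the examples, here is a minimal sketch of the pattern they all share: getDecomposedLoc() splits a SourceLocation into a (FileID, byte offset) pair, which can then be used to index into that file's buffer. This snippet is not one of the collected examples; the helper name textAt is hypothetical, written against the clang::SourceManager API.

#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringRef.h"

using namespace clang;

// Return the `Len` bytes of raw source text starting at `Loc`, or an empty
// StringRef if the buffer cannot be loaded or the range runs off its end.
// `Loc` is assumed to be a file location, not a macro location.
static llvm::StringRef textAt(const SourceManager &SM, SourceLocation Loc,
                              unsigned Len) {
  // getDecomposedLoc() splits the location into the owning FileID and the
  // byte offset of the location within that file's buffer.
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  bool Invalid = false;
  llvm::StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid || LocInfo.second + Len > Buffer.size())
    return llvm::StringRef();
  return Buffer.substr(LocInfo.second, Len);
}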
Example 1: getRawTextSlow
StringRef RawComment::getRawTextSlow(const SourceManager &SourceMgr) const {
  FileID BeginFileID;
  FileID EndFileID;
  unsigned BeginOffset;
  unsigned EndOffset;

  std::tie(BeginFileID, BeginOffset) =
      SourceMgr.getDecomposedLoc(Range.getBegin());
  std::tie(EndFileID, EndOffset) = SourceMgr.getDecomposedLoc(Range.getEnd());

  const unsigned Length = EndOffset - BeginOffset;
  if (Length < 2)
    return StringRef();

  // The comment can't begin in one file and end in another.
  assert(BeginFileID == EndFileID);

  bool Invalid = false;
  const char *BufferStart = SourceMgr.getBufferData(BeginFileID,
                                                    &Invalid).data();
  if (Invalid)
    return StringRef();

  return StringRef(BufferStart + BeginOffset, Length);
}
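As a hedged usage note for Example 1: a RawComment is normally obtained from the ASTContext, and getRawText() (which caches the result of getRawTextSlow()) is called with the context's SourceManager. The helper name printDeclComment below is hypothetical, and the snippet assumes the usual Clang/LLVM headers.

// Hypothetical helper: print the raw text of the documentation comment
// attached to a declaration, if there is one.
static void printDeclComment(const clang::ASTContext &Ctx,
                             const clang::Decl *D) {
  if (const clang::RawComment *RC = Ctx.getRawCommentForDeclNoCache(D))
    llvm::errs() << RC->getRawText(Ctx.getSourceManager()) << "\n";
}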
Example 2: getText
template <typename T>
static std::string getText(const SourceManager &SourceManager, const T &Node) {
  SourceLocation StartSpellingLocation =
      SourceManager.getSpellingLoc(Node.getLocStart());
  SourceLocation EndSpellingLocation =
      SourceManager.getSpellingLoc(Node.getLocEnd());
  if (!StartSpellingLocation.isValid() || !EndSpellingLocation.isValid()) {
    return std::string();
  }
  bool Invalid = true;
  const char *Text =
      SourceManager.getCharacterData(StartSpellingLocation, &Invalid);
  if (Invalid) {
    return std::string();
  }
  std::pair<FileID, unsigned> Start =
      SourceManager.getDecomposedLoc(StartSpellingLocation);
  std::pair<FileID, unsigned> End =
      SourceManager.getDecomposedLoc(Lexer::getLocForEndOfToken(
          EndSpellingLocation, 0, SourceManager, LangOptions()));
  if (Start.first != End.first) {
    // Start and end are in different files.
    return std::string();
  }
  if (End.second < Start.second) {
    // Shuffling text with macros may cause this.
    return std::string();
  }
  return std::string(Text, End.second - Start.second);
}
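A hedged usage sketch for Example 2: the templated getText() helper is typically called from an AST matcher callback. The class name PrintCallText and the bind name "call" are hypothetical, and the snippet assumes the older Clang API used above (getLocStart/getLocEnd) plus the ASTMatchers headers.

// Hypothetical matcher callback that prints the source text of every matched
// call expression using the getText() helper above.
class PrintCallText : public clang::ast_matchers::MatchFinder::MatchCallback {
public:
  void run(const clang::ast_matchers::MatchFinder::MatchResult &Result) override {
    if (const auto *Call = Result.Nodes.getNodeAs<clang::CallExpr>("call"))
      llvm::errs() << getText(*Result.SourceManager, *Call) << "\n";
  }
};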
Example 3: getRangeSize
// FIXME: This should go into the Lexer, but we need to figure out how
// to handle ranges for refactoring in general first - there is no obvious
// good way to integrate this into the Lexer yet.
static int getRangeSize(SourceManager &Sources, const CharSourceRange &Range) {
  SourceLocation SpellingBegin = Sources.getSpellingLoc(Range.getBegin());
  SourceLocation SpellingEnd = Sources.getSpellingLoc(Range.getEnd());
  std::pair<FileID, unsigned> Start = Sources.getDecomposedLoc(SpellingBegin);
  std::pair<FileID, unsigned> End = Sources.getDecomposedLoc(SpellingEnd);
  if (Start.first != End.first) return -1;
  if (Range.isTokenRange())
    End.second += Lexer::MeasureTokenLength(SpellingEnd, Sources,
                                            LangOptions());
  return End.second - Start.second;
}
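A hedged usage sketch for Example 3: the length of the text covered by a statement can be computed by wrapping its source range in a token CharSourceRange. The helper name stmtSize is hypothetical.

// Hypothetical helper: number of characters covered by a statement's source
// range, or -1 if the range spans more than one file.
static int stmtSize(clang::SourceManager &Sources, const clang::Stmt &S) {
  return getRangeSize(
      Sources, clang::CharSourceRange::getTokenRange(S.getSourceRange()));
}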
Example 4: getImmediateMacroName
/// \brief Retrieve the name of the immediate macro expansion.
///
/// This routine starts from a source location, and finds the name of the macro
/// responsible for its immediate expansion. It looks through any intervening
/// macro argument expansions to compute this. It returns a StringRef which
/// refers to the SourceManager-owned buffer of the source where that macro
/// name is spelled. Thus, the result shouldn't out-live that SourceManager.
///
/// This differs from Lexer::getImmediateMacroName in that any macro argument
/// location will result in the topmost function macro that accepted it.
/// e.g.
/// \code
///   MAC1( MAC2(foo) )
/// \endcode
/// for the location of the 'foo' token, this function will return "MAC1" while
/// Lexer::getImmediateMacroName will return "MAC2".
static StringRef getImmediateMacroName(SourceLocation Loc,
                                       const SourceManager &SM,
                                       const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");

  // Walk past macro argument expansions.
  while (SM.isMacroArgExpansion(Loc))
    Loc = SM.getImmediateExpansionRange(Loc).first;

  // If the macro's spelling has no FileID, then it's actually a token paste
  // or stringization (or similar) and not a macro at all.
  if (!SM.getFileEntryForID(SM.getFileID(SM.getSpellingLoc(Loc))))
    return StringRef();

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).first);

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
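A hedged usage sketch for Example 4: printing the name of the macro whose expansion produced a location, e.g. while emitting a diagnostic note. The helper name noteMacroExpansion is hypothetical.

// Hypothetical helper: print the name of the macro whose expansion produced
// Loc, using the getImmediateMacroName() helper above.
static void noteMacroExpansion(clang::SourceLocation Loc,
                               const clang::SourceManager &SM,
                               const clang::LangOptions &LangOpts) {
  if (Loc.isMacroID())
    llvm::errs() << "expanded from macro '"
                 << getImmediateMacroName(Loc, SM, LangOpts) << "'\n";
}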
Example 5: getBeginLine
unsigned RawComment::getBeginLine(const SourceManager &SM) const {
  if (BeginLineValid)
    return BeginLine;

  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Range.getBegin());
  BeginLine = SM.getLineNumber(LocInfo.first, LocInfo.second);
  BeginLineValid = true;
  return BeginLine;
}
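The same decompose-then-query pattern from Example 5 works for columns as well; getLineNumber() and getColumnNumber() both take the (FileID, offset) pair. The helper below is a hypothetical sketch, not part of RawComment.

// Hypothetical helper: return the (line, column) of a location by decomposing
// it once and querying both numbers from the resulting pair.
static std::pair<unsigned, unsigned>
getLineAndColumn(const clang::SourceManager &SM, clang::SourceLocation Loc) {
  std::pair<clang::FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  return std::make_pair(SM.getLineNumber(LocInfo.first, LocInfo.second),
                        SM.getColumnNumber(LocInfo.first, LocInfo.second));
}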
Example 6: setFromSourceLocation
void Replacement::setFromSourceLocation(const SourceManager &Sources,
                                        SourceLocation Start, unsigned Length,
                                        StringRef ReplacementText) {
  const std::pair<FileID, unsigned> DecomposedLocation =
      Sources.getDecomposedLoc(Start);
  const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
  this->FilePath = Entry ? Entry->getName() : InvalidLocation;
  this->ReplacementRange = Range(DecomposedLocation.second, Length);
  this->ReplacementText = ReplacementText;
}
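setFromSourceLocation() is a private helper of clang::tooling::Replacement; user code normally reaches it through the public constructor, as in the hedged sketch below. The function name makeReplacement is hypothetical.

// Hypothetical helper: build a Replacement that replaces `Length` characters
// starting at `Start` with `Text`; the constructor decomposes the location
// internally, much like the helper above.
static clang::tooling::Replacement
makeReplacement(const clang::SourceManager &Sources,
                clang::SourceLocation Start, unsigned Length,
                llvm::StringRef Text) {
  return clang::tooling::Replacement(Sources, Start, Length, Text);
}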
Example 7: splitStringLiteral
// This function is an adaptation of StringLiteral::getLocationOfByte in
// llvm-3.7.1\src\tools\clang\lib\AST\Expr.cpp.
std::vector<std::string>
splitStringLiteral(StringLiteral *S, const SourceManager &SM,
                   const LangOptions &Features, const TargetInfo &Target)
{
  std::vector<std::string> result;

  // Loop over all of the tokens that were concatenated into this string
  // literal and collect the spelling of each one.
  for (unsigned TokNo = 0; TokNo < S->getNumConcatenated(); ++TokNo)
  {
    SourceLocation StrTokLoc = S->getStrTokenLoc(TokNo);

    // Get the spelling of the string so that we can get the data that makes up
    // the string literal, not the identifier for the macro it is potentially
    // expanded through.
    SourceLocation StrTokSpellingLoc = SM.getSpellingLoc(StrTokLoc);

    // Re-lex the token to get its length and original spelling.
    std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(StrTokSpellingLoc);
    bool Invalid = false;
    StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
    if (Invalid)
      continue; // We ignore this part.

    const char *StrData = Buffer.data() + LocInfo.second;

    // Create a lexer starting at the beginning of this token.
    Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), Features,
                   Buffer.begin(), StrData, Buffer.end());
    Token TheTok;
    TheLexer.LexFromRawLexer(TheTok);
    if (TheTok.isAnyIdentifier())
    {
      // This should not happen, since we are lexing inside a string literal,
      // but it can with special macros such as __func__ or __PRETTY_FUNCTION__
      // that are not resolved at this point. In that case we just skip them.
      continue;
    }

    // Get the spelling of the token.
    SmallString<32> SpellingBuffer;
    SpellingBuffer.resize(TheTok.getLength());
    bool StringInvalid = false;
    const char *SpellingPtr = &SpellingBuffer[0];
    unsigned TokLen = Lexer::getSpelling(TheTok, SpellingPtr, SM, Features,
                                         &StringInvalid);
    if (StringInvalid)
      continue;

    const char *SpellingStart = SpellingPtr;
    const char *SpellingEnd = SpellingPtr + TokLen;
    result.push_back(std::string(SpellingStart, SpellingEnd));
  }
  return result;
}
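A hedged usage sketch for Example 7: with a fully initialized CompilerInstance, the pieces of a concatenated string literal could be printed as follows. The helper name dumpPieces is hypothetical, and "clang/Frontend/CompilerInstance.h" is assumed to be included.

// Hypothetical helper: print each concatenated piece of a string literal,
// using the splitStringLiteral() adaptation above.
static void dumpPieces(clang::StringLiteral *S, clang::CompilerInstance &CI) {
  for (const std::string &Piece :
       splitStringLiteral(S, CI.getSourceManager(), CI.getLangOpts(),
                          CI.getTarget()))
    llvm::errs() << Piece << "\n";
}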
Example 8: setFromSourceLocation
void Replacement::setFromSourceLocation(SourceManager &Sources,
                                        SourceLocation Start, unsigned Length,
                                        llvm::StringRef ReplacementText) {
  const std::pair<FileID, unsigned> DecomposedLocation =
      Sources.getDecomposedLoc(Start);
  const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
  this->FilePath = Entry != NULL ? Entry->getName() : InvalidLocation;
  this->Offset = DecomposedLocation.second;
  this->Length = Length;
  this->ReplacementText = ReplacementText;
}
Example 9: getLocationOfByte
/// getLocationOfByte - Return a source location that points to the specified
/// byte of this string literal.
///
/// Strings are amazingly complex. They can be formed from multiple tokens and
/// can have escape sequences in them in addition to the usual trigraph and
/// escaped newline business. This routine handles this complexity.
///
SourceLocation
StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
                                 const LangOptions &Features,
                                 const TargetInfo &Target) const {
  // Loop over all of the tokens in this string until we find the one that
  // contains the byte we're looking for.
  unsigned TokNo = 0;
  while (1) {
    assert(TokNo < getNumConcatenated() && "Invalid byte number!");
    SourceLocation StrTokLoc = getStrTokenLoc(TokNo);

    // Get the spelling of the string so that we can get the data that makes up
    // the string literal, not the identifier for the macro it is potentially
    // expanded through.
    SourceLocation StrTokSpellingLoc = SM.getSpellingLoc(StrTokLoc);

    // Re-lex the token to get its length and original spelling.
    std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(StrTokSpellingLoc);
    bool Invalid = false;
    llvm::StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
    if (Invalid)
      return StrTokSpellingLoc;

    const char *StrData = Buffer.data() + LocInfo.second;

    // Create a langopts struct and enable trigraphs. This is sufficient for
    // relexing tokens.
    LangOptions LangOpts;
    LangOpts.Trigraphs = true;

    // Create a lexer starting at the beginning of this token.
    Lexer TheLexer(StrTokSpellingLoc, Features, Buffer.begin(), StrData,
                   Buffer.end());
    Token TheTok;
    TheLexer.LexFromRawLexer(TheTok);

    // Use the StringLiteralParser to compute the length of the string in bytes.
    StringLiteralParser SLP(&TheTok, 1, SM, Features, Target);
    unsigned TokNumBytes = SLP.GetStringLength();

    // If the byte is in this token, return the location of the byte.
    if (ByteNo < TokNumBytes ||
        (ByteNo == TokNumBytes && TokNo == getNumConcatenated() - 1)) {
      unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);

      // Now that we know the offset of the token in the spelling, use the
      // preprocessor to get the offset in the original source.
      return Lexer::AdvanceToTokenCharacter(StrTokLoc, Offset, SM, Features);
    }

    // Move to the next string token.
    ++TokNo;
    ByteNo -= TokNumBytes;
  }
}
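A hedged usage sketch for Example 9: getLocationOfByte() is the kind of API format-string diagnostics use to point at the exact offending byte instead of the start of the whole literal. The helper name pointAtFormatByte is hypothetical, and DiagID is assumed to be an already-registered diagnostic ID.

// Hypothetical helper: emit a diagnostic that points at byte `ByteNo` of a
// format string literal.
static void pointAtFormatByte(clang::Sema &S,
                              const clang::StringLiteral *FormatStr,
                              unsigned ByteNo, unsigned DiagID) {
  clang::SourceLocation Loc = FormatStr->getLocationOfByte(
      ByteNo, S.getSourceManager(), S.getLangOpts(),
      S.Context.getTargetInfo());
  S.Diag(Loc, DiagID);
}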
Example 10: onlyWhitespaceBetween
static bool onlyWhitespaceBetween(SourceManager &SM,
                                  SourceLocation Loc1, SourceLocation Loc2,
                                  unsigned MaxNewlinesAllowed) {
  std::pair<FileID, unsigned> Loc1Info = SM.getDecomposedLoc(Loc1);
  std::pair<FileID, unsigned> Loc2Info = SM.getDecomposedLoc(Loc2);

  // Question does not make sense if locations are in different files.
  if (Loc1Info.first != Loc2Info.first)
    return false;

  bool Invalid = false;
  const char *Buffer = SM.getBufferData(Loc1Info.first, &Invalid).data();
  if (Invalid)
    return false;

  unsigned NumNewlines = 0;
  assert(Loc1Info.second <= Loc2Info.second && "Loc1 after Loc2!");
  // Look for non-whitespace characters and remember any newlines seen.
  for (unsigned I = Loc1Info.second; I != Loc2Info.second; ++I) {
    switch (Buffer[I]) {
    default:
      return false;
    case ' ':
    case '\t':
    case '\f':
    case '\v':
      break;
    case '\r':
    case '\n':
      ++NumNewlines;

      // Check if we have found more than the maximum allowed number of
      // newlines.
      if (NumNewlines > MaxNewlinesAllowed)
        return false;

      // Collapse \r\n and \n\r into a single newline.
      if (I + 1 != Loc2Info.second &&
          (Buffer[I + 1] == '\n' || Buffer[I + 1] == '\r') &&
          Buffer[I] != Buffer[I + 1])
        ++I;
      break;
    }
  }

  return true;
}
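A hedged usage sketch for Example 10: checking whether two declarations in the same file are separated only by whitespace with at most one line break. The helper name areAdjacent is hypothetical; it assumes First precedes Second and that both locations are file locations, and it uses the older getLocStart/getLocEnd API seen in the examples above.

// Hypothetical helper: true if only whitespace (with at most one newline)
// separates the end of `First` from the beginning of `Second`.
static bool areAdjacent(clang::SourceManager &SM,
                        const clang::LangOptions &LangOpts,
                        const clang::Decl &First, const clang::Decl &Second) {
  // getLocEnd() points at the start of the last token, so advance past that
  // token first with Lexer::getLocForEndOfToken().
  clang::SourceLocation AfterFirst = clang::Lexer::getLocForEndOfToken(
      First.getLocEnd(), 0, SM, LangOpts);
  return AfterFirst.isValid() &&
         onlyWhitespaceBetween(SM, AfterFirst, Second.getLocStart(),
                               /*MaxNewlinesAllowed=*/1);
}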
Example 11: CheckRemoval
// Checks if the 'typedef' keyword can be removed - we do it only if
// it is the only declaration in a declaration chain.
static bool CheckRemoval(SourceManager &SM, SourceLocation StartLoc,
                         ASTContext &Context) {
  assert(StartLoc.isFileID() && "StartLoc must not be in a macro");

  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(StartLoc);
  StringRef File = SM.getBufferData(LocInfo.first);
  const char *TokenBegin = File.data() + LocInfo.second;
  Lexer DeclLexer(SM.getLocForStartOfFile(LocInfo.first), Context.getLangOpts(),
                  File.begin(), TokenBegin, File.end());

  Token Tok;
  int ParenLevel = 0;
  bool FoundTypedef = false;

  while (!DeclLexer.LexFromRawLexer(Tok) && !Tok.is(tok::semi)) {
    switch (Tok.getKind()) {
    case tok::l_brace:
    case tok::r_brace:
      // This might be the `typedef struct {...} T;` case.
      return false;
    case tok::l_paren:
      ParenLevel++;
      break;
    case tok::r_paren:
      ParenLevel--;
      break;
    case tok::comma:
      if (ParenLevel == 0) {
        // If there is a comma and we are not inside parentheses, then this
        // chain contains two or more declarations.
        return false;
      }
      break;
    case tok::raw_identifier:
      if (Tok.getRawIdentifier() == "typedef") {
        FoundTypedef = true;
      }
      break;
    default:
      break;
    }
  }

  // Sanity check against weird macro cases.
  return FoundTypedef;
}
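A hedged usage sketch for Example 11, roughly how a clang-tidy style check might guard a fix-it on the raw re-lexing above. The function name canRewriteTypedef is hypothetical; getLocStart() corresponds to getBeginLoc() in newer Clang.

// Hypothetical guard: only offer a 'typedef' -> 'using' rewrite when the
// typedef is outside macros and is the sole declaration in its chain.
static bool canRewriteTypedef(clang::SourceManager &SM,
                              const clang::TypedefDecl *D,
                              clang::ASTContext &Context) {
  clang::SourceLocation Start = D->getLocStart();
  return Start.isFileID() && CheckRemoval(SM, Start, Context);
}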
Example 12: setFromSourceLocation
void Replacement::setFromSourceLocation(const SourceManager &Sources,
                                        SourceLocation Start, unsigned Length,
                                        StringRef ReplacementText) {
  const std::pair<FileID, unsigned> DecomposedLocation =
      Sources.getDecomposedLoc(Start);
  const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
  if (Entry) {
    // Make FilePath absolute so replacements can be applied correctly when
    // relative paths for files are used.
    llvm::SmallString<256> FilePath(Entry->getName());
    std::error_code EC = llvm::sys::fs::make_absolute(FilePath);
    this->FilePath = EC ? FilePath.c_str() : Entry->getName();
  } else {
    this->FilePath = InvalidLocation;
  }
  this->ReplacementRange = Range(DecomposedLocation.second, Length);
  this->ReplacementText = ReplacementText;
}
Example 13: emitParseableFixits
void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints,
                                         const SourceManager &SM) {
  if (!DiagOpts->ShowParseableFixits)
    return;

  // We follow FixItRewriter's example in not (yet) handling
  // fix-its in macros.
  for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
       I != E; ++I) {
    if (I->RemoveRange.isInvalid() ||
        I->RemoveRange.getBegin().isMacroID() ||
        I->RemoveRange.getEnd().isMacroID())
      return;
  }

  for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
       I != E; ++I) {
    SourceLocation BLoc = I->RemoveRange.getBegin();
    SourceLocation ELoc = I->RemoveRange.getEnd();

    std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
    std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);

    // Adjust for token ranges.
    if (I->RemoveRange.isTokenRange())
      EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, LangOpts);

    // We specifically do not do word-wrapping or tab-expansion here,
    // because this is supposed to be easy to parse.
    PresumedLoc PLoc = SM.getPresumedLoc(BLoc);
    if (PLoc.isInvalid())
      break;

    OS << "fix-it:\"";
    OS.write_escaped(PLoc.getFilename());
    OS << "\":{" << SM.getLineNumber(BInfo.first, BInfo.second)
       << ':' << SM.getColumnNumber(BInfo.first, BInfo.second)
       << '-' << SM.getLineNumber(EInfo.first, EInfo.second)
       << ':' << SM.getColumnNumber(EInfo.first, EInfo.second)
       << "}:\"";
    OS.write_escaped(I->CodeToInsert);
    OS << "\"\n";
  }
}
Example 14: ParseTokens
// Re-lex the tokens to get precise locations to insert 'override' and remove
// 'virtual'.
static SmallVector<Token, 16> ParseTokens(CharSourceRange Range,
                                          const SourceManager &Sources,
                                          LangOptions LangOpts) {
  std::pair<FileID, unsigned> LocInfo =
      Sources.getDecomposedLoc(Range.getBegin());
  StringRef File = Sources.getBufferData(LocInfo.first);
  const char *TokenBegin = File.data() + LocInfo.second;
  Lexer RawLexer(Sources.getLocForStartOfFile(LocInfo.first), LangOpts,
                 File.begin(), TokenBegin, File.end());
  SmallVector<Token, 16> Tokens;
  Token Tok;
  while (!RawLexer.LexFromRawLexer(Tok)) {
    if (Tok.is(tok::semi) || Tok.is(tok::l_brace))
      break;
    if (Sources.isBeforeInTranslationUnit(Range.getEnd(), Tok.getLocation()))
      break;
    Tokens.push_back(Tok);
  }
  return Tokens;
}
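A hedged usage sketch for Example 14: scanning the returned tokens for the 'virtual' keyword so a removal fix-it can be anchored on its exact location. The helper name findVirtualKeyword is hypothetical.

// Hypothetical helper: the file location of the 'virtual' keyword inside a
// method's declaration range, or an invalid location if it is not found.
static clang::SourceLocation
findVirtualKeyword(const clang::CXXMethodDecl *Method,
                   const clang::SourceManager &Sources,
                   const clang::LangOptions &LangOpts) {
  clang::CharSourceRange Range =
      clang::CharSourceRange::getTokenRange(Method->getSourceRange());
  for (const clang::Token &Tok : ParseTokens(Range, Sources, LangOpts))
    if (Tok.is(clang::tok::raw_identifier) &&
        Tok.getRawIdentifier() == "virtual")
      return Tok.getLocation();
  return clang::SourceLocation();
}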
Example 15: RawComment
RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
                       bool Merged, bool ParseAllComments) :
    Range(SR), RawTextValid(false), BriefTextValid(false),
    IsAttached(false), IsTrailingComment(false), IsAlmostTrailingComment(false),
    ParseAllComments(ParseAllComments) {
  // Extract raw comment text, if possible.
  if (SR.getBegin() == SR.getEnd() || getRawText(SourceMgr).empty()) {
    Kind = RCK_Invalid;
    return;
  }

  // Guess comment kind.
  std::pair<CommentKind, bool> K = getCommentKind(RawText, ParseAllComments);

  // Guess whether an ordinary comment is trailing.
  if (ParseAllComments && isOrdinaryKind(K.first)) {
    FileID BeginFileID;
    unsigned BeginOffset;
    std::tie(BeginFileID, BeginOffset) =
        SourceMgr.getDecomposedLoc(Range.getBegin());
    if (BeginOffset != 0) {
      bool Invalid = false;
      const char *Buffer =
          SourceMgr.getBufferData(BeginFileID, &Invalid).data();
      IsTrailingComment |=
          (!Invalid && !onlyWhitespaceOnLineBefore(Buffer, BeginOffset));
    }
  }

  if (!Merged) {
    Kind = K.first;
    IsTrailingComment |= K.second;

    IsAlmostTrailingComment = RawText.startswith("//<") ||
        RawText.startswith("/*<");
  } else {
    Kind = RCK_Merged;
    IsTrailingComment =
        IsTrailingComment || mergedCommentIsTrailingComment(RawText);
  }
}