本文整理汇总了Java中com.intellij.lexer.Lexer.getTokenEnd方法的典型用法代码示例。如果您想了解Lexer.getTokenEnd方法的具体用法和常见使用场景，这里精选的方法代码示例或许可以为您提供帮助；您也可以进一步了解该方法所在类com.intellij.lexer.Lexer的用法示例。
在下文中一共展示了Lexer.getTokenEnd方法的11个代码示例，这些示例默认按受欢迎程度排序。
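为了便于理解后面的示例，这里先给出一个最小的用法示意：用Lexer遍历一段文本的全部token，并通过getTokenStart()/getTokenEnd()取得每个token在缓冲区中的偏移区间（getTokenEnd()返回的结束偏移不包含在token内）。示意中的dumpTokens方法名为说明而虚构，具体的Lexer实现需由调用方提供，仅供参考。

import com.intellij.lexer.Lexer;
import com.intellij.psi.tree.IElementType;

// 示意代码：遍历词法器切分出的全部 token，打印类型、偏移区间和对应文本
static void dumpTokens(Lexer lexer, CharSequence text) {
  lexer.start(text);
  for (IElementType type = lexer.getTokenType(); type != null; lexer.advance(), type = lexer.getTokenType()) {
    int start = lexer.getTokenStart();
    int end = lexer.getTokenEnd();  // 当前 token 的结束偏移（不包含该位置的字符）
    System.out.println(type + " [" + start + ", " + end + "): " + text.subSequence(start, end));
  }
}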
示例1: addWordHonoringEscapeSequences
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
public static void addWordHonoringEscapeSequences(CharSequence editorText,
                                                  TextRange literalTextRange,
                                                  int cursorOffset,
                                                  Lexer lexer,
                                                  List<TextRange> result) {
  lexer.start(editorText, literalTextRange.getStartOffset(), literalTextRange.getEndOffset());
  while (lexer.getTokenType() != null) {
    if (lexer.getTokenStart() <= cursorOffset && cursorOffset < lexer.getTokenEnd()) {
      if (StringEscapesTokenTypes.STRING_LITERAL_ESCAPES.contains(lexer.getTokenType())) {
        result.add(new TextRange(lexer.getTokenStart(), lexer.getTokenEnd()));
      }
      else {
        TextRange word = getWordSelectionRange(editorText, cursorOffset, JAVA_IDENTIFIER_PART_CONDITION);
        if (word != null) {
          result.add(new TextRange(Math.max(word.getStartOffset(), lexer.getTokenStart()),
                                   Math.min(word.getEndOffset(), lexer.getTokenEnd())));
        }
      }
      break;
    }
    lexer.advance();
  }
}
示例2: parseScope
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
@Override
@Nullable
public String parseScope(Lexer lexer) {
  if (lexer.getTokenType() != ScopeTokenTypes.IDENTIFIER) return null;
  String id = getTokenText(lexer);
  if (FilePatternPackageSet.SCOPE_FILE.equals(id)) {
    final CharSequence buf = lexer.getBufferSequence();
    final int end = lexer.getTokenEnd();
    final int bufferEnd = lexer.getBufferEnd();
    if (end >= bufferEnd || buf.charAt(end) != ':' && buf.charAt(end) != '[') {
      return null;
    }
    lexer.advance();
    return FilePatternPackageSet.SCOPE_FILE;
  }
  return null;
}
示例3: parseScope
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
@Override
public String parseScope(final Lexer lexer) {
  if (lexer.getTokenType() != ScopeTokenTypes.IDENTIFIER) return PatternPackageSet.SCOPE_ANY;
  String id = getTokenText(lexer);
  String scope = PatternPackageSet.SCOPE_ANY;
  if (PatternPackageSet.SCOPE_SOURCE.equals(id)) {
    scope = PatternPackageSet.SCOPE_SOURCE;
  } else if (PatternPackageSet.SCOPE_TEST.equals(id)) {
    scope = PatternPackageSet.SCOPE_TEST;
  } else if (PatternPackageSet.SCOPE_PROBLEM.equals(id)) {
    scope = PatternPackageSet.SCOPE_PROBLEM;
  } else if (PatternPackageSet.SCOPE_LIBRARY.equals(id)) {
    scope = PatternPackageSet.SCOPE_LIBRARY;
  } else if (!id.trim().isEmpty()) {
    scope = null;
  }
  final CharSequence buf = lexer.getBufferSequence();
  int end = lexer.getTokenEnd();
  int bufferEnd = lexer.getBufferEnd();
  if (scope == PatternPackageSet.SCOPE_ANY || end >= bufferEnd || buf.charAt(end) != ':' && buf.charAt(end) != '[') {
    return PatternPackageSet.SCOPE_ANY;
  }
  if (scope != null) {
    lexer.advance();
  }
  return scope;
}
示例4: canStickTokensTogetherByLexer
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
public static ParserDefinition.SpaceRequirements canStickTokensTogetherByLexer(ASTNode left, ASTNode right, Lexer lexer) {
  String textStr = left.getText() + right.getText();
  lexer.start(textStr, 0, textStr.length());
  if (lexer.getTokenType() != left.getElementType()) return ParserDefinition.SpaceRequirements.MUST;
  if (lexer.getTokenEnd() != left.getTextLength()) return ParserDefinition.SpaceRequirements.MUST;
  lexer.advance();
  if (lexer.getTokenEnd() != textStr.length()) return ParserDefinition.SpaceRequirements.MUST;
  if (lexer.getTokenType() != right.getElementType()) return ParserDefinition.SpaceRequirements.MUST;
  return ParserDefinition.SpaceRequirements.MAY;
}
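上面的工具方法常用于判断两个相邻节点直接拼接后，词法器能否仍将其重新切分成原来的两个token。下面是一个假设性的调用示意（left、right为两个相邻的ASTNode，lexer为对应语言的词法器，均需由调用方提供），仅供参考。

// 示意代码：若返回 MUST，说明两个 token 直接拼接后无法被正确重新切分，节点之间必须保留空白
ParserDefinition.SpaceRequirements requirements = canStickTokensTogetherByLexer(left, right, lexer);
boolean mustKeepSpace = requirements == ParserDefinition.SpaceRequirements.MUST;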
示例5: createOuterLanguageElement
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
protected OuterLanguageElementImpl createOuterLanguageElement(final Lexer lexer, final CharTable table,
                                                              @NotNull IElementType outerElementType) {
  final CharSequence buffer = lexer.getBufferSequence();
  final int tokenStart = lexer.getTokenStart();
  if (tokenStart < 0 || tokenStart > buffer.length()) {
    LOG.error("Invalid start: " + tokenStart + "; " + lexer);
  }
  final int tokenEnd = lexer.getTokenEnd();
  if (tokenEnd < 0 || tokenEnd > buffer.length()) {
    LOG.error("Invalid end: " + tokenEnd + "; " + lexer);
  }
  return new OuterLanguageElementImpl(outerElementType, table.intern(buffer, tokenStart, tokenEnd));
}
示例6: doLexerTest
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
public static void doLexerTest(String text,
                               Lexer lexer,
                               boolean checkTokenText,
                               String... expectedTokens) {
  lexer.start(text);
  int idx = 0;
  int tokenPos = 0;
  while (lexer.getTokenType() != null) {
    if (idx >= expectedTokens.length) {
      StringBuilder remainingTokens = new StringBuilder("\"" + lexer.getTokenType().toString() + "\"");
      lexer.advance();
      while (lexer.getTokenType() != null) {
        remainingTokens.append(",");
        remainingTokens.append(" \"").append(checkTokenText ? lexer.getTokenText() : lexer.getTokenType().toString()).append("\"");
        lexer.advance();
      }
      fail("Too many tokens. Following tokens: " + remainingTokens.toString());
    }
    assertEquals("Token offset mismatch at position " + idx, tokenPos, lexer.getTokenStart());
    String tokenName = checkTokenText ? lexer.getTokenText() : lexer.getTokenType().toString();
    assertEquals("Token mismatch at position " + idx, expectedTokens[idx], tokenName);
    idx++;
    tokenPos = lexer.getTokenEnd();
    lexer.advance();
  }
  if (idx < expectedTokens.length) fail("Not enough tokens");
}
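该测试辅助方法会逐个比对token（按类型名或按文本），并校验偏移的连续性：每个token的起始偏移必须等于上一个token的getTokenEnd()。下面是一个假设性的调用示意，其中MyLexer与期望的token名称均为虚构，仅作演示。

// 示意代码：checkTokenText 为 false 时按 token 类型名比对，为 true 时按 token 文本比对
doLexerTest("foo bar", new MyLexer(), false, "IDENTIFIER", "WHITE_SPACE", "IDENTIFIER");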
示例7: lexemsEqual
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
private static boolean lexemsEqual(final PsiClass classToBind, final PsiClass newClass) {
  Lexer oldTextLexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  Lexer newTextLexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  String oldBuffer = classToBind.getText();
  String newBuffer = newClass.getText();
  oldTextLexer.start(oldBuffer);
  newTextLexer.start(newBuffer);
  while (true) {
    IElementType oldLexem = oldTextLexer.getTokenType();
    IElementType newLexem = newTextLexer.getTokenType();
    if (oldLexem == null || newLexem == null) {
      // must terminate at the same time
      return oldLexem == null && newLexem == null;
    }
    if (oldLexem != newLexem) {
      return false;
    }
    if (oldLexem != TokenType.WHITE_SPACE && oldLexem != JavaDocElementType.DOC_COMMENT) {
      int oldStart = oldTextLexer.getTokenStart();
      int newStart = newTextLexer.getTokenStart();
      int oldLength = oldTextLexer.getTokenEnd() - oldTextLexer.getTokenStart();
      int newLength = newTextLexer.getTokenEnd() - newTextLexer.getTokenStart();
      if (oldLength != newLength) {
        return false;
      }
      for (int i = 0; i < oldLength; i++) {
        if (oldBuffer.charAt(oldStart + i) != newBuffer.charAt(newStart + i)) {
          return false;
        }
      }
    }
    oldTextLexer.advance();
    newTextLexer.advance();
  }
}
示例8: getTokenText
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
private static String getTokenText(Lexer lexer) {
  int start = lexer.getTokenStart();
  int end = lexer.getTokenEnd();
  return lexer.getBufferSequence().subSequence(start, end).toString();
}
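这个辅助方法通过getTokenStart()与getTokenEnd()从缓冲区中截取当前token的文本，效果上与直接调用lexer.getTokenText()（见示例6、示例10）基本等价。如果希望避免生成中间String、直接操作CharSequence，也可以按同样方式返回子序列，下面是一个示意（方法名为虚构，仅供参考）。

// 示意代码：直接返回 CharSequence 子序列，避免额外的 String 拷贝
private static CharSequence getTokenSequence(Lexer lexer) {
  return lexer.getBufferSequence().subSequence(lexer.getTokenStart(), lexer.getTokenEnd());
}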
示例9: createTextChunks
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
// 将 chars 中 [start, end) 且位于同一行内的文本按词法 token 切分，附加语法高亮属性后追加到 result 中
@NotNull
public TextChunk[] createTextChunks(@NotNull UsageInfo2UsageAdapter usageInfo2UsageAdapter,
                                    @NotNull CharSequence chars,
                                    int start,
                                    int end,
                                    boolean selectUsageWithBold,
                                    @NotNull List<TextChunk> result) {
  final Lexer lexer = myHighlighter.getHighlightingLexer();
  final SyntaxHighlighterOverEditorHighlighter highlighter = myHighlighter;
  LOG.assertTrue(start <= end);
  int i = StringUtil.indexOf(chars, '\n', start, end);
  if (i != -1) end = i;
  if (myDocumentStamp != myDocument.getModificationStamp()) {
    highlighter.restart(chars);
    myDocumentStamp = myDocument.getModificationStamp();
  } else if (lexer.getTokenType() == null || lexer.getTokenStart() > start) {
    highlighter.resetPosition(0);  // todo restart from nearest position with initial state
  }
  boolean isBeginning = true;
  for (; lexer.getTokenType() != null; lexer.advance()) {
    int hiStart = lexer.getTokenStart();
    int hiEnd = lexer.getTokenEnd();
    if (hiStart >= end) break;
    hiStart = Math.max(hiStart, start);
    hiEnd = Math.min(hiEnd, end);
    if (hiStart >= hiEnd) { continue; }
    if (isBeginning) {
      String text = chars.subSequence(hiStart, hiEnd).toString();
      if (text.trim().isEmpty()) continue;
    }
    isBeginning = false;
    IElementType tokenType = lexer.getTokenType();
    TextAttributesKey[] tokenHighlights = highlighter.getTokenHighlights(tokenType);
    processIntersectingRange(usageInfo2UsageAdapter, chars, hiStart, hiEnd, tokenHighlights, selectUsageWithBold, result);
  }
  return result.toArray(new TextChunk[result.size()]);
}
示例10: isMinified
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
// 通过统计 token 长度、行长度与多余空白的占比，启发式地判断代码是否被压缩（minified）
protected static boolean isMinified(Lexer lexer, ParserDefinition parserDefinition, TokenSet noWSRequireAfterTokenSet) {
  int offsetIgnoringComments = 0;
  int offsetIgnoringCommentsAndStrings = 0;
  int lineLength = 0;
  int unneededWhitespaceCount = 0;
  IElementType lastTokenType = null;
  TokenSet whitespaceTokens = parserDefinition.getWhitespaceTokens();
  TokenSet stringLiteralElements = parserDefinition.getStringLiteralElements();
  TokenSet commentTokens = parserDefinition.getCommentTokens();
  for (IElementType tokenType = lexer.getTokenType(); tokenType != null; lexer.advance(), tokenType = lexer.getTokenType()) {
    if (commentTokens.contains(tokenType)) {
      lastTokenType = tokenType;
      continue;
    }
    int tokenLength = lexer.getTokenEnd() - lexer.getTokenStart();
    offsetIgnoringComments += tokenLength;
    if (stringLiteralElements.contains(tokenType)) {
      lineLength += tokenLength;
      lastTokenType = tokenType;
      continue;
    }
    offsetIgnoringCommentsAndStrings += tokenLength;
    if (whitespaceTokens.contains(tokenType)) {
      if (!commentTokens.contains(lastTokenType) && tokenLength > 1) {
        lexer.advance();
        if (lexer.getTokenType() == null) {
          // it was last token
          break;
        } else {
          return false;
        }
      }
      if (lexer.getTokenText().contains("\n")) {
        if (lineLength > MIN_LINE_LENGTH) {
          break;
        }
        lineLength = 0;
      }
      if (" ".equals(lexer.getTokenText()) && noWSRequireAfterTokenSet.contains(lastTokenType)) {
        unneededWhitespaceCount++;
      }
    }
    else {
      lineLength += tokenLength;
    }
    if (offsetIgnoringComments >= MAX_OFFSET) {
      break;
    }
    lastTokenType = tokenType;
  }
  return offsetIgnoringComments >= MIN_SIZE &&
         (unneededWhitespaceCount + 0.0d) / offsetIgnoringCommentsAndStrings < MAX_UNNEEDED_OFFSET_PERCENTAGE;
}
示例11: isCommentComplete
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
// 通过对注释文本重新做词法分析，判断块注释/文档注释是否已经正确闭合
public static boolean isCommentComplete(PsiComment comment, CodeDocumentationAwareCommenter commenter, Editor editor) {
  for (CommentCompleteHandler handler : Extensions.getExtensions(CommentCompleteHandler.EP_NAME)) {
    if (handler.isApplicable(comment, commenter)) {
      return handler.isCommentComplete(comment, commenter, editor);
    }
  }
  String commentText = comment.getText();
  final boolean docComment = isDocComment(comment, commenter);
  final String expectedCommentEnd = docComment ? commenter.getDocumentationCommentSuffix() : commenter.getBlockCommentSuffix();
  if (!commentText.endsWith(expectedCommentEnd)) return false;
  final PsiFile containingFile = comment.getContainingFile();
  final Language language = containingFile.getLanguage();
  ParserDefinition parserDefinition = LanguageParserDefinitions.INSTANCE.forLanguage(language);
  if (parserDefinition == null) {
    return true;
  }
  Lexer lexer = parserDefinition.createLexer(containingFile.getProject());
  final String commentPrefix = docComment ? commenter.getDocumentationCommentPrefix() : commenter.getBlockCommentPrefix();
  lexer.start(commentText, commentPrefix == null ? 0 : commentPrefix.length(), commentText.length());
  QuoteHandler fileTypeHandler = TypedHandler.getQuoteHandler(containingFile, editor);
  JavaLikeQuoteHandler javaLikeQuoteHandler = fileTypeHandler instanceof JavaLikeQuoteHandler ?
                                              (JavaLikeQuoteHandler)fileTypeHandler : null;
  while (true) {
    IElementType tokenType = lexer.getTokenType();
    if (tokenType == null) {
      return false;
    }
    if (javaLikeQuoteHandler != null &&
        javaLikeQuoteHandler.getStringTokenTypes() != null &&
        javaLikeQuoteHandler.getStringTokenTypes().contains(tokenType)) {
      String text = commentText.substring(lexer.getTokenStart(), lexer.getTokenEnd());
      int endOffset = comment.getTextRange().getEndOffset();
      if (text.endsWith(expectedCommentEnd) &&
          endOffset < containingFile.getTextLength() &&
          containingFile.getText().charAt(endOffset) == '\n') {
        return true;
      }
    }
    if (tokenType == commenter.getDocumentationCommentTokenType() || tokenType == commenter.getBlockCommentTokenType()) {
      return false;
    }
    if (tokenType == commenter.getLineCommentTokenType() && lexer.getTokenText().contains(commentPrefix)) {
      return false;
    }
    if (lexer.getTokenEnd() == commentText.length()) {
      if (tokenType == commenter.getLineCommentTokenType()) {
        String prefix = commenter.getLineCommentPrefix();
        lexer.start(commentText, lexer.getTokenStart() + (prefix == null ? 0 : prefix.length()), commentText.length());
        lexer.advance();
        continue;
      }
      else if (isInvalidPsi(comment)) {
        return false;
      }
      return true;
    }
    lexer.advance();
  }
}
}