This article collects typical usage examples of the Java method com.intellij.lexer.Lexer.advance. If you are wondering how exactly Lexer.advance is used, or what real-world examples of it look like, the hand-picked code examples below may help. You can also explore further usage examples of its containing class, com.intellij.lexer.Lexer.
A total of 15 code examples of the Lexer.advance method are shown below, sorted by popularity by default.
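All of the examples revolve around the same basic loop: start() positions the lexer on the first token, getTokenType() returns null once the end of the buffer is reached, and advance() moves to the next token. The following is a minimal sketch of that pattern, not taken from any of the examples below; it assumes the IntelliJ Platform jars are on the classpath and reuses the JavaParserDefinition.createLexer call that appears in Example 2 (package names follow the IntelliJ Community sources and may differ across versions).

import com.intellij.lang.java.JavaParserDefinition;
import com.intellij.lexer.Lexer;
import com.intellij.pom.java.LanguageLevel;
import com.intellij.psi.tree.IElementType;

public class LexerAdvanceDemo {
  public static void main(String[] args) {
    // assumption: JavaParserDefinition.createLexer is available, as in Example 2 below
    Lexer lexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
    lexer.start("class Foo { int x = 1; }");
    IElementType type;
    while ((type = lexer.getTokenType()) != null) { // null marks the end of the buffer
      System.out.println(type + " ('" + lexer.getTokenText() + "')");
      lexer.advance(); // move the lexer to the next token
    }
  }
}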
Example 1: getMergeFunction
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Override
public MergeFunction getMergeFunction() {
  return ((final IElementType type, final Lexer originalLexer) -> {
    if (type == SoyTypes.OTHER || type == TokenType.WHITE_SPACE) {
      IElementType returnType = type;
      // collapse a run of OTHER/whitespace tokens into a single token,
      // advancing the underlying lexer until something else is found
      while (originalLexer.getTokenType() == SoyTypes.OTHER
          || originalLexer.getTokenType() == TokenType.WHITE_SPACE) {
        if (originalLexer.getTokenType() == SoyTypes.OTHER) {
          returnType = SoyTypes.OTHER;
        }
        originalLexer.advance();
      }
      return returnType;
    }
    return type;
  });
}
Example 2: getErrorsCount
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Override
public int getErrorsCount(final CharSequence seq, Language fileLanguage, final Project project) {
  Lexer lexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  lexer.start(seq);
  if (lexer.getTokenType() != JavaTokenType.LBRACE) return IErrorCounterReparseableElementType.FATAL_ERROR;
  lexer.advance();
  int balance = 1;
  // track the brace balance; tokens remaining after the balance reaches 0 make the fragment non-reparseable
  while (true) {
    IElementType type = lexer.getTokenType();
    if (type == null) break;
    if (balance == 0) return IErrorCounterReparseableElementType.FATAL_ERROR;
    if (type == JavaTokenType.LBRACE) {
      balance++;
    }
    else if (type == JavaTokenType.RBRACE) {
      balance--;
    }
    lexer.advance();
  }
  return balance;
}
Example 3: getErrorsCount
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Override
public int getErrorsCount(final CharSequence seq, Language fileLanguage, final Project project) {
  final Lexer lexer = new GroovyLexer();
  lexer.start(seq);
  if (lexer.getTokenType() != GroovyTokenTypes.mLCURLY) return FATAL_ERROR;
  lexer.advance();
  int balance = 1;
  // same curly-brace balance check as Example 2, for Groovy code blocks
  while (true) {
    IElementType type = lexer.getTokenType();
    if (type == null) break;
    if (balance == 0) return FATAL_ERROR;
    if (type == GroovyTokenTypes.mLCURLY) {
      balance++;
    }
    else if (type == GroovyTokenTypes.mRCURLY) {
      balance--;
    }
    lexer.advance();
  }
  return balance;
}
Example 4: checkCorrectRestart
import com.intellij.lexer.Lexer; // import the package/class the method depends on
protected void checkCorrectRestart(String text) {
  Lexer mainLexer = createLexer();
  String allTokens = printTokens(text, 0, mainLexer);
  Lexer auxLexer = createLexer();
  auxLexer.start(text);
  while (true) {
    IElementType type = auxLexer.getTokenType();
    if (type == null) {
      break;
    }
    if (auxLexer.getState() == 0) {
      // state 0 means lexing may restart from this token; the token stream from here
      // must match the tail of the full token stream
      int tokenStart = auxLexer.getTokenStart();
      String subTokens = printTokens(text, tokenStart, mainLexer);
      if (!allTokens.endsWith(subTokens)) {
        assertEquals("Restarting impossible from offset " + tokenStart + "; lexer state should not return 0 at this point", allTokens, subTokens);
      }
    }
    auxLexer.advance();
  }
}
Example 5: printTokens
import com.intellij.lexer.Lexer; // import the package/class the method depends on
public static String printTokens(CharSequence text, int start, Lexer lexer) {
  lexer.start(text, start, text.length());
  String result = "";
  while (true) {
    IElementType tokenType = lexer.getTokenType();
    if (tokenType == null) {
      break;
    }
    String tokenText = getTokenText(lexer);
    String tokenTypeName = tokenType.toString();
    String line = tokenTypeName + " ('" + tokenText + "')\n";
    result += line;
    lexer.advance();
  }
  return result;
}
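Examples 4 and 5 look like the restartability helpers from IntelliJ's LexerTestCase. Assuming that context, a hypothetical test subclass could drive them as sketched below; the class name, test name, and input string are made up for illustration, and the lexer construction is borrowed from Example 2.

// Sketch only, not part of the original examples: a LexerTestCase subclass verifying restartability.
public class JavaLexerRestartTest extends LexerTestCase {
  @Override
  protected Lexer createLexer() {
    // assumption: reuse the Java lexer construction shown in Example 2
    return JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  }

  @Override
  protected String getDirPath() {
    return ""; // no test-data files are needed for this check
  }

  public void testRestartability() {
    checkCorrectRestart("class Foo { void bar() { int x = 0; } }");
  }
}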
Example 6: parseCardinality
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Nullable
public static XPath2SequenceType.Cardinality parseCardinality(Lexer lexer) {
  if (lexer.getTokenType() == XPath2TokenTypes.QUEST) {
    lexer.advance();
    return XPath2SequenceType.Cardinality.OPTIONAL;
  } else if (lexer.getTokenType() == XPathTokenTypes.MULT || lexer.getTokenType() == XPathTokenTypes.STAR) {
    lexer.advance();
    return XPath2SequenceType.Cardinality.ZERO_OR_MORE;
  } else if (lexer.getTokenType() == XPathTokenTypes.PLUS) {
    lexer.advance();
    return XPath2SequenceType.Cardinality.ONE_OR_MORE;
  }
  return null;
}
Example 7: lookingAt
import com.intellij.lexer.Lexer; // import the package/class the method depends on
private static boolean lookingAt(Lexer baseLexer, IElementType... tokens) {
  // look ahead for the given token sequence, then restore the lexer to its original position
  final LexerPosition position = baseLexer.getCurrentPosition();
  try {
    for (IElementType token : tokens) {
      baseLexer.advance();
      skipWhitespaceAnComments(baseLexer);
      if (baseLexer.getTokenType() != token) {
        return false;
      }
    }
    return true;
  } finally {
    baseLexer.restore(position);
  }
}
Example 8: isParsable
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Override
public boolean isParsable(final CharSequence buffer, Language fileLanguage, final Project project) {
  if (!StringUtil.startsWith(buffer, "/**") || !StringUtil.endsWith(buffer, "*/")) return false;
  Lexer lexer = JavaParserDefinition.createLexer(LanguageLevelProjectExtension.getInstance(project).getLanguageLevel());
  lexer.start(buffer);
  if (lexer.getTokenType() == DOC_COMMENT) {
    lexer.advance();
    // parsable only if the doc comment is the one and only token in the buffer
    if (lexer.getTokenType() == null) {
      return true;
    }
  }
  return false;
}
Example 9: lexemsEqual
import com.intellij.lexer.Lexer; // import the package/class the method depends on
private static boolean lexemsEqual(final PsiClass classToBind, final PsiClass newClass) {
  Lexer oldTextLexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  Lexer newTextLexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  String oldBuffer = classToBind.getText();
  String newBuffer = newClass.getText();
  oldTextLexer.start(oldBuffer);
  newTextLexer.start(newBuffer);
  while (true) {
    IElementType oldLexem = oldTextLexer.getTokenType();
    IElementType newLexem = newTextLexer.getTokenType();
    if (oldLexem == null || newLexem == null) {
      // must terminate at the same time
      return oldLexem == null && newLexem == null;
    }
    if (oldLexem != newLexem) {
      return false;
    }
    if (oldLexem != TokenType.WHITE_SPACE && oldLexem != JavaDocElementType.DOC_COMMENT) {
      // compare token text character by character; whitespace and doc comments are ignored
      int oldStart = oldTextLexer.getTokenStart();
      int newStart = newTextLexer.getTokenStart();
      int oldLength = oldTextLexer.getTokenEnd() - oldTextLexer.getTokenStart();
      int newLength = newTextLexer.getTokenEnd() - newTextLexer.getTokenStart();
      if (oldLength != newLength) {
        return false;
      }
      for (int i = 0; i < oldLength; i++) {
        if (oldBuffer.charAt(oldStart + i) != newBuffer.charAt(newStart + i)) {
          return false;
        }
      }
    }
    oldTextLexer.advance();
    newTextLexer.advance();
  }
}
Example 10: parseScope
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Override
public String parseScope(final Lexer lexer) {
  if (lexer.getTokenType() != ScopeTokenTypes.IDENTIFIER) return PatternPackageSet.SCOPE_ANY;
  String id = getTokenText(lexer);
  String scope = PatternPackageSet.SCOPE_ANY;
  if (PatternPackageSet.SCOPE_SOURCE.equals(id)) {
    scope = PatternPackageSet.SCOPE_SOURCE;
  } else if (PatternPackageSet.SCOPE_TEST.equals(id)) {
    scope = PatternPackageSet.SCOPE_TEST;
  } else if (PatternPackageSet.SCOPE_PROBLEM.equals(id)) {
    scope = PatternPackageSet.SCOPE_PROBLEM;
  } else if (PatternPackageSet.SCOPE_LIBRARY.equals(id)) {
    scope = PatternPackageSet.SCOPE_LIBRARY;
  } else if (!id.trim().isEmpty()) {
    scope = null;
  }
  final CharSequence buf = lexer.getBufferSequence();
  int end = lexer.getTokenEnd();
  int bufferEnd = lexer.getBufferEnd();
  if (scope == PatternPackageSet.SCOPE_ANY || end >= bufferEnd || buf.charAt(end) != ':' && buf.charAt(end) != '[') {
    return PatternPackageSet.SCOPE_ANY;
  }
  if (scope != null) {
    lexer.advance();
  }
  return scope;
}
Example 11: replaceImportAliases
import com.intellij.lexer.Lexer; // import the package/class the method depends on
private String replaceImportAliases(String type) {
  Map<String, String> lookup = myAliasCache.getValue();
  if (lookup == null || lookup.isEmpty()) {
    return type;
  }
  Lexer lexer = getJavaLexer();
  lexer.start(type);
  boolean checkNext = true;
  StringBuilder out = new StringBuilder();
  IElementType tokenType = lexer.getTokenType();
  while (tokenType != null) {
    if (checkNext && tokenType == JavaTokenType.IDENTIFIER) {
      // this might be something we want to replace
      String tokenText = lexer.getTokenText();
      String replacement = lookup.get(tokenText);
      if (replacement != null) {
        out.append(replacement);
      } else {
        out.append(tokenText);
      }
    } else {
      out.append(lexer.getTokenText());
    }
    if (tokenType != TokenType.WHITE_SPACE) { // ignore spaces
      if (tokenType == JavaTokenType.LT || tokenType == JavaTokenType.COMMA) {
        checkNext = true;
      } else {
        checkNext = false;
      }
    }
    lexer.advance();
    tokenType = lexer.getTokenType();
  }
  return out.toString();
}
Example 12: scanContent
import com.intellij.lexer.Lexer; // import the package/class the method depends on
public static ScanContent scanContent(FileContent content, IdAndToDoScannerBasedOnFilterLexer indexer) {
  ScanContent data = content.getUserData(scanContentKey);
  if (data != null) {
    content.putUserData(scanContentKey, null);
    return data;
  }
  final boolean needTodo = content.getFile().isInLocalFileSystem(); // same as TodoIndex.getFilter().isAcceptable
  final boolean needIdIndex = IdTableBuilding.getFileTypeIndexer(content.getFileType()) instanceof LexerBasedIdIndexer;
  final IdDataConsumer consumer = needIdIndex ? new IdDataConsumer() : null;
  final OccurrenceConsumer todoOccurrenceConsumer = new OccurrenceConsumer(consumer, needTodo);
  final Lexer filterLexer = indexer.createLexer(todoOccurrenceConsumer);
  filterLexer.start(content.getContentAsText());
  // the filter lexer records occurrences as a side effect, so simply drive it to the end of the content
  while (filterLexer.getTokenType() != null) filterLexer.advance();
  Map<TodoIndexEntry, Integer> todoMap = null;
  if (needTodo) {
    for (IndexPattern indexPattern : IndexPatternUtil.getIndexPatterns()) {
      final int count = todoOccurrenceConsumer.getOccurrenceCount(indexPattern);
      if (count > 0) {
        if (todoMap == null) todoMap = new THashMap<TodoIndexEntry, Integer>();
        todoMap.put(new TodoIndexEntry(indexPattern.getPatternString(), indexPattern.isCaseSensitive()), count);
      }
    }
  }
  data = new ScanContent(
    consumer != null ? consumer.getResult() : Collections.<IdIndexEntry, Integer>emptyMap(),
    todoMap != null ? todoMap : Collections.<TodoIndexEntry, Integer>emptyMap()
  );
  if (needIdIndex && needTodo) content.putUserData(scanContentKey, data);
  return data;
}
Example 13: isIdentifier
import com.intellij.lexer.Lexer; // import the package/class the method depends on
public static boolean isIdentifier(@Nullable String text) {
  if (text == null) return false;
  Lexer lexer = new GroovyLexer();
  lexer.start(text);
  if (lexer.getTokenType() != GroovyTokenTypes.mIDENT) return false;
  lexer.advance();
  // a valid identifier lexes to exactly one mIDENT token with nothing after it
  return lexer.getTokenType() == null;
}
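For a sense of what the trailing getTokenType() == null check buys, here is a hypothetical caller's view; the inputs are made up and the expected results assume the GroovyLexer token types used above.

// Sketch only, not part of the original example:
// isIdentifier("foo")     -> true:  one mIDENT token, then the lexer is exhausted
// isIdentifier("foo bar") -> false: more tokens remain after the first identifier
// isIdentifier("1abc")    -> false: the first token is not mIDENT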
Example 14: isCommentComplete
import com.intellij.lexer.Lexer; // import the package/class the method depends on
public static boolean isCommentComplete(PsiComment comment, CodeDocumentationAwareCommenter commenter, Editor editor) {
  for (CommentCompleteHandler handler : Extensions.getExtensions(CommentCompleteHandler.EP_NAME)) {
    if (handler.isApplicable(comment, commenter)) {
      return handler.isCommentComplete(comment, commenter, editor);
    }
  }
  String commentText = comment.getText();
  final boolean docComment = isDocComment(comment, commenter);
  final String expectedCommentEnd = docComment ? commenter.getDocumentationCommentSuffix() : commenter.getBlockCommentSuffix();
  if (!commentText.endsWith(expectedCommentEnd)) return false;
  final PsiFile containingFile = comment.getContainingFile();
  final Language language = containingFile.getLanguage();
  ParserDefinition parserDefinition = LanguageParserDefinitions.INSTANCE.forLanguage(language);
  if (parserDefinition == null) {
    return true;
  }
  Lexer lexer = parserDefinition.createLexer(containingFile.getProject());
  final String commentPrefix = docComment ? commenter.getDocumentationCommentPrefix() : commenter.getBlockCommentPrefix();
  lexer.start(commentText, commentPrefix == null ? 0 : commentPrefix.length(), commentText.length());
  QuoteHandler fileTypeHandler = TypedHandler.getQuoteHandler(containingFile, editor);
  JavaLikeQuoteHandler javaLikeQuoteHandler = fileTypeHandler instanceof JavaLikeQuoteHandler ?
                                              (JavaLikeQuoteHandler)fileTypeHandler : null;
  while (true) {
    IElementType tokenType = lexer.getTokenType();
    if (tokenType == null) {
      return false;
    }
    if (javaLikeQuoteHandler != null &&
        javaLikeQuoteHandler.getStringTokenTypes() != null &&
        javaLikeQuoteHandler.getStringTokenTypes().contains(tokenType)) {
      String text = commentText.substring(lexer.getTokenStart(), lexer.getTokenEnd());
      int endOffset = comment.getTextRange().getEndOffset();
      if (text.endsWith(expectedCommentEnd) &&
          endOffset < containingFile.getTextLength() &&
          containingFile.getText().charAt(endOffset) == '\n') {
        return true;
      }
    }
    if (tokenType == commenter.getDocumentationCommentTokenType() || tokenType == commenter.getBlockCommentTokenType()) {
      return false;
    }
    if (tokenType == commenter.getLineCommentTokenType() && lexer.getTokenText().contains(commentPrefix)) {
      return false;
    }
    if (lexer.getTokenEnd() == commentText.length()) {
      if (tokenType == commenter.getLineCommentTokenType()) {
        String prefix = commenter.getLineCommentPrefix();
        lexer.start(commentText, lexer.getTokenStart() + (prefix == null ? 0 : prefix.length()), commentText.length());
        lexer.advance();
        continue;
      }
      else if (isInvalidPsi(comment)) {
        return false;
      }
      return true;
    }
    lexer.advance();
  }
}
Example 15: getInjectionRanges
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@NotNull
private synchronized TextRange[] getInjectionRanges(final XmlAttribute attribute, XsltChecker.LanguageLevel languageLevel) {
  final TextRange[] cachedFiles = getCachedRanges(attribute);
  if (cachedFiles != null) {
    return cachedFiles;
  }
  final String value = attribute.getDisplayValue();
  if (value == null) return EMPTY_ARRAY;
  final TextRange[] ranges;
  if (XsltSupport.mayBeAVT(attribute)) {
    final List<TextRange> avtRanges = new SmartList<TextRange>();
    int i;
    int j = 0;
    Lexer lexer = null;
    while ((i = XsltSupport.getAVTOffset(value, j)) != -1) {
      if (lexer == null) {
        lexer = LanguageParserDefinitions.INSTANCE.forLanguage(languageLevel.getXPathVersion().getLanguage())
          .createLexer(attribute.getProject());
      }
      // "A right curly brace inside a Literal in an expression is not recognized as terminating the expression."
      lexer.start(value, i, value.length());
      j = -1;
      while (lexer.getTokenType() != null) {
        if (lexer.getTokenType() == XPathTokenTypes.RBRACE) {
          j = lexer.getTokenStart();
          break;
        }
        lexer.advance();
      }
      if (j != -1) {
        avtRanges.add(AVTRange.create(attribute, i, j + 1, j > i + 1));
      } else {
        // missing '}' error will be flagged by xpath parser
        avtRanges.add(AVTRange.create(attribute, i, value.length(), false));
        break;
      }
    }
    if (avtRanges.size() > 0) {
      ranges = avtRanges.toArray(new TextRange[avtRanges.size()]);
    } else {
      ranges = EMPTY_ARRAY;
    }
  } else {
    ranges = new TextRange[]{ attribute.getValueTextRange() };
  }
  attribute.putUserData(CACHED_FILES, Pair.create(attribute.getValue(), ranges));
  return ranges;
}