本文整理汇总了Java中com.intellij.lexer.Lexer.getTokenType方法的典型用法代码示例。如果您正为以下问题所困扰:Java Lexer.getTokenType方法的具体用法?Java Lexer.getTokenType怎么用?Java Lexer.getTokenType使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.intellij.lexer.Lexer的用法示例。
在下文中一共展示了Lexer.getTokenType方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getMergeFunction
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
@Override
public MergeFunction getMergeFunction() {
  // Collapse a run of OTHER / whitespace tokens into one token; the run
  // merges to OTHER if any token in it (including the first) is OTHER.
  return (final IElementType type, final Lexer originalLexer) -> {
    if (type != SoyTypes.OTHER && type != TokenType.WHITE_SPACE) {
      return type;
    }
    IElementType merged = type;
    for (IElementType next = originalLexer.getTokenType();
         next == SoyTypes.OTHER || next == TokenType.WHITE_SPACE;
         next = originalLexer.getTokenType()) {
      if (next == SoyTypes.OTHER) {
        merged = SoyTypes.OTHER;
      }
      originalLexer.advance();
    }
    return merged;
  };
}
示例2: getMergeFunction
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
@Override
public MergeFunction getMergeFunction() {
  // Swallow every subsequent token belonging to the same merge set and
  // report the whole run as a single LONGSTRING or LONGCOMMENT token.
  return (final IElementType type, final Lexer originalLexer) -> {
    if (!allMergables.contains(type)) {
      return type;
    }
    final TokenSet run = tokensToMerge.contains(type) ? tokensToMerge : tokensToMerge2;
    IElementType next = originalLexer.getTokenType();
    while (run.contains(next)) {
      originalLexer.advance();
      next = originalLexer.getTokenType();
    }
    return run == tokensToMerge ? LONGSTRING : LONGCOMMENT;
  };
}
示例3: addWordHonoringEscapeSequences
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
/**
 * Adds a selection range for the token under the cursor inside a string literal.
 * An escape-sequence token is selected whole; any other token contributes the
 * word around the cursor, clipped to the token's bounds.
 */
public static void addWordHonoringEscapeSequences(CharSequence editorText,
                                                  TextRange literalTextRange,
                                                  int cursorOffset,
                                                  Lexer lexer,
                                                  List<TextRange> result) {
  lexer.start(editorText, literalTextRange.getStartOffset(), literalTextRange.getEndOffset());
  while (lexer.getTokenType() != null) {
    final int tokenStart = lexer.getTokenStart();
    final int tokenEnd = lexer.getTokenEnd();
    if (cursorOffset < tokenStart || cursorOffset >= tokenEnd) {
      // Cursor is not inside this token; keep scanning.
      lexer.advance();
      continue;
    }
    if (StringEscapesTokenTypes.STRING_LITERAL_ESCAPES.contains(lexer.getTokenType())) {
      // Cursor sits on an escape sequence: select the escape as a unit.
      result.add(new TextRange(tokenStart, tokenEnd));
    }
    else {
      final TextRange word = getWordSelectionRange(editorText, cursorOffset, JAVA_IDENTIFIER_PART_CONDITION);
      if (word != null) {
        // Clip the word range so it never escapes the token boundaries.
        result.add(new TextRange(Math.max(word.getStartOffset(), tokenStart),
                                 Math.min(word.getEndOffset(), tokenEnd)));
      }
    }
    return; // only the token under the cursor matters
  }
}
示例4: getErrorsCount
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
@Override
public int getErrorsCount(final CharSequence seq, Language fileLanguage, final Project project) {
  // Track brace nesting over the whole block; the return value is the final
  // depth (0 = balanced). FATAL_ERROR means the text cannot even be treated
  // as a single brace-delimited block.
  final Lexer lexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  lexer.start(seq);
  if (lexer.getTokenType() != JavaTokenType.LBRACE) return IErrorCounterReparseableElementType.FATAL_ERROR;
  lexer.advance();
  int depth = 1;
  for (IElementType token = lexer.getTokenType(); token != null; lexer.advance(), token = lexer.getTokenType()) {
    // Depth returned to zero but tokens remain: the block closed early.
    if (depth == 0) return IErrorCounterReparseableElementType.FATAL_ERROR;
    if (token == JavaTokenType.LBRACE) {
      depth++;
    }
    else if (token == JavaTokenType.RBRACE) {
      depth--;
    }
  }
  return depth;
}
示例5: parseVariable
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
// Parses a template variable: either a bare name, or "name = <macro>".
// The special END marker with no trailing tokens yields an EmptyNode.
private static Expression parseVariable(Lexer lexer, String expression) {
  final String variableName = getString(lexer, expression);
  advance(lexer);
  if (lexer.getTokenType() == null) {
    return TemplateImpl.END.equals(variableName)
           ? new EmptyNode()
           : new VariableNode(variableName, null);
  }
  if (lexer.getTokenType() != MacroTokenType.EQ) {
    // No initializer follows the name.
    return new VariableNode(variableName, null);
  }
  advance(lexer);
  return new VariableNode(variableName, parseMacro(lexer, expression));
}
示例6: doTest
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
// Asserts that lexing `text` produces exactly the expected alternating
// (token type, token text) pairs in `expectedTokens`.
private static void doTest(String text, String[] expectedTokens, Lexer lexer) {
  lexer.start(text);
  int i = 0;
  for (; lexer.getTokenType() != null; lexer.advance()) {
    if (i >= expectedTokens.length) fail("Too many tokens");
    final String actualType = lexer.getTokenType().toString();
    final String actualText =
        lexer.getBufferSequence().subSequence(lexer.getTokenStart(), lexer.getTokenEnd()).toString();
    assertEquals(expectedTokens[i++], actualType);
    assertEquals(expectedTokens[i++], actualText);
  }
  if (i < expectedTokens.length) fail("Not enough tokens");
}
示例7: isMinified
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
/**
 * Heuristically decides whether the file content is minified, using common
 * (not language-specific) rules. Usable for css/less/scss/sass and js files.
 * Checks the head of the file first and, if the lexer did not consume
 * everything, re-checks a fixed-size window at the end of the file.
 *
 * @param fileContent target file content
 * @param parserDefinition parser definition of the target language
 * @param noWSRequireAfterTokenSet token types that do not require a following whitespace
 */
public static boolean isMinified(@NotNull CharSequence fileContent,
                                 @NotNull ParserDefinition parserDefinition,
                                 @NotNull TokenSet noWSRequireAfterTokenSet) {
  final Lexer lexer = parserDefinition.createLexer(null);
  lexer.start(fileContent);
  if (!isMinified(lexer, parserDefinition, noWSRequireAfterTokenSet)) {
    return false;
  }
  if (lexer.getTokenType() == null) {
    // The first pass consumed the whole file; nothing more to check.
    return true;
  }
  final int tailStart = fileContent.length() - COUNT_OF_CONSIDERING_CHARACTERS_FROM_END_OF_FILE;
  if (tailStart <= 0) {
    return true;
  }
  // Second pass over the tail of the file only.
  lexer.start(fileContent, tailStart, fileContent.length());
  return isMinified(lexer, parserDefinition, noWSRequireAfterTokenSet);
}
示例8: doLexerTest
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
// Asserts that lexing `text` produces exactly `expectedTokens`, comparing
// token text when checkTokenText is true and token type names otherwise.
// Also verifies that tokens are contiguous (each starts where the previous ended).
public static void doLexerTest(String text,
                               Lexer lexer,
                               boolean checkTokenText,
                               String... expectedTokens) {
  lexer.start(text);
  int index = 0;
  int expectedStart = 0;
  while (lexer.getTokenType() != null) {
    if (index >= expectedTokens.length) {
      // Collect every leftover token so the failure message is actionable.
      final StringBuilder rest = new StringBuilder("\"" + lexer.getTokenType().toString() + "\"");
      lexer.advance();
      while (lexer.getTokenType() != null) {
        rest.append(",");
        rest.append(" \"").append(checkTokenText ? lexer.getTokenText() : lexer.getTokenType().toString()).append("\"");
        lexer.advance();
      }
      fail("Too many tokens. Following tokens: " + rest.toString());
    }
    assertEquals("Token offset mismatch at position " + index, expectedStart, lexer.getTokenStart());
    final String actual = checkTokenText ? lexer.getTokenText() : lexer.getTokenType().toString();
    assertEquals("Token mismatch at position " + index, expectedTokens[index], actual);
    index++;
    expectedStart = lexer.getTokenEnd();
    lexer.advance();
  }
  if (index < expectedTokens.length) fail("Not enough tokens");
}
示例9: isParsable
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
@Override
public boolean isParsable(final CharSequence buffer, Language fileLanguage, final Project project) {
  // A reparseable doc comment must look like "/** ... */" and lex as
  // exactly one DOC_COMMENT token with nothing after it.
  if (!StringUtil.startsWith(buffer, "/**") || !StringUtil.endsWith(buffer, "*/")) return false;
  final Lexer lexer = JavaParserDefinition.createLexer(LanguageLevelProjectExtension.getInstance(project).getLanguageLevel());
  lexer.start(buffer);
  if (lexer.getTokenType() != DOC_COMMENT) return false;
  lexer.advance();
  return lexer.getTokenType() == null;
}
示例10: checkToken
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
// True iff the node's text re-lexes as a single token of the node's own type.
private static boolean checkToken(final ASTNode token1) {
  final Lexer lexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  lexer.start(token1.getText());
  if (lexer.getTokenType() != token1.getElementType()) return false;
  lexer.advance();
  // Anything left over means the text was more than one token.
  return lexer.getTokenType() == null;
}
示例11: parseAspectJPattern
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
/**
 * Parses an AspectJ-style package pattern (e.g. {@code com.foo.*}) from the
 * lexer's current position, consuming DOT, ASTERISK and IDENTIFIER tokens.
 *
 * @return the accumulated pattern text
 * @throws ParsingException via {@code error(...)} on two adjacent identifiers
 *         or an empty pattern
 */
private static String parseAspectJPattern(Lexer lexer) throws ParsingException {
  // StringBuilder instead of StringBuffer: the builder is method-local, so the
  // StringBuffer's per-call synchronization was pure overhead.
  final StringBuilder pattern = new StringBuilder();
  boolean wasIdentifier = false; // guards against two identifiers with no separator
  while (true) {
    if (lexer.getTokenType() == ScopeTokenTypes.DOT) {
      pattern.append('.');
      wasIdentifier = false;
    }
    else if (lexer.getTokenType() == ScopeTokenTypes.ASTERISK) {
      pattern.append('*');
      wasIdentifier = false;
    }
    else if (lexer.getTokenType() == ScopeTokenTypes.IDENTIFIER) {
      if (wasIdentifier) error(AnalysisScopeBundle.message("error.packageset.token.expectations", getTokenText(lexer)), lexer);
      wasIdentifier = true;
      pattern.append(getTokenText(lexer));
    }
    else {
      // First token outside the pattern alphabet ends the pattern.
      break;
    }
    lexer.advance();
  }
  if (pattern.length() == 0) {
    error(AnalysisScopeBundle.message("error.packageset.pattern.expectations"), lexer);
  }
  return pattern.toString();
}
示例12: readPackageName
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
/**
 * Reads a dot-separated package name (segments may also be {@code *}) from the
 * lexer's current position.
 *
 * @return the package name, or null when it is empty, ends with '.', or starts with '*'
 */
@Nullable
private static String readPackageName(final CharSequence text, final Lexer lexer) {
  final StringBuilder buffer = StringBuilderSpinAllocator.alloc();
  try {
    while (lexer.getTokenType() == JavaTokenType.IDENTIFIER || lexer.getTokenType() == JavaTokenType.ASTERISK) {
      buffer.append(text, lexer.getTokenStart(), lexer.getTokenEnd());
      advanceLexer(lexer);
      if (lexer.getTokenType() != JavaTokenType.DOT) {
        break;
      }
      buffer.append('.');
      advanceLexer(lexer);
    }
    final String packageName = buffer.toString();
    final boolean malformed = packageName.length() == 0
        || StringUtil.endsWithChar(packageName, '.')
        || StringUtil.startsWithChar(packageName, '*');
    return malformed ? null : packageName;
  }
  finally {
    // The spin allocator requires explicit release of the builder.
    StringBuilderSpinAllocator.dispose(buffer);
  }
}
示例13: getPackageName
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
/**
 * Extracts the declared package name from Java source text.
 *
 * @return the package name; "" when the file starts with another valid Java
 *         top-level token instead of {@code package}; null when the text is
 *         neither
 */
@Nullable
public static String getPackageName(CharSequence text) {
  final Lexer lexer = JavaParserDefinition.createLexer(LanguageLevel.JDK_1_3);
  lexer.start(text);
  skipWhiteSpaceAndComments(lexer);
  final IElementType firstToken = lexer.getTokenType();
  if (firstToken != JavaTokenType.PACKAGE_KEYWORD) {
    // No package statement: default package if the file still looks like Java.
    return JAVA_FILE_FIRST_TOKEN_SET.contains(firstToken) ? "" : null;
  }
  lexer.advance();
  skipWhiteSpaceAndComments(lexer);
  final StringBuilder buffer = StringBuilderSpinAllocator.alloc();
  try {
    // Accumulate identifier(.identifier)* after the package keyword.
    while (lexer.getTokenType() == JavaTokenType.IDENTIFIER) {
      buffer.append(text, lexer.getTokenStart(), lexer.getTokenEnd());
      lexer.advance();
      skipWhiteSpaceAndComments(lexer);
      if (lexer.getTokenType() != JavaTokenType.DOT) break;
      buffer.append('.');
      lexer.advance();
      skipWhiteSpaceAndComments(lexer);
    }
    final String packageName = buffer.toString();
    if (packageName.length() == 0 || StringUtil.endsWithChar(packageName, '.')) return null;
    return packageName;
  }
  finally {
    // The spin allocator requires explicit release of the builder.
    StringBuilderSpinAllocator.dispose(buffer);
  }
}
示例14: canStickTokensTogetherByLexer
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
// Re-lexes the concatenation of two nodes' texts: whitespace between them is
// required (MUST) unless both tokens survive the joint lexing intact (MAY).
public static ParserDefinition.SpaceRequirements canStickTokensTogetherByLexer(ASTNode left, ASTNode right, Lexer lexer) {
  final String joined = left.getText() + right.getText();
  lexer.start(joined, 0, joined.length());
  final boolean leftIntact = lexer.getTokenType() == left.getElementType()
      && lexer.getTokenEnd() == left.getTextLength();
  if (!leftIntact) return ParserDefinition.SpaceRequirements.MUST;
  lexer.advance();
  final boolean rightIntact = lexer.getTokenEnd() == joined.length()
      && lexer.getTokenType() == right.getElementType();
  return rightIntact ? ParserDefinition.SpaceRequirements.MAY : ParserDefinition.SpaceRequirements.MUST;
}
示例15: scanContent
import com.intellij.lexer.Lexer; //导入方法依赖的package包/类
// Scans file content for id-index and todo-index data in a single lexer pass.
// Results may be cached on the FileContent so a second indexer over the same
// content (id + todo) reuses the first pass; the cache is consumed on read.
public static ScanContent scanContent(FileContent content, IdAndToDoScannerBasedOnFilterLexer indexer) {
  // One-shot cache: take the stored result and clear it.
  ScanContent data = content.getUserData(scanContentKey);
  if (data != null) {
    content.putUserData(scanContentKey, null);
    return data;
  }
  // same as TodoIndex.getFilter().isAcceptable
  final boolean needTodo = content.getFile().isInLocalFileSystem();
  final boolean needIdIndex = IdTableBuilding.getFileTypeIndexer(content.getFileType()) instanceof LexerBasedIdIndexer;
  final IdDataConsumer consumer = needIdIndex? new IdDataConsumer():null;
  final OccurrenceConsumer todoOccurrenceConsumer = new OccurrenceConsumer(consumer, needTodo);
  final Lexer filterLexer = indexer.createLexer(todoOccurrenceConsumer);
  filterLexer.start(content.getContentAsText());
  // Drive the lexer to EOF; the consumers are populated as a side effect of advancing.
  while (filterLexer.getTokenType() != null) filterLexer.advance();
  Map<TodoIndexEntry,Integer> todoMap = null;
  if (needTodo) {
    // Collect per-pattern occurrence counts, allocating the map lazily.
    for (IndexPattern indexPattern : IndexPatternUtil.getIndexPatterns()) {
      final int count = todoOccurrenceConsumer.getOccurrenceCount(indexPattern);
      if (count > 0) {
        if (todoMap == null) todoMap = new THashMap<TodoIndexEntry, Integer>();
        todoMap.put(new TodoIndexEntry(indexPattern.getPatternString(), indexPattern.isCaseSensitive()), count);
      }
    }
  }
  data = new ScanContent(
    consumer != null? consumer.getResult():Collections.<IdIndexEntry, Integer>emptyMap(),
    todoMap != null ? todoMap: Collections.<TodoIndexEntry,Integer>emptyMap()
  );
  // Only cache when both indexers will want this content.
  if (needIdIndex && needTodo) content.putUserData(scanContentKey, data);
  return data;
}