This article collects typical usage examples of the Java method com.intellij.lexer.Lexer.start. If you are wondering what Lexer.start does, how to use it, or where to find sample code, the curated examples below may help. You can also explore further usage of the containing class, com.intellij.lexer.Lexer.
The following 15 code examples of Lexer.start are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
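Before the individual samples, here is a minimal sketch of the pattern they all share: start the lexer on a character sequence, then loop over getTokenType()/advance() until the token type becomes null. The lexer factory JavaParserDefinition.createLexer is taken from the examples below; the helper method name and the input string are only illustrative.

import com.intellij.lang.java.JavaParserDefinition;
import com.intellij.lexer.Lexer;
import com.intellij.pom.java.LanguageLevel;
import com.intellij.psi.tree.IElementType;

// Minimal sketch: tokenize a snippet and print each token type with its offsets.
static void dumpTokens(CharSequence text) {
  Lexer lexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  lexer.start(text);
  IElementType tokenType;
  while ((tokenType = lexer.getTokenType()) != null) {
    System.out.println(tokenType + " [" + lexer.getTokenStart() + ", " + lexer.getTokenEnd() + ")");
    lexer.advance();
  }
}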
Example 1: canStickJavaTokens
import com.intellij.lexer.Lexer; // import the package/class the method depends on
private static boolean canStickJavaTokens(ASTNode token1, ASTNode token2) {
  IElementType type1 = token1.getElementType();
  IElementType type2 = token2.getElementType();
  Pair<IElementType, IElementType> pair = Pair.create(type1, type2);
  Boolean res = myCanStickJavaTokensMatrix.get(pair);
  if (res == null) {
    if (!checkToken(token1) || !checkToken(token2)) return true;
    String text = token1.getText() + token2.getText();
    Lexer lexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
    lexer.start(text);
    boolean canMerge = lexer.getTokenType() == type1;
    lexer.advance();
    canMerge &= lexer.getTokenType() == type2;
    res = canMerge;
    myCanStickJavaTokensMatrix.put(pair, res);
  }
  return res.booleanValue();
}
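Note that the result for each (type1, type2) pair is cached in myCanStickJavaTokensMatrix, so the concatenated text is re-lexed only once per token-type combination.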
Example 2: getErrorsCount
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Override
public int getErrorsCount(final CharSequence seq, Language fileLanguage, final Project project) {
  final Lexer lexer = new GroovyLexer();
  lexer.start(seq);
  if (lexer.getTokenType() != GroovyTokenTypes.mLCURLY) return FATAL_ERROR;
  lexer.advance();
  int balance = 1;
  while (true) {
    IElementType type = lexer.getTokenType();
    if (type == null) break;
    if (balance == 0) return FATAL_ERROR;
    if (type == GroovyTokenTypes.mLCURLY) {
      balance++;
    }
    else if (type == GroovyTokenTypes.mRCURLY) {
      balance--;
    }
    lexer.advance();
  }
  return balance;
}
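Here the GroovyLexer is used only to track curly-brace balance: the method returns FATAL_ERROR if the fragment does not start with '{' or if tokens appear after the balance has already reached zero, and otherwise returns the number of braces still open.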
Example 3: printTokens
import com.intellij.lexer.Lexer; // import the package/class the method depends on
public static String printTokens(CharSequence text, int start, Lexer lexer) {
  lexer.start(text, start, text.length());
  String result = "";
  while (true) {
    IElementType tokenType = lexer.getTokenType();
    if (tokenType == null) {
      break;
    }
    String tokenText = getTokenText(lexer);
    String tokenTypeName = tokenType.toString();
    String line = tokenTypeName + " ('" + tokenText + "')\n";
    result += line;
    lexer.advance();
  }
  return result;
}
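A possible call, reusing the Java lexer factory from the other examples (the input string is only illustrative):

String dump = printTokens("int x = 1;", 0, JavaParserDefinition.createLexer(LanguageLevel.HIGHEST));
System.out.print(dump); // prints one "TOKEN_TYPE ('token text')" line per token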
Example 4: isMinified
import com.intellij.lexer.Lexer; // import the package/class the method depends on
/**
 * Finds out whether the file is minified, using common (not language-specific) heuristics.
 * Can be used to check css/less/scss/sass and js files.
 *
 * @param fileContent              target file content
 * @param parserDefinition         parser definition of the target language
 * @param noWSRequireAfterTokenSet TokenSet of token types that do not require whitespace after them
 */
public static boolean isMinified(@NotNull CharSequence fileContent,
                                 @NotNull ParserDefinition parserDefinition,
                                 @NotNull TokenSet noWSRequireAfterTokenSet) {
  Lexer lexer = parserDefinition.createLexer(null);
  lexer.start(fileContent);
  if (!isMinified(lexer, parserDefinition, noWSRequireAfterTokenSet)) {
    return false;
  }
  else if (lexer.getTokenType() == null) {
    // the whole file has already been considered
    return true;
  }
  int startOffset = fileContent.length() - COUNT_OF_CONSIDERING_CHARACTERS_FROM_END_OF_FILE;
  if (startOffset <= 0) {
    return true;
  }
  lexer.start(fileContent, startOffset, fileContent.length());
  return isMinified(lexer, parserDefinition, noWSRequireAfterTokenSet);
}
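Two start() overloads are at work here: the first call lexes the file from the beginning, and if that pass stops before consuming the whole content (getTokenType() is still non-null), the three-argument call re-starts the same lexer on just the last COUNT_OF_CONSIDERING_CHARACTERS_FROM_END_OF_FILE characters.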
Example 5: addWordHonoringEscapeSequences
import com.intellij.lexer.Lexer; // import the package/class the method depends on
public static void addWordHonoringEscapeSequences(CharSequence editorText,
                                                  TextRange literalTextRange,
                                                  int cursorOffset,
                                                  Lexer lexer,
                                                  List<TextRange> result) {
  lexer.start(editorText, literalTextRange.getStartOffset(), literalTextRange.getEndOffset());
  while (lexer.getTokenType() != null) {
    if (lexer.getTokenStart() <= cursorOffset && cursorOffset < lexer.getTokenEnd()) {
      if (StringEscapesTokenTypes.STRING_LITERAL_ESCAPES.contains(lexer.getTokenType())) {
        result.add(new TextRange(lexer.getTokenStart(), lexer.getTokenEnd()));
      }
      else {
        TextRange word = getWordSelectionRange(editorText, cursorOffset, JAVA_IDENTIFIER_PART_CONDITION);
        if (word != null) {
          result.add(new TextRange(Math.max(word.getStartOffset(), lexer.getTokenStart()),
                                   Math.min(word.getEndOffset(), lexer.getTokenEnd())));
        }
      }
      break;
    }
    lexer.advance();
  }
}
Example 6: getPackageName
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Nullable
public static String getPackageName(CharSequence text) {
  Lexer lexer = new GroovyLexer();
  lexer.start(text);
  skipWhitespacesAndComments(lexer);
  final IElementType firstToken = lexer.getTokenType();
  if (firstToken != GroovyTokenTypes.kPACKAGE) {
    return "";
  }
  lexer.advance();
  skipWhitespacesAndComments(lexer);
  final StringBuilder buffer = StringBuilderSpinAllocator.alloc();
  try {
    while (true) {
      if (lexer.getTokenType() != GroovyTokenTypes.mIDENT) break;
      buffer.append(text, lexer.getTokenStart(), lexer.getTokenEnd());
      lexer.advance();
      skipWhitespacesAndComments(lexer);
      if (lexer.getTokenType() != GroovyTokenTypes.mDOT) break;
      buffer.append('.');
      lexer.advance();
      skipWhitespacesAndComments(lexer);
    }
    String packageName = buffer.toString();
    if (packageName.isEmpty() || StringUtil.endsWithChar(packageName, '.')) return null;
    return packageName;
  }
  finally {
    StringBuilderSpinAllocator.dispose(buffer);
  }
}
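A hypothetical call (the Groovy source string is only illustrative):

String name = getPackageName("package com.example.app\n\nclass Foo {}");
// expected to yield "com.example.app"; a file without a package statement yields ""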
Example 7: testKeywords
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Test
public void testKeywords() {
  Lexer l = createLexer();
  l.start("[true,off,TruE,\"true\",12,12.3,null]");
  assertAndAdvance(l, YamlTokenTypes.YAML_FlowSequenceStart, 0, 1, "[");
  assertAndAdvance(l, YamlTokenTypes.YAML_Tag_BOOL, 1, 5, "true");
  assertAndAdvance(l, YamlTokenTypes.YAML_FlowEntry, 5, 6, ",");
  assertAndAdvance(l, YamlTokenTypes.YAML_Tag_BOOL, 6, 9, "off");
  assertAndAdvance(l, YamlTokenTypes.YAML_FlowEntry, 9, 10, ",");
  assertAndAdvance(l, YamlTokenTypes.YAML_Scalar, 10, 14, "TruE");
  assertAndAdvance(l, YamlTokenTypes.YAML_FlowEntry, 14, 15, ",");
  assertAndAdvance(l, YamlTokenTypes.YAML_Scalar, 15, 21, "\"true\"");
  assertAndAdvance(l, YamlTokenTypes.YAML_FlowEntry);
  assertEquals(YamlTokenTypes.YAML_Tag_INT, l.getTokenType());
  assertEquals("12", l.getTokenText());
  l.advance();
  assertAndAdvance(l, YamlTokenTypes.YAML_FlowEntry);
  assertEquals(YamlTokenTypes.YAML_Tag_FLOAT, l.getTokenType());
  assertEquals("12.3", l.getTokenText());
  l.advance();
  assertAndAdvance(l, YamlTokenTypes.YAML_FlowEntry);
  assertEquals(YamlTokenTypes.YAML_Tag_NULL, l.getTokenType());
  assertEquals("null", l.getTokenText());
  l.advance();
  assertEquals(YamlTokenTypes.YAML_FlowSequenceEnd, l.getTokenType());
  assertEquals("]", l.getTokenText());
  l.advance();
  //assertEquals(null, l.getTokenType());
}
Example 8: doLex
import com.intellij.lexer.Lexer; // import the package/class the method depends on
private static void doLex(Lexer lexer, final String text) {
  lexer.start(text);
  long time = System.currentTimeMillis();
  int count = 0;
  while (lexer.getTokenType() != null) {
    lexer.advance();
    count++;
  }
  System.out.println("Plain lexing took " + (System.currentTimeMillis() - time) + "ms, lexeme count: " + count);
}
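A possible benchmark call, reusing the Java lexer factory from the other examples; loadFileText() is a hypothetical helper that returns the source to benchmark:

doLex(JavaParserDefinition.createLexer(LanguageLevel.HIGHEST), loadFileText()); // loadFileText() is hypothetical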
Example 9: replaceImportAliases
import com.intellij.lexer.Lexer; // import the package/class the method depends on
private String replaceImportAliases(String type) {
  Map<String, String> lookup = myAliasCache.getValue();
  if (lookup == null || lookup.isEmpty()) {
    return type;
  }
  Lexer lexer = getJavaLexer();
  lexer.start(type);
  boolean checkNext = true;
  StringBuilder out = new StringBuilder();
  IElementType tokenType = lexer.getTokenType();
  while (tokenType != null) {
    if (checkNext && tokenType == JavaTokenType.IDENTIFIER) {
      // this might be something we want to replace
      String tokenText = lexer.getTokenText();
      String replacement = lookup.get(tokenText);
      if (replacement != null) {
        out.append(replacement);
      } else {
        out.append(tokenText);
      }
    } else {
      out.append(lexer.getTokenText());
    }
    if (tokenType != TokenType.WHITE_SPACE) { // ignore spaces
      if (tokenType == JavaTokenType.LT || tokenType == JavaTokenType.COMMA) {
        checkNext = true;
      } else {
        checkNext = false;
      }
    }
    lexer.advance();
    tokenType = lexer.getTokenType();
  }
  return out.toString();
}
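The checkNext flag ensures that only identifiers at the start of the string or directly after '<' or ',' (ignoring whitespace) are looked up in the alias map; all other tokens are copied through unchanged.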
Example 10: map
import com.intellij.lexer.Lexer; // import the package/class the method depends on
@Override
@NotNull
public Map<String, Void> map(@NotNull FileContent inputData) {
  CharSequence input = inputData.getContentAsText();
  Language language = ((LanguageFileType)inputData.getFileType()).getLanguage();
  if (language == HTMLLanguage.INSTANCE || language == XHTMLLanguage.INSTANCE) {
    final Lexer lexer = (language == HTMLLanguage.INSTANCE
                         ? new HtmlHighlightingLexer(FileTypeManager.getInstance().getStdFileType("CSS"))
                         : new XHtmlHighlightingLexer());
    lexer.start(input);
    Map<String, Void> result = new HashMap<String, Void>();
    IElementType tokenType = lexer.getTokenType();
    while (tokenType != null) {
      if (tokenType == XmlTokenType.XML_NAME) {
        String xmlName = input.subSequence(lexer.getTokenStart(), lexer.getTokenEnd()).toString();
        if (HtmlUtil.isCustomHtml5Attribute(xmlName)) {
          result.put(xmlName, null);
        }
      }
      else if (tokenType == XmlTokenType.XML_DOCTYPE_PUBLIC || tokenType == XmlTokenType.XML_DOCTYPE_SYSTEM) {
        // this is not an HTML5 context
        break;
      }
      lexer.advance();
      tokenType = lexer.getTokenType();
    }
    return result;
  }
  return Collections.emptyMap();
}
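This indexer walks a highlighting lexer over HTML/XHTML content and collects custom HTML5 attribute names, bailing out as soon as a PUBLIC or SYSTEM doctype token shows that the file is not an HTML5 document.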
Example 11: checkToken
import com.intellij.lexer.Lexer; // import the package/class the method depends on
private static boolean checkToken(final ASTNode token1) {
  Lexer lexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  final String text = token1.getText();
  lexer.start(text);
  if (lexer.getTokenType() != token1.getElementType()) return false;
  lexer.advance();
  return lexer.getTokenType() == null;
}
Example 12: doLexerTest
import com.intellij.lexer.Lexer; // import the package/class the method depends on
public static void doLexerTest(String text,
                               Lexer lexer,
                               boolean checkTokenText,
                               String... expectedTokens) {
  lexer.start(text);
  int idx = 0;
  int tokenPos = 0;
  while (lexer.getTokenType() != null) {
    if (idx >= expectedTokens.length) {
      StringBuilder remainingTokens = new StringBuilder("\"" + lexer.getTokenType().toString() + "\"");
      lexer.advance();
      while (lexer.getTokenType() != null) {
        remainingTokens.append(",");
        remainingTokens.append(" \"").append(checkTokenText ? lexer.getTokenText() : lexer.getTokenType().toString()).append("\"");
        lexer.advance();
      }
      fail("Too many tokens. Following tokens: " + remainingTokens.toString());
    }
    assertEquals("Token offset mismatch at position " + idx, tokenPos, lexer.getTokenStart());
    String tokenName = checkTokenText ? lexer.getTokenText() : lexer.getTokenType().toString();
    assertEquals("Token mismatch at position " + idx, expectedTokens[idx], tokenName);
    idx++;
    tokenPos = lexer.getTokenEnd();
    lexer.advance();
  }
  if (idx < expectedTokens.length) fail("Not enough tokens");
}
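A hypothetical invocation, assuming a Java lexer that (like IntelliJ lexers generally) also reports whitespace tokens; the input and the expected token texts are only illustrative:

doLexerTest("int x;", JavaParserDefinition.createLexer(LanguageLevel.HIGHEST), true,
            "int", " ", "x", ";");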
Example 13: lexemsEqual
import com.intellij.lexer.Lexer; // import the package/class the method depends on
private static boolean lexemsEqual(final PsiClass classToBind, final PsiClass newClass) {
  Lexer oldTextLexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  Lexer newTextLexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  String oldBuffer = classToBind.getText();
  String newBuffer = newClass.getText();
  oldTextLexer.start(oldBuffer);
  newTextLexer.start(newBuffer);
  while (true) {
    IElementType oldLexem = oldTextLexer.getTokenType();
    IElementType newLexem = newTextLexer.getTokenType();
    if (oldLexem == null || newLexem == null) {
      // must terminate at the same time
      return oldLexem == null && newLexem == null;
    }
    if (oldLexem != newLexem) {
      return false;
    }
    if (oldLexem != TokenType.WHITE_SPACE && oldLexem != JavaDocElementType.DOC_COMMENT) {
      int oldStart = oldTextLexer.getTokenStart();
      int newStart = newTextLexer.getTokenStart();
      int oldLength = oldTextLexer.getTokenEnd() - oldTextLexer.getTokenStart();
      int newLength = newTextLexer.getTokenEnd() - newTextLexer.getTokenStart();
      if (oldLength != newLength) {
        return false;
      }
      for (int i = 0; i < oldLength; i++) {
        if (oldBuffer.charAt(oldStart + i) != newBuffer.charAt(newStart + i)) {
          return false;
        }
      }
    }
    oldTextLexer.advance();
    newTextLexer.advance();
  }
}
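The two classes are considered equal when their token streams match pairwise; the contents of whitespace and Javadoc comment tokens are deliberately skipped, so formatting and documentation changes alone do not count as a difference.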
Example 14: checkZeroState
import com.intellij.lexer.Lexer; // import the package/class the method depends on
protected void checkZeroState(String text, TokenSet tokenTypes) {
  Lexer lexer = createLexer();
  lexer.start(text);
  while (true) {
    IElementType type = lexer.getTokenType();
    if (type == null) {
      break;
    }
    if (tokenTypes.contains(type) && lexer.getState() != 0) {
      fail("Non-zero lexer state on token \"" + lexer.getTokenText() + "\" (" + type + ") at " + lexer.getTokenStart());
    }
    lexer.advance();
  }
}
Example 15: printWithHighlighting
import com.intellij.lexer.Lexer; // import the package/class the method depends on
public static void printWithHighlighting(@NotNull ConsoleView console, @NotNull String text, @NotNull SyntaxHighlighter highlighter) {
  Lexer lexer = highlighter.getHighlightingLexer();
  lexer.start(text, 0, text.length(), 0);
  IElementType tokenType;
  while ((tokenType = lexer.getTokenType()) != null) {
    console.print(lexer.getTokenText(), getContentTypeForToken(tokenType, highlighter));
    lexer.advance();
  }
}
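This last example uses the four-argument start(text, startOffset, endOffset, initialState) overload, which additionally lets the caller pass an explicit initial lexer state (0 here), in contrast to the one- and three-argument overloads used in the examples above.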