This article collects typical usage examples of the Java method com.intellij.lexer.Lexer.getTokenStart. If you are wondering what Lexer.getTokenStart does, how to call it, or what real-world usages look like, the hand-picked examples below should help. You can also browse further usage examples of the containing class, com.intellij.lexer.Lexer.
The following presents 8 code examples of Lexer.getTokenStart, sorted by popularity by default.
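Before the examples, here is a minimal sketch of the pattern they all share: start the lexer over a buffer, then walk the tokens and read each token's offsets with getTokenStart()/getTokenEnd(). The class and method names (TokenOffsetPrinter, printTokenOffsets) and the idea of the caller supplying the Lexer (for example, one created by a ParserDefinition) are assumptions for illustration; any com.intellij.lexer.Lexer behaves the same way.

import com.intellij.lexer.Lexer;
import com.intellij.psi.tree.IElementType;

public final class TokenOffsetPrinter {
  // Prints each token's type and [start, end) offsets within the given text.
  // The concrete Lexer is assumed to be supplied by the caller (e.g. created by a ParserDefinition).
  public static void printTokenOffsets(Lexer lexer, CharSequence text) {
    lexer.start(text); // lex the whole buffer from offset 0 with the initial state
    for (IElementType type = lexer.getTokenType(); type != null; type = lexer.getTokenType()) {
      int start = lexer.getTokenStart(); // offset of the token's first character in the buffer
      int end = lexer.getTokenEnd();     // offset just past the token's last character
      System.out.println(type + " [" + start + ", " + end + "): " + text.subSequence(start, end));
      lexer.advance();
    }
  }
}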
Example 1: checkCorrectRestart
import com.intellij.lexer.Lexer; // import the package/class this method depends on
protected void checkCorrectRestart(String text) {
  Lexer mainLexer = createLexer();
  String allTokens = printTokens(text, 0, mainLexer);
  Lexer auxLexer = createLexer();
  auxLexer.start(text);
  while (true) {
    IElementType type = auxLexer.getTokenType();
    if (type == null) {
      break;
    }
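    // a state of 0 means the lexer should be restartable from this token's start offset with identical results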
    if (auxLexer.getState() == 0) {
      int tokenStart = auxLexer.getTokenStart();
      String subTokens = printTokens(text, tokenStart, mainLexer);
      if (!allTokens.endsWith(subTokens)) {
        assertEquals("Restarting impossible from offset " + tokenStart + "; lexer state should not return 0 at this point", allTokens, subTokens);
      }
    }
    auxLexer.advance();
  }
}
Example 2: addWordHonoringEscapeSequences
import com.intellij.lexer.Lexer; // import the package/class this method depends on
public static void addWordHonoringEscapeSequences(CharSequence editorText,
                                                  TextRange literalTextRange,
                                                  int cursorOffset,
                                                  Lexer lexer,
                                                  List<TextRange> result) {
  lexer.start(editorText, literalTextRange.getStartOffset(), literalTextRange.getEndOffset());
  while (lexer.getTokenType() != null) {
    if (lexer.getTokenStart() <= cursorOffset && cursorOffset < lexer.getTokenEnd()) {
      if (StringEscapesTokenTypes.STRING_LITERAL_ESCAPES.contains(lexer.getTokenType())) {
        result.add(new TextRange(lexer.getTokenStart(), lexer.getTokenEnd()));
      }
      else {
        TextRange word = getWordSelectionRange(editorText, cursorOffset, JAVA_IDENTIFIER_PART_CONDITION);
        if (word != null) {
          result.add(new TextRange(Math.max(word.getStartOffset(), lexer.getTokenStart()),
                                   Math.min(word.getEndOffset(), lexer.getTokenEnd())));
        }
      }
      break;
    }
    lexer.advance();
  }
}
Example 3: createOuterLanguageElement
import com.intellij.lexer.Lexer; // import the package/class this method depends on
protected OuterLanguageElementImpl createOuterLanguageElement(final Lexer lexer, final CharTable table,
                                                              @NotNull IElementType outerElementType) {
  final CharSequence buffer = lexer.getBufferSequence();
  final int tokenStart = lexer.getTokenStart();
  if (tokenStart < 0 || tokenStart > buffer.length()) {
    LOG.error("Invalid start: " + tokenStart + "; " + lexer);
  }
  final int tokenEnd = lexer.getTokenEnd();
  if (tokenEnd < 0 || tokenEnd > buffer.length()) {
    LOG.error("Invalid end: " + tokenEnd + "; " + lexer);
  }
  return new OuterLanguageElementImpl(outerElementType, table.intern(buffer, tokenStart, tokenEnd));
}
Example 4: lexemsEqual
import com.intellij.lexer.Lexer; // import the package/class this method depends on
private static boolean lexemsEqual(final PsiClass classToBind, final PsiClass newClass) {
  Lexer oldTextLexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  Lexer newTextLexer = JavaParserDefinition.createLexer(LanguageLevel.HIGHEST);
  String oldBuffer = classToBind.getText();
  String newBuffer = newClass.getText();
  oldTextLexer.start(oldBuffer);
  newTextLexer.start(newBuffer);
  while (true) {
    IElementType oldLexem = oldTextLexer.getTokenType();
    IElementType newLexem = newTextLexer.getTokenType();
    if (oldLexem == null || newLexem == null) {
      // must terminate at the same time
      return oldLexem == null && newLexem == null;
    }
    if (oldLexem != newLexem) {
      return false;
    }
    if (oldLexem != TokenType.WHITE_SPACE && oldLexem != JavaDocElementType.DOC_COMMENT) {
      int oldStart = oldTextLexer.getTokenStart();
      int newStart = newTextLexer.getTokenStart();
      int oldLength = oldTextLexer.getTokenEnd() - oldTextLexer.getTokenStart();
      int newLength = newTextLexer.getTokenEnd() - newTextLexer.getTokenStart();
      if (oldLength != newLength) {
        return false;
      }
      for (int i = 0; i < oldLength; i++) {
        if (oldBuffer.charAt(oldStart + i) != newBuffer.charAt(newStart + i)) {
          return false;
        }
      }
    }
    oldTextLexer.advance();
    newTextLexer.advance();
  }
}
Example 5: getTokenText
import com.intellij.lexer.Lexer; // import the package/class this method depends on
private static String getTokenText(Lexer lexer) {
  int start = lexer.getTokenStart();
  int end = lexer.getTokenEnd();
  return lexer.getBufferSequence().subSequence(start, end).toString();
}
Example 6: createTextChunks
import com.intellij.lexer.Lexer; // import the package/class this method depends on
@NotNull
public TextChunk[] createTextChunks(@NotNull UsageInfo2UsageAdapter usageInfo2UsageAdapter,
                                    @NotNull CharSequence chars,
                                    int start,
                                    int end,
                                    boolean selectUsageWithBold,
                                    @NotNull List<TextChunk> result) {
  final Lexer lexer = myHighlighter.getHighlightingLexer();
  final SyntaxHighlighterOverEditorHighlighter highlighter = myHighlighter;
  LOG.assertTrue(start <= end);
  int i = StringUtil.indexOf(chars, '\n', start, end);
  if (i != -1) end = i;
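  // restart the highlighting lexer if the document has changed; otherwise rewind it when it is exhausted or already past the requested start offset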
  if (myDocumentStamp != myDocument.getModificationStamp()) {
    highlighter.restart(chars);
    myDocumentStamp = myDocument.getModificationStamp();
  } else if (lexer.getTokenType() == null || lexer.getTokenStart() > start) {
    highlighter.resetPosition(0); // todo restart from nearest position with initial state
  }
  boolean isBeginning = true;
  for (; lexer.getTokenType() != null; lexer.advance()) {
    int hiStart = lexer.getTokenStart();
    int hiEnd = lexer.getTokenEnd();
    if (hiStart >= end) break;
    hiStart = Math.max(hiStart, start);
    hiEnd = Math.min(hiEnd, end);
    if (hiStart >= hiEnd) { continue; }
    if (isBeginning) {
      String text = chars.subSequence(hiStart, hiEnd).toString();
      if (text.trim().isEmpty()) continue;
    }
    isBeginning = false;
    IElementType tokenType = lexer.getTokenType();
    TextAttributesKey[] tokenHighlights = highlighter.getTokenHighlights(tokenType);
    processIntersectingRange(usageInfo2UsageAdapter, chars, hiStart, hiEnd, tokenHighlights, selectUsageWithBold, result);
  }
  return result.toArray(new TextChunk[result.size()]);
}
Example 7: isMinified
import com.intellij.lexer.Lexer; // import the package/class this method depends on
protected static boolean isMinified(Lexer lexer, ParserDefinition parserDefinition, TokenSet noWSRequireAfterTokenSet) {
  int offsetIgnoringComments = 0;
  int offsetIgnoringCommentsAndStrings = 0;
  int lineLength = 0;
  int unneededWhitespaceCount = 0;
  IElementType lastTokenType = null;
  TokenSet whitespaceTokens = parserDefinition.getWhitespaceTokens();
  TokenSet stringLiteralElements = parserDefinition.getStringLiteralElements();
  TokenSet commentTokens = parserDefinition.getCommentTokens();
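  // walk the tokens, tracking line length and optional whitespace: minified code has long lines and almost no whitespace that could be omitted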
  for (IElementType tokenType = lexer.getTokenType(); tokenType != null; lexer.advance(), tokenType = lexer.getTokenType()) {
    if (commentTokens.contains(tokenType)) {
      lastTokenType = tokenType;
      continue;
    }
    int tokenLength = lexer.getTokenEnd() - lexer.getTokenStart();
    offsetIgnoringComments += tokenLength;
    if (stringLiteralElements.contains(tokenType)) {
      lineLength += tokenLength;
      lastTokenType = tokenType;
      continue;
    }
    offsetIgnoringCommentsAndStrings += tokenLength;
    if (whitespaceTokens.contains(tokenType)) {
      if (!commentTokens.contains(lastTokenType) && tokenLength > 1) {
        lexer.advance();
        if (lexer.getTokenType() == null) {
          // it was last token
          break;
        } else {
          return false;
        }
      }
      if (lexer.getTokenText().contains("\n")) {
        if (lineLength > MIN_LINE_LENGTH) {
          break;
        }
        lineLength = 0;
      }
      if (" ".equals(lexer.getTokenText()) && noWSRequireAfterTokenSet.contains(lastTokenType)) {
        unneededWhitespaceCount++;
      }
    }
    else {
      lineLength += tokenLength;
    }
    if (offsetIgnoringComments >= MAX_OFFSET) {
      break;
    }
    lastTokenType = tokenType;
  }
  return offsetIgnoringComments >= MIN_SIZE &&
         (unneededWhitespaceCount + 0.0d) / offsetIgnoringCommentsAndStrings < MAX_UNNEEDED_OFFSET_PERCENTAGE;
}
Example 8: getInjectionRanges
import com.intellij.lexer.Lexer; // import the package/class this method depends on
@NotNull
private synchronized TextRange[] getInjectionRanges(final XmlAttribute attribute, XsltChecker.LanguageLevel languageLevel) {
  final TextRange[] cachedFiles = getCachedRanges(attribute);
  if (cachedFiles != null) {
    return cachedFiles;
  }
  final String value = attribute.getDisplayValue();
  if (value == null) return EMPTY_ARRAY;
  final TextRange[] ranges;
  if (XsltSupport.mayBeAVT(attribute)) {
    final List<TextRange> avtRanges = new SmartList<TextRange>();
    int i;
    int j = 0;
    Lexer lexer = null;
    while ((i = XsltSupport.getAVTOffset(value, j)) != -1) {
      if (lexer == null) {
        lexer = LanguageParserDefinitions.INSTANCE.forLanguage(languageLevel.getXPathVersion().getLanguage())
          .createLexer(attribute.getProject());
      }
      // "A right curly brace inside a Literal in an expression is not recognized as terminating the expression."
      lexer.start(value, i, value.length());
      j = -1;
      while (lexer.getTokenType() != null) {
        if (lexer.getTokenType() == XPathTokenTypes.RBRACE) {
          j = lexer.getTokenStart();
          break;
        }
        lexer.advance();
      }
      if (j != -1) {
        avtRanges.add(AVTRange.create(attribute, i, j + 1, j > i + 1));
      } else {
        // missing '}' error will be flagged by xpath parser
        avtRanges.add(AVTRange.create(attribute, i, value.length(), false));
        break;
      }
    }
    if (avtRanges.size() > 0) {
      ranges = avtRanges.toArray(new TextRange[avtRanges.size()]);
    } else {
      ranges = EMPTY_ARRAY;
    }
  } else {
    ranges = new TextRange[]{ attribute.getValueTextRange() };
  }
  attribute.putUserData(CACHED_FILES, Pair.create(attribute.getValue(), ranges));
  return ranges;
}