This article collects typical usage examples of the Java class org.netbeans.api.lexer.TokenSequence. If you are wondering what TokenSequence is for, how to use it, or what real code that uses it looks like, the selected examples below should help.
The TokenSequence class belongs to the org.netbeans.api.lexer package. Fifteen code examples are shown below, ordered by popularity by default.
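Before the examples, here is a minimal, self-contained sketch of the iteration pattern most of them share: build a TokenHierarchy over a character sequence, obtain its top-level TokenSequence, and walk the tokens. This sketch is not taken from the examples below; it assumes the NetBeans lexer and java.lexer modules are on the classpath, and the class name TokenSequenceWalkthrough is illustrative only.
import org.netbeans.api.java.lexer.JavaTokenId;
import org.netbeans.api.lexer.Token;
import org.netbeans.api.lexer.TokenHierarchy;
import org.netbeans.api.lexer.TokenSequence;

public class TokenSequenceWalkthrough {
    public static void main(String[] args) {
        // Build a token hierarchy over a plain character sequence using the Java language.
        String text = "int i = 0; // counter";
        TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
        TokenSequence<?> ts = hi.tokenSequence();
        // moveNext() advances to the next token and returns false at the end of the sequence.
        while (ts.moveNext()) {
            Token<?> token = ts.token();
            System.out.println(token.id() + " at offset " + ts.offset() + ": '" + token.text() + "'");
        }
    }
}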
Example 1: findIdentifierSpanImpl
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private static Token<JavaTokenId> findIdentifierSpanImpl(CompilationInfo info, IdentifierTree tree, CompilationUnitTree cu, SourcePositions positions) {
    int start = (int)positions.getStartPosition(cu, tree);
    int endPosition = (int)positions.getEndPosition(cu, tree);
    if (start == (-1) || endPosition == (-1))
        return null;
    TokenHierarchy<?> th = info.getTokenHierarchy();
    TokenSequence<JavaTokenId> ts = th.tokenSequence(JavaTokenId.language());
    if (ts.move(start) == Integer.MAX_VALUE) {
        return null;
    }
    if (ts.moveNext()) {
        if (ts.offset() >= start) {
            Token<JavaTokenId> t = ts.token();
            return t;
        }
    }
    return null;
}
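The move(int)/moveNext() pairing above trips up many first-time users: move(offset) only relocates the sequence in front of the token containing (or following) the offset and returns the distance from that token's start; token() may not be read until a subsequent moveNext() or movePrevious() succeeds. Below is a hedged sketch of a hypothetical helper (tokenAt is not part of the API) following the same convention used in Examples 7 and 15 further down:
private static Token<JavaTokenId> tokenAt(TokenSequence<JavaTokenId> ts, int offset) {
    int diff = ts.move(offset);
    if (diff > 0) {
        // The offset falls inside a token; moveNext() positions the sequence on that token.
        return ts.moveNext() ? ts.token() : null;
    }
    // The offset sits exactly on a token boundary; take the token that ends here, if any.
    return ts.movePrevious() ? ts.token() : null;
}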
Example 2: testNextToken2
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
public void testNextToken2() {
    String text = "\\t\\b\\b\\t \\tabc\\rsddfdsffffffffff\\uuuuAbcD\\377";
    TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaStringTokenId.language());
    TokenSequence<?> ts = hi.tokenSequence();
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TAB, "\\t");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TAB, "\\t");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TEXT, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TAB, "\\t");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TEXT, "abc");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.CR, "\\r");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TEXT, "sddfdsffffffffff");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.UNICODE_ESCAPE, "\\uuuuAbcD");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.OCTAL_ESCAPE, "\\377");
}
Example 3: findFirstSentence
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private int[] findFirstSentence(TokenSequence<? extends TokenId> seq) {
    seq.moveStart();
    if (seq.moveNext()) {
        int start = seq.offset();
        do {
            if (seq.token().id() == JavadocTokenId.DOT) {
                if (seq.moveNext()) {
                    if (isWhiteSpace(seq.token())) {
                        return new int[] { start, seq.offset() };
                    }
                    seq.movePrevious();
                }
            } else if (seq.token().id() == JavadocTokenId.TAG) {
                if (seq.movePrevious()) {
                    if (!seq.token().text().toString().trim().endsWith("{")) {
                        // not an inline tag
                        return new int[] { start, seq.offset() };
                    }
                }
                seq.moveNext();
            }
        } while (seq.moveNext());
    }
    return null;
}
Example 4: getEmbeddings
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
@Override
public List<Embedding> getEmbeddings(Snapshot snapshot) {
    if (snapshot == null) {
        return Collections.emptyList();
    }
    TokenHierarchy th = snapshot.getTokenHierarchy();
    if (th == null) {
        // no lexer language for the snapshot's mimetype???
        return Collections.emptyList();
    }
    TokenSequence<HTMLTokenId> ts = th.tokenSequence(HTMLTokenId.language());
    HashMap<String, Object> state = new HashMap<>(6);
    List<Embedding> embeddings = new ArrayList<>();
    extractCssFromHTML(snapshot, ts, state, embeddings);
    return embeddings;
}
Example 5: getMarkList
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private List<FoldMarkInfo> getMarkList(TokenSequence seq) {
    List<FoldMarkInfo> markList = null;
    for (seq.moveStart(); seq.moveNext(); ) {
        Token token = seq.token();
        FoldMarkInfo info;
        try {
            info = scanToken(token);
        } catch (BadLocationException e) {
            LOG.log(Level.WARNING, null, e);
            info = null;
        }
        if (info != null) {
            if (markList == null) {
                markList = new ArrayList<FoldMarkInfo>();
            }
            markList.add(info);
        }
    }
    return markList;
}
Example 6: hasPrevious
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private boolean hasPrevious() {
    boolean anotherSeq = false;
    if (index == -1) {
        index = list.size() - 1;
        anotherSeq = true;
    }
    for ( ; index >= 0; index--) {
        TokenSequence<?> seq = list.get(index);
        if (anotherSeq) {
            seq.moveEnd();
        }
        if (seq.movePrevious()) {
            return true;
        }
        anotherSeq = true;
    }
    return false;
}
Example 7: getInstantRenamerVisitor
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
@Override
public <T extends Set<OffsetRange>> NodeVisitor<T> getInstantRenamerVisitor(EditorFeatureContext context, T result) {
    TokenSequence<CssTokenId> tokenSequence = context.getTokenSequence();
    int diff = tokenSequence.move(context.getCaretOffset());
    if (diff > 0 && tokenSequence.moveNext() || diff == 0 && tokenSequence.movePrevious()) {
        Token<CssTokenId> token = tokenSequence.token();
        final CharSequence elementName = token.text();
        return new NodeVisitor<T>(result) {
            @Override
            public boolean visit(Node node) {
                switch (node.type()) {
                    case cp_mixin_name:
                    case cp_variable:
                        if (LexerUtils.equals(elementName, node.image(), false, false)) {
                            OffsetRange range = new OffsetRange(node.from(), node.to());
                            getResult().add(range);
                            break;
                        }
                }
                return false;
            }
        };
    }
    return null;
}
Example 8: dumpSequence
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private static void dumpSequence(TokenSequence<?> seq, StringBuilder sb) {
    if (seq == null) {
        sb.append("Inactive TokenHierarchy"); //NOI18N
    } else {
        for (seq.moveStart(); seq.moveNext(); ) {
            TokenSequence<?> emSeq = seq.embedded();
            if (emSeq != null) {
                dumpSequence(emSeq, sb);
            } else {
                Token<?> token = seq.token();
                sb.append("<"); //NOI18N
                sb.append(String.format("%3s", seq.offset())).append(", "); //NOI18N
                sb.append(String.format("%3s", seq.offset() + token.length())).append(", "); //NOI18N
                sb.append(String.format("%+3d", token.length())).append("> : "); //NOI18N
                sb.append(tokenId(token.id(), true)).append(" : '"); //NOI18N
                sb.append(tokenText(token));
                sb.append("'\n"); //NOI18N
            }
        }
    }
}
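One possible way to drive this dumper is sketched below. This is a hedged example: the dump wrapper is hypothetical, while the create/tokenSequence calls mirror Examples 9 and 12.
private static String dump(String javaSource) {
    // Lex the source with the Java language and dump the resulting hierarchy recursively.
    TokenHierarchy<?> hi = TokenHierarchy.create(javaSource, JavaTokenId.language());
    StringBuilder sb = new StringBuilder();
    dumpSequence(hi.tokenSequence(), sb);
    return sb.toString();
}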
Example 9: test
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
public void test() throws Exception {
    File testJComponentFile = new File(getDataDir() + "/testfiles/JComponent.java.txt");
    FileReader r = new FileReader(testJComponentFile);
    int fileLen = (int)testJComponentFile.length();
    CharBuffer cb = CharBuffer.allocate(fileLen);
    r.read(cb);
    cb.rewind();
    String text = cb.toString();
    TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
    TokenSequence<?> ts = hi.tokenSequence();
    System.err.println("Flyweight tokens: " + LexerTestUtilities.flyweightTokenCount(ts)
            + "\nTotal tokens: " + ts.tokenCount()
            + "\nFlyweight text length: " + LexerTestUtilities.flyweightTextLength(ts)
            + "\nTotal text length: " + fileLen
            + "\nDistribution: " + LexerTestUtilities.flyweightDistribution(ts)
    );
    assertEquals(LexerTestUtilities.flyweightTokenCount(ts), 13786);
    assertEquals(LexerTestUtilities.flyweightTextLength(ts), 21710);
    assertEquals(ts.tokenCount(), 21379);
}
Example 10: insideLambdaExpression
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private void insideLambdaExpression(Env env) throws IOException {
    TreePath path = env.getPath();
    LambdaExpressionTree let = (LambdaExpressionTree) path.getLeaf();
    TokenSequence<JavaTokenId> ts = findLastNonWhitespaceToken(env, let, env.getOffset());
    if (ts != null) {
        switch (ts.token().id()) {
            case ARROW:
                localResult(env);
                addValueKeywords(env);
                break;
            case COMMA:
                if (let.getParameters().isEmpty()
                        || env.getController().getTrees().getSourcePositions().getStartPosition(path.getCompilationUnit(), let.getParameters().get(0).getType()) >= 0) {
                    addTypes(env, EnumSet.of(CLASS, INTERFACE, ENUM, ANNOTATION_TYPE, TYPE_PARAMETER), null);
                    addPrimitiveTypeKeywords(env);
                    addKeyword(env, FINAL_KEYWORD, SPACE, false);
                }
                break;
        }
    }
}
Example 11: skipWhitespaces
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private static int skipWhitespaces(CompilationInfo info, int pos, boolean forward) {
    TokenSequence<JavaTokenId> ts = info.getTokenHierarchy().tokenSequence(JavaTokenId.language());
    ts.move(pos);
    boolean moveSucceeded = false;
    while (forward ? ts.moveNext() : ts.movePrevious()) {
        moveSucceeded = true;
        if (!WHITESPACES.contains(ts.token().id())) {
            break;
        }
    }
    if (moveSucceeded) {
        return forward ? ts.offset() : ts.offset() + ts.token().length();
    } else {
        return pos;
    }
}
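The WHITESPACES set is defined elsewhere in the class this snippet was taken from and is not shown here. A plausible definition (an assumption, not the original source) would treat comments as whitespace as well:
// Assumed definition - the original class may use a different set of token ids.
private static final Set<JavaTokenId> WHITESPACES = EnumSet.of(
        JavaTokenId.WHITESPACE, JavaTokenId.LINE_COMMENT,
        JavaTokenId.BLOCK_COMMENT, JavaTokenId.JAVADOC_COMMENT);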
Example 12: testOperators
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
public void testOperators() {
    String text = "^ ^= % %= * *= / /= = ==";
    TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
    TokenSequence<?> ts = hi.tokenSequence();
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARET, "^");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARETEQ, "^=");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.PERCENT, "%");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.PERCENTEQ, "%=");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STAR, "*");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STAREQ, "*=");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.SLASH, "/");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.SLASHEQ, "/=");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.EQ, "=");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
    LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.EQEQ, "==");
}
Example 13: isEmpty
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private static boolean isEmpty (
        int ln,
        StyledDocument document,
        Set<Integer> whitespaces
) throws BadLocationException {
    int start = NbDocument.findLineOffset (document, ln);
    int end = document.getLength ();
    try {
        end = NbDocument.findLineOffset (document, ln + 1) - 1;
    } catch (IndexOutOfBoundsException ex) {
    }
    TokenSequence ts = Utils.getTokenSequence (document, start);
    if (ts.token () == null) return true;
    while (whitespaces.contains (ts.token ().id ().ordinal ())) {
        if (!ts.moveNext ()) return true;
        if (ts.offset () > end) return true;
    }
    return false;
}
Example 14: getAttribute
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private Token<XMLTokenId> getAttribute(TokenSequence<XMLTokenId> ts) {
    Token<XMLTokenId> tok = ts.token();
    if (tok.id() == XMLTokenId.VALUE) {
        while (ts.movePrevious()) {
            tok = ts.token();
            switch (tok.id()) {
                case ARGUMENT:
                    return tok;
                case OPERATOR:
                case EOL:
                case ERROR:
                case WS:
                    continue;
                default:
                    return null;
            }
        }
    }
    return null;
}
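A hedged usage sketch (attributeNameAt is a hypothetical caller, not part of the original class): position the sequence on an attribute value token first, then ask for the attribute name.
private String attributeNameAt(TokenSequence<XMLTokenId> ts, int offset) {
    ts.move(offset);
    if (!ts.moveNext()) {
        return null;
    }
    // getAttribute only walks backwards when the current token is a VALUE token.
    Token<XMLTokenId> attr = getAttribute(ts);
    return attr != null ? attr.text().toString() : null;
}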
Example 15: getToken
import org.netbeans.api.lexer.TokenSequence; // import the dependent package/class
private Token getToken(TokenSequence ts, int offset, boolean next, int[] startOffset) {
    int diff = ts.move(offset);
    boolean ok;
    if (next) {
        ok = ts.moveNext();
    } else if (diff > 0) {
        ok = ts.moveNext();
    } else {
        ok = ts.movePrevious();
    }
    if (!ok) {
        return null;
    }
    if (startOffset != null) {
        startOffset[0] = ts.offset();
        if (startOffset.length > 1) {
            startOffset[1] = ts.offset() + ts.token().length();
        }
    }
    return ts.token();
}
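A hedged usage sketch (printTokenAt and its variable names are illustrative only): the int[] argument acts as an out-parameter that receives the start offset of the returned token and, if the array has a second slot, its end offset.
private void printTokenAt(TokenSequence ts, int caretOffset) {
    int[] span = new int[2];
    // true asks for the token following the caret offset.
    Token tok = getToken(ts, caretOffset, true, span);
    if (tok != null) {
        System.out.println("token " + tok.id()
                + " spans [" + span[0] + ", " + span[1] + ")");
    }
}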