本文整理汇总了Java中org.netbeans.api.lexer.TokenSequence.moveIndex方法的典型用法代码示例。如果您正苦于以下问题:Java TokenSequence.moveIndex方法的具体用法?Java TokenSequence.moveIndex怎么用?Java TokenSequence.moveIndex使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.netbeans.api.lexer.TokenSequence
的用法示例。
在下文中一共展示了TokenSequence.moveIndex方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: forTokenIndex
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Creates an {@link ElementsParser} positioned at the given token index.
 * The passed token sequence is repositioned by this call.
 *
 * @param sourceCode the source text backing the token sequence
 * @param tokenSequence HTML token sequence to parse from
 * @param tokenIndex zero-based index of the token to start parsing at
 * @return parser starting at {@code tokenIndex}
 * @throws IllegalArgumentException if {@code tokenIndex} is negative or
 *         greater than the last token index of the sequence
 */
public static ElementsParser forTokenIndex(CharSequence sourceCode, TokenSequence<HTMLTokenId> tokenSequence, int tokenIndex) {
    if (tokenIndex < 0) {
        // fix: the guard accepts 0, so the message must say "non-negative", not "positive"
        throw new IllegalArgumentException(String.format("TokenSequence index (%s) must be non-negative", tokenIndex));
    }
    tokenSequence.moveEnd();
    int lastTokenIndex = tokenSequence.index();
    if (tokenIndex > lastTokenIndex) {
        throw new IllegalArgumentException(String.format("token index (%s) is bigger than last index in the sequence (%s)", tokenIndex, lastTokenIndex));
    }
    tokenSequence.moveIndex(tokenIndex);
    return new ElementsParser(sourceCode, tokenSequence);
}
示例2: getCommentsCollection
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Collects a contiguous run of comment tokens starting at the current token
 * of {@code ts}, recording the overall text bounds of the run, and updates
 * the {@code tokenIndexAlreadyAdded} field so subsequent calls skip tokens
 * already collected.
 * <p>
 * Note - Because of {@link Comment.Style#WHITESPACE}, whitespaces are also
 * recorded
 *
 * @param ts token sequence positioned on the first comment token to collect
 * @param maxTension tolerated gap between comments, measured in newlines
 *        (a preceding line comment counts as one extra) — NOTE(review):
 *        inferred from the numberOfNL check below; confirm exact semantics
 * @return the collected comments with their {start, end} bounds set
 */
private CommentsCollection getCommentsCollection(TokenSequence<JavaTokenId> ts, int maxTension) {
    CommentsCollection result = new CommentsCollection();
    Token<JavaTokenId> t = ts.token();
    result.add(t);
    // whether the last collected comment was a line comment - a line comment
    // implicitly consumes one newline, see the maxTension check below
    boolean isLC = t.id() == JavaTokenId.LINE_COMMENT;
    int lastCommentIndex = ts.index();
    int start = ts.offset();
    int end = ts.offset() + ts.token().length();
    while (ts.moveNext()) {
        // skip tokens that were already collected by a previous call
        if (ts.index() < tokenIndexAlreadyAdded) continue;
        t = ts.token();
        if (isComment(t.id())) {
            if (t.id() == JavaTokenId.JAVADOC_COMMENT &&
                    mixedJDocTokenIndexes.contains(ts.index())) {
                // skip javadocs already added
                continue;
            }
            result.add(t);
            // widen the overall bounds to cover this comment
            start = Math.min(ts.offset(), start);
            end = Math.max(ts.offset() + t.length(), end);
            isLC = t.id() == JavaTokenId.LINE_COMMENT;
            lastCommentIndex = ts.index();
        } else if (t.id() == JavaTokenId.WHITESPACE) {
            // too many blank lines between comments ends the run
            if ((numberOfNL(t) + (isLC ? 1 : 0)) > maxTension) {
                break;
            }
        } else {
            // any other token ends the run
            break;
        }
    }
    // reposition the sequence on the last collected comment token and record
    // its index so the next call does not re-collect it
    ts.moveIndex(lastCommentIndex);
    ts.moveNext();
    tokenIndexAlreadyAdded = ts.index();
    result.setBounds(new int[]{start, end});
    // System.out.println("tokenIndexAlreadyAdded = " + tokenIndexAlreadyAdded);
    return result;
}
示例3: getTokenSequenceEndOffset
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Computes the offset just past the end of the last token in the sequence.
 * The sequence's index is restored before returning.
 *
 * @param ts token sequence to inspect
 * @return end offset of the sequence's last token
 */
public static int getTokenSequenceEndOffset(TokenSequence<? extends TokenId> ts) {
    int savedIndex = ts.index();
    ts.moveEnd();
    ts.movePrevious();
    int endOffset = ts.token().length() + ts.offset();
    ts.moveIndex(savedIndex);
    return endOffset;
}
示例4: getTokenSequenceStartOffset
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Computes the offset at which the first token of the sequence begins.
 * The sequence's index is restored before returning.
 *
 * @param ts token sequence to inspect
 * @return start offset of the sequence's first token
 */
public static int getTokenSequenceStartOffset(TokenSequence<? extends TokenId> ts) {
    int savedIndex = ts.index();
    ts.moveStart();
    ts.moveNext();
    int startOffset = ts.offset();
    ts.moveIndex(savedIndex);
    return startOffset;
}
示例5: testLanguagesEmbeddingMapMT
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Checks that every WORD token of a text/x-simple-plain document carries an
 * embedded text/x-simple-char token sequence whose first token starts 1
 * character into the word (startSkipLength) and whose last token ends 2
 * characters before the word's end (endSkipLength).
 */
public void testLanguagesEmbeddingMapMT() throws Exception {
    Document doc = new PlainDocument();
    doc.putProperty("mimeType", "text/x-simple-plain");
    // All words have to be longer than 3 characters
    doc.insertString(0, "Hello 1234 0xFF00", SimpleAttributeSet.EMPTY);
    TokenHierarchy th = TokenHierarchy.get(doc);
    assertNotNull("Can't find token hierarchy for a text/x-simple-plain document", th);
    TokenSequence seq = th.tokenSequence();
    Language lang = seq.language();
    assertNotNull("Can't find language for text/x-simple-plain", lang);
    assertEquals("Wrong language", "text/x-simple-plain", lang.mimeType());
    for(int i = 0; i < seq.tokenCount(); i++) {
        // moveIndex(i) positions before token i; moveNext() fetches it
        seq.moveIndex(i);
        assertTrue(seq.moveNext());
        Token token = seq.token();
        if (token.id() == SimplePlainTokenId.WORD) {
            TokenSequence embeddedSeq = seq.embedded();
            assertNotNull("Can't find embedded token sequence", embeddedSeq);
            Language embeddedLang = embeddedSeq.language();
            assertNotNull("Can't find language of the embedded sequence", embeddedLang);
            assertEquals("Wrong language of the embedded sequence", "text/x-simple-char", embeddedLang.mimeType());
            // first embedded token must start 1 char after the word token starts
            embeddedSeq.moveStart();
            assertTrue("Embedded sequence has no tokens (moveFirst)", embeddedSeq.moveNext());
            assertEquals("Wrong startSkipLength", 1, embeddedSeq.offset() - seq.offset());
            // last embedded token must end 2 chars before the word token ends
            embeddedSeq.moveEnd();
            assertTrue("Embedded sequence has no tokens (moveLast)", embeddedSeq.movePrevious());
            assertEquals("Wrong endSkipLength", 2,
                    (seq.offset() + seq.token().length()) - (embeddedSeq.offset() + embeddedSeq.token().length()));
        }
    }
}
示例6: moveOrSkipSemicolon
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Called to add semicolon after bracket for some conditions.
 * <p>
 * When a semicolon is typed: if the caret already sits on a semicolon, the
 * typed one is swallowed and the caret jumps past the existing one;
 * otherwise, if the rest of the line up to a newline or '}' consists only
 * of whitespace and closing parens, the semicolon is inserted after the
 * last ')' instead of at the caret — unless the position is a for-loop /
 * try-with-resources / lambda semicolon or lies within quotes.
 *
 * @param context typing context carrying the document, caret offset and typed text
 * @return relative caretOffset change, or -1 when the typed semicolon
 *         should be inserted normally
 * @throws BadLocationException if the document modification fails
 */
static int moveOrSkipSemicolon(TypedTextInterceptor.MutableContext context) throws BadLocationException {
    TokenSequence<JavaTokenId> javaTS = javaTokenSequence(context, false);
    if (javaTS == null || isStringOrComment(javaTS.token().id())) {
        return -1;
    }
    // caret on an existing semicolon: swallow the typed one and skip past it
    if (javaTS.token().id() == JavaTokenId.SEMICOLON) {
        context.setText("", 0); // NOI18N
        return javaTS.offset() + 1;
    }
    int lastParenPos = context.getOffset();
    int index = javaTS.index();
    // Move beyond semicolon
    // scan forward until a newline or '}'; only ')' and whitespace may follow
    while (javaTS.moveNext()
            && !(javaTS.token().id() == JavaTokenId.WHITESPACE && javaTS.token().text().toString().contains("\n"))
            && javaTS.token().id() != JavaTokenId.RBRACE) { // NOI18N
        switch (javaTS.token().id()) {
            case RPAREN:
                // remember the last closing paren - the semicolon belongs after it
                lastParenPos = javaTS.offset();
                break;
            case WHITESPACE:
                break;
            default:
                // any other token means this is not a plain ");" tail - bail out
                return -1;
        }
    }
    // Restore javaTS position
    javaTS.moveIndex(index);
    javaTS.moveNext();
    if (isForLoopTryWithResourcesOrLambdaSemicolon(javaTS) || posWithinAnyQuote(context, javaTS) || (lastParenPos == context.getOffset() && !javaTS.token().id().equals(JavaTokenId.RPAREN))) {
        return -1;
    }
    context.setText("", 0); // NOI18N
    context.getDocument().insertString(lastParenPos + 1, ";", null); // NOI18N
    return lastParenPos + 2;
}
示例7: isTypeParameterTag
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Simple check whether the currently selected token is the type parameter
 * of a javadoc tag, e.g. {@code @param <T>}. The sequence's position is
 * restored before this method returns.
 *
 * @param seq token sequence with selected token
 * @return {@code true} when the token should not be interpreted.
 */
private static boolean isTypeParameterTag(TokenSequence<? extends TokenId> seq) {
    int savedIndex = seq.index();
    try {
        // the token immediately before must be plain javadoc text
        if (!seq.movePrevious()) {
            return false;
        }
        if (seq.token().id() != JavadocTokenId.OTHER_TEXT) {
            return false;
        }
        // ... which in turn must be preceded by the "@param" tag itself
        if (!seq.movePrevious()) {
            return false;
        }
        return seq.token().id() == JavadocTokenId.TAG
                && "@param".contentEquals(seq.token().text()); // NOI18N
    } finally {
        // restore the original position
        seq.moveIndex(savedIndex);
        seq.moveNext();
    }
}
示例8: flyweightTokenCount
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Compute number of flyweight tokens in the given token sequence.
 * The sequence is rewound to its beginning and iterated to its end.
 *
 * @param ts non-null token sequence.
 * @return number of flyweight tokens in the token sequence.
 */
public static int flyweightTokenCount(TokenSequence<?> ts) {
    int count = 0;
    ts.moveIndex(0);
    while (ts.moveNext()) {
        count += ts.token().isFlyweight() ? 1 : 0;
    }
    return count;
}
示例9: flyweightTextLength
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Compute total number of characters represented by flyweight tokens
 * in the given token sequence. The sequence is rewound to its beginning
 * and iterated to its end.
 *
 * @param ts non-null token sequence.
 * @return number of characters contained in the flyweight tokens
 *         in the token sequence.
 */
public static int flyweightTextLength(TokenSequence<?> ts) {
    int totalLength = 0;
    ts.moveIndex(0);
    while (ts.moveNext()) {
        if (ts.token().isFlyweight()) {
            totalLength += ts.token().text().length();
        }
    }
    return totalLength;
}
示例10: flyweightDistribution
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Compute distribution of flyweight token lengths across the given token
 * sequence. The sequence is rewound to its beginning and iterated to its end.
 *
 * @param ts non-null token sequence.
 * @return non-null list where the value at index {@code i} is the number of
 *         flyweight tokens whose text length equals {@code i}.
 */
public static List<Integer> flyweightDistribution(TokenSequence<?> ts) {
    List<Integer> histogram = new ArrayList<Integer>();
    ts.moveIndex(0);
    while (ts.moveNext()) {
        if (!ts.token().isFlyweight()) {
            continue;
        }
        int len = ts.token().text().length();
        // grow the histogram so that index 'len' exists
        while (histogram.size() <= len) {
            histogram.add(0);
        }
        histogram.set(len, 1 + histogram.get(len));
    }
    return histogram;
}
示例11: testPerf
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Coarse performance sanity test: builds a large repetitive input, then
 * asserts that hierarchy creation, token sequence retrieval, fetching the
 * first two tokens, and a full iteration each finish within generous time
 * bounds (the first three should be cheap because lexing is lazy).
 */
public void testPerf() {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 7000; i++) {
        sb.append("public static x + y /* test comment */ abc * def\n");
    }
    String text = sb.toString();
    long tm;
    Language<TestTokenId> language = TestTokenId.language();
    tm = System.currentTimeMillis();
    TokenHierarchy<?> hi = TokenHierarchy.create(text, language);
    tm = System.currentTimeMillis() - tm;
    assertTrue("Timeout tm = " + tm + "msec", tm < 100); // Should be fast
    tm = System.currentTimeMillis();
    TokenSequence<?> ts = hi.tokenSequence();
    tm = System.currentTimeMillis() - tm;
    assertTrue("Timeout tm = " + tm + "msec", tm < 100); // Should be fast
    // Fetch 2 initial tokens - should be lexed lazily
    tm = System.currentTimeMillis();
    ts.moveNext();
    ts.token();
    ts.moveNext();
    ts.token();
    tm = System.currentTimeMillis() - tm;
    assertTrue("Timeout tm = " + tm + "msec", tm < 100); // Should be fast
    // Rewind and walk the whole sequence, forcing every token to be created
    tm = System.currentTimeMillis();
    ts.moveIndex(0);
    int cntr = 1; // On the first token
    // NOTE(review): moveIndex(0) positions before the first token, yet cntr
    // starts at 1 — the printed token count may be off by one; confirm
    while (ts.moveNext()) {
        Token t = ts.token(); // fetched to force token creation; value unused
        cntr++;
    }
    tm = System.currentTimeMillis() - tm;
    assertTrue("Timeout tm = " + tm + "msec", tm < 1000); // Should be fast
    System.out.println("SimpleLexerBatchTest.testPerf(): Lexed input " + text.length()
            + " chars long and created " + cntr + " tokens in " + tm + " ms.");
}
示例12: isForLoopTryWithResourcesOrLambdaSemicolon
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Determines whether the current position of {@code ts} lies in a context
 * where a semicolon is structural: inside a {@code for (...)} or
 * {@code try (...)} header, or inside a lambda body (an unmatched '{'
 * preceded by '->' with no intervening semicolon). It scans backwards for
 * the first unmatched '(' or '{' and inspects what precedes it. The
 * sequence's original position is restored before returning.
 *
 * @param ts token sequence positioned at the token to classify
 * @return {@code true} for a for-loop / try-with-resources / lambda context
 */
private static boolean isForLoopTryWithResourcesOrLambdaSemicolon(TokenSequence<JavaTokenId> ts) {
    int parenDepth = 0; // parenthesis depth
    int braceDepth = 0; // brace depth
    boolean semicolonFound = false; // next semicolon
    int tsOrigIndex = ts.index();
    try {
        while (ts.movePrevious()) {
            switch (ts.token().id()) {
                case LPAREN:
                    if (parenDepth == 0) { // could be a 'for (' or 'try ('
                        // look at the keyword before the unmatched '(',
                        // skipping whitespace and comments
                        while (ts.movePrevious()) {
                            switch (ts.token().id()) {
                                case WHITESPACE:
                                case BLOCK_COMMENT:
                                case JAVADOC_COMMENT:
                                case LINE_COMMENT:
                                    break; // skip
                                case FOR:
                                case TRY:
                                    return true;
                                default:
                                    return false;
                            }
                        }
                        return false;
                    } else { // non-zero depth
                        parenDepth--;
                    }
                    break;
                case RPAREN:
                    parenDepth++;
                    break;
                case LBRACE:
                    if (braceDepth == 0) { // unclosed left brace
                        if (!semicolonFound) {
                            // an unmatched '{' directly preceded by '->'
                            // (modulo whitespace/comments) marks a lambda body
                            while (ts.movePrevious()) {
                                switch (ts.token().id()) {
                                    case WHITESPACE:
                                    case BLOCK_COMMENT:
                                    case JAVADOC_COMMENT:
                                    case LINE_COMMENT:
                                        break; // skip
                                    case ARROW:
                                        return true;
                                    default:
                                        return false;
                                }
                            }
                        }
                        return false;
                    }
                    braceDepth--;
                    break;
                case RBRACE:
                    braceDepth++;
                    break;
                case SEMICOLON:
                    if (semicolonFound) { // one semicolon already found
                        return false;
                    }
                    semicolonFound = true;
                    break;
            }
        }
    } finally {
        // Restore orig TS's location
        ts.moveIndex(tsOrigIndex);
        ts.moveNext();
    }
    return false;
}
示例13: testCreateEmbedding
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Verifies the initial tokenization of "abc def ghi" and then checks that a
 * single-character insertion fires a token hierarchy event replacing one
 * IDENTIFIER token ("def" -> "dxef"), using moveIndex to position the
 * current token sequence at the change index.
 */
public void testCreateEmbedding() throws Exception {
    Document doc = new ModificationTextDocument();
    String text = "abc def ghi";
    doc.insertString(0, text, null);
    // Assign a language to the document
    doc.putProperty(Language.class,TestTokenId.language());
    TokenHierarchy<?> hi = TokenHierarchy.get(doc);
    LexerTestUtilities.initLastTokenHierarchyEventListening(doc);
    // token hierarchy access requires the document read lock
    ((AbstractDocument)doc).readLock();
    try {
        TokenSequence<?> ts = hi.tokenSequence();
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.IDENTIFIER, "abc", 0);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.WHITESPACE, " ", 3);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.IDENTIFIER, "def", 4);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.WHITESPACE, " ", 7);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.IDENTIFIER, "ghi", 8);
        // Extra newline at doc's end is contained in the DocumentUtilities.getText(doc)
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.WHITESPACE, "\n", 11);
        assertFalse(ts.moveNext());
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
    // Do insert
    doc.insertString(5, "x", null);
    ((AbstractDocument)doc).readLock();
    try {
        // Check the fired event
        TokenHierarchyEvent evt = LexerTestUtilities.getLastTokenHierarchyEvent(doc);
        assertNotNull(evt);
        TokenChange<?> tc = evt.tokenChange();
        assertNotNull(tc);
        // the change replaces the token at index 2 ("def", offset 4)
        assertEquals(2, tc.index());
        assertEquals(4, tc.offset());
        assertEquals(1, tc.removedTokenCount());
        TokenSequence<?> removedTS = tc.removedTokenSequence();
        assertTrue(removedTS.moveNext());
        LexerTestUtilities.assertTokenEquals(removedTS, TestTokenId.IDENTIFIER, 3, 4);
        assertEquals(1, tc.addedTokenCount());
        TokenSequence<?> currentTS = tc.currentTokenSequence();
        // position the current sequence at the change index and fetch the new token
        currentTS.moveIndex(tc.index());
        assertTrue(currentTS.moveNext());
        LexerTestUtilities.assertTokenEquals(currentTS, TestTokenId.IDENTIFIER, "dxef", 4);
        assertEquals(TestTokenId.language(), tc.language());
        assertEquals(0, tc.embeddedChangeCount());
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
}