This article collects typical usage examples of the Java method org.netbeans.api.lexer.TokenSequence.moveEnd. If you are wondering what TokenSequence.moveEnd does, how to call it, or what it looks like in real code, the selected examples below may help. You can also look further into usage examples of the enclosing class, org.netbeans.api.lexer.TokenSequence.
The following shows 14 code examples of TokenSequence.moveEnd, sorted by popularity by default.
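Before the collected examples, a minimal sketch of the idiom they all share may be useful: moveEnd() positions the sequence behind the last token, so token() cannot be read until movePrevious() steps back onto it. The class name MoveEndSketch and the sample input "int x = 1;" below are assumptions for illustration only; the API calls mirror the ones used in the examples (TokenHierarchy.create, tokenSequence, moveEnd, movePrevious).
import org.netbeans.api.java.lexer.JavaTokenId;
import org.netbeans.api.lexer.Token;
import org.netbeans.api.lexer.TokenHierarchy;
import org.netbeans.api.lexer.TokenSequence;

public class MoveEndSketch {
    public static void main(String[] args) {
        // Hypothetical input; any Java snippet works here.
        TokenHierarchy<?> th = TokenHierarchy.create("int x = 1;", JavaTokenId.language());
        TokenSequence<JavaTokenId> ts = th.tokenSequence(JavaTokenId.language());
        ts.moveEnd();                // position behind the last token; token() cannot be read yet
        if (ts.movePrevious()) {     // step back onto the last token
            Token<JavaTokenId> last = ts.token();
            int endOffset = ts.offset() + last.length();
            System.out.println(last.id() + " ends at offset " + endOffset);
        }
    }
}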
Example 1: reformat
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
public static LinkedList<Diff> reformat(String text, TokenSequence<JavaTokenId> tokens, TreePath path, SourcePositions sp, CodeStyle cs, int rightMargin) {
    Pretty pretty = new Pretty(text, tokens, path, sp, cs, 0, text.length(), rightMargin);
    pretty.scan(path, null);
    CompilationUnitTree cut = (CompilationUnitTree) path.getLeaf();
    List<? extends Tree> typeDecls = cut.getTypeDecls();
    int size = typeDecls.size();
    int cnt = size > 0 && org.netbeans.api.java.source.TreeUtilities.CLASS_TREE_KINDS.contains(typeDecls.get(size - 1).getKind()) ? cs.getBlankLinesAfterClass() : 1;
    if (cnt < 1)
        cnt = 1;
    String s = pretty.getNewlines(cnt);
    tokens.moveEnd();
    tokens.movePrevious();
    if (tokens.token().id() != WHITESPACE)
        pretty.diffs.addFirst(new Diff(text.length(), text.length(), s));
    else if (!s.contentEquals(tokens.token().text()))
        pretty.diffs.addFirst(new Diff(tokens.offset(), tokens.offset() + tokens.token().length(), s));
    return pretty.diffs;
}
Example 2: hasPrevious
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
private boolean hasPrevious() {
    boolean anotherSeq = false;
    if (index == -1) {
        index = list.size() - 1;
        anotherSeq = true;
    }
    for ( ; index >= 0; index--) {
        TokenSequence<?> seq = list.get(index);
        if (anotherSeq) {
            seq.moveEnd();
        }
        if (seq.movePrevious()) {
            return true;
        }
        anotherSeq = true;
    }
    return false;
}
Example 3: forTokenIndex
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
public static ElementsParser forTokenIndex(CharSequence sourceCode, TokenSequence<HTMLTokenId> tokenSequence, int tokenIndex) {
    if (tokenIndex < 0) {
        throw new IllegalArgumentException(String.format("TokenSequence index (%s) must be positive", tokenIndex));
    }
    tokenSequence.moveEnd();
    int lastTokenIndex = tokenSequence.index();
    if (tokenIndex > lastTokenIndex) {
        throw new IllegalArgumentException(String.format("token index (%s) is bigger than last index in the sequence (%s)", tokenIndex, lastTokenIndex));
    }
    tokenSequence.moveIndex(tokenIndex);
    return new ElementsParser(sourceCode, tokenSequence);
}
Example 4: embeddedEpilogLength
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
private int embeddedEpilogLength(
        TokenSequence<?> embeddingSeq,
        TokenSequence<?> embeddedSeq) {
    embeddedSeq.moveEnd();
    if (embeddedSeq.movePrevious()) {
        return (embeddingSeq.offset() + embeddingSeq.token().length()) - (embeddedSeq.offset() + embeddedSeq.token().length());
    } else {
        return -1;
    }
}
Example 5: TokenSequenceWrapper
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
public TokenSequenceWrapper(TokenSequence<T1> ts, boolean virtual) {
    this.ts = ts;
    this.virtual = virtual;
    ts.moveStart();
    ts.moveNext();
    start = ts.offset();
    ts.moveEnd();
    ts.movePrevious();
    end = ts.offset() + ts.token().length();
}
Example 6: getTokenSequenceEndOffset
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
public static int getTokenSequenceEndOffset(TokenSequence<? extends TokenId> ts) {
    int currentIndex = ts.index();
    ts.moveEnd();
    ts.movePrevious();
    int offset = ts.offset() + ts.token().length();
    ts.moveIndex(currentIndex);
    return offset;
}
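A hedged usage sketch for a helper like the one above: the standalone setup and the input string are assumptions, and in editor code the sequence would normally come from a document under a read lock. Note that after the helper returns, the caller still needs moveNext() or movePrevious() before reading a token, because moveIndex() only repositions the sequence.
// Hypothetical caller; "int a = 0;" and the surrounding setup are assumed for illustration.
TokenHierarchy<?> th = TokenHierarchy.create("int a = 0;", JavaTokenId.language());
TokenSequence<JavaTokenId> ts = th.tokenSequence(JavaTokenId.language());
int end = getTokenSequenceEndOffset(ts); // offset just behind the last token (the text length here)
System.out.println("token sequence ends at offset " + end);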
Example 7: calculateCodeBlock
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
private static <T1 extends TokenId> List<JoinedTokenSequence.CodeBlock<T1>> calculateCodeBlock(List<TokenSequence<T1>> tss,
        VirtualSource virtualSource) throws BadLocationException {
    List<JoinedTokenSequence.CodeBlock<T1>> blocks = new ArrayList<JoinedTokenSequence.CodeBlock<T1>>();
    for (int i = 0; i < tss.size(); i++) {
        TokenSequence<T1> ts = tss.get(i);
        List<TokenSequenceWrapper<T1>> tss2 = new ArrayList<TokenSequenceWrapper<T1>>();
        tss2.add(new TokenSequenceWrapper<T1>(ts, false));
        // try to find additional token sequences which comprise this language block:
        for (int j = i + 1; j < tss.size(); j++) {
            TokenSequence<T1> prev = tss.get(j - 1);
            prev.moveEnd();
            prev.movePrevious();
            TokenSequence<T1> next = tss.get(j);
            next.moveStart();
            next.moveNext();
            // check whether current token sequence is continuation of previous one:
            TokenSequence<T1> tsVirtual = LexUtilities.getVirtualTokens(virtualSource, prev.offset() + prev.token().length(), next.offset(), ts.language());
            if (tsVirtual != null) {
                tss2.add(new TokenSequenceWrapper<T1>(tsVirtual, true));
                tss2.add(new TokenSequenceWrapper<T1>(next, false));
                i++;
            } else {
                break;
            }
        }
        blocks.add(new JoinedTokenSequence.CodeBlock<T1>(tss2));
    }
    return blocks;
}
Example 8: findTokenSequenceBounds
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
private TextBounds findTokenSequenceBounds(BaseDocument doc, TokenSequence tokenSequence) throws BadLocationException {
    tokenSequence.moveStart();
    tokenSequence.moveNext();
    int absoluteStart = tokenSequence.offset();
    tokenSequence.moveEnd();
    tokenSequence.movePrevious();
    int absoluteEnd = tokenSequence.offset() + tokenSequence.token().length();
    // trim whitespaces from both ends
    while (isWSToken(tokenSequence.token())) {
        if (!tokenSequence.movePrevious()) {
            return new TextBounds(absoluteStart, absoluteEnd); // a block of empty text
        }
    }
    int whiteSpaceSuffixLen = 0;
    while (Character.isWhitespace(tokenSequence.token().text().charAt(tokenSequence.token().length() - 1 - whiteSpaceSuffixLen))) {
        whiteSpaceSuffixLen++;
    }
    int languageBlockEnd = tokenSequence.offset() + tokenSequence.token().length() - whiteSpaceSuffixLen;
    tokenSequence.moveStart();
    do {
        tokenSequence.moveNext();
    } while (isWSToken(tokenSequence.token()));
    int whiteSpacePrefixLen = 0;
    while (Character.isWhitespace(tokenSequence.token().text().charAt(whiteSpacePrefixLen))) {
        whiteSpacePrefixLen++;
    }
    int languageBlockStart = tokenSequence.offset() + whiteSpacePrefixLen;
    int firstLineOfTheLanguageBlock = Utilities.getLineOffset(doc, languageBlockStart);
    int lastLineOfTheLanguageBlock = Utilities.getLineOffset(doc, languageBlockEnd);
    return new TextBounds(absoluteStart, absoluteEnd, languageBlockStart, languageBlockEnd, firstLineOfTheLanguageBlock, lastLineOfTheLanguageBlock);
}
Example 9: testLanguagesEmbeddingMapMT
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
public void testLanguagesEmbeddingMapMT() throws Exception {
    Document doc = new PlainDocument();
    doc.putProperty("mimeType", "text/x-simple-plain");
    // All words have to be longer than 3 characters
    doc.insertString(0, "Hello 1234 0xFF00", SimpleAttributeSet.EMPTY);
    TokenHierarchy th = TokenHierarchy.get(doc);
    assertNotNull("Can't find token hierarchy for a text/x-simple-plain document", th);
    TokenSequence seq = th.tokenSequence();
    Language lang = seq.language();
    assertNotNull("Can't find language for text/x-simple-plain", lang);
    assertEquals("Wrong language", "text/x-simple-plain", lang.mimeType());
    for (int i = 0; i < seq.tokenCount(); i++) {
        seq.moveIndex(i);
        assertTrue(seq.moveNext());
        Token token = seq.token();
        if (token.id() == SimplePlainTokenId.WORD) {
            TokenSequence embeddedSeq = seq.embedded();
            assertNotNull("Can't find embedded token sequence", embeddedSeq);
            Language embeddedLang = embeddedSeq.language();
            assertNotNull("Can't find language of the embedded sequence", embeddedLang);
            assertEquals("Wrong language of the embedded sequence", "text/x-simple-char", embeddedLang.mimeType());
            embeddedSeq.moveStart();
            assertTrue("Embedded sequence has no tokens (moveFirst)", embeddedSeq.moveNext());
            assertEquals("Wrong startSkipLength", 1, embeddedSeq.offset() - seq.offset());
            embeddedSeq.moveEnd();
            assertTrue("Embedded sequence has no tokens (moveLast)", embeddedSeq.movePrevious());
            assertEquals("Wrong endSkipLength", 2,
                (seq.offset() + seq.token().length()) - (embeddedSeq.offset() + embeddedSeq.token().length()));
        }
    }
}
Example 10: getCssAreaRange
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
private OffsetRange getCssAreaRange(Document doc, int from, int to) {
    //limit the search just to one embedded css section
    TokenSequence<CssTokenId> ts = getCssTokenSequence(doc, from);
    if (ts == null) {
        return OffsetRange.NONE;
    }
    ts.moveStart();
    int limitedFrom = ts.moveNext() ? ts.offset() : from;
    ts.moveEnd();
    // moveEnd() places the sequence behind the last token, so step back with movePrevious();
    // moveNext() would always return false here and the limit would never be applied
    int limitedTo = ts.movePrevious() ? ts.offset() + ts.token().length() : to;
    return new OffsetRange(limitedFrom, limitedTo);
}
Example 11: testSaveTokensWithinLookahead
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
public void testSaveTokensWithinLookahead() throws Exception {
    Document doc = new ModificationTextDocument();
    String text = "aabc";
    doc.insertString(0, text, null);
    doc.putProperty(Language.class, TestSaveTokensInLATokenId.language());
    TokenHierarchy<?> hi = TokenHierarchy.get(doc);
    ((AbstractDocument) doc).readLock();
    try {
        TokenSequence<?> ts = hi.tokenSequence();
        ts.moveEnd(); // Force creation of all tokens
        LexerTestUtilities.initLastTokenHierarchyEventListening(doc);
    } finally {
        ((AbstractDocument) doc).readUnlock();
    }
    doc.remove(1, 1);
    ((AbstractDocument) doc).readLock();
    try {
        TokenHierarchyEvent evt = LexerTestUtilities.getLastTokenHierarchyEvent(doc);
        TokenChange<?> change = evt.tokenChange();
        assertEquals(1, change.addedTokenCount());
    } finally {
        ((AbstractDocument) doc).readUnlock();
    }
}
Example 12: testTokenCountWasCalledInUpdater
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
public void testTokenCountWasCalledInUpdater() throws Exception {
    Document doc = new ModificationTextDocument();
    String text = "+/* */";
    doc.insertString(0, text, null);
    doc.putProperty(Language.class, TestTokenId.language());
    TokenHierarchy<?> hi = TokenHierarchy.get(doc);
    TokenSequence<?> ts;
    ((AbstractDocument) doc).readLock();
    try {
        ts = hi.tokenSequence();
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts, TestTokenId.PLUS, "+", -1);
    } finally {
        ((AbstractDocument) doc).readUnlock();
    }
    doc.remove(1, 3); // Remove "/* "
    ((AbstractDocument) doc).readLock();
    try {
        ts = hi.tokenSequence();
        ts.moveEnd();
        // Extra ending '\n' of the document returned by DocumentUtilities.getText(doc) and lexed
        assertTrue(ts.movePrevious());
        LexerTestUtilities.assertTokenEquals(ts, TestTokenId.WHITESPACE, "\n", -1);
        assertTrue(ts.movePrevious());
        LexerTestUtilities.assertTokenEquals(ts, TestTokenId.DIV, "/", -1);
    } finally {
        ((AbstractDocument) doc).readUnlock();
    }
}
Example 13: printBlock
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
private void printBlock(JCTree tree, List<? extends JCTree> stats, BracePlacement bracePlacement, boolean spaceBeforeLeftBrace, boolean members, boolean printComments) {
    if (printComments) printPrecedingComments(tree, true);
    int old = indent();
    int bcol = old;
    switch (bracePlacement) {
        case NEW_LINE:
            newline();
            toColExactly(old);
            break;
        case NEW_LINE_HALF_INDENTED:
            newline();
            bcol += (indentSize >> 1);
            toColExactly(bcol);
            break;
        case NEW_LINE_INDENTED:
            newline();
            bcol = out.leftMargin;
            toColExactly(bcol);
            break;
    }
    String trailing = null;
    if (conditionStartHack != (-1)) {
        TokenSequence<JavaTokenId> ts = TokenHierarchy.create(toString().substring(conditionStartHack), JavaTokenId.language()).tokenSequence(JavaTokenId.language());
        boolean found;
        ts.moveEnd();
        while ((found = ts.movePrevious()) && PositionEstimator.nonRelevant.contains(ts.token().id()))
            ;
        if (found) {
            String content = toString();
            trailing = content.substring(conditionStartHack + ts.offset() + ts.token().text().length());
            out.used -= trailing.length();
            out.col -= trailing.length();
        }
    }
    if (spaceBeforeLeftBrace)
        needSpace();
    print('{');
    if (trailing != null) print(trailing);
    boolean emptyBlock = true;
    for (List<? extends JCTree> l = stats; l.nonEmpty(); l = l.tail) {
        if (!isSynthetic(l.head)) {
            emptyBlock = false;
            break;
        }
    }
    if (emptyBlock) {
        printEmptyBlockComments(tree, members);
    } else {
        if (innerCommentsHandled.add(tree)) {
            java.util.List<Comment> comments = commentHandler.getComments(tree).getComments(CommentSet.RelativePosition.INNER);
            for (Comment c : comments) {
                printComment(c, false, members);
            }
        }
        if (members)
            blankLines(cs.getBlankLinesAfterAnonymousClassHeader());
        else
            newline();
        printStats(stats, members);
    }
    toColExactly(bcol);
    undent(old);
    print('}');
    if (printComments) printTrailingComments(tree, true);
}
Example 14: Pretty
import org.netbeans.api.lexer.TokenSequence; // import of the class this method depends on
private Pretty(String text, TokenSequence<JavaTokenId> tokens, TreePath path, SourcePositions sp, CodeStyle cs, int startOffset, int endOffset, int rightMargin) {
    this.fText = text;
    this.sp = sp;
    this.cs = cs;
    this.rightMargin = rightMargin > 0 ? rightMargin : Integer.MAX_VALUE;
    this.tabSize = cs.getTabSize();
    this.indentSize = cs.getIndentSize();
    this.continuationIndentSize = cs.getContinuationIndentSize();
    this.expandTabToSpaces = cs.expandTabToSpaces();
    this.maxPreservedBlankLines = insideBlock(path) ? cs.getMaximumBlankLinesInCode() : cs.getMaximumBlankLinesInDeclarations();
    this.lastBlankLines = -1;
    this.lastBlankLinesTokenIndex = -1;
    this.lastBlankLinesDiff = null;
    this.lastNewLineOffset = -1;
    this.afterAnnotation = false;
    this.wrapAnnotation = false;
    this.fieldGroup = false;
    Tree tree = path.getLeaf();
    this.indent = this.lastIndent = tokens != null ? getIndentLevel(tokens, path) : 0;
    this.col = this.indent;
    this.tokens = tokens;
    if (tree.getKind() == Tree.Kind.COMPILATION_UNIT) {
        tokens.moveEnd();
        tokens.movePrevious();
    } else {
        tokens.move((int) sp.getEndPosition(path.getCompilationUnit(), tree));
        if (!tokens.moveNext())
            tokens.movePrevious();
    }
    this.endPos = tokens.offset();
    if (tree.getKind() == Tree.Kind.COMPILATION_UNIT) {
        tokens.moveStart();
        bof = true;
    } else {
        tokens.move((int) sp.getStartPosition(path.getCompilationUnit(), tree));
    }
    tokens.moveNext();
    this.root = path.getCompilationUnit();
    this.startOffset = startOffset;
    this.endOffset = endOffset;
    this.tpLevel = 0;
}