本文整理汇总了Java中org.netbeans.api.lexer.TokenHierarchy.tokenSequence方法的典型用法代码示例。如果您正苦于以下问题:Java TokenHierarchy.tokenSequence方法的具体用法?Java TokenHierarchy.tokenSequence怎么用?Java TokenHierarchy.tokenSequence使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.netbeans.api.lexer.TokenHierarchy
的用法示例。
在下文中一共展示了TokenHierarchy.tokenSequence方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getDeepestTokenSequence
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Drills down through nested embedded token sequences to find the deepest
 * sequence covering the position just before {@code offset}.
 *
 * @param tokenHierarchy hierarchy whose root sequence is the starting point
 * @param offset caret offset; the token at {@code offset - 1} is inspected
 * @return the deepest embedded sequence at that position, the last reachable
 *         sequence if it has no token there, or {@code null} if the hierarchy
 *         has no root sequence
 */
private static TokenSequence getDeepestTokenSequence (
TokenHierarchy tokenHierarchy,
int offset
) {
    TokenSequence current = tokenHierarchy.tokenSequence ();
    for (;;) {
        if (current == null) {
            return null;
        }
        // Position on the token immediately before the caret.
        current.move(offset - 1);
        if (!current.moveNext()) {
            // No token at this position; stop at the current depth.
            return current;
        }
        TokenSequence deeper = current.embedded();
        if (deeper == null) {
            // No further embedding: this is the deepest sequence.
            return current;
        }
        current = deeper;
    }
}
示例2: testGCedE
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Checks that a token carrying an embedded language can be garbage collected
 * once all strong references to the hierarchy, sequence and token are dropped.
 */
public void testGCedE() {
    TokenHierarchy hierarchy = TokenHierarchy.create("abc", TestPlainTokenId.language());
    TokenSequence seq = hierarchy.tokenSequence();
    seq.moveStart();
    assertEquals(true, seq.moveNext());
    TokenSequence inner = seq.embedded();
    assertNotNull("There should be an embedded language", inner);
    WeakReference<Language> refLang = new WeakReference<Language>(inner.language());
    // Drop every strong reference so only the weak refs remain.
    inner = null;
    WeakReference<Token> refToken = new WeakReference<Token>(seq.token());
    seq = null;
    hierarchy = null;
    // This no longer works after the language is statically held in the xxTokenId by the new convention
    //assertGC("The embedded language has not been GCed", refLang);
    assertGC("The token with embedded language has not been GCed", refToken);
}
示例3: testCacheRefreshedE
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Verifies that firing a token-language change invalidates the cached
 * embedded token sequence, so a fresh instance is returned afterwards.
 */
public void testCacheRefreshedE() {
    TokenHierarchy hierarchy = TokenHierarchy.create("abc", TestPlainTokenId.language());
    TokenSequence seq = hierarchy.tokenSequence();
    seq.moveStart();
    assertEquals(true, seq.moveNext());
    TokenSequence before = seq.embedded();
    assertNotNull("There should be an embedded language", before);
    // Invalidate the embedded-language cache.
    SimpleLanguageProvider.fireTokenLanguageChange();
    TokenSequence after = seq.embedded();
    assertNotNull("There should be an embedded language", after);
    assertNotSame("The token language cache has not been refreshed", before, after);
}
示例4: testLexerInput_ReadText_StartEnd
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/** Smoke test: lexing a plain string yields at least one token. */
@Test
public void testLexerInput_ReadText_StartEnd() {
    String input = "abcdefg";
    TokenSequence seq = TokenHierarchy.create(input, TokenIdImpl.language()).tokenSequence();
    assertTrue(seq.moveNext());
}
示例5: getEditorCookie
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Resolves the FXML token at {@code offset} to a file name, locates that
 * file relative to the document's file, and returns its {@link EditCookie}.
 *
 * @param doc    document backing the editor
 * @param offset caret offset used to pick the token
 * @return the edit cookie of the referenced file, or {@code null} when the
 *         FXML token sequence, token, or target file cannot be resolved
 */
private EditCookie getEditorCookie(Document doc, int offset) {
    TokenHierarchy<?> hierarchy = TokenHierarchy.get(doc);
    TokenSequence seq = hierarchy.tokenSequence(Language.find(JavaFXEditorUtils.FXML_MIME_TYPE));
    if (seq == null) {
        return null;
    }
    seq.move(offset);
    if (!seq.moveNext()) {
        return null;
    }
    // The token text is interpreted as a file name relative to this document.
    String fileName = seq.token().text().toString();
    FileObject target = findFile(getFileObject(doc), fileName);
    if (target == null) {
        return null;
    }
    try {
        return DataObject.find(target).getLookup().lookup(EditCookie.class);
    } catch (DataObjectNotFoundException ex) {
        Exceptions.printStackTrace(ex);
        return null;
    }
}
示例6: testWhitespaceAtTheEnd
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/** Trailing whitespace after the final ';;' must become its own token. */
@Test
public void testWhitespaceAtTheEnd() {
    String input = " 1 + 1 => 1 + 1;; ";
    TokenHierarchy<?> hierarchy = TokenHierarchy.create(input, language());
    TokenSequence<?> seq = hierarchy.tokenSequence();
    assertNextTokenEquals(seq, JAVA_SNIPPET, " 1 + 1 ");
    assertNextTokenEquals(seq, LEADS_TO, "=>");
    assertNextTokenEquals(seq, JAVA_SNIPPET, " 1 + 1");
    assertNextTokenEquals(seq, DOUBLE_SEMICOLON, ";;");
    assertNextTokenEquals(seq, WHITESPACE, " ");
    // Nothing may remain after the whitespace token.
    assertFalse(seq.moveNext());
}
示例7: testSimple
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/** Basic lexing: metadata headers alternate with java code payloads. */
@Test
public void testSimple() {
    String input = "%%TestCase name\njava code\n%%=>\ntarget\n";
    TokenHierarchy<?> hierarchy = TokenHierarchy.create(input, language());
    TokenSequence<?> seq = hierarchy.tokenSequence();
    assertNextTokenEquals(seq, METADATA, "%%TestCase name\n");
    assertNextTokenEquals(seq, JAVA_CODE, "java code\n");
    assertNextTokenEquals(seq, METADATA, "%%=>\n");
    assertNextTokenEquals(seq, JAVA_CODE, "target\n");
    // The four tokens above must exhaust the input.
    assertFalse(seq.moveNext());
}
示例8: testFindIndentTaskFactory
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Reformats a two-language document (line tokens embedding plain tokens) and
 * checks that both registered reformat factories ran, each prepending its
 * mime type to the document text.
 */
public void testFindIndentTaskFactory() throws BadLocationException {
    Document document = new PlainDocument();
    document.insertString(0, "first line\nsecond-line", null);
    document.putProperty("mimeType", TestLineTokenId.MIME_TYPE);
    TokenHierarchy hierarchy = TokenHierarchy.get(document);
    assertNotNull(hierarchy);
    TokenSequence<?> lineSeq = hierarchy.tokenSequence();
    assertTrue(lineSeq.moveNext());
    TokenSequence<?> plainSeq = lineSeq.embedded();
    assertNotNull(plainSeq);
    // There should be two language paths - root one and one with plain embedding
    assertEquals(2, hierarchy.languagePaths().size());
    Reformat reformat = Reformat.get(document);
    reformat.lock();
    try {
        //doc.atomicLock();
        try {
            reformat.reformat(0, document.getLength());
        } finally {
            //doc.atomicUnlock();
        }
    } finally {
        reformat.unlock();
    }
    String result = document.getText(0, document.getLength());
    // lineReformatTaskFactory should be called first and its mimetype at the begining of document
    // and plainReformatTaskFactory should be called second and also add its mimetype at the begining of document
    assertEquals(TestPlainTokenId.MIME_TYPE + "/" + TestLineTokenId.MIME_TYPE + "/first line\nsecond-line", result);
}
示例9: getEmbeddings
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Builds the HTML embedding list for an XHTML/EL snapshot: HTML tokens are
 * mapped 1:1 into embeddings, while each run of non-HTML (templating) tokens
 * is collapsed into a single generated-code marker.
 *
 * @param snapshot parsing snapshot providing the token hierarchy
 * @return a single combined embedding, or an empty list when no tokens matched
 */
@Override
public List<Embedding> getEmbeddings(Snapshot snapshot) {
    TokenHierarchy<?> hierarchy = snapshot.getTokenHierarchy();
    TokenSequence<XhtmlElTokenId> seq = hierarchy.tokenSequence(XhtmlElTokenId.language());
    seq.moveStart();
    List<Embedding> parts = new ArrayList<>();
    boolean markerPending = false;
    while (seq.moveNext()) {
        Token tok = seq.token();
        if (tok.id() == XhtmlElTokenId.HTML) {
            //lets suppose the text is always html :-(
            parts.add(snapshot.create(seq.offset(), tok.length(), "text/html")); //NOI18N
            markerPending = false;
        } else if (!markerPending) {
            //replace templating tokens by generated code marker
            parts.add(snapshot.create(Constants.LANGUAGE_SNIPPET_SEPARATOR, "text/html"));
            markerPending = true;
        }
    }
    return parts.isEmpty()
            ? Collections.emptyList()
            : Collections.singletonList(Embedding.create(parts));
}
示例10: createHighlightImpl
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Finds the Java token to highlight for the given tree path.
 * Declaration-like and member-select trees delegate to identifier-span
 * lookup; for other trees the token starting exactly at the tree's start
 * position is returned when it is an identifier, {@code this} or
 * {@code super}.
 *
 * @return the token to highlight, or {@code null} when positions are
 *         unavailable or no suitable token starts at the tree's start offset
 */
private static Token<JavaTokenId> createHighlightImpl(CompilationInfo info, Document doc, TreePath tree) {
    Tree leaf = tree.getLeaf();
    SourcePositions positions = info.getTrees().getSourcePositions();
    CompilationUnitTree cu = info.getCompilationUnit();
    //XXX: do not use instanceof:
    if (leaf instanceof MethodTree || leaf instanceof VariableTree || leaf instanceof ClassTree
            || leaf instanceof MemberSelectTree || leaf instanceof AnnotatedTypeTree || leaf instanceof MemberReferenceTree) {
        return findIdentifierSpan(info, doc, tree);
    }
    int start = (int) positions.getStartPosition(cu, leaf);
    int end = (int) positions.getEndPosition(cu, leaf);
    if (start == Diagnostic.NOPOS || end == Diagnostic.NOPOS) {
        return null;
    }
    TokenSequence<JavaTokenId> seq = info.getTokenHierarchy().tokenSequence(JavaTokenId.language());
    if (seq.move(start) == Integer.MAX_VALUE) {
        // Offset lies outside the token sequence.
        return null;
    }
    if (!seq.moveNext()) {
        return null;
    }
    Token<JavaTokenId> token = seq.token();
    if (seq.offset() != start || token == null) {
        return null;
    }
    JavaTokenId id = token.id();
    if (id == JavaTokenId.IDENTIFIER) {
        return token;
    }
    if (id == JavaTokenId.THIS || id == JavaTokenId.SUPER) {
        // offsetToken() yields a token instance bound to its offset.
        return seq.offsetToken();
    }
    return null;
}
示例11: testLanguagesEmbeddingMapMT
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * For each WORD token of a text/x-simple-plain document, verifies the
 * embedded text/x-simple-char sequence exists and honors the configured
 * start-skip (1) and end-skip (2) lengths.
 */
public void testLanguagesEmbeddingMapMT() throws Exception {
    Document document = new PlainDocument();
    document.putProperty("mimeType", "text/x-simple-plain");
    // All words have to be longer than 3 characters
    document.insertString(0, "Hello 1234 0xFF00", SimpleAttributeSet.EMPTY);
    TokenHierarchy hierarchy = TokenHierarchy.get(document);
    assertNotNull("Can't find token hierarchy for a text/x-simple-plain document", hierarchy);
    TokenSequence outer = hierarchy.tokenSequence();
    Language outerLang = outer.language();
    assertNotNull("Can't find language for text/x-simple-plain", outerLang);
    assertEquals("Wrong language", "text/x-simple-plain", outerLang.mimeType());
    for (int i = 0; i < outer.tokenCount(); i++) {
        outer.moveIndex(i);
        assertTrue(outer.moveNext());
        Token tok = outer.token();
        if (tok.id() != SimplePlainTokenId.WORD) {
            continue;
        }
        TokenSequence inner = outer.embedded();
        assertNotNull("Can't find embedded token sequence", inner);
        Language innerLang = inner.language();
        assertNotNull("Can't find language of the embedded sequence", innerLang);
        assertEquals("Wrong language of the embedded sequence", "text/x-simple-char", innerLang.mimeType());
        // First embedded token starts 1 char past the outer token start.
        inner.moveStart();
        assertTrue("Embedded sequence has no tokens (moveFirst)", inner.moveNext());
        assertEquals("Wrong startSkipLength", 1, inner.offset() - outer.offset());
        // Last embedded token ends 2 chars before the outer token end.
        inner.moveEnd();
        assertTrue("Embedded sequence has no tokens (moveLast)", inner.movePrevious());
        assertEquals("Wrong endSkipLength", 2,
                (outer.offset() + outer.token().length()) - (inner.offset() + inner.token().length()));
    }
}
示例12: testMapData
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/** An EL map literal inside #{...} must lex as a single EL token. */
public void testMapData() throws BadLocationException {
    BaseDocument document = createDocument();
    document.insertString(0, "<div>#{{'one' : 1, 'two' : 2}}</div>", null);
    TokenHierarchy hierarchy = TokenHierarchy.get(document);
    TokenSequence<XhtmlElTokenId> seq = hierarchy.tokenSequence(XhtmlElTokenId.language());
    assertToken("<div>", XhtmlElTokenId.HTML, seq);
    assertToken("#{{'one' : 1, 'two' : 2}}", XhtmlElTokenId.EL, seq);
    assertToken("</div>\n", XhtmlElTokenId.HTML, seq);
}
示例13: HSImpl
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Creates a highlight-sequence implementation over the given document's
 * token hierarchy, clamped to [startOffset, endOffset].
 *
 * @param version     hierarchy version this sequence was created against
 * @param scanner     token hierarchy of the document being highlighted
 * @param startOffset requested start; clamped to >= 0 (tests may pass Integer.MIN_VALUE)
 * @param endOffset   requested end; clamped to the current document text length
 */
public HSImpl(long version, TokenHierarchy<? extends Document> scanner, int startOffset, int endOffset) {
    this.version = version;
    this.scanner = scanner;
    startOffset = Math.max(startOffset, 0); // Tests may request Integer.MIN_VALUE for startOffset
    this.startOffset = startOffset;
    this.sequences = new ArrayList<TSInfo<?>>(4);
    // Highlight cursor starts collapsed at the clamped start offset.
    this.hiStartOffset = startOffset;
    this.hiEndOffset = startOffset;
    Document doc = scanner.inputSource();
    this.docText = DocumentUtilities.getText(doc);
    // Clamp the end to the document length before storing it.
    endOffset = Math.min(endOffset, docText.length());
    this.endOffset = endOffset;
    // Seed the newline cache starting from the first offset of interest.
    newlineOffset = -1;
    updateNewlineOffset(startOffset);
    @SuppressWarnings("unchecked")
    TokenSequence<TokenId> seq = (TokenSequence<TokenId>) scanner.tokenSequence();
    if (seq != null) {
        // Root sequence exists: position it and start in token-scanning state.
        seq.move(startOffset);
        TSInfo<TokenId> tsInfo = new TSInfo<TokenId>(seq);
        sequences.add(tsInfo);
        state = S_NEXT_TOKEN;
    } else {
        // No lexable content: the sequence is immediately exhausted.
        state = S_DONE;
    }
    if (LOG.isLoggable(Level.FINE)) {
        logHelper = new LogHelper();
        logHelper.startTime = System.currentTimeMillis();
        LOG.fine("SyntaxHighlighting.HSImpl <" + startOffset + "," + endOffset + ">\n"); // NOI18N
        if (LOG.isLoggable(Level.FINEST)) {
            LOG.log(Level.FINEST, "Highlighting caller", new Exception()); // NOI18N
        }
    }
}
示例14: prepareTestDocument
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Builds a Java-lexed test document from {@code text} and returns how many
 * tokens its root token sequence produces.
 *
 * @param text document content to lex
 * @return number of tokens in the root token sequence
 */
private int prepareTestDocument(String text) throws Exception {
    ModificationTextDocument document = new ModificationTextDocument();
    document.insertString(0, text, null);
    document.putProperty(Language.class, JavaTokenId.language());
    TokenSequence<?> seq = TokenHierarchy.get(document).tokenSequence();
    int count = 0;
    while (seq.moveNext()) {
        count++;
    }
    return count;
}
示例15: getEndTagCompletionItem
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Builds a completion item that closes the nearest unclosed start tag at the
 * caret position, or {@code null} when no end-tag completion applies.
 *
 * NOTE(review): the helper calls below (findIncompleteTagName,
 * isCaretInsideTag, isUnclosedStartTagFoundBefore, isClosingEndTagFoundAfter)
 * appear to reposition the shared tokenSequence as a side effect — the
 * token() read after isUnclosedStartTagFoundBefore depends on that ordering;
 * confirm before reordering anything here.
 *
 * @param component editor component providing the caret position
 * @param document  document being completed; read-locked for the duration
 * @return an end-tag or tag-last-char completion item, or {@code null}
 */
public static CompletionResultItem getEndTagCompletionItem(JTextComponent component,
BaseDocument document) {
    int caretPos = component.getCaret().getDot();
    try {
        // Hold the document read lock while walking the token hierarchy.
        ((AbstractDocument) document).readLock();
        TokenHierarchy tokenHierarchy = TokenHierarchy.get(document);
        TokenSequence tokenSequence = tokenHierarchy.tokenSequence();
        String incompleteTagName = findIncompleteTagName(caretPos, tokenSequence);
        // No completion while the caret is inside a tag.
        if (isCaretInsideTag(caretPos, tokenSequence)) return null;
        boolean beforeUnclosedStartTagFound = isUnclosedStartTagFoundBefore(
            caretPos, tokenSequence);
        if (! beforeUnclosedStartTagFound) return null;
        // token() reflects the position left by the search above.
        Token token = tokenSequence.token();
        String startTagName = getTokenTagName(token);
        if (startTagName == null) return null;
        // Skip if the start tag is already closed somewhere after the caret.
        boolean closingTagFound = isClosingEndTagFoundAfter(caretPos,
            tokenSequence, startTagName);
        if (closingTagFound) return null;
        CompletionResultItem resultItem;
        if ((incompleteTagName != null) &&
            (! startTagName.startsWith(incompleteTagName))) {
            // Partially typed name does not match: complete the tag's last chars.
            resultItem = new TagLastCharResultItem(incompleteTagName, tokenSequence);
        } else {
            resultItem = new EndTagResultItem(startTagName, tokenSequence);
        }
        return resultItem;
    } catch(Exception e) {
        // Best effort: completion failures are logged, never propagated.
        _logger.log(Level.WARNING,
            e.getMessage() == null ? e.getClass().getName() : e.getMessage(), e);
        return null;
    } finally {
        ((AbstractDocument) document).readUnlock();
    }
}