This article collects typical usage examples of the Java method org.netbeans.api.lexer.TokenHierarchy.tokenSequenceList. If you are unsure what TokenHierarchy.tokenSequenceList does, how to call it, or what real-world uses of it look like, the hand-picked code samples below should help. You can also browse further usage examples of its declaring class, org.netbeans.api.lexer.TokenHierarchy.
Below are 5 code examples of the TokenHierarchy.tokenSequenceList method, sorted by popularity by default.
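All five examples share the same calling pattern: obtain the TokenHierarchy of a Swing document, build the LanguagePath of the embedding you are interested in, and call tokenSequenceList while holding the document's read lock. The following minimal sketch shows that pattern in isolation. It assumes a lexer Language has already been associated with the document (e.g. via doc.putProperty(Language.class, ...), as the examples below do); the method name dumpTokens and its parameters are illustrative placeholders, not part of any example on this page.

import java.util.List;
import javax.swing.text.AbstractDocument;
import javax.swing.text.Document;
import org.netbeans.api.lexer.LanguagePath;
import org.netbeans.api.lexer.TokenHierarchy;
import org.netbeans.api.lexer.TokenSequence;

/** Illustrative sketch: prints every token of the given embedded language path within an offset range. */
static void dumpTokens(Document doc, LanguagePath path, int startOffset, int endOffset) {
    ((AbstractDocument) doc).readLock(); // the token hierarchy may only be read under the document read lock
    try {
        TokenHierarchy<?> hierarchy = TokenHierarchy.get(doc);
        // All token sequences of the given language path that overlap <startOffset, endOffset>
        List<TokenSequence<?>> sequences = hierarchy.tokenSequenceList(path, startOffset, endOffset);
        for (TokenSequence<?> ts : sequences) {
            while (ts.moveNext()) {
                System.out.println(ts.token().id() + " at " + ts.offset() + ": \"" + ts.token().text() + "\"");
            }
        }
    } finally {
        ((AbstractDocument) doc).readUnlock();
    }
}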
Example 1: testBoundaries
import org.netbeans.api.lexer.TokenHierarchy; // import the package/class the method depends on
public void testBoundaries() throws Exception {
    ModificationTextDocument doc = new ModificationTextDocument();
    doc.insertString(0, getText1(), null);
    doc.putProperty(Language.class, TestTokenId.language());
    LanguagePath lp = LanguagePath.get(TestTokenId.language()).
            embedded(TestJavadocTokenId.language()).
            embedded(TestHTMLTagTokenId.language());
    TokenHierarchy<?> tokenHierarchy = TokenHierarchy.get(doc);
    ((AbstractDocument)doc).readLock();
    try {
        List<TokenSequence<?>> tsList = tokenHierarchy.tokenSequenceList(lp, 35, 48);
        assertEquals(3, tsList.size());
        TokenSequence<?> ts = tsList.get(0);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts, TestHTMLTagTokenId.TEXT, "tq", -1);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts, TestHTMLTagTokenId.GT, ">", -1);
        ts = tsList.get(1);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts, TestHTMLTagTokenId.LT, "<", -1);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts, TestHTMLTagTokenId.TEXT, "/tq", -1);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts, TestHTMLTagTokenId.GT, ">", -1);
        ts = tsList.get(2);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts, TestHTMLTagTokenId.LT, "<", -1);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts, TestHTMLTagTokenId.TEXT, "code", -1);
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
}
Example 2: reindent
import org.netbeans.api.lexer.TokenHierarchy; // import the package/class the method depends on
public void reindent () throws BadLocationException {
    //S ystem.out.println("SCHLIEMAN reformat !\n " + context.document() + "\n " + context.isIndent() + "\n " + context.startOffset () + "\n " + context.endOffset());
    StyledDocument document = (StyledDocument) context.document ();
    try {
        MimePath mimePath = MimePath.parse (context.mimePath ());
        String mimeType = mimePath.getMimeType (mimePath.size () - 1);
        Language l = LanguagesManager.getDefault ().getLanguage (mimeType);
        Object indentValue = getIndentProperties (l);
        if (indentValue == null) return;
        if (indentValue instanceof Feature) {
            Feature m = (Feature) indentValue;
            m.getValue (Context.create (document, context.startOffset ()));
            return;
        }
        Object[] params = (Object[]) indentValue;
        TokenHierarchy tokenHierarchy = TokenHierarchy.get (document);
        LanguagePath languagePath = LanguagePath.get (org.netbeans.api.lexer.Language.find (mimePath.getMimeType (0)));
        for (int i = 1; i < mimePath.size(); i++)
            languagePath = languagePath.embedded (org.netbeans.api.lexer.Language.find (mimePath.getMimeType (i)));
        List<TokenSequence> tokenSequences = tokenHierarchy.tokenSequenceList (languagePath, context.startOffset (), context.endOffset ());
        Set<Integer> whitespaces = l.getAnalyser().getSkipTokenTypes ();
        Iterator<Region> it = context.indentRegions ().iterator ();
        while (it.hasNext ()) {
            Region region = it.next ();
            Map<Position,Integer> indentMap = new HashMap<Position,Integer> ();
            int ln = NbDocument.findLineNumber (document, region.getStartOffset ());
            int endLineNumber = NbDocument.findLineNumber (document, region.getEndOffset ());
            if (!Utils.getTokenSequence (document, context.lineStartOffset (region.getStartOffset ())).language ().mimeType ().equals (mimeType))
                ln++;
            int indent = 0;
            if (ln > 0) {
                int offset = NbDocument.findLineOffset (document, ln - 1);
                indent = context.lineIndent (offset);
                if (!Utils.getTokenSequence (document, offset).language ().mimeType ().equals (mimeType))
                    indent += IndentUtils.indentLevelSize (document);
            }
            while (ln <= endLineNumber) {
                if (ln == endLineNumber &&
                    isEmpty (ln, document, whitespaces) &&
                    !Utils.getTokenSequence (document, region.getEndOffset ()).language ().mimeType ().equals (mimeType)
                ) break;
                indent = indent (context, document, params, ln++, indent, indentMap, whitespaces);
            }
            Iterator<Position> it2 = indentMap.keySet ().iterator ();
            while (it2.hasNext ()) {
                Position position = it2.next ();
                context.modifyIndent (position.getOffset (), indentMap.get (position));
            }
        }
    } catch (LanguageDefinitionNotFoundException ldnfe) {
        // no language found - this might happen when some of the embedded languages are not schliemann based,
        // so just ignore and do nothing - no indent
    } catch (Exception ex) {
        ErrorManager.getDefault ().notify (ex);
    }
}
Example 3: getMIMETypesOnLine
import org.netbeans.api.lexer.TokenHierarchy; // import the package/class the method depends on
/**
* Get a list of MIME types of languages found on a line.
* @param line The line to search for the MIME types.
* @return A set of MIME types.
* @since 2.50
*/
public Set<String> getMIMETypesOnLine(Line line) {
    EditorCookie editorCookie = line.getLookup().lookup(EditorCookie.class);
    if (editorCookie == null) {
        DataObject dobj = line.getLookup().lookup(DataObject.class);
        if (dobj != null) {
            editorCookie = dobj.getLookup().lookup(EditorCookie.class);
        }
        if (editorCookie == null) {
            return Collections.emptySet();
        }
    }
    StyledDocument document = editorCookie.getDocument();
    if (document == null) {
        return Collections.emptySet();
    }
    Set<String> mimeTypes = null;
    ((AbstractDocument) document).readLock();
    try {
        TokenHierarchy<Document> th = TokenHierarchy.get((Document) document);
        int ln = line.getLineNumber();
        int offset = NbDocument.findLineOffset(document, ln);
        int maxOffset = document.getLength() - 1;
        int maxLine = NbDocument.findLineNumber(document, maxOffset);
        int offset2;
        if (ln + 1 > maxLine) {
            offset2 = maxOffset;
        } else {
            offset2 = NbDocument.findLineOffset(document, ln + 1) - 1;
        }
        // The line has offsets <offset, offset2>
        Set<LanguagePath> languagePaths = th.languagePaths();
        for (LanguagePath lp : languagePaths) {
            List<TokenSequence<?>> tsl = th.tokenSequenceList(lp, offset, offset2);
            for (TokenSequence ts : tsl) {
                if (ts.moveNext()) {
                    //int to = ts.offset();
                    //if (!(offset <= to && to < offset2)) {
                    //    continue;
                    //}
                    String mimeType = ts.language().mimeType();
                    if (mimeType != null) {
                        if (mimeTypes == null) {
                            mimeTypes = Collections.singleton(mimeType);
                        } else {
                            if (mimeTypes.size() == 1) {
                                mimeTypes = new HashSet<String>(mimeTypes);
                            }
                            mimeTypes.add(mimeType);
                        }
                    }
                }
            }
        }
    } finally {
        ((AbstractDocument) document).readUnlock();
    }
    return (mimeTypes != null) ? mimeTypes : Collections.<String>emptySet();
}
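As a hedged usage sketch (not part of the snippet above): a Line for getMIMETypesOnLine can typically be obtained through the LineCookie of a DataObject. The dataObject parameter, the zero-based lineIndex, and the helper name mimeTypesAt below are assumptions for illustration only.

import java.util.Collections;
import java.util.Set;
import org.openide.cookies.LineCookie;
import org.openide.loaders.DataObject;
import org.openide.text.Line;

/** Illustrative helper: resolves line 'lineIndex' (zero-based) of a DataObject and queries its MIME types. */
Set<String> mimeTypesAt(DataObject dataObject, int lineIndex) {
    LineCookie lineCookie = dataObject.getLookup().lookup(LineCookie.class);
    if (lineCookie == null) {
        return Collections.emptySet();
    }
    Line line = lineCookie.getLineSet().getCurrent(lineIndex); // may throw IndexOutOfBoundsException for an invalid index
    return getMIMETypesOnLine(line); // the method shown in Example 3 above
}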
Example 4: testEmbeddingDynamicCreation
import org.netbeans.api.lexer.TokenHierarchy; // import the package/class the method depends on
public void testEmbeddingDynamicCreation() throws Exception {
    //             000000000011111111112222222222
    //             012345678901234567890123456789
    String text = "a%";
    ModificationTextDocument doc = new ModificationTextDocument();
    doc.insertString(0, text, null);
    doc.putProperty(Language.class, TestJoinTopTokenId.language());
    TokenHierarchy<?> hi = TokenHierarchy.get(doc);
    LanguagePath embLP = LanguagePath.get(TestJoinTopTokenId.language()).
            embedded(TestJoinTextTokenId.inPercentsLanguage);
    List<TokenSequence<?>> tsList;
    ((AbstractDocument)doc).readLock();
    try {
        tsList = hi.tokenSequenceList(embLP, 0, Integer.MAX_VALUE);
        assertEquals(1, tsList.size()); // Contains single token for extra '\n' in the doc
        LexerTestUtilities.incCheck(doc, true); // Ensure the whole embedded hierarchy gets created
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
    // Logger.getLogger("org.netbeans.lib.lexer.inc.TokenHierarchyUpdate").setLevel(Level.FINEST); // Extra logging
    // Logger.getLogger("org.netbeans.lib.lexer.inc.TokenListUpdater").setLevel(Level.FINE); // Extra logging
    // Logger.getLogger("org.netbeans.lib.lexer.inc.TokenListListUpdate").setLevel(Level.FINE); // Extra logging
    doc.insertString(2, "%", null);
    ((AbstractDocument)doc).readLock();
    try {
        tsList = hi.tokenSequenceList(embLP, 0, Integer.MAX_VALUE);
        assertEquals(1, tsList.size());
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
    doc.remove(2, 1);
    ((AbstractDocument)doc).readLock();
    try {
        tsList = hi.tokenSequenceList(embLP, 0, Integer.MAX_VALUE);
        assertEquals(1, tsList.size()); // Contains single token for extra '\n' in the doc
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
}
Example 5: testEmbeddingDynamicUpdate
import org.netbeans.api.lexer.TokenHierarchy; // import the package/class the method depends on
public void testEmbeddingDynamicUpdate() throws Exception {
    //             000000000011111111112222222222
    //             012345678901234567890123456789
    String text = "a%";
    ModificationTextDocument doc = new ModificationTextDocument();
    doc.insertString(0, text, null);
    doc.putProperty(Language.class, TestJoinTopTokenId.language());
    LexerTestUtilities.incCheck(doc, true); // Ensure the whole embedded hierarchy gets created
    // Logger.getLogger("org.netbeans.lib.lexer.inc.TokenHierarchyUpdate").setLevel(Level.FINEST); // Extra logging
    // Logger.getLogger("org.netbeans.lib.lexer.inc.TokenListUpdater").setLevel(Level.FINE); // Extra logging
    // Logger.getLogger("org.netbeans.lib.lexer.inc.TokenListListUpdate").setLevel(Level.FINE); // Extra logging
    doc.insertString(2, "%", null);
    TokenHierarchy<?> hi = TokenHierarchy.get(doc);
    LanguagePath embLP;
    List<TokenSequence<?>> tsList;
    ((AbstractDocument)doc).readLock();
    try {
        embLP = LanguagePath.get(TestJoinTopTokenId.language()).
                embedded(TestJoinTextTokenId.inPercentsLanguage);
        tsList = hi.tokenSequenceList(embLP, 0, Integer.MAX_VALUE);
        assertEquals(1, tsList.size());
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
    doc.remove(2, 1);
    ((AbstractDocument)doc).readLock();
    try {
        tsList = hi.tokenSequenceList(embLP, 0, Integer.MAX_VALUE);
        assertEquals(1, tsList.size()); // contains single token for extra '\n' at the end of doc
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
    doc.insertString(2, "%", null); // BTW does not have to be '%'
    ((AbstractDocument)doc).readLock();
    try {
        tsList = hi.tokenSequenceList(embLP, 0, Integer.MAX_VALUE);
        assertEquals(1, tsList.size());
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
}