本文整理汇总了Java中org.netbeans.api.lexer.TokenHierarchy.get方法的典型用法代码示例。如果您正苦于以下问题:Java TokenHierarchy.get方法的具体用法?Java TokenHierarchy.get怎么用?Java TokenHierarchy.get使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 org.netbeans.api.lexer.TokenHierarchy 的用法示例。
在下文中一共展示了TokenHierarchy.get方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testShortDoc
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Verifies that requesting an embedded sequence on a very short document
 * (which creates the join token list) does not fail.
 */
public void testShortDoc() throws Exception {
String text = "a<b>c";
ModificationTextDocument doc = new ModificationTextDocument();
doc.insertString(0, text, null);
doc.putProperty(Language.class, TestJoinTopTokenId.language());
TokenHierarchy<?> hi = TokenHierarchy.get(doc);
AbstractDocument adoc = (AbstractDocument) doc;
adoc.readLock();
try {
TokenSequence<?> tokens = hi.tokenSequence();
assertTrue(tokens.moveNext());
tokens.embedded(); // forces creation of the join token list (JTL)
} finally {
adoc.readUnlock();
}
}
示例2: setUp
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Copies the test's fixture .fxml file into the work directory, opens it as a
 * document and obtains its token hierarchy.
 * <p>
 * Fix: the original closed {@code is}/{@code os} only on the success path,
 * leaking both streams when {@code FileUtil.copy} threw; try-with-resources
 * guarantees they are closed.
 */
@Override
protected void setUp() throws Exception {
super.setUp();
File dataDir = getDataDir();
// Fixture name derived from the test method name, e.g. testFoo -> Foo.fxml
fname = getName().replace("test", "");
File f = new File(dataDir, getClass().getName().
        replaceAll("\\.", "/") + "/" + fname + ".fxml");
File w = new File(getWorkDir(), f.getName());
try (InputStream is = new FileInputStream(f);
     OutputStream os = new FileOutputStream(w)) {
    FileUtil.copy(is, os);
}
FileObject fo = FileUtil.toFileObject(w);
sourceDO = DataObject.find(fo);
document = ((EditorCookie)sourceDO.getCookie(EditorCookie.class)).openDocument();
hierarchy = TokenHierarchy.get(document);
}
示例3: SyntaxHighlighting
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/** Creates a new instance of SyntaxHighlighting */
public SyntaxHighlighting(Document document) {
this.document = document;
String mimeType = (String) document.getProperty("mimeType"); //NOI18N
if (mimeType != null && mimeType.startsWith("test")) { //NOI18N
this.mimeTypeForOptions = mimeType;
} else {
this.mimeTypeForOptions = null;
}
// Start listening on changes in global colorings since they may affect colorings for target language
findFCSInfo("", null);
hierarchy = TokenHierarchy.get(document);
hierarchy.addTokenHierarchyListener(WeakListeners.create(TokenHierarchyListener.class, this, hierarchy));
}
示例4: indexWithinCurrentLanguage
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Determines whether the given offset falls on a token of the supported
 * language itself — i.e. not inside an embedded language and not on a
 * whitespace token.
 * <p>
 * Fix: the original used a raw {@code TokenHierarchy}, which erased the
 * generic {@code toArray(T[])} signature and forced a redundant
 * {@code (TokenSequence[])} cast; typing the hierarchy lets us iterate the
 * sequence list directly.
 *
 * @param doc the document whose tokens are inspected
 * @param index caret offset to test
 * @return true when the offset lies on a non-whitespace, non-embedded token
 */
private boolean indexWithinCurrentLanguage(BaseDocument doc, int index) throws BadLocationException{
TokenHierarchy<?> tokenHierarchy = TokenHierarchy.get(doc);
for (TokenSequence<?> tokenSequence : tokenHierarchy.tokenSequenceList(supportedLanguagePath(), 0, Integer.MAX_VALUE)){
TextBounds languageBounds = findTokenSequenceBounds(doc, tokenSequence);
if (languageBounds.getAbsoluteStart() <= index && languageBounds.getAbsoluteEnd() >= index){
tokenSequence.move(index);
if (tokenSequence.moveNext()){
// the newly entered \n character may or may not
// form a separate token - work it around
if (isWSToken(tokenSequence.token())){
tokenSequence.movePrevious();
}
return tokenSequence.embedded() == null && !isWSToken(tokenSequence.token());
}
}
}
return false;
}
示例5: scanForSemicolon
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Scans the innermost embedded token sequence at {@code offset[0]} for the
 * first Java semicolon between {@code start} and {@code end}.
 *
 * @return the semicolon's offset, -1 when none is found in range, or
 *         {@code offset[0]} when no embedded sequence exists there
 */
private static int scanForSemicolon(Document doc, int[] offset, int start, int end) throws BadLocationException {
TokenSequence<?> seq = null;
if (doc != null) {
TokenHierarchy<Document> th = TokenHierarchy.get(doc);
List<TokenSequence<?>> embeddedSequences = th != null ? th.embeddedTokenSequences(offset[0], false) : null;
if (embeddedSequences != null) {
// the deepest (innermost) embedded sequence
seq = embeddedSequences.get(embeddedSequences.size() - 1);
}
}
if (seq == null) {
return offset[0];
}
seq.move(start);
int semicolon = -1;
while (seq.moveNext()) {
int tokenOffset = seq.offset();
if (tokenOffset > end) {
break;
}
Token<?> token = seq.token();
if (token != null && token.id() == JavaTokenId.SEMICOLON) {
semicolon = tokenOffset;
break;
}
}
return semicolon;
}
示例6: getCssTokenSequence
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Returns the CSS token sequence at the given offset, descending through
 * embedded sequences as needed; null when no CSS sequence exists there.
 */
private TokenSequence<CssTokenId> getCssTokenSequence(Document doc, int offset) {
TokenSequence ts = TokenHierarchy.get(doc).tokenSequence();
while (ts != null) {
ts.move(offset);
// position on the token at/around the offset; give up when there is none
if (!ts.moveNext() && !ts.movePrevious()) {
break;
}
if (ts.language() == CssTokenId.language()) {
return ts;
}
// descend into the embedded sequence so we can search deeper
ts = ts.embedded();
}
return null;
}
示例7: getHighlights
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Returns highlights for the given range, lazily attaching a weak listener
 * to the document's token hierarchy on first use.
 */
@Override
public HighlightsSequence getHighlights(int startOffset, int endOffset) {
synchronized (this) {
if (rubyBackground == null) {
return HighlightsSequence.EMPTY;
}
if (hierarchy == null) {
hierarchy = TokenHierarchy.get(document);
if (hierarchy != null) {
hierarchy.addTokenHierarchyListener(WeakListeners.create(TokenHierarchyListener.class, this, hierarchy));
}
}
return hierarchy != null
        ? new Highlights(version, hierarchy, startOffset, endOffset)
        : HighlightsSequence.EMPTY;
}
}
示例8: getEditorCookie
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Resolves the file named by the FXML token at the given offset and returns
 * its EditCookie, or null when the token, the file, or its DataObject cannot
 * be resolved.
 */
private EditCookie getEditorCookie(Document doc, int offset) {
TokenHierarchy<?> th = TokenHierarchy.get(doc);
TokenSequence ts = th.tokenSequence(Language.find(JavaFXEditorUtils.FXML_MIME_TYPE));
if (ts == null) {
return null;
}
ts.move(offset);
if (!ts.moveNext()) {
return null;
}
String name = ts.token().text().toString();
FileObject fo = getFileObject(doc);
FileObject props = findFile(fo, name);
if (props == null) {
return null;
}
try {
return DataObject.find(props).getLookup().lookup(EditCookie.class);
} catch (DataObjectNotFoundException ex) {
Exceptions.printStackTrace(ex);
}
return null;
}
示例9: dumpTokens
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Opens the document for {@code file}, polls (up to 50 rounds, one second
 * apart) until a token hierarchy and its token sequence become available,
 * then dumps the tokens.
 * <p>
 * Fixes: the original assertion message rendered as "...hierarchy 50times"
 * (missing spaces), and {@code InterruptedException} was swallowed without
 * restoring the thread's interrupt status.
 *
 * @return the dumped tokens, or null when dumping the sequence fails
 * @throws AssertionError when no token hierarchy appears within the retry limit
 */
@SuppressWarnings("unchecked")
private List<Token> dumpTokens() throws IOException {
Logger.getLogger(DumpTokens.class.getName()).info("Dumping tokens");
DataObject dataObj = DataObject.find(FileUtil.toFileObject(file));
EditorCookie ed = dataObj.getCookie(EditorCookie.class);
StyledDocument sDoc = ed.openDocument();
BaseDocument doc = (BaseDocument) sDoc;
TokenHierarchy th = null;
TokenSequence ts = null;
int roundCount = 0;
while ((th == null) || (ts == null)){
th = TokenHierarchy.get(doc);
if (th != null){
ts = th.tokenSequence();
}
roundCount++;
if (roundCount > 50){
throw new AssertionError("Impossible to get token hierarchy after " + roundCount + " tries");
}
try {
Thread.sleep(1000);
} catch (InterruptedException interruptedException) {
// restore the interrupt status so callers can observe the interruption
Thread.currentThread().interrupt();
interruptedException.printStackTrace();
}
}
try{
Logger.getLogger(DumpTokens.class.getName()).info("Parsing token sequence");
List<Token> tok = dumpTokens(ts);
return tok;
}catch(Exception e){
e.printStackTrace();
}
return null;
}
示例10: testJoinEmbeddingDynamicCreationAndRemoval
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Verifies that join-section embeddings can be created on the fly inside an
 * atomic document modification and that removing the whole document content
 * afterwards updates the token hierarchy without errors.
 */
public void testJoinEmbeddingDynamicCreationAndRemoval() throws Exception {
// 000000000011111111112222222222
// 012345678901234567890123456789
String text = "a(x)b(y)c";
ModificationTextDocument doc = new ModificationTextDocument();
doc.insertString(0, text, null);
doc.putProperty(Language.class, TestJoinTextTokenId.language);
// LexerTestUtilities.incCheck(doc, true); // Ensure the whole embedded hierarchy gets created
final TokenHierarchy<?> hi = TokenHierarchy.get(doc);
doc.runAtomic(new Runnable() {
@Override
public void run() {
TokenSequence<?> ts = hi.tokenSequence();
assertTrue(ts.moveNext());
assertTrue(ts.moveNext()); // on "(x)"
// Create embedding that joins sections
ts.createEmbedding(TestPlainTokenId.language(), 1, 1, true);
assertTrue(ts.moveNext()); // on "b"
assertTrue(ts.moveNext()); // on "(y)"
// Force materialization of the embedded sequence list before the second embedding is added
hi.tokenSequenceList(LanguagePath.get(TestJoinTextTokenId.language).
embedded(TestPlainTokenId.language()), 0, Integer.MAX_VALUE);
// Create embedding that joins sections
ts.createEmbedding(TestPlainTokenId.language(), 1, 1, true);
}
});
// Removing all content must tear the dynamically created embeddings down cleanly
doc.remove(0, doc.getLength());
}
示例11: noCompletion
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
* No completion inside PI, CDATA, comment section.
* True only inside PI or CDATA section, false otherwise.
* @param target
*/
/**
 * No completion inside PI, CDATA, comment section.
 * True only inside PI or CDATA section, false otherwise.
 * @param target the text component whose caret position is inspected
 */
public boolean noCompletion(JTextComponent target) {
if (target == null || target.getCaret() == null) {
return false;
}
int offset = target.getCaret().getDot();
if (offset < 0) {
return false;
}
// no completion inside CDATA or comment sections
BaseDocument document = (BaseDocument) target.getDocument();
((AbstractDocument) document).readLock();
try {
TokenSequence ts = TokenHierarchy.get(document).tokenSequence();
if (ts == null) {
return false;
}
ts.move(offset);
Token token = ts.token();
if (token == null) {
// no token directly at the offset; try the following one
ts.moveNext();
token = ts.token();
if (token == null) {
return false;
}
}
Object id = token.id();
return id == XMLTokenId.CDATA_SECTION
        || id == XMLTokenId.BLOCK_COMMENT
        || id == XMLTokenId.PI_START
        || id == XMLTokenId.PI_END
        || id == XMLTokenId.PI_CONTENT
        || id == XMLTokenId.PI_TARGET;
} finally {
((AbstractDocument) document).readUnlock();
}
}
示例12: testFindLanguageForMT
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Checks that a token hierarchy and the proper language are resolved purely
 * from the document's "mimeType" property.
 */
public void testFindLanguageForMT() {
final String mime = "text/x-simple-char";
Document doc = new PlainDocument();
doc.putProperty("mimeType", mime);
TokenHierarchy th = TokenHierarchy.get(doc);
assertNotNull("Can't find token hierarchy for a text/x-simple-char document", th);
Language lang = th.tokenSequence().language();
assertNotNull("Can't find language for text/x-simple-char", lang);
assertEquals("Wrong language", mime, lang.mimeType());
}
示例13: testEmbeddingDynamicUpdate
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Verifies incremental updates of an embedded token sequence list: after each
 * insert/remove of a '%' character the embedded-language sequence list must
 * still contain exactly one sequence.
 */
public void testEmbeddingDynamicUpdate() throws Exception {
// 000000000011111111112222222222
// 012345678901234567890123456789
String text = "a%";
ModificationTextDocument doc = new ModificationTextDocument();
doc.insertString(0, text, null);
doc.putProperty(Language.class, TestJoinTopTokenId.language());
LexerTestUtilities.incCheck(doc, true); // Ensure the whole embedded hierarchy gets created
// Logger.getLogger("org.netbeans.lib.lexer.inc.TokenHierarchyUpdate").setLevel(Level.FINEST); // Extra logging
// Logger.getLogger("org.netbeans.lib.lexer.inc.TokenListUpdater").setLevel(Level.FINE); // Extra logging
// Logger.getLogger("org.netbeans.lib.lexer.inc.TokenListListUpdate").setLevel(Level.FINE); // Extra logging
doc.insertString(2, "%", null);
TokenHierarchy<?> hi = TokenHierarchy.get(doc);
LanguagePath embLP;
List<TokenSequence<?>> tsList;
// Reading the token sequence list requires the document read lock
((AbstractDocument)doc).readLock();
try {
embLP = LanguagePath.get(TestJoinTopTokenId.language()).
embedded(TestJoinTextTokenId.inPercentsLanguage);
tsList = hi.tokenSequenceList(embLP, 0, Integer.MAX_VALUE);
assertEquals(1, tsList.size());
} finally {
((AbstractDocument)doc).readUnlock();
}
// Removing the second '%' must shrink but not empty the embedded list
doc.remove(2, 1);
((AbstractDocument)doc).readLock();
try {
tsList = hi.tokenSequenceList(embLP, 0, Integer.MAX_VALUE);
assertEquals(1, tsList.size()); // contains single token for extra '\n' at the end of doc
} finally {
((AbstractDocument)doc).readUnlock();
}
doc.insertString(2, "%", null); // BTW does not have to be '%'
((AbstractDocument)doc).readLock();
try {
tsList = hi.tokenSequenceList(embLP, 0, Integer.MAX_VALUE);
assertEquals(1, tsList.size());
} finally {
((AbstractDocument)doc).readUnlock();
}
}
示例14: findOrigin
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Finds the origin area for the HTML brace matcher at the current search
 * offset: either the whole tag delimited by TAG_OPEN_SYMBOL and
 * TAG_CLOSE_SYMBOL around the caret, or the start/end marker of a block
 * comment when the caret sits on one of them.
 *
 * @return offset pairs describing the origin area(s), or null when there is
 *         nothing to match (empty tags "/>" are deliberately not matched)
 */
@Override
public int[] findOrigin() throws InterruptedException, BadLocationException {
int searchOffset = context.getSearchOffset();
// token access must happen under the document read lock
((AbstractDocument) context.getDocument()).readLock();
try {
if (!testMode && MatcherContext.isTaskCanceled()) {
return null;
}
TokenSequence<HTMLTokenId> ts = Utils.getJoinedHtmlSequence(context.getDocument(), searchOffset);
// NOTE(review): 'th' is never used below — looks like a leftover; confirm before removing
TokenHierarchy<Document> th = TokenHierarchy.get(context.getDocument());
if (ts.language() == HTMLTokenId.language()) {
// walk offsets toward the limit until a matchable token is found
while (searchOffset != context.getLimitOffset()) {
int diff = ts.move(searchOffset);
searchOffset = searchOffset + (context.isSearchingBackward() ? -1 : +1);
if (diff == 0 && context.isSearchingBackward()) {
//we are searching backward and the offset is at the token boundary
if (!ts.movePrevious()) {
continue;
}
} else {
if (!ts.moveNext()) {
continue;
}
}
Token<HTMLTokenId> t = ts.token();
int toffs = ts.offset();
if (tokenInTag(t)) {
//find the tag beginning
do {
Token<HTMLTokenId> t2 = ts.token();
int t2offs = ts.offset();
if (!tokenInTag(t2)) {
return null;
} else if (t2.id() == HTMLTokenId.TAG_OPEN_SYMBOL) {
//find end
int tagNameEnd = -1;
// scan forward from the '<' for the matching '>' (or bail out)
while (ts.moveNext()) {
Token<HTMLTokenId> t3 = ts.token();
int t3offs = ts.offset();
if (!tokenInTag(t3) || t3.id() == HTMLTokenId.TAG_OPEN_SYMBOL) {
return null;
} else if (t3.id() == HTMLTokenId.TAG_CLOSE_SYMBOL) {
if ("/>".equals(t3.text().toString())) {
//do no match empty tags
return null;
} else {
int from = t2offs;
int to = t3offs + t3.length();
// when the tag name was seen, also report its sub-range and the '>' itself
if (tagNameEnd != -1) {
return new int[]{from, to,
from, tagNameEnd,
to - 1, to};
} else {
return new int[]{from, to};
}
}
} else if (t3.id() == HTMLTokenId.TAG_OPEN || t3.id() == HTMLTokenId.TAG_CLOSE) {
tagNameEnd = t3offs + t3.length();
}
}
break;
}
} while (ts.movePrevious());
} else if (t.id() == HTMLTokenId.BLOCK_COMMENT) {
// caret on a comment: match only when it sits on the start or end marker
String tokenImage = t.text().toString();
if (tokenImage.startsWith(BLOCK_COMMENT_START) && context.getSearchOffset() < toffs + BLOCK_COMMENT_START.length()) {
return new int[]{toffs, toffs + BLOCK_COMMENT_START.length()};
} else if (tokenImage.endsWith(BLOCK_COMMENT_END) && (context.getSearchOffset() >= toffs + tokenImage.length() - BLOCK_COMMENT_END.length())) {
return new int[]{toffs + t.length() - BLOCK_COMMENT_END.length(), toffs + t.length()};
}
}
}
}
return null;
} finally {
((AbstractDocument) context.getDocument()).readUnlock();
}
}
示例15: getDocRoot
import org.netbeans.api.lexer.TokenHierarchy; //导入方法依赖的package包/类
/**
 * Finds the root element of the XML document and collects its attributes.
 * The first start tag with a valid name is treated as the root.
 *
 * See DocRoot.
 *
 * @param document the XML document to scan (read-locked during the scan)
 * @return a DocRoot describing the root element name and its attributes;
 *         the name is null when no start tag was found
 */
public static DocRoot getDocRoot(Document document) {
((AbstractDocument)document).readLock();
try {
TokenHierarchy th = TokenHierarchy.get(document);
TokenSequence ts = th.tokenSequence();
List<DocRootAttribute> attributes = new ArrayList<DocRootAttribute>();
String name = null;
while(ts.moveNext()) {
Token nextToken = ts.token();
if(nextToken.id() == XMLTokenId.TAG) {
String tagName = nextToken.text().toString();
// strip the leading '<' to get the element name
if(name == null && tagName.startsWith("<"))
name = tagName.substring(1, tagName.length());
String lastAttrName = null;
// collect ARGUMENT/VALUE pairs until the closing '>' of this tag
while(ts.moveNext() ) {
Token t = ts.token();
if(t.id() == XMLTokenId.TAG && t.text().toString().equals(">"))
break;
if(t.id() == XMLTokenId.ARGUMENT) {
lastAttrName = t.text().toString();
}
if(t.id() == XMLTokenId.VALUE && lastAttrName != null) {
String value = t.text().toString();
// NOTE(review): length()==1 presumably means a lone quote char (empty value) — confirm
if(value == null || value.length() == 1)
value = null;
// strip surrounding single or double quotes
else if(value.startsWith("'") || value.startsWith("\""))
value = value.substring(1, value.length()-1);
attributes.add(new DocRootAttribute(lastAttrName, value));
lastAttrName = null;
}
} //while loop
//first start tag with a valid name is the root
if(name != null)
break;
}
} //while loop
return new DocRoot(name, attributes);
} finally {
((AbstractDocument)document).readUnlock();
}
}