本文整理汇总了Java中org.netbeans.api.lexer.TokenSequence.language方法的典型用法代码示例。如果您正苦于以下问题:Java TokenSequence.language方法的具体用法?Java TokenSequence.language怎么用?Java TokenSequence.language使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.netbeans.api.lexer.TokenSequence
的用法示例。
在下文中一共展示了TokenSequence.language方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getJavaTokenSequence
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Finds the {@code JavaTokenId} token sequence embedded (at any nesting depth)
 * in the given token hierarchy at the given offset.
 *
 * @param hierarchy token hierarchy to search; may be {@code null}
 * @param offset document offset to drill down at
 * @return the Java token sequence containing the offset, or {@code null}
 *         when none exists (or {@code hierarchy} is {@code null})
 * @since 0.21
 */
public static TokenSequence<JavaTokenId> getJavaTokenSequence(final TokenHierarchy hierarchy, final int offset) {
if (hierarchy != null) {
TokenSequence<?> ts = hierarchy.tokenSequence();
// Descend through embedded sequences until one lexed as Java is found.
// NOTE(review): for offset != 0 the sequence is advanced once in the loop
// condition before move(offset) repositions it; presumably this guards
// against an empty top-level sequence -- confirm against TokenSequence docs.
while(ts != null && (offset == 0 || ts.moveNext())) {
ts.move(offset);
if (ts.language() == JavaTokenId.language()) {
return (TokenSequence<JavaTokenId>)ts;
}
// Position on a token around the offset; a sequence with no tokens at all
// has nothing to descend into.
if (!ts.moveNext() && !ts.movePrevious()) {
return null;
}
ts = ts.embedded();
}
}
return null;
}
示例2: testCreateAnnotation
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Attaches {@code la} to the document at the current token of {@code ts} if
 * the sequence's MIME type matches the AST item's; otherwise recurses into
 * the embedded sequence, if any.
 *
 * @return {@code true} when the annotation was created, {@code false} when no
 *         matching (embedded) sequence exists
 * @throws BadLocationException if the token offset is invalid for the document
 */
private boolean testCreateAnnotation(TokenHierarchy hi, TokenSequence ts, ASTItem item, LanguagesAnnotation la) throws BadLocationException {
    // Sanity checks: every sequence must expose a language with a MIME type.
    if (ts.language() == null)
        throw new NullPointerException ("ts.language()==null");
    if (ts.language().mimeType() == null)
        throw new NullPointerException ("TokenSequence.mimeType==null");
    if (!ts.language().mimeType().equals(item.getMimeType())) {
        // MIME mismatch: descend into the embedded sequence, if any, and retry.
        TokenSequence embedded = ts.embedded();
        if (embedded == null || !embedded.moveNext()) {
            return false;
        }
        return testCreateAnnotation(hi, embedded, item, la);
    }
    // MIME types match: anchor the annotation at the current token.
    Token current = ts.token();
    if (current == null) throw new NullPointerException ();
    Position position = doc.createPosition(current.offset(hi));
    la.setPosition(position);
    doc.addAnnotation(position, current.length(), la);
    return true;
}
示例3: getTokenSequence
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Gets instance of {@link TokenSequence} for the given
 * {@link TokenHierarchy} and offset, descending through embedded token
 * sequences until one lexed with {@code language} is found.
 *
 * @since 1.55
 * @param th token hierarchy to search
 * @param offset document offset to position the sequence at
 * @param language lexer language the returned sequence must have
 * @param joined if {@code true}, descend via {@code TokenSequence.embeddedJoined()};
 *        otherwise via {@code TokenSequence.embedded()}
 * @return the matching token sequence, or {@code null} when none is found
 */
public static TokenSequence getTokenSequence(TokenHierarchy th, int offset, Language language, boolean joined) {
    TokenSequence ts = th.tokenSequence();
    if (ts == null) {
        return null;
    }
    ts.move(offset);
    while (ts.moveNext() || ts.movePrevious()) {
        if (ts.language() == language) {
            return ts;
        }
        // BUG FIX: the 'joined' parameter was previously ignored and
        // embeddedJoined() was always used; honor the flag as documented.
        ts = joined ? ts.embeddedJoined() : ts.embedded();
        if (ts == null) {
            break;
        }
        //position the embedded ts so we can search deeper
        ts.move(offset);
    }
    return null;
}
示例4: getCssTokenSequence
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Returns the CSS token sequence containing the given document offset,
 * drilling down through embedded token sequences; {@code null} when the
 * offset is not inside CSS.
 */
private TokenSequence<CssTokenId> getCssTokenSequence(Document doc, int offset) {
    TokenHierarchy hierarchy = TokenHierarchy.get(doc);
    TokenSequence sequence = hierarchy.tokenSequence();
    if (sequence == null) {
        return null;
    }
    // Reposition at the offset before every pass so we can search deeper.
    for (sequence.move(offset); sequence.moveNext() || sequence.movePrevious(); sequence.move(offset)) {
        if (sequence.language() == CssTokenId.language()) {
            return sequence;
        }
        sequence = sequence.embedded();
        if (sequence == null) {
            return null;
        }
    }
    return null;
}
示例5: getHighlights
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Returns the highlight sequence for the given document range, or
 * {@code HighlightsSequence.EMPTY} when no colorings are configured or the
 * token hierarchy is inactive.
 */
public synchronized HighlightsSequence getHighlights(int startOffset, int endOffset) {
    if (colorings.isEmpty()) {
        return HighlightsSequence.EMPTY;
    }
    TokenSequence<? extends TokenId> tokens = TokenHierarchy.get(doc).tokenSequence();
    if (tokens == null) { // Null when token hierarchy is inactive
        return HighlightsSequence.EMPTY;
    }
    TokenSequence<? extends TokenId> range = tokens.subSequence(startOffset, endOffset);
    // Top-level Java gets the plain lexer-based sequence; anything else is
    // treated as Java embedded in another language.
    return tokens.language() == JavaTokenId.language()
            ? new LexerBasedHighlightSequence(this, range, colorings)
            : new EmbeddedLexerBasedHighlightSequence(this, range, colorings);
}
示例6: getEmbeddedTokenSequences
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Returns the nesting chain of token sequences at {@code offset}, trimmed so
 * that the innermost remaining sequence is lexed with {@code language}; an
 * empty list when no sequence in the chain matches.
 */
public static List<TokenSequence<?>> getEmbeddedTokenSequences(
    TokenHierarchy<?> th, int offset, boolean backwardBias, Language<?> language
) {
    List<TokenSequence<?>> sequences = th.embeddedTokenSequences(offset, backwardBias);
    // Drop sequences from the deep end of the chain until the requested
    // language is the innermost one.
    int last = sequences.size() - 1;
    while (last >= 0 && sequences.get(last).language() != language) {
        sequences.remove(last);
        last--;
    }
    return sequences;
}
示例7: processTokenSequence
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Walks {@code tokenCount} tokens of the given sequence, updating the bracket
 * balance (by {@code diff}) for every token whose id is tracked in
 * {@code id2Pair}; optionally recurses into embedded token sequences.
 */
public void processTokenSequence(TokenSequence<?> ts, int tokenCount, boolean checkEmbedded, int diff) {
    for (int remaining = tokenCount; remaining > 0; remaining--) {
        boolean advanced = ts.moveNext();
        assert (advanced);
        if (ts.language() == language) {
            T tokenId = (T) ts.token().id();
            TokenIdPair balancePair = id2Pair.get(tokenId);
            if (balancePair != null) {
                balancePair.updateBalance(tokenId, diff);
            }
        }
        if (checkEmbedded) {
            TokenSequence<?> inner = ts.embedded();
            if (inner != null) {
                processTokenSequence(inner, inner.tokenCount(), true, diff);
            }
        }
    }
}
示例8: testGCedE
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Verifies that a token carrying an embedded language can be garbage
 * collected once all strong references to the hierarchy are dropped.
 * The order of the null-assignments below is critical: every strong
 * reference must be cleared before assertGC can succeed.
 */
public void testGCedE() {
TokenHierarchy th = TokenHierarchy.create("abc", TestPlainTokenId.language());
TokenSequence tokens = th.tokenSequence();
tokens.moveStart();
assertEquals(true, tokens.moveNext());
TokenSequence embedded = tokens.embedded();
assertNotNull("There should be an embedded language", embedded);
// Keep only weak references so GC-ability can be asserted.
WeakReference<Language> refLang = new WeakReference<Language>(embedded.language());
embedded = null;
WeakReference<Token> refToken = new WeakReference<Token>(tokens.token());
tokens = null;
th = null;
// This no longer works after the language is statically held in the xxTokenId by the new convention
//assertGC("The embedded language has not been GCed", refLang);
assertGC("The token with embedded language has not been GCed", refToken);
}
示例9: moveNext
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Advances through the sequence starting at the field {@code startOffset1},
 * looking for the first token range with a highlighting attribute set; on a
 * hit it accumulates the attributes into {@code attributeSet} and records the
 * range end in {@code endOffset1}. Recurses into embedded sequences.
 * NOTE(review): this method communicates through the mutable fields
 * startOffset1/endOffset1 rather than return values, so statement order
 * matters -- confirm against the callers before restructuring.
 */
private void moveNext(TokenSequence ts) {
AttributeSet as = null;
do {
ts.move(startOffset1);
if (!ts.moveNext()) {
return;
}
Token t = ts.token();
if (ts.language() == null)
throw new NullPointerException ("ts.language()==null: TS " + ts + " : " + document.getProperty("mimeType"));
as = highlighting.get (ts.offset(), ts.offset() + t.length());
if (as != null) {
// Found highlighted range: merge attributes and stop at its end.
attributeSet.addAttributes(as);
endOffset1 = ts.offset() + t.length();
return;
}
// No attributes on this token; try the embedded sequence first.
TokenSequence ts1 = ts.embedded();
if (ts1 != null) {
moveNext(ts1);
}
// The recursive call may have advanced endOffset1 past startOffset1.
if (endOffset1 > startOffset1) {
return;
}
// Skip past the current token and continue scanning.
if (ts.token() != null) {
startOffset1 = ts.offset() + ts.token().length();
endOffset1 = startOffset1;
} else {
return;
}
} while (startOffset1 < endOffset);
}
示例10: testLanguagesEmbeddingMapMT
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Checks that a text/x-simple-plain document lexes its WORD tokens with an
 * embedded text/x-simple-char language, honoring the configured skip lengths
 * (1 character at the start, 2 at the end of each word).
 */
public void testLanguagesEmbeddingMapMT() throws Exception {
    Document document = new PlainDocument();
    document.putProperty("mimeType", "text/x-simple-plain");
    // All words have to be longer than 3 characters
    document.insertString(0, "Hello 1234 0xFF00", SimpleAttributeSet.EMPTY);
    TokenHierarchy hierarchy = TokenHierarchy.get(document);
    assertNotNull("Can't find token hierarchy for a text/x-simple-plain document", hierarchy);
    TokenSequence tokens = hierarchy.tokenSequence();
    Language topLanguage = tokens.language();
    assertNotNull("Can't find language for text/x-simple-plain", topLanguage);
    assertEquals("Wrong language", "text/x-simple-plain", topLanguage.mimeType());
    for (int idx = 0; idx < tokens.tokenCount(); idx++) {
        tokens.moveIndex(idx);
        assertTrue(tokens.moveNext());
        Token current = tokens.token();
        if (current.id() != SimplePlainTokenId.WORD) {
            continue;
        }
        // Every WORD token must carry an embedded char sequence.
        TokenSequence embedded = tokens.embedded();
        assertNotNull("Can't find embedded token sequence", embedded);
        Language embeddedLanguage = embedded.language();
        assertNotNull("Can't find language of the embedded sequence", embeddedLanguage);
        assertEquals("Wrong language of the embedded sequence", "text/x-simple-char", embeddedLanguage.mimeType());
        embedded.moveStart();
        assertTrue("Embedded sequence has no tokens (moveFirst)", embedded.moveNext());
        assertEquals("Wrong startSkipLength", 1, embedded.offset() - tokens.offset());
        embedded.moveEnd();
        assertTrue("Embedded sequence has no tokens (moveLast)", embedded.movePrevious());
        assertEquals("Wrong endSkipLength", 2,
                (tokens.offset() + tokens.token().length()) - (embedded.offset() + embedded.token().length()));
    }
}
示例11: getJoinedHtmlSequence
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Returns the top-most joined HTML token sequence for the document at the
 * specified offset, or {@code null} when the offset is not inside HTML.
 */
public static TokenSequence<HTMLTokenId> getJoinedHtmlSequence(TokenHierarchy th, int offset) {
    TokenSequence sequence = th.tokenSequence();
    if (sequence == null) {
        return null;
    }
    sequence.move(offset);
    while (sequence.moveNext() || sequence.movePrevious()) {
        if (sequence.language() == HTMLTokenId.language()) {
            return sequence;
        }
        sequence = sequence.embeddedJoined();
        if (sequence == null) {
            return null;
        }
        // Reposition the embedded sequence so the next pass can search deeper.
        // XXX this seems to be wrong, the return code should be checked.
        sequence.move(offset);
    }
    return null;
}
示例12: findOrigin
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Locates the brace-matching origin inside a javadoc token sequence at the
 * caret offset: first an HTML tag within the javadoc, otherwise delegates to
 * the default matcher bounded by the javadoc span.
 * Side effects: initializes the fields jdocSeq, jdocStart, jdocEnd and
 * defaultMatcher for later use by the matcher.
 *
 * @return origin offsets, or {@code null} when the caret is not in javadoc
 */
public int[] findOrigin() throws BadLocationException, InterruptedException {
((AbstractDocument) context.getDocument()).readLock();
try {
int caretOffset = context.getSearchOffset();
boolean backward = context.isSearchingBackward();
TokenHierarchy<Document> th = TokenHierarchy.get(context.getDocument());
List<TokenSequence<?>> sequences = th.embeddedTokenSequences(caretOffset, backward);
// Walk the embedding chain from innermost to outermost looking for javadoc.
for(int i = sequences.size() - 1; i >= 0; i--) {
TokenSequence<? extends TokenId> seq = sequences.get(i);
if (seq.language() == JavadocTokenId.language()) {
jdocSeq = seq;
if (i > 0) {
// The enclosing (Java) sequence delimits the javadoc span.
TokenSequence<? extends TokenId> javaSeq = sequences.get(i - 1);
jdocStart = javaSeq.offset();
jdocEnd = javaSeq.offset() + javaSeq.token().length();
} else {
// jdocSeq is the top level sequence, ie the whole document is just javadoc
jdocStart = 0;
jdocEnd = context.getDocument().getLength();
}
break;
}
}
if (jdocSeq == null) {
if (LOG.isLoggable(Level.FINE)) {
LOG.fine("Not javadoc TokenSequence."); //NOI18N
}
return null;
}
// if (caretOffset >= jdocStart &&
// ((backward && caretOffset <= jdocStart + 3) ||
// (!backward && caretOffset < jdocStart + 3))
// ) {
// matchingArea = new int [] { jdocEnd - 2, jdocEnd };
// return new int [] { jdocStart, jdocStart + 3 };
// }
//
// if (caretOffset <= jdocEnd &&
// ((backward && caretOffset > jdocEnd - 2) ||
// (!backward && caretOffset >= jdocEnd - 2))
// ) {
// matchingArea = new int [] { jdocStart, jdocStart + 3 };
// return new int [] { jdocEnd - 2, jdocEnd };
// }
// look for tags first
jdocSeq.move(caretOffset);
if (jdocSeq.moveNext()) {
// Tag directly at the caret (type-parameter and uninterpreted tags are
// not HTML and must not be matched).
if (isTag(jdocSeq.token()) && !isTypeParameterTag(jdocSeq) && !isUninterpretedTag(jdocSeq)) {
if (jdocSeq.offset() < caretOffset || !backward) {
return prepareOffsets(jdocSeq, true);
}
}
// Otherwise scan in the search direction up to the limit offset.
while(moveTheSequence(jdocSeq, backward, context.getLimitOffset())) {
if (isTag(jdocSeq.token())) {
if (isTypeParameterTag(jdocSeq) || isUninterpretedTag(jdocSeq)) {
// do not treat type parameter and {@code} and {@literal} content as HTML tag
break;
}
return prepareOffsets(jdocSeq, true);
}
}
}
// No HTML tag found: fall back to character matching within the javadoc.
defaultMatcher = BracesMatcherSupport.defaultMatcher(context, jdocStart, jdocEnd);
return defaultMatcher.findOrigin();
} finally {
((AbstractDocument) context.getDocument()).readUnlock();
}
}
示例13: findOrigin
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Finds the origin area for HTML brace matching at the context's search
 * offset: the span of the enclosing tag (with a tag-name sub-range when one
 * was seen), or the start/end delimiter of a block comment.
 *
 * @return origin offsets, or {@code null} when nothing matchable is found
 */
@Override
public int[] findOrigin() throws InterruptedException, BadLocationException {
    int searchOffset = context.getSearchOffset();
    ((AbstractDocument) context.getDocument()).readLock();
    try {
        if (!testMode && MatcherContext.isTaskCanceled()) {
            return null;
        }
        TokenSequence<HTMLTokenId> ts = Utils.getJoinedHtmlSequence(context.getDocument(), searchOffset);
        // BUG FIX: getJoinedHtmlSequence() returns null when there is no HTML
        // token sequence at the offset; dereferencing it unguarded caused an
        // NPE here. (Also removed the unused TokenHierarchy local.)
        if (ts == null) {
            return null;
        }
        if (ts.language() == HTMLTokenId.language()) {
            while (searchOffset != context.getLimitOffset()) {
                int diff = ts.move(searchOffset);
                searchOffset = searchOffset + (context.isSearchingBackward() ? -1 : +1);
                if (diff == 0 && context.isSearchingBackward()) {
                    //we are searching backward and the offset is at the token boundary
                    if (!ts.movePrevious()) {
                        continue;
                    }
                } else {
                    if (!ts.moveNext()) {
                        continue;
                    }
                }
                Token<HTMLTokenId> t = ts.token();
                int toffs = ts.offset();
                if (tokenInTag(t)) {
                    //find the tag beginning
                    do {
                        Token<HTMLTokenId> t2 = ts.token();
                        int t2offs = ts.offset();
                        if (!tokenInTag(t2)) {
                            return null;
                        } else if (t2.id() == HTMLTokenId.TAG_OPEN_SYMBOL) {
                            //find end
                            int tagNameEnd = -1;
                            while (ts.moveNext()) {
                                Token<HTMLTokenId> t3 = ts.token();
                                int t3offs = ts.offset();
                                if (!tokenInTag(t3) || t3.id() == HTMLTokenId.TAG_OPEN_SYMBOL) {
                                    return null;
                                } else if (t3.id() == HTMLTokenId.TAG_CLOSE_SYMBOL) {
                                    if ("/>".equals(t3.text().toString())) {
                                        //do no match empty tags
                                        return null;
                                    } else {
                                        int from = t2offs;
                                        int to = t3offs + t3.length();
                                        if (tagNameEnd != -1) {
                                            return new int[]{from, to,
                                                from, tagNameEnd,
                                                to - 1, to};
                                        } else {
                                            return new int[]{from, to};
                                        }
                                    }
                                } else if (t3.id() == HTMLTokenId.TAG_OPEN || t3.id() == HTMLTokenId.TAG_CLOSE) {
                                    tagNameEnd = t3offs + t3.length();
                                }
                            }
                            break;
                        }
                    } while (ts.movePrevious());
                } else if (t.id() == HTMLTokenId.BLOCK_COMMENT) {
                    String tokenImage = t.text().toString();
                    if (tokenImage.startsWith(BLOCK_COMMENT_START) && context.getSearchOffset() < toffs + BLOCK_COMMENT_START.length()) {
                        return new int[]{toffs, toffs + BLOCK_COMMENT_START.length()};
                    } else if (tokenImage.endsWith(BLOCK_COMMENT_END) && (context.getSearchOffset() >= toffs + tokenImage.length() - BLOCK_COMMENT_END.length())) {
                        return new int[]{toffs + t.length() - BLOCK_COMMENT_END.length(), toffs + t.length()};
                    }
                }
            }
        }
        return null;
    } finally {
        ((AbstractDocument) context.getDocument()).readUnlock();
    }
}