本文整理汇总了Java中org.netbeans.api.lexer.TokenId类的典型用法代码示例。如果您正苦于以下问题:Java TokenId类的具体用法?Java TokenId怎么用?Java TokenId使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
TokenId类属于org.netbeans.api.lexer包,在下文中一共展示了TokenId类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: findFCSInfo
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Looks up the {@code FCSInfo} (font-color-settings info) for the given mime path,
 * first in this instance's local cache, then in the shared global cache, creating
 * and registering a new instance when neither cache has one.
 * <p>
 * Newly resolved entries are stored into the local cache and observed via a weak
 * change listener so this object is notified of settings changes without leaking.
 *
 * @param mimePath mime path used as the cache key
 * @param language language for which the info is created on a cache miss
 * @return the cached or newly created {@code FCSInfo}, never {@code null}
 */
private <T extends TokenId> FCSInfo<T> findFCSInfo(String mimePath, Language<T> language) {
    @SuppressWarnings("unchecked")
    FCSInfo<T> fcsInfo = (FCSInfo<T>) fcsCache.get(mimePath); // Search local cache
    if (fcsInfo == null) { // Search in global cache
        synchronized (globalFCSCache) {
            @SuppressWarnings("unchecked")
            FCSInfo<T> fcsI = (FCSInfo<T>) globalFCSCache.get(mimePath);
            fcsInfo = fcsI;
            if (fcsInfo == null) {
                fcsInfo = new FCSInfo<T>(mimePath, language);
                // NOTE(review): mimeTypeForOptions presumably is non-null only in
                // test setups — confirm; such instances are kept out of the global cache.
                if (mimeTypeForOptions == null) { // Only cache non-test ones globally
                    globalFCSCache.put(mimePath, fcsInfo);
                }
            }
        }
        // Weak listener: fcsInfo must not strongly reference this cache owner.
        fcsInfo.addChangeListener(WeakListeners.change(this, fcsInfo));
        fcsCache.put(mimePath, fcsInfo);
    }
    return fcsInfo;
}
示例2: followsToken
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Scans the token sequence forward or backward for the first token whose id is in
 * {@code searchedIds}, skipping over tokens whose ids are in {@code skipIds}.
 * The scan stops (returning {@code null}) at the first token that is in neither set.
 *
 * @param ts              sequence to scan; its position is moved by this call
 * @param searchedIds     token ids considered a match
 * @param backwards       {@code true} to scan with {@code movePrevious()}, else {@code moveNext()}
 * @param repositionBack  if {@code true}, restore the original index after a match
 *                        (note: the position is NOT restored when no match is found)
 * @param skipIds         token ids transparently skipped during the scan
 * @return the matching token, or {@code null} if a non-skippable token or the
 *         sequence boundary is reached first
 */
public static Token followsToken(TokenSequence ts, Collection<? extends TokenId> searchedIds, boolean backwards, boolean repositionBack, TokenId... skipIds) {
    Collection<TokenId> skip = Arrays.asList(skipIds);
    int index = ts.index(); // remember starting index for optional repositioning
    while (backwards ? ts.movePrevious() : ts.moveNext()) {
        Token token = ts.token();
        TokenId id = token.id();
        if (searchedIds.contains(id)) {
            if (repositionBack) {
                // NOTE(review): asserts moveIndex() returns 0 here — presumably the
                // "remaining distance" contract of this TokenSequence version; confirm
                // against the NetBeans Lexer API javadoc in use.
                int idx = ts.moveIndex(index);
                boolean moved = ts.moveNext();
                assert idx == 0 && moved;
            }
            return token;
        }
        if (!skip.contains(id)) {
            break; // hit a token that is neither searched nor skippable
        }
    }
    return null;
}
示例3: findFirstSentence
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Finds the span of the first sentence of a javadoc comment in the given token
 * sequence. The sentence ends at a DOT token followed by whitespace, or right
 * before a block (non-inline) TAG token.
 *
 * @param seq javadoc token sequence; repositioned to its start by this method
 * @return {@code {startOffset, endOffset}} of the first sentence, or {@code null}
 *         if the sequence is empty or no sentence terminator is found
 */
private int [] findFirstSentence(TokenSequence<? extends TokenId> seq) {
    seq.moveStart();
    if (seq.moveNext()) {
        int start = seq.offset();
        do {
            if (seq.token().id() == JavadocTokenId.DOT) {
                // A dot only terminates the sentence when whitespace follows it.
                if (seq.moveNext()) {
                    if (isWhiteSpace(seq.token())) {
                        return new int [] { start, seq.offset()};
                    }
                    seq.movePrevious(); // not a terminator; restore position
                }
            } else if (seq.token().id() == JavadocTokenId.TAG) {
                // A block tag ends the first sentence; an inline tag ({@...}) does not.
                if (seq.movePrevious()) {
                    if (!seq.token().text().toString().trim().endsWith("{")) {
                        //not an inline tag
                        return new int [] { start, seq.offset()};
                    }
                }
                seq.moveNext(); // undo the movePrevious() lookbehind
            }
        } while (seq.moveNext());
    }
    return null;
}
示例4: isTag
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Tells whether the token is a well-formed HTML tag: an HTML_TAG token of the
 * shape {@code <x...>} (or {@code </x...>} for a closing tag) whose first name
 * character is a letter or digit.
 *
 * @param tag token to examine
 * @return {@code true} if the token looks like an HTML opening or closing tag
 */
private static boolean isTag(Token<? extends TokenId> tag) {
    CharSequence text = tag.text();
    int len = text.length();
    // Must be an HTML_TAG token shaped like <...> with at least one name char.
    if (tag.id() != JavadocTokenId.HTML_TAG
            || len < 3
            || text.charAt(0) != '<' //NOI18N
            || text.charAt(len - 1) != '>') { //NOI18N
        return false;
    }
    if (text.charAt(1) == '/') { //NOI18N
        // Closing tag: needs at least </x> and a name character after the slash.
        return len >= 4 && Character.isLetterOrDigit(text.charAt(2));
    }
    // Opening tag: the name must start right after '<'.
    return Character.isLetterOrDigit(text.charAt(1));
}
示例5: findNextSpellSpan
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Finds the next region of the document that should be spell-checked, starting
 * at {@code nextSearchOffset}. Only XML comment and text tokens are candidates.
 *
 * @return {@code {startOffset, endOffset}} of the next spell-checkable token,
 *         or {@code {-1, -1}} when there is no token hierarchy, the component
 *         is hidden, or no further candidate token exists
 * @throws BadLocationException declared for interface compatibility; the body
 *         itself does not access the document by offset
 */
protected int[] findNextSpellSpan() throws BadLocationException {
    TokenHierarchy<Document> h = TokenHierarchy.get((Document) doc);
    TokenSequence<?> ts = h.tokenSequence();
    if (ts == null || hidden) {
        return new int[]{-1, -1};
    }
    // Resume scanning where the previous search left off.
    ts.move(nextSearchOffset);
    while (ts.moveNext()) {
        TokenId id = ts.token().id();
        // Only comments and plain text contain prose worth spell-checking.
        if (id == XMLTokenId.BLOCK_COMMENT || id == XMLTokenId.TEXT) {
            return new int[]{ts.offset(), ts.offset() + ts.token().length()};
        }
    }
    return new int[]{-1, -1};
}
示例6: testEmbedding
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Checks that WORD tokens of the test plain language expose an embedded
 * character-level token sequence while WHITESPACE tokens expose none.
 */
public void testEmbedding() {
    TokenHierarchy th = TokenHierarchy.create("abc xyz 012 0xFF00 0-1-2-3-4-5-6-7-8-9", TestPlainTokenId.language());
    TokenSequence seq = th.tokenSequence();
    while (seq.moveNext()) {
        TokenId tokenId = seq.token().id();
        TokenSequence inner = seq.embedded();
        if (tokenId == TestPlainTokenId.WHITESPACE) {
            assertNull("Whitespace should not have any embedded language", inner);
        } else if (tokenId == TestPlainTokenId.WORD) {
            assertNotNull("Word should have an embedded token sequence", inner);
            assertNotNull("Word should have an embedded language", inner.language());
            assertEquals("Wrong embedded language", TestCharTokenId.MIME_TYPE, inner.language().mimeType());
        }
    }
}
示例7: createToken
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Creates a token of the given id covering the next {@code tokenLength}
 * already-read characters, and advances the token start index past them.
 *
 * @param id          id of the token being created
 * @param tokenLength length of the token in characters; must be positive and
 *                    must not exceed the number of characters read so far
 * @return the newly created token
 * @throws IllegalArgumentException if {@code tokenLength} is out of range
 */
public Token createToken(TokenId id, int tokenLength) {
    if (tokenLength <= 0) {
        throw new IllegalArgumentException("tokenLength="
                + tokenLength + " <= 0");
    }
    if (tokenIndex + tokenLength > inputIndex) {
        // Cannot create a token longer than what has actually been read.
        throw new IllegalArgumentException("tokenLength="
                + tokenLength + " > number-of-read-characters="
                + (inputIndex - tokenIndex)
        );
    }
    int start = tokenIndex;
    tokenIndex += tokenLength; // consume the characters covered by this token
    return new StringToken(id, text.substring(start, start + tokenLength));
}
示例8: isTagFirstChar
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Tells whether the token starts a tag: a TAG or TEXT token whose text begins
 * with the tag-opening character and which is not an end-tag prefix.
 *
 * @param token token to examine; may be {@code null}
 * @return {@code true} if the token begins with the tag-opening character
 */
public static boolean isTagFirstChar(Token token) {
    if (token == null) {
        return false;
    }
    TokenId id = token.id();
    if (!id.equals(XMLTokenId.TAG) && !id.equals(XMLTokenId.TEXT)) {
        return false;
    }
    String text = token.text().toString();
    // End-tag prefixes ("</") are handled elsewhere and must not match here.
    return !isEndTagPrefix(token) && text.startsWith(TAG_FIRST_CHAR);
}
示例9: isTagLastChar
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Tells whether the token closes a tag: a TAG token whose entire text equals
 * the tag-closing character.
 *
 * @param token token to examine; may be {@code null}
 * @return {@code true} if the token is exactly the tag-closing character
 */
public static boolean isTagLastChar(Token token) {
    if (token == null) {
        return false;
    }
    if (!token.id().equals(XMLTokenId.TAG)) {
        return false;
    }
    return token.text().toString().equals(TAG_LAST_CHAR);
}
示例10: tokenList
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Returns the internal token list backing the given token sequence, obtained
 * reflectively. Intended for tests only; the reflective {@code Field} handle
 * is cached after the first lookup.
 *
 * @param ts token sequence whose backing list is requested
 * @return the backing token list, or (nominally) {@code null} — in practice
 *         any reflection failure aborts the test via {@code TestCase.fail}
 */
public static <T extends TokenId> TokenList<T> tokenList(TokenSequence<T> ts) {
    try {
        if (tokenListField == null) {
            // First access: locate and open the private "tokenList" field.
            tokenListField = ts.getClass().getDeclaredField("tokenList");
            tokenListField.setAccessible(true);
        }
        @SuppressWarnings("unchecked")
        TokenList<T> result = (TokenList<T>) tokenListField.get(ts);
        return result;
    } catch (Exception e) {
        TestCase.fail(e.getMessage());
        return null; // never reached
    }
}
示例11: registerEmbedding
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Registers a language embedding for the given token id under the given mime
 * path, creating the per-mime-path map lazily, and notifies listeners.
 *
 * @param mimePath  mime path the embedding applies to
 * @param id        token id carrying the embedded language
 * @param embedding embedding to register
 */
public static void registerEmbedding(String mimePath, TokenId id, LanguageEmbedding<?> embedding) {
    // checkInstanceExists();
    synchronized (LOCK) {
        Map<TokenId,LanguageEmbedding<?>> embeddings = mime2embeddings.get(mimePath);
        if (embeddings == null) {
            embeddings = new HashMap<TokenId,LanguageEmbedding<?>>();
            mime2embeddings.put(mimePath, embeddings);
        }
        embeddings.put(id, embedding);
    }
    // Notify outside the lock to avoid calling listeners while holding it.
    fireChange();
}
示例12: isTextTag
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Tells whether a TEXT token should be treated as tag-like: either a lone
 * {@code "<"} or text matching the end-of-line tag pattern.
 *
 * @param token token to examine; may be {@code null}
 * @return {@code true} if the token is a tag-like TEXT token
 */
public static boolean isTextTag(Token token) {
    if (token == null) {
        return false;
    }
    if (!token.id().equals(XMLTokenId.TEXT)) {
        return false;
    }
    String text = token.text().toString();
    return text.equals("<")
            || PATTERN_TEXT_TAG_EOLs.matcher(text).matches();
}
示例13: getInsertingText
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Computes the text to insert for an XML attribute completion item, adjusting
 * for surrounding tokens. Side effect: sets {@code caretOffset} so the caret
 * lands inside the inserted quotes (or after a trailing space).
 *
 * @param component the editor component
 * @param textPos   offset where the insertion happens
 * @param primaryText the completion's base text (attribute plus quotes)
 * @param removeLen number of characters to remove; forwarded to super
 * @return the (possibly adjusted) text to insert
 */
@Override
protected String getInsertingText(JTextComponent component, int textPos, String primaryText, int removeLen) {
    createTokenSequence(component);
    int d = tokenSequence.move(textPos);
    if (!tokenSequence.moveNext()) {
        // No token at/after the position: fall back to default behavior.
        return super.getInsertingText(component, textPos, primaryText, removeLen);
    }
    Token token = tokenSequence.token();
    TokenId id = token.id();
    // d == 0 means the caret sits exactly at a token boundary; no typed prefix.
    if (d == 0 && context.getTypedChars() == null) {
        // creating a completely new attribute; if the token at the caret is another attribute, at least insert a trailing space.
        if (id == XMLTokenId.ARGUMENT) {
            caretOffset = 2; // inside quotes
            return primaryText + " ";
        } else {
            caretOffset = 1; // inside quotes;
            return primaryText;
        }
    }
    if (id != XMLTokenId.ARGUMENT) {
        caretOffset = 1; // inside quotes;
        return primaryText;
    } else {
        // Completing over an existing attribute: look ahead for its value.
        while (tokenSequence.moveNext()) {
            token = tokenSequence.token();
            id = token.id();
            if (id == XMLTokenId.WS || id == XMLTokenId.OPERATOR) {
                continue; // skip whitespace and '=' between name and value
            }
            if (id == XMLTokenId.VALUE) {
                // without ending quotes
                caretOffset = 0;
                return primaryText.substring(0, primaryText.length() - 1);
            }
        }
        caretOffset = 1; // inside quotes;
        return primaryText;
    }
}
示例14: updateImpl
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Performs a token-hierarchy update after a modification: bumps the mod count,
 * builds the per-level list of update items starting from the root token list,
 * and processes them top-down (see the numbered algorithm notes below).
 *
 * @param incTokenList          root incremental token list being updated
 * @param rootChildrenLanguages languages of the root's child embeddings
 */
private <T extends TokenId> void updateImpl(IncTokenList<T> incTokenList, Object rootChildrenLanguages) {
    incTokenList.incrementModCount();
    // Update starts at the top language path an goes to possibly embedded-token-lists (ETLs)
    // based on the top-level change. If there are embeddings that join sections
    // a token-list-list (TLL) exists for the given language path that maintains
    // all ETLs for the whole input source.
    // 1. The updating must always go from upper levels to more embedded levels of the token hierarchy
    //    to ensure that the tokens of the possible joined ETLs get updated properly
    //    as the tokens created/removed at upper levels may contain embeddings that will
    //    need to be added/removed from TLL of more embedded level.
    // 2. A single insert/remove may produce token updates at several
    //    places in the document due to joining of ETLs. In turn the added/removed
    //    ETLs may affect more embedded levels so the update can affect
    //    multiple places of input source.
    // 3. The algorithm must collect both removed and added ETLs
    //    and process them prior calling the TokenListUpdater to update actual tokens.
    // 4. For a removed ETL the updating must check and collect nested ETLs
    //    because some embedded tokens of the removed ETL might contain
    //    another ETL that might be maintained as TLL.
    // 5. Added ETLs must also be inspected for nested ETLs maintained in a TLL.
    //    Initialization of added ETLs is done when the particular level is processed
    //    because TLL can join sections so they must be lexed once the definite additions
    //    and removals of ETLs are known. For non-joining ETLs this could be done
    //    immediately but it is not necessary so it's done at the same time as well.
    // 6. For all TLLs their parent TLLs (for language path with last language stripped)
    //    are also maintained mandatorily.
    // 7. Algorithm maintains "item-levels" to respect top-down processing
    //    according to language-path-depth.
    itemLevels = new ArrayList<List<UpdateItem<?>>>(3); // Suffice for two-level embedding without realloc
    // Create root item first for root token list
    UpdateItem<T> rootItem = new UpdateItem<T>(this, null, rootChildrenLanguages);
    rootItem.tokenListChange = new TokenListChange<T>(incTokenList);
    addItem(rootItem, 0);
    processLevelInfos();
}
示例15: HSImpl
import org.netbeans.api.lexer.TokenId; //导入依赖的package包/类
/**
 * Creates a syntax-highlight sequence over the given document range.
 * Clamps the range to the document bounds, primes the top-level token
 * sequence at the start offset, and sets the initial state machine state.
 *
 * @param version     hierarchy version this sequence was created for
 * @param scanner     token hierarchy of the document
 * @param startOffset requested start offset (clamped to >= 0)
 * @param endOffset   requested end offset (clamped to document length)
 */
public HSImpl(long version, TokenHierarchy<? extends Document> scanner, int startOffset, int endOffset) {
    this.version = version;
    this.scanner = scanner;
    startOffset = Math.max(startOffset, 0); // Tests may request Integer.MIN_VALUE for startOffset
    this.startOffset = startOffset;
    this.sequences = new ArrayList<TSInfo<?>>(4);
    // Highlight cursor starts as an empty range at startOffset.
    this.hiStartOffset = startOffset;
    this.hiEndOffset = startOffset;
    Document doc = scanner.inputSource();
    this.docText = DocumentUtilities.getText(doc);
    endOffset = Math.min(endOffset, docText.length());
    this.endOffset = endOffset;
    newlineOffset = -1;
    updateNewlineOffset(startOffset);
    @SuppressWarnings("unchecked")
    TokenSequence<TokenId> seq = (TokenSequence<TokenId>) scanner.tokenSequence();
    if (seq != null) {
        seq.move(startOffset);
        TSInfo<TokenId> tsInfo = new TSInfo<TokenId>(seq);
        sequences.add(tsInfo);
        state = S_NEXT_TOKEN; // ready to produce the first token
    } else {
        state = S_DONE; // no tokens in this document; nothing to highlight
    }
    if (LOG.isLoggable(Level.FINE)) {
        logHelper = new LogHelper();
        logHelper.startTime = System.currentTimeMillis();
        LOG.fine("SyntaxHighlighting.HSImpl <" + startOffset + "," + endOffset + ">\n"); // NOI18N
        if (LOG.isLoggable(Level.FINEST)) {
            LOG.log(Level.FINEST, "Highlighting caller", new Exception()); // NOI18N
        }
    }
}