This article collects typical usage examples of the Java class org.netbeans.api.lexer.Token. If you are wondering what exactly the Java Token class does, how to use it, or what real Token usage looks like, the selected code examples here may help.
The Token class belongs to the org.netbeans.api.lexer package. A total of 15 code examples of the Token class are shown below, sorted by popularity by default.
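Before the examples, here is a minimal, hypothetical sketch (not taken from any of the examples below) of how a Token is typically obtained: a TokenHierarchy is created over a text, its TokenSequence is walked with moveNext(), and each Token is inspected via id(), text() and length(). The choice of HTMLTokenId.language() as the input language and the class name TokenDumpSketch are purely illustrative assumptions.
import org.netbeans.api.html.lexer.HTMLTokenId;
import org.netbeans.api.lexer.Token;
import org.netbeans.api.lexer.TokenHierarchy;
import org.netbeans.api.lexer.TokenSequence;

public class TokenDumpSketch {
    public static void main(String[] args) {
        // Hypothetical input; any CharSequence plus a Language can be used here.
        TokenHierarchy<?> th = TokenHierarchy.create("<div id=\"x\">text</div>", HTMLTokenId.language());
        TokenSequence<HTMLTokenId> ts = th.tokenSequence(HTMLTokenId.language());
        while (ts.moveNext()) {
            Token<HTMLTokenId> token = ts.token();
            // id(), text() and length() are the accessors used throughout the examples below.
            System.out.println(ts.offset() + " " + token.id() + " '" + token.text() + "' len=" + token.length());
        }
    }
}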
Example 1: processCharacterReference
import org.netbeans.api.lexer.Token; // import the required package/class
private Token<DTDTokenId> processCharacterReference() {
    int ch = input.read();
    boolean hex = ch == 'x';
    if (hex) {
        ch = input.read();
    }
    boolean first = true;
    do {
        if (ch == ';') {
            break;
        }
        if (!((ch >= '0' && ch <= '9') ||
                hex && ((ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')))) {
            return null;
        }
        first = false;
    } while ((ch = input.read()) != LexerInput.EOF);
    return createReferenceToken(first ? DTDTokenId.ERROR : DTDTokenId.CHARACTER);
}
Example 2: findContentPositions
import org.netbeans.api.lexer.Token; // import the required package/class
private int[] findContentPositions(PropertySetter p) {
    int start = env.getTreeUtilities().positions(p).getStart();
    int len = 1;
    TokenSequence<XMLTokenId> seq = (TokenSequence<XMLTokenId>)env.getHierarchy().tokenSequence();
    seq.move(start);
    if (seq.moveNext()) {
        Token<XMLTokenId> t = seq.token();
        if (t.id() == XMLTokenId.TEXT) {
            String tokenText = t.text().toString();
            String trimmed = tokenText.trim();
            int indexOfTrimmed = tokenText.indexOf(trimmed);
            int indexOfNl = trimmed.indexOf('\n');
            start = seq.offset() + indexOfTrimmed;
            if (indexOfNl > -1) {
                len = indexOfNl;
            } else {
                len = trimmed.length();
            }
        } else {
            start = seq.offset();
            len = t.length();
        }
    }
    return new int[] { start, len };
}
Example 3: testScriptType_missing
import org.netbeans.api.lexer.Token; // import the required package/class
public void testScriptType_missing() {
    TokenHierarchy th = TokenHierarchy.create("<script>plain</script>", HTMLTokenId.language());
    TokenSequence ts = th.tokenSequence();
    ts.moveStart();
    while (ts.moveNext()) {
        Token t = ts.token();
        if (t.id() == HTMLTokenId.SCRIPT) {
            String scriptType = (String)t.getProperty(HTMLTokenId.SCRIPT_TYPE_TOKEN_PROPERTY);
            assertNull(scriptType);
            return;
        }
    }
    assertTrue("Couldn't find any SCRIPT token!", false);
}
Example 4: nextToken
import org.netbeans.api.lexer.Token; // import the required package/class
public Token<XMLTokenId> nextToken() {
    boolean ok = false;
    try {
        subState = state;
        Token<XMLTokenId> tok = nextTokenInternal();
        if (this.state >= 100) {
            throw new IllegalArgumentException("Unexpected state: " + this.state + " at " + this.input);
        }
        ok = true;
        return tok;
    } finally {
        if (!ok) {
            this.state = INIT;
            this.subState = INIT;
        }
    }
}
Example 5: testScriptType_value
import org.netbeans.api.lexer.Token; // import the required package/class
public void testScriptType_value() {
    TokenHierarchy th = TokenHierarchy.create("<script type=\"text/plain\">plain</script>", HTMLTokenId.language());
    TokenSequence ts = th.tokenSequence();
    ts.moveStart();
    while (ts.moveNext()) {
        Token t = ts.token();
        if (t.id() == HTMLTokenId.SCRIPT) {
            String scriptType = (String)t.getProperty(HTMLTokenId.SCRIPT_TYPE_TOKEN_PROPERTY);
            assertNotNull(scriptType);
            assertEquals("text/plain", scriptType);
            return;
        }
    }
    assertTrue("Couldn't find any SCRIPT token!", false);
}
Example 6: testGCedE
import org.netbeans.api.lexer.Token; // import the required package/class
public void testGCedE() {
    TokenHierarchy th = TokenHierarchy.create("abc", TestPlainTokenId.language());
    TokenSequence tokens = th.tokenSequence();
    tokens.moveStart();
    assertEquals(true, tokens.moveNext());
    TokenSequence embedded = tokens.embedded();
    assertNotNull("There should be an embedded language", embedded);
    WeakReference<Language> refLang = new WeakReference<Language>(embedded.language());
    embedded = null;
    WeakReference<Token> refToken = new WeakReference<Token>(tokens.token());
    tokens = null;
    th = null;
    // This no longer works after the language is statically held in the xxTokenId by the new convention
    //assertGC("The embedded language has not been GCed", refLang);
    assertGC("The token with embedded language has not been GCed", refToken);
}
Example 7: parseDisplayName
import org.netbeans.api.lexer.Token; // import the required package/class
private String parseDisplayName() {
    if (token().id() == DeclarativeHintTokenId.CHAR_LITERAL || token().id() == DeclarativeHintTokenId.STRING_LITERAL) {
        Token<DeclarativeHintTokenId> t = token();
        if (input.moveNext()) {
            if (input.token().id() == DeclarativeHintTokenId.COLON) {
                String displayName = t.text().subSequence(1, t.text().length() - 1).toString();
                nextToken();
                return displayName;
            } else {
                input.movePrevious();
            }
        }
    }
    return null;
}
Example 8: resolvePrereadText
import org.netbeans.api.lexer.Token; // import the required package/class
private Token<DeclarativeHintTokenId> resolvePrereadText(int backupLength, int whitespaceLength) {
    if (whitespaceLength > 0) {
        if (input.readLengthEOF() == whitespaceLength + backupLength) {
            input.backup(input.readLengthEOF() - whitespaceLength);
            return fact.createToken(DeclarativeHintTokenId.WHITESPACE);
        } else {
            input.backup(backupLength);
            return fact.createToken(DeclarativeHintTokenId.JAVA_SNIPPET);
        }
    } else {
        if (input.readLengthEOF() == backupLength) {
            return null;
        } else {
            input.backup(backupLength);
            return fact.createToken(DeclarativeHintTokenId.JAVA_SNIPPET);
        }
    }
}
Example 9: nextToken
import org.netbeans.api.lexer.Token; // import the required package/class
public Token<TestTokenId> nextToken() {
    if (input.read() == LexerInput.EOF) {
        return null;
    }
    input.read();
    if (input.readText().toString().startsWith("%%")) {
        readUntil("\n");
        return factory.createToken(TestTokenId.METADATA);
    }
    if (readUntil("\n%%")) {
        input.backup(2);
    }
    return factory.createToken(TestTokenId.JAVA_CODE);
}
Example 10: checkInitialFold
import org.netbeans.api.lexer.Token; // import the required package/class
public void checkInitialFold() {
    try {
        TokenHierarchy<?> th = info.getTokenHierarchy();
        TokenSequence<JavaTokenId> ts = th.tokenSequence(JavaTokenId.language());
        while (ts.moveNext()) {
            if (ts.offset() >= initialCommentStopPos)
                break;
            Token<JavaTokenId> token = ts.token();
            if (token.id() == JavaTokenId.BLOCK_COMMENT || token.id() == JavaTokenId.JAVADOC_COMMENT) {
                int startOffset = ts.offset();
                addFold(creator.createInitialCommentFold(startOffset, startOffset + token.length()), startOffset);
                break;
            }
        }
    } catch (ConcurrentModificationException e) {
        // from TokenSequence, document probably changed, stop
        stopped = true;
    }
}
Example 11: findTokenWithText
import org.netbeans.api.lexer.Token; // import the required package/class
private static Token<JavaTokenId> findTokenWithText(CompilationInfo info, String text, int start, int end) {
    TokenHierarchy<?> th = info.getTokenHierarchy();
    TokenSequence<JavaTokenId> ts = th.tokenSequence(JavaTokenId.language()).subSequence(start, end);
    while (ts.moveNext()) {
        Token<JavaTokenId> t = ts.token();
        if (t.id() == JavaTokenId.IDENTIFIER) {
            boolean nameMatches;
            if (!(nameMatches = text.equals(info.getTreeUtilities().decodeIdentifier(t.text()).toString()))) {
                ExpressionTree expr = info.getTreeUtilities().parseExpression(t.text().toString(), new SourcePositions[1]);
                nameMatches = expr.getKind() == Kind.IDENTIFIER && text.contentEquals(((IdentifierTree) expr).getName());
            }
            if (nameMatches) {
                return t;
            }
        }
    }
    return null;
}
Example 12: skipWhitespace
import org.netbeans.api.lexer.Token; // import the required package/class
/**
 * Skips whitespace within a declaration, producing a DTD token if any
 * whitespace is found. Does not change the state / substate.
 * @return a WS token, or null if no whitespace was read
 */
private Token<DTDTokenId> skipWhitespace() {
    int ch;
    int start = input.readLength();
    while ((ch = input.read()) != LexerInput.EOF) {
        if (!Character.isWhitespace(ch)) {
            input.backup(1);
            if ((input.readLength() - start) > 0) {
                return tokenFactory.createToken(DTDTokenId.WS, input.readLength() - start);
            }
            break;
        }
    }
    if ((input.readLength() - start) > 0) {
        return tokenFactory.createToken(DTDTokenId.WS, input.readLength() - start);
    } else {
        return null;
    }
}
Example 13: compute
import org.netbeans.api.lexer.Token; // import the required package/class
@Override
protected void compute(CompletionContext context) throws IOException {
    Token<XMLTokenId> attribToken = ContextUtilities.getAttributeToken(context.getDocumentContext());
    if (attribToken == null) {
        return;
    }
    String attribName = attribToken.text().toString();
    if (!ContextUtilities.isPNamespaceName(context.getDocumentContext(), attribName)) {
        return;
    }
    if (!attribName.endsWith("-ref")) { // NOI18N
        return;
    }
    // XXX: Ideally, find out the property name and its expected type
    // to list bean proposals intelligently
    BeansRefCompletor beansRefCompletor = new BeansRefCompletor(true, context.getCaretOffset());
    SpringCompletionResult result = beansRefCompletor.complete(context);
    for (SpringXMLConfigCompletionItem item : result.getItems()) {
        addCacheItem(item);
    }
}
Example 14: findGenericMatchForward
import org.netbeans.api.lexer.Token; // import the required package/class
private int[] findGenericMatchForward(TokenSequence ts, String endTag) {
    Token token = ts.token();
    int start = ts.offset() + token.length() - endTag.length();
    int end = start + endTag.length();
    if (token.text().toString().endsWith(endTag)) {
        return new int[]{start, end};
    }
    while (ts.moveNext()) {
        Token t = ts.token();
        if (t.id() == token.id() && t.text().toString().endsWith(endTag)) {
            start = ts.offset() + t.length() - endTag.length();
            end = start + endTag.length();
            return new int[]{start, end};
        }
    }
    return null;
}
Example 15: tokenOffset
import org.netbeans.api.lexer.Token; // import the required package/class
@Override
public int tokenOffset(int index) {
    Token<?> token = existingToken(index);
    if (token.isFlyweight()) {
        int offset = 0;
        while (--index >= 0) {
            token = existingToken(index);
            offset += token.length();
            if (!token.isFlyweight()) {
                // Return from here instead of break; - see code after while()
                return offset + token.offset(null);
            }
        }
        // might remove token sequence starting with flyweight
        return removedTokensStartOffset + offset;
    } else { // non-flyweight offset
        return token.offset(null);
    }
}