本文整理汇总了Java中org.netbeans.api.lexer.TokenSequence.token方法的典型用法代码示例。如果您正苦于以下问题:Java TokenSequence.token方法的具体用法?Java TokenSequence.token怎么用?Java TokenSequence.token使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.netbeans.api.lexer.TokenSequence
的用法示例。
在下文中一共展示了TokenSequence.token方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getMarkList
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Scans every token of the sequence and collects fold mark info for it.
 *
 * @param seq token sequence; rewound to its start before scanning
 * @return collected marks in token order, or {@code null} if none were found
 */
private List<FoldMarkInfo> getMarkList(TokenSequence seq) {
    List<FoldMarkInfo> result = null;
    seq.moveStart();
    while (seq.moveNext()) {
        FoldMarkInfo info = null;
        try {
            info = scanToken(seq.token());
        } catch (BadLocationException e) {
            // log and skip the offending token; keep scanning the rest
            LOG.log(Level.WARNING, null, e);
        }
        if (info == null) {
            continue;
        }
        if (result == null) {
            // allocate lazily so token sequences without marks stay cheap
            result = new ArrayList<FoldMarkInfo>();
        }
        result.add(info);
    }
    return result;
}
示例2: getAttribute
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * If the sequence is positioned on an attribute VALUE token, walks backwards
 * over separators to find the ARGUMENT (attribute name) token it belongs to.
 *
 * @param ts XML token sequence positioned on a token
 * @return the attribute-name token, or {@code null} when the current token is
 *         not a value or no matching argument precedes it
 */
private Token<XMLTokenId> getAttribute(TokenSequence<XMLTokenId> ts) {
    Token<XMLTokenId> current = ts.token();
    if (current.id() != XMLTokenId.VALUE) {
        return null;
    }
    while (ts.movePrevious()) {
        current = ts.token();
        XMLTokenId id = current.id();
        if (id == XMLTokenId.ARGUMENT) {
            return current;
        }
        // operator/whitespace/error tokens may sit between value and name
        boolean separator = id == XMLTokenId.OPERATOR
                || id == XMLTokenId.EOL
                || id == XMLTokenId.ERROR
                || id == XMLTokenId.WS;
        if (!separator) {
            return null;
        }
    }
    return null;
}
示例3: tokenize
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Lexes the given input with the CSS lexer and returns all of its tokens,
 * skipping whitespace and newline tokens.
 *
 * @param input character sequence to tokenize
 * @return tokens (with their offsets into {@code input}) in document order
 */
private static List<Token> tokenize(CharSequence input) {
    List<Token> result = new LinkedList<>();
    TokenHierarchy<CharSequence> hierarchy = TokenHierarchy.create(input, CssTokenId.language());
    TokenSequence<CssTokenId> sequence = hierarchy.tokenSequence(CssTokenId.language());
    sequence.moveStart();
    while (sequence.moveNext()) {
        org.netbeans.api.lexer.Token<CssTokenId> lexerToken = sequence.token();
        CssTokenId id = lexerToken.id();
        // whitespace and newlines carry no information for the caller
        if (id == CssTokenId.WS || id == CssTokenId.NL) {
            continue;
        }
        result.add(new Token(id, sequence.offset(), lexerToken.length(), input));
    }
    return result;
}
示例4: dumpTokens
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Renders every token of the sequence, one per line, in the form
 * {@code ID [PARTTYPE] 'text'} (the part type is printed only when the
 * token is incomplete). Intended for debugging/test output.
 *
 * @param seq sequence to dump; it is rewound to its start first
 * @return textual dump of all tokens
 */
private static CharSequence dumpTokens(TokenSequence<?> seq) {
    StringBuilder sb = new StringBuilder();
    seq.moveStart();
    boolean first = true;
    while (seq.moveNext()) {
        if (!first) {
            sb.append('\n');
        }
        first = false;
        Token<?> token = seq.token();
        sb.append(token.id());
        if (token.partType() != PartType.COMPLETE) {
            // only incomplete tokens advertise their part type
            sb.append(' ').append(token.partType());
        }
        sb.append(' ').append('\'').append(token.text()).append('\'');
    }
    return sb;
}
示例5: getEditorCookie
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Resolves the FXML token at the given offset to a file name, looks the file
 * up relative to the document's file, and returns the {@link EditCookie} of
 * its data object.
 *
 * @param doc document to inspect
 * @param offset offset identifying the token whose text names a file
 * @return an edit cookie for the referenced file, or {@code null} when the
 *         token sequence, file, or cookie cannot be resolved
 */
private EditCookie getEditorCookie(Document doc, int offset) {
    TokenHierarchy<?> th = TokenHierarchy.get(doc);
    TokenSequence ts = th.tokenSequence(Language.find(JavaFXEditorUtils.FXML_MIME_TYPE));
    if (ts == null) {
        return null;
    }
    ts.move(offset);
    if (!ts.moveNext()) {
        return null;
    }
    // the token text is interpreted as a file name relative to this document
    String name = ts.token().text().toString();
    FileObject props = findFile(getFileObject(doc), name);
    if (props == null) {
        return null;
    }
    try {
        return DataObject.find(props).getLookup().lookup(EditCookie.class);
    } catch (DataObjectNotFoundException ex) {
        Exceptions.printStackTrace(ex);
        return null;
    }
}
示例6: isInsideReference
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Checks if the passed position {@code pos} is inside java reference of
 * some javadoc tag. This lightweight implementation ignores method parameters
 *
 * @param jdts javadoc token sequence to search
 * @param pos position to check
 * @return {@code true} if the position is inside the reference.
 */
public static boolean isInsideReference(TokenSequence<JavadocTokenId> jdts, int pos) {
// position on the token at/after pos; when pos sits exactly on a token
// boundary (delta == 0) also consider the token just before it
int delta = jdts.move(pos);
if (jdts.moveNext() && JavadocTokenId.IDENT == jdts.token().id()
|| delta == 0 && jdts.movePrevious() && JavadocTokenId.IDENT == jdts.token().id()) {
// go back and find tag
boolean isBeforeWS = false; // whitespace seen between the current token and pos?
while (jdts.movePrevious()) {
Token<JavadocTokenId> jdt = jdts.token();
switch (jdt.id()) {
case DOT:
case HASH:
case IDENT:
// still inside the dotted reference; whitespace before these tokens
// means the reference ended without reaching its owning tag
if (isBeforeWS) {
return false;
} else {
continue;
}
case OTHER_TEXT:
isBeforeWS |= JavadocCompletionUtils.isWhiteSpace(jdt);
isBeforeWS |= JavadocCompletionUtils.isLineBreak(jdt);
// non-whitespace text breaks the reference
if (isBeforeWS) {
continue;
} else {
return false;
}
case TAG:
// accept only when the tag is separated by whitespace and takes a
// java reference (presumably @see/@link-style tags; see isReferenceTag)
return isBeforeWS && isReferenceTag(jdt);
case HTML_TAG:
return false;
default:
return false;
}
}
}
return false;
}
示例7: getNamespacesFromStartTags
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Finds namespaces declared in all start tags in the document and keeps a map
 * of namespaces to their prefixes.
 *
 * @param document document to scan for {@code xmlns} declarations
 * @return map from namespace value to the prefix derived from the declaring
 *         {@code xmlns[:prefix]} attribute
 */
public static HashMap<String, String> getNamespacesFromStartTags(Document document) {
    HashMap<String, String> map = new HashMap<String, String>();
    // lock the document while walking its token hierarchy
    ((AbstractDocument) document).readLock();
    try {
        TokenSequence ts = TokenHierarchy.get(document).tokenSequence();
        String pendingXmlns = null;
        while (ts.moveNext()) {
            Token t = ts.token();
            String text = t.text().toString();
            if (t.id() == XMLTokenId.ARGUMENT && text.startsWith(XMLConstants.XMLNS_ATTRIBUTE)) {
                // remember the xmlns attribute; its VALUE token follows
                pendingXmlns = text;
            } else if (t.id() == XMLTokenId.VALUE && pendingXmlns != null) {
                String value = text;
                // strip the surrounding quotes, if present
                if (value.length() >= 2 && (value.startsWith("'") || value.startsWith("\""))) {
                    value = value.substring(1, value.length() - 1);
                }
                map.put(value, CompletionUtil.getPrefixFromXMLNS(pendingXmlns));
                pendingXmlns = null;
            }
        }
    } finally {
        ((AbstractDocument) document).readUnlock();
    }
    return map;
}
示例8: skipAttributeValue
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Handle fuzziness of attribute end detection. Advances the passed TokenSequence
 * to the token <b>after</b> attribute value end delimiter. The delimiter (quote, doublequote)
 * is passed as a parameter. The method returns the token after the attribute value if the delimiter is
 * found and positions the TokenSequence to the returned token. If there's no delimiter,
 * the method returns {@code null} and the TokenSequence position/state is not defined.
 *
 * @param ts token sequence positioned at (or inside) the attribute value
 * @param delim closing delimiter to look for (quote or doublequote)
 * @return Token after attribute value or null.
 */
public static Token<XMLTokenId> skipAttributeValue(TokenSequence ts, char delim) {
    boolean ok = true;
    for (; ok; ok = ts.moveNext()) {
        Token<XMLTokenId> next = ts.token();
        CharSequence cs = next.text();
        // guard against zero-length token text: charAt(-1) would throw
        // StringIndexOutOfBoundsException on an empty token
        if (cs.length() > 0 && cs.charAt(cs.length() - 1) == delim) {
            ts.moveNext();
            return ts.token();
        }
    }
    return null;
}
示例9: precedesClosingTag
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Determines whether the next token in the sequence begins a closing tag,
 * i.e. is a TAG token whose text starts with {@code "</"}.
 *
 * @param seq sequence positioned just before the token to examine
 * @return {@code true} if the following token opens a closing tag
 */
private boolean precedesClosingTag(TokenSequence seq) {
    // all whitespace should have been skipped by now
    if (!seq.moveNext()) {
        return false;
    }
    Token next = seq.token();
    return next.id() == XMLTokenId.TAG
            && next.text().toString().startsWith("</");
}
示例10: testLanguagesEmbeddingMapMT
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Verifies language embedding for a text/x-simple-plain document: every WORD
 * token must expose an embedded text/x-simple-char token sequence whose first
 * token starts 1 char after the word (startSkipLength) and whose last token
 * ends 2 chars before the word's end (endSkipLength).
 */
public void testLanguagesEmbeddingMapMT() throws Exception {
Document doc = new PlainDocument();
doc.putProperty("mimeType", "text/x-simple-plain");
// All words have to be longer than 3 characters
doc.insertString(0, "Hello 1234 0xFF00", SimpleAttributeSet.EMPTY);
TokenHierarchy th = TokenHierarchy.get(doc);
assertNotNull("Can't find token hierarchy for a text/x-simple-plain document", th);
TokenSequence seq = th.tokenSequence();
Language lang = seq.language();
assertNotNull("Can't find language for text/x-simple-plain", lang);
assertEquals("Wrong language", "text/x-simple-plain", lang.mimeType());
// visit each token by index and check its embedded sequence, if any
for(int i = 0; i < seq.tokenCount(); i++) {
seq.moveIndex(i);
assertTrue(seq.moveNext());
Token token = seq.token();
if (token.id() == SimplePlainTokenId.WORD) {
TokenSequence embeddedSeq = seq.embedded();
assertNotNull("Can't find embedded token sequence", embeddedSeq);
Language embeddedLang = embeddedSeq.language();
assertNotNull("Can't find language of the embedded sequence", embeddedLang);
assertEquals("Wrong language of the embedded sequence", "text/x-simple-char", embeddedLang.mimeType());
// first embedded token starts startSkipLength (1) after the word start
embeddedSeq.moveStart();
assertTrue("Embedded sequence has no tokens (moveFirst)", embeddedSeq.moveNext());
assertEquals("Wrong startSkipLength", 1, embeddedSeq.offset() - seq.offset());
// last embedded token ends endSkipLength (2) before the word end
embeddedSeq.moveEnd();
assertTrue("Embedded sequence has no tokens (moveLast)", embeddedSeq.movePrevious());
assertEquals("Wrong endSkipLength", 2,
(seq.offset() + seq.token().length()) - (embeddedSeq.offset() + embeddedSeq.token().length()));
}
}
}
示例11: readCurrentContent
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Re-reads the token at the recorded {@code tagStartOffset} and updates the
 * parser state accordingly: marks self-closed tags as finished, flags the
 * root tag, and delegates to {@code readTagContent}/{@code readPIContent}
 * for tag / processing-instruction tokens. No-op when no tag start has been
 * recorded ({@code tagStartOffset == -1}).
 *
 * @param seq XML token sequence to reposition and read from
 */
private void readCurrentContent(TokenSequence<XMLTokenId> seq) {
if (tagStartOffset == -1) {
return;
}
// a positive diff means tagStartOffset falls inside a token, which is
// not expected for a recorded tag start
int diff = seq.move(tagStartOffset);
if (diff > 0) {
throw new IllegalStateException();
}
if (!seq.moveNext()) {
return;
}
Token<XMLTokenId> t = seq.token();
if (t.id() == XMLTokenId.TAG) {
// the tag can be self-closed, without any arguments:
if (t.text().toString().endsWith("/>")) {
finished = true;
tagEndOffset = seq.offset() + t.length();
selfClosed = true;
return;
}
// NOTE(review): rootTagStartOffset is presumably set by the caller to
// the document root's start; equal offsets mark this tag as the root
if (rootTagStartOffset == seq.offset()) {
currentIsRoot = true;
}
readTagContent(seq);
} else if (t.id() == XMLTokenId.PI_START) {
readPIContent(seq);
}
}
示例12: isUnclosedStartTagFoundBefore
import java.util.ArrayDeque;
import java.util.Deque;
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Scans backwards from the caret looking for a start tag that is not matched
 * by an end tag. End tags met on the way are pushed on a stack; a start tag
 * whose name equals the top of the stack is considered properly paired and
 * skipped.
 *
 * @param caretPos position to start the backward scan from
 * @param tokenSequence token sequence of the document
 * @return {@code true} if an unmatched start tag precedes the caret
 */
private static boolean isUnclosedStartTagFoundBefore(int caretPos,
        TokenSequence tokenSequence) {
    tokenSequence.move(caretPos);
    boolean startTagFound = false, tagLastCharFound = false;
    // ArrayDeque replaces the legacy synchronized java.util.Stack; identical
    // LIFO semantics (push/pop/peek) without the synchronization overhead
    Deque<String> existingEndTags = new ArrayDeque<String>();
    String startTagName, endTagName;
    while (tokenSequence.movePrevious()) {
        Token token = tokenSequence.token();
        if (isTagLastChar(token)) {
            tagLastCharFound = true;
        } else if (isEndTagPrefix(token)) {
            tagLastCharFound = startTagFound = false;
            endTagName = getTokenTagName(token);
            if (endTagName != null) {
                existingEndTags.push(endTagName);
            }
        } else if (isTagFirstChar(token) && tagLastCharFound) {
            startTagName = getTokenTagName(token);
            endTagName = existingEndTags.isEmpty() ? null : existingEndTags.peek();
            if ((startTagName != null) && (endTagName != null) &&
                    startTagName.equals(endTagName)) {
                // properly paired start/end tag — skip it and keep scanning
                existingEndTags.pop();
                tagLastCharFound = startTagFound = false;
                continue;
            }
            startTagFound = true;
            break;
        }
    }
    return startTagFound;
}
示例13: insideFQN
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Consumes a dotted fully-qualified name (IDENT (DOT IDENT)*) from the token
 * sequence, recording its span into the {@code begin}/{@code end} fields and,
 * when non-empty, storing the accumulated name into {@code fqn}. A HASH token
 * hands off to {@code insideMember} for the member part; any other token ends
 * the scan with the sequence stepped back before it.
 */
private void insideFQN (
TokenSequence<JavadocTokenId> tokenSequence
) {
StringBuilder sb = new StringBuilder ();
STOP: while (tokenSequence.moveNext ()) {
Token<JavadocTokenId> token = tokenSequence.token();
switch(token.id()) {
case IDENT:
sb.append(token.text());
// begin is set once, at the first token of the FQN
if (begin < 0) {
begin = tokenSequence.offset();
}
end = tokenSequence.offset() + token.length();
break;
case HASH:
if (begin < 0) {
begin = tokenSequence.offset();
}
end = tokenSequence.offset() + token.length();
// '#' starts the member reference part; delegate and stop
insideMember(tokenSequence);
break STOP;
case DOT:
// reject a leading dot or two consecutive dots
if (sb.length() == 0 || '.' == sb.charAt(sb.length() - 1)) {
break STOP;
}
sb.append('.');
end = tokenSequence.offset() + token.length();
break;
default:
// not part of the FQN: step back so the caller sees this token
tokenSequence.movePrevious ();
break STOP;
}
}
if (sb.length() > 0) {
fqn = sb;
}
}
示例14: assertTokenEquals
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Asserts that the token at the current position of the sequence has the given
 * id and, optionally, the given text, sequence offset, and length.
 *
 * @param message extra context prepended (via messagePrefix) to failure messages
 * @param ts sequence positioned on the token to verify
 * @param id expected token id
 * @param text expected token text, or {@code null} to skip the text check
 * @param offset expected {@code ts.offset()}, or -1 to skip the offset check
 * @param length expected token length, or -1 to skip the length check
 */
private static void assertTokenEquals(String message, TokenSequence<?> ts, TokenId id, String text, int offset, int length) {
    message = messagePrefix(message);
    Token<?> t = ts.token();
    TestCase.assertNotNull("Token is null", t);
    TokenId tId = t.id();
    TestCase.assertEquals(message + "Invalid token.id() for text=\"" + debugTextOrNull(t.text()) + '"', id, tId);
    if (length != -1) {
        // include the message prefix for consistency with every other assertion here
        TestCase.assertEquals(message + "Invalid token length", length, t.length());
    }
    if (text != null) {
        CharSequence tText = t.text();
        assertTextEquals(message + "Invalid token.text() for id=" + LexerUtilsConstants.idToString(id), text, tText);
        TestCase.assertEquals(message + "Invalid token.length()", text.length(), t.length());
    }
    if (offset != -1) {
        int tsOffset = ts.offset();
        TestCase.assertEquals(message + "Invalid tokenSequence.offset()", offset, tsOffset);
        // It should also be true that if the token is non-flyweight then
        // ts.offset() == t.offset()
        // and if it's flyweight then t.offset() == -1
        int tOffset = t.offset(null);
        assertTokenOffsetMinusOneForFlyweight(t.isFlyweight(), tOffset);
        if (!t.isFlyweight()) {
            assertTokenOffsetsEqual(message, tOffset, offset);
        }
    }
}
示例15: getEmbeddings
import org.netbeans.api.lexer.TokenSequence; //导入方法依赖的package包/类
/**
 * Creates EL embeddings for every EL token of an XHTML snapshot. ELs that
 * appear to sit inside an attribute value (the preceding HTML chunk ends with
 * a quote) are additionally marked, and each expression is followed by a
 * separator snippet for easier handling in the EL parser.
 *
 * @param snapshot snapshot to derive embeddings from
 * @return a single combined embedding, or an empty list when no EL was found
 */
@Override
public List<Embedding> getEmbeddings(Snapshot snapshot) {
    TokenHierarchy<?> th = snapshot.getTokenHierarchy();
    TokenSequence<XhtmlElTokenId> sequence = th.tokenSequence(XhtmlElTokenId.language());
    List<Embedding> result = new ArrayList<>();
    sequence.moveStart();
    // heuristic ("unbelievable hack"): remember whether the last HTML chunk
    // ended with ' or " to distinguish ELs inside attribute values
    boolean previousHtmlEndsWithQuote = false;
    while (sequence.moveNext()) {
        Token t = sequence.token();
        if (t.id() == XhtmlElTokenId.HTML) {
            char last = t.text().charAt(t.length() - 1);
            previousHtmlEndsWithQuote = last == '"' || last == '\'';
        }
        if (t.id() == XhtmlElTokenId.EL) {
            result.add(snapshot.create(sequence.offset(), t.length(), "text/x-el")); //NOI18N
            if (previousHtmlEndsWithQuote) {
                // it *looks like* the EL is inside an attribute value
                result.add(snapshot.create(ATTRIBUTE_EL_MARKER, "text/x-el")); //NOI18N
            }
            // just to separate expressions for easier handling in EL parser
            result.add(snapshot.create(Constants.LANGUAGE_SNIPPET_SEPARATOR, "text/x-el")); //NOI18N
        }
    }
    return result.isEmpty()
            ? Collections.<Embedding>emptyList()
            : Collections.singletonList(Embedding.create(result));
}