This page collects typical usage examples of the Java method org.netbeans.api.lexer.Token.length. If you have been wondering what Token.length does, how to use it, or where to find example code, the curated method examples below may help. You can also read further about the enclosing class org.netbeans.api.lexer.Token.
Below are 15 code examples of Token.length, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
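Before the examples, here is a minimal sketch (not taken from any of the snippets below; the class and method names are illustrative) of the pattern almost all of them share: position a TokenSequence at an offset, then compute the token's document span as [ts.offset(), ts.offset() + token.length()).

import javax.swing.text.Document;
import org.netbeans.api.lexer.Token;
import org.netbeans.api.lexer.TokenHierarchy;
import org.netbeans.api.lexer.TokenSequence;

public class TokenSpanAtOffset {
    /** Returns {start, end} of the token at the given offset, or null if there is none. */
    static int[] tokenSpan(Document doc, int offset) {
        // Real callers should hold the document read lock (e.g. via doc.render(...)),
        // as several of the examples below do.
        TokenSequence<?> ts = TokenHierarchy.get(doc).tokenSequence();
        if (ts == null) {
            return null;                 // no lexer registered for this document
        }
        ts.move(offset);
        if (!ts.moveNext()) {
            return null;                 // offset is behind the last token
        }
        Token<?> token = ts.token();
        int start = ts.offset();
        return new int[] { start, start + token.length() };
    }
}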
Example 1: findMatchingPair
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
/**
* Match paired tokens such as PI_START/PI_END.
*/
private int[] findMatchingPair(TokenSequence ts, XMLTokenId idToMatch, boolean isForward) {
    while (isForward ? ts.moveNext() : ts.movePrevious()) {
        Token t = ts.token();
        //we don't want to scan the entire document
        //if it hits a tag before match, return null
        if (t.id() == XMLTokenId.TAG)
            return null;
        if (t.id() == idToMatch) {
            int start = ts.offset();
            int end = start + t.length();
            return new int[] {start, end};
        }
    }
    return null;
}
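A hedged usage sketch (not part of the original example) showing how a matcher like findMatchingPair is typically driven: obtain the XML token sequence from the document's TokenHierarchy, position it at the caret, and scan from there. The method name, the "text/xml" mime type, and the choice of PI_END are assumptions made for illustration.

import javax.swing.text.Document;
import org.netbeans.api.lexer.Language;
import org.netbeans.api.lexer.TokenHierarchy;
import org.netbeans.api.lexer.TokenSequence;
import org.netbeans.api.xml.lexer.XMLTokenId;

// Sits in the same class as findMatchingPair() from the example above.
int[] matchPiEndFromCaret(Document doc, int caretOffset) {
    TokenHierarchy<?> th = TokenHierarchy.get(doc);
    // "text/xml" is assumed to be the mime type the XML lexer is registered for
    TokenSequence ts = th.tokenSequence(Language.find("text/xml"));
    if (ts == null) {
        return null;                              // document is not lexed as XML
    }
    ts.move(caretOffset);
    if (!ts.moveNext()) {
        return null;                              // no token at the caret
    }
    // e.g. scan forward from a PI_START token for its matching PI_END
    return findMatchingPair(ts, XMLTokenId.PI_END, true);
}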
Example 2: findGenericOrigin
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
/**
 * For CDATA sections and XML comments there are no separate start and end tokens:
 * the XML lexer gives us a single token, and we have to locate the delimiter inside it.
 */
private int[] findGenericOrigin(TokenSequence ts, String startTag, String endTag) {
    Token token = ts.token();
    String text = token.text().toString();
    int start = ts.offset();
    int end = start + startTag.length();
    //if context.getSearchOffset() is inside start tag such as "<!--"
    if (text.startsWith(startTag) &&
            start <= searchOffset && end > searchOffset)
        return new int[] {start, end};
    //if context.getSearchOffset() is inside end tag such as "-->"
    start = ts.offset() + token.length() - endTag.length();
    end = start + endTag.length();
    if (text.endsWith(endTag) &&
            start <= searchOffset && end >= searchOffset)
        return new int[] {start, end};
    //if none works return null
    return null;
}
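To make the offset arithmetic above concrete, here is a small self-contained sketch (illustrative values only, not from the original source) that applies the same start/end computations to a plain comment string:

public class CommentDelimiterSpans {
    public static void main(String[] args) {
        String text = "<!-- note -->";    // the whole comment arrives as a single token
        int tokenOffset = 100;            // pretend the token starts at document offset 100

        // span of the opening delimiter "<!--": [offset, offset + startTag.length())
        int startFrom = tokenOffset;
        int startTo = tokenOffset + "<!--".length();                   // 104

        // span of the closing delimiter "-->", counted back from the token end,
        // i.e. offset + token.length() - endTag.length()
        int endFrom = tokenOffset + text.length() - "-->".length();    // 110
        int endTo = endFrom + "-->".length();                          // 113

        System.out.println(startFrom + ".." + startTo);   // 100..104
        System.out.println(endFrom + ".." + endTo);       // 110..113
    }
}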
Example 3: readRootElement
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
/**
 * Skips to and reads the root element. Reads the root element's attributes,
 * so we can detect whether the required namespace(s) are present.
 *
 * This MUST be called prior to readCurrentContent().
 * @param seq the token sequence to read from
 */
private void readRootElement(TokenSequence<XMLTokenId> seq) {
    seq.move(0);
    while (seq.moveNext()) {
        Token<XMLTokenId> t = seq.token();
        XMLTokenId id = t.id();
        if (id == XMLTokenId.TAG && t.length() > 1) {
            int startOffset = seq.offset();
            readTagContent(seq);
            // reassign stuff:
            copyToRoot();
            rootTagStartOffset = startOffset;
            rootAttrInsertOffset = startOffset + t.length();
            if (t.text().charAt(t.length() - 1) == '>') {
                rootAttrInsertOffset--;
                if (t.length() > 2 && t.text().charAt(t.length() - 2) == '/') {
                    rootAttrInsertOffset--;
                }
            }
            findRootInsertionPoint();
            return;
        }
    }
}
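The '>' / "/>" back-off in readRootElement is easy to get wrong, so here is a minimal standalone sketch (names and values are illustrative, not from the original module) that replays the same arithmetic on plain strings:

public class RootAttrInsertOffsetDemo {
    static int attrInsertOffset(String tagToken, int startOffset) {
        int insert = startOffset + tagToken.length();                    // just past the token
        if (tagToken.charAt(tagToken.length() - 1) == '>') {
            insert--;                                                    // step back before '>'
            if (tagToken.length() > 2 && tagToken.charAt(tagToken.length() - 2) == '/') {
                insert--;                                                // step back before "/>"
            }
        }
        return insert;
    }

    public static void main(String[] args) {
        System.out.println(attrInsertOffset("<root>", 0));    // 5, i.e. right before '>'
        System.out.println(attrInsertOffset("<root/>", 0));   // 5, i.e. right before "/>"
    }
}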
Example 4: findIdentifierSpan
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
public static int[] findIdentifierSpan(final TreePath decl, final CompilationInfo info, final Document doc) {
    final int[] result = new int[] {-1, -1};
    Runnable r = new Runnable() {
        public void run() {
            Token<JavaTokenId> t = findIdentifierSpan(info, doc, decl);
            if (t != null) {
                result[0] = t.offset(null);
                result[1] = t.offset(null) + t.length();
            }
        }
    };
    if (doc != null) {
        doc.render(r);
    } else {
        r.run();
    }
    return result;
}
Example 5: findContentPositions
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
private int[] findContentPositions(PropertySetter p) {
    int start = env.getTreeUtilities().positions(p).getStart();
    int len = 1;
    TokenSequence<XMLTokenId> seq = (TokenSequence<XMLTokenId>) env.getHierarchy().tokenSequence();
    seq.move(start);
    if (seq.moveNext()) {
        Token<XMLTokenId> t = seq.token();
        if (t.id() == XMLTokenId.TEXT) {
            String tokenText = t.text().toString();
            String trimmed = tokenText.trim();
            int indexOfTrimmed = tokenText.indexOf(trimmed);
            int indexOfNl = trimmed.indexOf('\n');
            start = seq.offset() + indexOfTrimmed;
            if (indexOfNl > -1) {
                len = indexOfNl;
            } else {
                len = trimmed.length();
            }
        } else {
            start = seq.offset();
            len = t.length();
        }
    }
    return new int[] { start, len };
}
Example 6: getIdentifierSpan
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
public static int[] getIdentifierSpan(Document doc, int offset, Token[] token) {
    FileObject fo = getFileObject(doc);
    if (fo == null) {
        //do nothing if FO is not attached to the document - the goto would not work anyway:
        return null;
    }
    Project prj = FileOwnerQuery.getOwner(fo);
    if (prj == null) {
        return null;
    }
    NbModuleProvider module = prj.getLookup().lookup(NbModuleProvider.class);
    if (module == null) {
        return null;
    }
    TokenHierarchy<?> th = TokenHierarchy.get(doc);
    TokenSequence ts = th.tokenSequence(Language.find("text/x-manifest"));
    if (ts == null) {
        return null;
    }
    ts.move(offset);
    if (!ts.moveNext()) {
        return null;
    }
    Token t = ts.token();
    if (findFile(fo, t.toString()) != null) {
        return new int[] { ts.offset(), ts.offset() + t.length() };
    }
    return null;
}
Example 7: compute
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
private void compute(
        TokenSequence tokenSequence,
        int offset,
        Result resultSet,
        Document doc,
        Language language
) {
    String start = null;
    Token token = tokenSequence.token();
    start = token.text().toString();
    String tokenType = language.getTokenType(token.id().ordinal());
    List<Feature> features = language.getFeatureList().getFeatures(COMPLETION, tokenType);
    Iterator<Feature> it = features.iterator();
    while (it.hasNext()) {
        Feature feature = it.next();
        String completionType = getCompletionType(feature, token.id().name());
        int tokenOffset = tokenSequence.offset();
        if (completionType == null) continue;
        if (COMPLETION_APPEND.equals(completionType) &&
                offset < tokenOffset + token.length()) {
            continue;
        }
        start = COMPLETION_COMPLETE.equals(completionType)
                ? start.substring(0, offset - tokenOffset).trim()
                : "";
        ignoreCase = false;
        Feature f = language.getFeatureList().getFeature("PROPERTIES");
        if (f != null) {
            ignoreCase = f.getBoolean("ignoreCase", false);
        }
        if (ignoreCase) start = start.toLowerCase();
        addTags(feature, start, Context.create(doc, offset), resultSet);
    }
}
Example 8: findUnresolvedElementSpan
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
private static int[] findUnresolvedElementSpan(CompilationInfo info, int offset) throws IOException {
    Token t = findUnresolvedElementToken(info, offset);
    if (t != null) {
        return new int[] {
            t.offset(null),
            t.offset(null) + t.length()
        };
    }
    return null;
}
Example 9: isIn
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
private boolean isIn(int caretPosition, Token span) {
    //        System.err.println("caretPosition = " + caretPosition );
    //        System.err.println("span[0]= " + span[0]);
    //        System.err.println("span[1]= " + span[1]);
    if (span == null)
        return false;
    return span.offset(null) <= caretPosition && caretPosition <= span.offset(null) + span.length();
}
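Note that isIn treats the span as closed on both ends: a caret sitting exactly at offset + length(), i.e. right behind the last character, still counts as "inside". A tiny illustrative check with made-up offsets (not from the original source):

public class CaretInSpanDemo {
    public static void main(String[] args) {
        int tokenStart = 10;
        int tokenLength = 3;                      // e.g. the identifier "foo"
        int caret = 13;                           // caret right behind the last character
        boolean inside = tokenStart <= caret && caret <= tokenStart + tokenLength;
        System.out.println(inside);               // true - both boundaries count as "in"
    }
}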
Example 10: findPiContentOffsets
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private int[] findPiContentOffsets(FxNode node) {
    TokenSequence<XMLTokenId> seq = (TokenSequence<XMLTokenId>) hierarchy.tokenSequence();
    int start = -1;
    int end = -1;
    int s = nodes.positions(node).getStart();
    seq.move(s);
    boolean cont = true;
    while (cont && seq.moveNext()) {
        Token<XMLTokenId> token = seq.token();
        switch (token.id()) {
            case PI_TARGET:
            case PI_START:
            case WS:
                break;
            case PI_CONTENT:
                if (start == -1) {
                    start = seq.offset();
                }
                end = seq.offset() + token.length();
                break;
            default:
                cont = false;
        }
    }
    return new int[] { start, end };
}
Example 11: getIdentifierSpan
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
public static int[] getIdentifierSpan(Document doc, int offset, Token<JavaTokenId>[] token) {
    FileObject fo = getFileObject(doc);
    if (fo == null) {
        //do nothing if FO is not attached to the document - the goto would not work anyway:
        return null;
    }
    Project prj = FileOwnerQuery.getOwner(fo);
    if (prj == null) {
        return null;
    }
    NbModuleProvider module = prj.getLookup().lookup(NbModuleProvider.class);
    if (module == null) {
        return null;
    }
    ((AbstractDocument) doc).readLock();
    TokenHierarchy th = TokenHierarchy.get(doc);
    TokenSequence<JavaTokenId> ts = null;
    try {
        ts = SourceUtils.getJavaTokenSequence(th, offset);
    } finally {
        ((AbstractDocument) doc).readUnlock();
    }
    if (ts == null) {
        return null;
    }
    ts.move(offset);
    if (!ts.moveNext()) {
        return null;
    }
    Token<JavaTokenId> t = ts.token();
    boolean hasMessage = false;
    boolean hasNbBundle = false;
    if (USABLE_TOKEN_IDS.contains(t.id())) {
        for (int i = 0; i < 10; i++) {
            if (!ts.movePrevious()) {
                break;
            }
            Token<JavaTokenId> tk = ts.token();
            if (TokenUtilities.equals("getMessage", tk.text())) {
                hasMessage = true;
            } else if (TokenUtilities.equals("NbBundle", tk.text())) {
                hasNbBundle = true;
            }
        }
        if (hasNbBundle && hasMessage) {
            ts.move(offset);
            ts.moveNext();
            return new int[] { ts.offset(), ts.offset() + t.length() };
        }
    }
    return null;
}
Example 12: removeExtraEnumSemicolon
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
/**
 * When an enumeration contains just methods, they must be preceded by a single ';'. If a constant is
 * inserted, it must be inserted first and the semicolon should be removed. This method attempts to remove entire
 * lines of whitespace around the semicolon. Preceding or following comments are preserved.
 *
 * @param insertHint the local pointer value
 * @return the new local pointer value
 */
private int removeExtraEnumSemicolon(int insertHint) {
    int startWS = -1;
    int rewind = tokenSequence.offset();
    tokenSequence.move(insertHint);
    tokenSequence.moveNext();
    boolean semi = false;
    out: do {
        Token<JavaTokenId> t = tokenSequence.token();
        switch (t.id()) {
            case WHITESPACE:
                if (semi) {
                    // after semicolon, find the last newline
                    int nl = t.text().toString().lastIndexOf('\n');
                    if (nl == -1) {
                        startWS = tokenSequence.offset();
                    } else {
                        startWS = tokenSequence.offset() + nl + 1;
                    }
                } else {
                    // before semicolon, select the 1st complete line.
                    if (startWS == -1) {
                        startWS = t.text().toString().indexOf('\n');
                        if (startWS == -1) {
                            startWS = tokenSequence.offset();
                        } else {
                            startWS += tokenSequence.offset() + 1;
                        }
                    }
                }
                break;
            case SEMICOLON:
                if (startWS >= 0) {
                    // copy up to the WS immediately preceding the semicolon.
                    copyTo(insertHint, startWS);
                    insertHint = tokenSequence.offset() + t.length();
                }
                startWS = -1;
                semi = true;
                break;
            case LINE_COMMENT:
            case BLOCK_COMMENT:
            case JAVADOC_COMMENT:
                if (semi) {
                    break out;
                }
                startWS = -1;
                break;
            default:
                break out;
        }
    } while (tokenSequence.moveNext());
    if (semi && startWS > -1) {
        insertHint = startWS;
    }
    tokenSequence.move(rewind);
    tokenSequence.moveNext();
    return insertHint;
}
Example 13: printBreakContinueTree
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
/**
 * Rewrites a <code>break</code> or <code>continue</code> tree.
 * @param bounds original bounds
 * @param oldTLabel old label
 * @param newTlabel new label
 * @param oldT the tree to be rewritten
 * @return new bounds
 */
private int printBreakContinueTree(int[] bounds, final Name oldTLabel, final Name newTlabel, JCStatement oldT) {
    int localPointer = bounds[0];
    String stmt = oldT.getKind() == Kind.BREAK ? "break" : "continue"; //NOI18N
    // PENDING: inner comments should be handled - inner comment should be printed in between break and its label,
    // or after the break with no label.
    if (nameChanged(oldTLabel, newTlabel)) {
        int labelPos = -1;
        copyTo(localPointer, localPointer = getOldPos(oldT));
        printer.print(stmt);
        localPointer += stmt.length();
        int commentStart = -1;
        int commentEnd = -1;
        if (oldTLabel != null && oldTLabel.length() > 0) {
            tokenSequence.move(localPointer);
            while (tokenSequence.moveNext()) {
                Token<JavaTokenId> tukac = tokenSequence.token();
                if (isComment(tukac.id())) {
                    if (commentStart == -1) {
                        commentStart = tokenSequence.offset();
                    }
                    commentEnd = tokenSequence.offset() + tukac.length();
                } else if (tukac.id() != JavaTokenId.WHITESPACE) {
                    break;
                }
            }
            if (commentStart != -1) {
                // replicate whitespace before the comment + all the comments up to the last one:
                localPointer = copyUpTo(localPointer, commentEnd);
            }
            // start of the old label
            labelPos = tokenSequence.offset();
        }
        if (newTlabel != null && newTlabel.length() > 0) {
            if (oldTLabel != null) {
                // replicate the original whitespaces
                localPointer = copyUpTo(localPointer, labelPos);
            } else {
                printer.print(" ");
            }
            printer.print(newTlabel);
        }
        if (oldTLabel != null) {
            localPointer = labelPos + oldTLabel.length();
        }
    }
    copyTo(localPointer, bounds[1]);
    return bounds[1];
}
Example 14: findAttributePos
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
public int[] findAttributePos(FxNode node, String uri, String name, boolean value) {
    NodeInfo ni = accessor.i(node);
    if (!ni.isElement()) {
        throw new IllegalArgumentException();
    }
    TokenSequence<XMLTokenId> seq = hierarchy.tokenSequence();
    seq.move(ni.getStart());
    int state = 0;
    while (seq.moveNext()) {
        Token<XMLTokenId> t = seq.token();
        if (ni.isDefined(TextPositions.Position.ContentStart) &&
                seq.offset() >= ni.getContentStart()) {
            return null;
        }
        XMLTokenId id = t.id();
        switch (id) {
            case TAG:
                if (t.text().charAt(0) == '>' || seq.offset() != ni.getStart()) {
                    // broken tag or something
                    return new int[] { ni.getStart(), ni.getContentStart() };
                }
                break;
            case ARGUMENT: {
                String n = t.text().toString();
                int pos = n.indexOf(':');
                // HACK HACK, FIXME - the namespace must be translated into
                // the prefix, but I don't have prefixes in the model at this moment.
                if (uri != null && pos == -1) {
                    break;
                }
                if (pos != -1) {
                    n = n.substring(pos + 1);
                }
                if (name.equals(n)) {
                    if (!value) {
                        return new int[] {
                            seq.offset(),
                            seq.offset() + t.length()
                        };
                    }
                    state = 1;
                }
                break;
            }
            case VALUE:
                if (state != 1) {
                    break;
                }
                return new int[] {
                    seq.offset() + 1,
                    seq.offset() + t.length() + 1
                };
        }
    }
    return null;
}
Example 15: readPIContent
import org.netbeans.api.lexer.Token; // import the package/class the method depends on
/**
 * Scans the processing instruction and reads its target and data, if present.
 *
 * @param seq the token sequence positioned at the processing instruction
 */
private void readPIContent(TokenSequence<XMLTokenId> seq) {
    boolean cont = true;
    Token<XMLTokenId> t;
    while (cont && seq.moveNext()) {
        t = seq.token();
        XMLTokenId id = t.id();
        switch (id) {
            case TAG:
                markUnclosed(seq.offset());
                return;
            // OK for tag content, not OK for PI
            case ARGUMENT:
            case OPERATOR:
            case VALUE:
            case CHARACTER:
            case BLOCK_COMMENT:
            case CDATA_SECTION:
            case DECLARATION:
            case TEXT:
            case PI_START:
            case ERROR:
                markUnclosed(seq.offset());
                return;
            // not OK for tag
            case PI_TARGET:
                this.piTarget = t.text().toString();
                break;
            case PI_CONTENT:
                this.piData = t.text().toString();
                break;
            case PI_END:
                selfClosed = true;
                finished = true;
                tagEndOffset = seq.offset() + t.length();
                return;
            // this is OK for all
            case WS:
                break;
            default:
                markUnclosed(seq.offset());
                return;
        }
    }
}