本文整理汇总了Java中org.fife.ui.rsyntaxtextarea.Token类的典型用法代码示例。如果您正苦于以下问题:Java Token类的具体用法?Java Token怎么用?Java Token使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Token类属于org.fife.ui.rsyntaxtextarea包,在下文中一共展示了Token类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testGetTokenList_nullStart_Identifiers
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Lexing from the default ({@code NULL}) state should produce an
 * {@code IDENTIFIER} token for every word in the input, with a
 * single-space whitespace token between consecutive words, and a
 * terminating {@code NULL} token at the end of the line.
 */
@Test
public void testGetTokenList_nullStart_Identifiers() {
    String code = "foo foo_bar ";
    code += ". , ;"; // "separators2"
    UnixShellTokenMaker tokenMaker = new UnixShellTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code), TokenTypes.NULL, 0);
    String[] expected = code.split(" +");
    int last = expected.length - 1;
    for (int i = 0; i <= last; i++) {
        Assert.assertEquals(expected[i], t.getLexeme());
        Assert.assertEquals(TokenTypes.IDENTIFIER, t.getType());
        if (i != last) {
            // A single space separates each pair of identifiers.
            t = t.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + t, t.isWhitespace());
            Assert.assertTrue(t.is(TokenTypes.WHITESPACE, " "));
        }
        t = t.getNextToken();
    }
    // The token list ends with the end-of-line NULL token.
    Assert.assertTrue(t.getType() == TokenTypes.NULL);
}
示例2: testJS_CharLiterals_valid
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Well-formed JavaScript char literals (including escape sequences and a
 * literal continued onto the next line) should lex as {@code LITERAL_CHAR}
 * when scanning starts in the embedded-JavaScript state.
 */
@Test
public void testJS_CharLiterals_valid() {
    String[] validLiterals = {
        "'a'", "'\\b'", "'\\t'", "'\\r'", "'\\f'", "'\\n'", "'\\u00fe'",
        "'\\u00FE'", "'\\111'", "'\\222'", "'\\333'",
        "'\\x77'",
        "'\\11'", "'\\22'", "'\\33'",
        "'\\1'",
        "'My name is Robert and I \\", // Continued onto another line
    };
    for (String literal : validLiterals) {
        TokenMaker tokenMaker = createTokenMaker();
        Token t = tokenMaker.getTokenList(
            createSegment(literal), HTMLTokenMaker.INTERNAL_IN_JS, 0);
        Assert.assertEquals(TokenTypes.LITERAL_CHAR, t.getType());
    }
}
示例3: testSeparators
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Each paren, bracket, and brace should come back as its own single-char
 * {@code SEPARATOR} token, separated by single-space whitespace tokens,
 * with a terminating {@code NULL} token at the end.
 */
@Test
public void testSeparators() {
    String code = "( ) [ ] { }";
    TokenMaker tokenMaker = createTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code), TokenTypes.NULL, 0);
    String[] expected = code.split(" +");
    int last = expected.length - 1;
    for (int i = 0; i <= last; i++) {
        Assert.assertEquals(expected[i], t.getLexeme());
        Assert.assertEquals(TokenTypes.SEPARATOR, t.getType());
        // Just one extra test here
        Assert.assertTrue(t.isSingleChar(TokenTypes.SEPARATOR, expected[i].charAt(0)));
        if (i != last) {
            t = t.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + t, t.isWhitespace());
            Assert.assertTrue("Not a single space: " + t, t.is(TokenTypes.WHITESPACE, " "));
        }
        t = t.getNextToken();
    }
    Assert.assertTrue(t.getType() == TokenTypes.NULL);
}
示例4: testStringLiterals_errors
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Malformed JSON strings — an invalid escape or a missing closing quote —
 * should lex as {@code ERROR_STRING_DOUBLE}.
 */
@Test
public void testStringLiterals_errors() {
    String[] badStrings = {
        "\"foo \\x bar\"",           // invalid escape sequence
        "\"foo unterminated string", // no closing quote
    };
    for (String input : badStrings) {
        JsonTokenMaker tokenMaker = new JsonTokenMaker();
        Token t = tokenMaker.getTokenList(createSegment(input), TokenTypes.NULL, 0);
        Assert.assertEquals("Invalid error-string: " + t, TokenTypes.ERROR_STRING_DOUBLE, t.getType());
    }
}
示例5: testJS_BooleanLiterals
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * "true" and "false" should lex as {@code LITERAL_BOOLEAN} tokens when
 * scanning starts in the embedded-JavaScript state, with a single-space
 * whitespace token between them.
 */
@Test
public void testJS_BooleanLiterals() {
    String code = "true false";
    TokenMaker tokenMaker = createTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code), HTMLTokenMaker.INTERNAL_IN_JS, 0);
    String[] expected = code.split(" +");
    int last = expected.length - 1;
    for (int i = 0; i <= last; i++) {
        Assert.assertEquals(expected[i], t.getLexeme());
        Assert.assertEquals(TokenTypes.LITERAL_BOOLEAN, t.getType());
        if (i != last) {
            t = t.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + t, t.isWhitespace());
            Assert.assertTrue(t.is(TokenTypes.WHITESPACE, " "));
        }
        t = t.getNextToken();
    }
}
示例6: getTagCloseInfo
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Grabs the token representing the closing of a tag (i.e.
 * "<code>&gt;</code>" or "<code>/&gt;</code>"). This should only be
 * called after a tag name has been parsed, to ensure the "closing" of
 * other tags is not identified.
 *
 * @param tagNameToken The token denoting the name of the tag.
 * @param textArea The text area whose contents are being parsed.
 * @param line The line we're currently on.
 * @param info On return, information about the closing of the tag is
 *        returned in this object.
 * @return The line number of the closing tag token.
 */
private int getTagCloseInfo(Token tagNameToken, RSyntaxTextArea textArea,
        int line, TagCloseInfo info) {

    info.reset();
    // Start scanning just past the tag name, on the current line.
    Token t = tagNameToken.getNextToken();

    do {

        // Skip ahead to the next tag-delimiter token on this line, if any.
        // NOTE(review): this matches any MARKUP_TAG_DELIMITER token, not
        // specifically ">"/"/>" — hence the precondition in the Javadoc.
        while (t!=null && t.getType()!=Token.MARKUP_TAG_DELIMITER) {
            t = t.getNextToken();
        }

        // Found one: record it and the line it lives on, and stop.
        if (t!=null) {
            info.closeToken = t;
            info.line = line;
            break;
        }

    // Delimiter not on this line; advance to the next line's token list
    // (the assignment to t feeds the next loop iteration). Stops at EOF
    // or if a line has no tokens.
    } while (++line<textArea.getLineCount() &&
            (t=textArea.getTokenListForLine(line))!=null);

    // If no delimiter was ever found, info remains reset and line is
    // whatever line the scan stopped on.
    return line;

}
示例7: testObjectClassMethodAdditions
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Methods Groovy adds to {@code java.lang.Object} (the GDK "object class
 * method additions") should each lex as a single {@code FUNCTION} token.
 */
@Test
public void testObjectClassMethodAdditions() {
    String[] additions = {
        "addShutdownHook", "any", "asBoolean", "asType", "collect", "dump",
        "each", "eachWithIndex", "every", "find", "findAll", "findIndexOf",
        "findIndexValues", "findLastIndexOf", "getAt", "getMetaClass",
        "getMetaPropertyValues", "getProperties", "grep", "hasProperty",
        "identity", "inject", "inspect", "invokeMethod", "is", "isCase",
        "iterator", "metaClass", "print", "printf", "println", "putAt",
        "respondsTo", "setMetaClass", "sleep", "split", "sprintf",
        "toString", "use", "with",
    };
    for (String methodName : additions) {
        TokenMaker tokenMaker = createTokenMaker();
        Token t = tokenMaker.getTokenList(createSegment(methodName), TokenTypes.NULL, 0);
        Assert.assertTrue(t.is(TokenTypes.FUNCTION, methodName));
    }
}
示例8: testXML_comment_URL
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * A URL embedded in an XML comment should be split into its own
 * {@code MARKUP_COMMENT} token flagged as a hyperlink, with the
 * surrounding comment text unflagged.
 */
@Test
public void testXML_comment_URL() {
    String code = "<!-- Hello world http://www.google.com -->";
    XMLTokenMaker tokenMaker = new XMLTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code), TokenTypes.NULL, 0);

    // Leading comment text, up to but not including the URL.
    Assert.assertFalse(t.isHyperlink());
    Assert.assertTrue("Token is not type MARKUP_COMMENT: " + t,
        t.is(TokenTypes.MARKUP_COMMENT, "<!-- Hello world "));

    // The URL itself: still a comment token, but marked as a hyperlink.
    t = t.getNextToken();
    Assert.assertTrue(t.isHyperlink());
    Assert.assertTrue(t.is(TokenTypes.MARKUP_COMMENT, "http://www.google.com"));

    // Trailing comment text after the URL.
    t = t.getNextToken();
    Assert.assertFalse(t.isHyperlink());
    Assert.assertTrue(t.is(TokenTypes.MARKUP_COMMENT, " -->"));
}
示例9: testHexLiterals
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Hex literals in every casing ("0x"/"0X" prefix, "l"/"L" suffix) should
 * lex as {@code LITERAL_NUMBER_HEXADECIMAL} tokens, space-separated, with
 * a terminating {@code NULL} token.
 */
@Test
public void testHexLiterals() {
    String code = "0x1 0xfe 0x333333333333 0X1 0Xfe 0X33333333333 0xFE 0XFE " +
        "0x1l 0xfel 0x333333333333l 0X1l 0Xfel 0X33333333333l 0xFEl 0XFEl " +
        "0x1L 0xfeL 0x333333333333L 0X1L 0XfeL 0X33333333333L 0xFEL 0XFEL";
    TokenMaker tokenMaker = createTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code), TokenTypes.NULL, 0);
    String[] expected = code.split(" +");
    int last = expected.length - 1;
    for (int i = 0; i <= last; i++) {
        Assert.assertEquals(expected[i], t.getLexeme());
        Assert.assertEquals("Invalid hex literal: " + t, TokenTypes.LITERAL_NUMBER_HEXADECIMAL, t.getType());
        if (i != last) {
            t = t.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + t, t.isWhitespace());
            Assert.assertTrue(t.is(TokenTypes.WHITESPACE, " "));
        }
        t = t.getNextToken();
    }
    Assert.assertTrue(t.getType() == TokenTypes.NULL);
}
示例10: testCharLiterals
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Single-quoted literals — even unterminated ones or ones with unknown
 * escapes — should lex as {@code LITERAL_CHAR} tokens.
 */
@Test
public void testCharLiterals() {
    String[] literals = {
        "'Hello world'",
        "'Hello world", // Unterminated char literals not flagged as errors yet
        "'Hello \\q world'", // Any escapes are ignored
        "''",
    };
    for (String literal : literals) {
        TokenMaker tokenMaker = createTokenMaker();
        Token t = tokenMaker.getTokenList(createSegment(literal), TokenTypes.NULL, 0);
        Assert.assertEquals(TokenTypes.LITERAL_CHAR, t.getType());
    }
}
示例11: testFunctions
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Ruby's built-in Kernel functions should each lex as a single
 * {@code FUNCTION} token.
 */
@Test
public void testFunctions() {
    String[] builtins = {
        "Array", "Float", "Integer", "String", "at_exit", "autoload",
        "binding", "caller", "catch", "chop", "chop!", "chomp", "chomp!",
        "eval", "exec", "exit", "exit!", "fail", "fork", "format", "gets",
        "global_variables", "gsub", "gsub!", "iterator?", "lambda", "load",
        "local_variables", "loop", "open", "p", "print", "printf", "proc",
        "putc", "puts", "raise", "rand", "readline", "readlines", "require",
        "select", "sleep", "split", "sprintf", "srand", "sub", "sub!",
        "syscall", "system", "test", "trace_var", "trap", "untrace_var",
    };
    for (String name : builtins) {
        TokenMaker tokenMaker = createTokenMaker();
        Token t = tokenMaker.getTokenList(createSegment(name), TokenTypes.NULL, 0);
        Assert.assertEquals(TokenTypes.FUNCTION, t.getType());
    }
}
示例12: testMxml_comment_URL
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * A URL embedded in an MXML comment should be split into its own
 * {@code MARKUP_COMMENT} token flagged as a hyperlink, with the
 * surrounding comment text unflagged.
 */
@Test
public void testMxml_comment_URL() {
    String code = "<!-- Hello world http://www.google.com -->";
    MxmlTokenMaker tokenMaker = new MxmlTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code), TokenTypes.NULL, 0);

    // Leading comment text, up to but not including the URL.
    Assert.assertFalse(t.isHyperlink());
    Assert.assertTrue("Token is not type MARKUP_COMMENT: " + t,
        t.is(TokenTypes.MARKUP_COMMENT, "<!-- Hello world "));

    // The URL itself: still a comment token, but marked as a hyperlink.
    t = t.getNextToken();
    Assert.assertTrue(t.isHyperlink());
    Assert.assertTrue(t.is(TokenTypes.MARKUP_COMMENT, "http://www.google.com"));

    // Trailing comment text after the URL.
    t = t.getNextToken();
    Assert.assertFalse(t.isHyperlink());
    Assert.assertTrue(t.is(TokenTypes.MARKUP_COMMENT, " -->"));
}
示例13: testJS_Functions
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * JavaScript's global built-in functions should lex as {@code FUNCTION}
 * tokens, space-separated, with a terminating {@code NULL} token.
 */
@Test
public void testJS_Functions() {
    String code = "eval parseInt parseFloat escape unescape isNaN isFinite";
    TokenMaker tokenMaker = createTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code), TokenTypes.NULL, 0);
    String[] expected = code.split(" +");
    int last = expected.length - 1;
    for (int i = 0; i <= last; i++) {
        Assert.assertEquals(expected[i], t.getLexeme());
        Assert.assertEquals("Not a function token: " + t, TokenTypes.FUNCTION, t.getType());
        if (i != last) {
            t = t.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + t, t.isWhitespace());
            Assert.assertTrue(t.is(TokenTypes.WHITESPACE, " "));
        }
        t = t.getNextToken();
    }
    Assert.assertTrue(t.getType() == TokenTypes.NULL);
}
示例14: testJS_BooleanLiterals
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * "true" and "false" should lex as {@code LITERAL_BOOLEAN} tokens when
 * scanning JSP content starting in the embedded-JavaScript state.
 */
@Test
public void testJS_BooleanLiterals() {
    String code = "true false";
    JSPTokenMaker tokenMaker = new JSPTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code), JSPTokenMaker.INTERNAL_IN_JS, 0);
    String[] expected = code.split(" +");
    int last = expected.length - 1;
    for (int i = 0; i <= last; i++) {
        Assert.assertEquals(expected[i], t.getLexeme());
        Assert.assertEquals(TokenTypes.LITERAL_BOOLEAN, t.getType());
        if (i != last) {
            t = t.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + t, t.isWhitespace());
            Assert.assertTrue(t.is(TokenTypes.WHITESPACE, " "));
        }
        t = t.getNextToken();
    }
}
示例15: testJS_HexLiterals
import org.fife.ui.rsyntaxtextarea.Token; //导入依赖的package包/类
/**
 * Hex literals in every casing ("0x"/"0X" prefix, "l"/"L" suffix) should
 * lex as {@code LITERAL_NUMBER_HEXADECIMAL} tokens when scanning JSP
 * content starting in the embedded-JavaScript state.
 */
@Test
public void testJS_HexLiterals() {
    String code = "0x1 0xfe 0x333333333333 0X1 0Xfe 0X33333333333 0xFE 0XFE " +
        "0x1l 0xfel 0x333333333333l 0X1l 0Xfel 0X33333333333l 0xFEl 0XFEl " +
        "0x1L 0xfeL 0x333333333333L 0X1L 0XfeL 0X33333333333L 0xFEL 0XFEL ";
    JSPTokenMaker tokenMaker = new JSPTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code),
        JSPTokenMaker.INTERNAL_IN_JS, 0);
    String[] expected = code.split(" +");
    int last = expected.length - 1;
    for (int i = 0; i <= last; i++) {
        Assert.assertEquals("Not expected hex literal: " + t, expected[i], t.getLexeme());
        Assert.assertEquals("Not a hex literal: " + t, TokenTypes.LITERAL_NUMBER_HEXADECIMAL, t.getType());
        if (i != last) {
            t = t.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + t, t.isWhitespace());
            Assert.assertTrue(t.is(TokenTypes.WHITESPACE, " "));
        }
        t = t.getNextToken();
    }
}