本文整理汇总了Java中org.fife.ui.rsyntaxtextarea.TokenMaker.getTokenList方法的典型用法代码示例。如果您正苦于以下问题:Java TokenMaker.getTokenList方法的具体用法?Java TokenMaker.getTokenList怎么用?Java TokenMaker.getTokenList使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.fife.ui.rsyntaxtextarea.TokenMaker
的用法示例。
在下文中一共展示了TokenMaker.getTokenList方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testTS_BooleanLiterals
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testTS_BooleanLiterals() {
    // Verifies that "true" and "false" lex as LITERAL_BOOLEAN tokens,
    // separated by single-space WHITESPACE tokens, with the token list
    // terminated by a NULL-type token.
    String code = "true false";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TS_PREV_TOKEN_TYPE, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.LITERAL_BOOLEAN, token.getType());
        if (i < keywords.length - 1) {
            // Every literal except the last is followed by a single space.
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue(token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    // assertEquals reports expected/actual on failure, unlike assertTrue(x == y).
    Assert.assertEquals(TokenTypes.NULL, token.getType());
}
示例2: testLess_EolComments_URL
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testLess_EolComments_URL() {
    // An EOL comment containing a URL should yield a plain comment token
    // followed by a hyperlink token covering the URL text itself.
    String[] commentsWithUrls = { "// Hello world http://www.sas.com" };
    for (String comment : commentsWithUrls) {
        TokenMaker tokenMaker = createTokenMaker();
        Token t = tokenMaker.getTokenList(createSegment(comment), TokenTypes.NULL, 0);
        Assert.assertEquals(TokenTypes.COMMENT_EOL, t.getType());
        t = t.getNextToken();
        Assert.assertTrue(t.isHyperlink());
        Assert.assertEquals(TokenTypes.COMMENT_EOL, t.getType());
        Assert.assertEquals("http://www.sas.com", t.getLexeme());
    }
}
示例3: testJS_BooleanLiterals
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testJS_BooleanLiterals() {
    // Verifies that "true" and "false" lex as LITERAL_BOOLEAN tokens,
    // separated by single-space WHITESPACE tokens, with the token list
    // terminated by a NULL-type token.
    String code = "true false";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.LITERAL_BOOLEAN, token.getType());
        if (i < keywords.length - 1) {
            // Every literal except the last is followed by a single space.
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue(token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    // assertEquals reports expected/actual on failure, unlike assertTrue(x == y).
    Assert.assertEquals(TokenTypes.NULL, token.getType());
}
示例4: testHexLiterals
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testHexLiterals() {
    // Each snippet's first token should be a hex literal; underscores are
    // accepted as digit separators. Fixture cleanup: removed a stray
    // trailing space inside "0x333333333333 " — the assertion only checks
    // the first token, so the space was dead weight and inconsistent with
    // the other fixtures.
    String[] hexLiterals = {
        "0x1", "0xfe", "0x333333333333",
        "0xf_e", "0x333_33_3", // Underscores
    };
    for (String code : hexLiterals) {
        Segment segment = createSegment(code);
        TokenMaker tm = createTokenMaker();
        Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
        Assert.assertEquals("Invalid hex literal: " + token, TokenTypes.LITERAL_NUMBER_HEXADECIMAL, token.getType());
    }
}
示例5: testHexLiterals
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testHexLiterals() {
    // Hex literals in both 0x/0X forms, with lower- and upper-case digits,
    // should all lex as LITERAL_NUMBER_HEXADECIMAL, separated by single
    // spaces and terminated by a NULL-type token.
    String code = "0x1 0xf 0x333333333333 0X1 0Xf 0X33333333333 0xF 0XF " +
        "0x1 0xf 0x333333333333 0X1 0Xf 0X33333333333 0xF 0XF";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals("Invalid hex literal: " + token, TokenTypes.LITERAL_NUMBER_HEXADECIMAL, token.getType());
        if (i < keywords.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue(token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    // assertEquals reports expected/actual on failure, unlike assertTrue(x == y).
    Assert.assertEquals(TokenTypes.NULL, token.getType());
}
示例6: testOperators
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testOperators() {
    // Every operator should lex as an OPERATOR token. The two local names
    // were swapped in the original: the first group is the plain
    // (non-assignment) operators, the second is the assignment operators.
    // The concatenation order is preserved, so the scanned text is unchanged.
    String nonAssignmentOperators = "+ - <= ^ ++ < * >= % -- > / != ? >> ! & == : >> ~ | &&";
    String assignmentOperators = "= -= *= /= |= &= ^= += %= <<= >>=";
    String code = nonAssignmentOperators + " " + assignmentOperators;
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.OPERATOR, token.getType());
        if (i < keywords.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue("Not a single space: " + token, token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    // assertEquals reports expected/actual on failure, unlike assertTrue(x == y).
    Assert.assertEquals(TokenTypes.NULL, token.getType());
}
示例7: testEolComments_URL
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testEolComments_URL() {
    // A URL inside an EOL comment should be split out as its own
    // hyperlink token following the plain comment token.
    String[] commentsWithUrls = { "// Hello world http://www.sas.com" };
    for (String comment : commentsWithUrls) {
        TokenMaker tokenMaker = createTokenMaker();
        Token t = tokenMaker.getTokenList(createSegment(comment), TokenTypes.NULL, 0);
        Assert.assertEquals(TokenTypes.COMMENT_EOL, t.getType());
        t = t.getNextToken();
        Assert.assertTrue(t.isHyperlink());
        Assert.assertEquals(TokenTypes.COMMENT_EOL, t.getType());
        Assert.assertEquals("http://www.sas.com", t.getLexeme());
    }
}
示例8: testJS_StringLiterals_valid
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testJS_StringLiterals_valid() {
    // Complete, valid double-quoted strings: empty, simple text, a
    // unicode escape, and an escaped quote. Each should be identified
    // as a double-quoted string literal.
    String[] validStrings = {
        "\"\"", "\"hi\"", "\"\\u00fe\"", "\"\\\"\"",
    };
    for (String literal : validStrings) {
        TokenMaker tokenMaker = createTokenMaker();
        Token t = tokenMaker.getTokenList(createSegment(literal), TokenTypes.NULL, 0);
        Assert.assertEquals("Not identified as string literal: " + t,
            TokenTypes.LITERAL_STRING_DOUBLE_QUOTE, t.getType());
    }
}
示例9: testSeparators
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testSeparators() {
    // Parens, brackets and braces should each lex as a single-character
    // SEPARATOR token, with single-space WHITESPACE tokens between them
    // and a NULL-type terminator at the end.
    String code = "( ) [ ] { }";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] separators = code.split(" +");
    for (int i = 0; i < separators.length; i++) {
        Assert.assertEquals(separators[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.SEPARATOR, token.getType());
        // Just one extra test here
        Assert.assertTrue(token.isSingleChar(TokenTypes.SEPARATOR, separators[i].charAt(0)));
        if (i < separators.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue("Not a single space: " + token, token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    // assertEquals reports expected/actual on failure, unlike assertTrue(x == y).
    Assert.assertEquals(TokenTypes.NULL, token.getType());
}
示例10: testJS_Keywords
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testJS_Keywords() {
    // JavaScript keywords (lexed from the embedded-in-HTML JS state)
    // should be RESERVED_WORD tokens; "return" is special-cased as
    // RESERVED_WORD_2.
    String code = "break case catch class const continue " +
        "debugger default delete do else export extends finally for function if " +
        "import in instanceof let new super switch " +
        "this throw try typeof void while with " +
        "NaN Infinity " +
        "let"; // As of 1.7, which is our default version
    TokenMaker tokenMaker = createTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(code), HTMLTokenMaker.INTERNAL_IN_JS, 0);
    String[] words = code.split(" +");
    for (int idx = 0; idx < words.length; idx++) {
        Assert.assertEquals(words[idx], t.getLexeme());
        Assert.assertEquals("Not a keyword token: " + t, TokenTypes.RESERVED_WORD, t.getType());
        boolean moreKeywordsFollow = idx < words.length - 1;
        if (moreKeywordsFollow) {
            // A single space separates consecutive keywords.
            t = t.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + t, t.isWhitespace());
            Assert.assertTrue(t.is(TokenTypes.WHITESPACE, " "));
        }
        t = t.getNextToken();
    }
    Token returnToken = tokenMaker.getTokenList(createSegment("return"), HTMLTokenMaker.INTERNAL_IN_JS, 0);
    Assert.assertEquals("return", returnToken.getLexeme());
    Assert.assertEquals(TokenTypes.RESERVED_WORD_2, returnToken.getType());
}
示例11: testMultiLineComments
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testMultiLineComments() {
    // A complete /* ... */ comment on a single line should produce a
    // COMMENT_MULTILINE token.
    String[] comments = { "/* Hello world */" };
    for (String comment : comments) {
        TokenMaker tokenMaker = createTokenMaker();
        Token t = tokenMaker.getTokenList(createSegment(comment), TokenTypes.NULL, 0);
        Assert.assertEquals(TokenTypes.COMMENT_MULTILINE, t.getType());
    }
}
示例12: testCss_id
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testCss_id() {
    // CSS ID selectors ("#name") are highlighted using the ANNOTATION
    // token type.
    String idSelector = "#mainContent";
    TokenMaker tokenMaker = createTokenMaker();
    Token t = tokenMaker.getTokenList(createSegment(idSelector), CSS_PREV_TOKEN_TYPE, 0);
    Assert.assertTrue(t.is(TokenTypes.ANNOTATION, "#mainContent"));
}
示例13: testEolComments
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testEolComments() {
    // A "//" comment with no URL inside should lex as one COMMENT_EOL
    // token.
    String[] comments = { "// Hello world" };
    for (String comment : comments) {
        TokenMaker tokenMaker = createTokenMaker();
        Token t = tokenMaker.getTokenList(createSegment(comment), TokenTypes.NULL, 0);
        Assert.assertEquals(TokenTypes.COMMENT_EOL, t.getType());
    }
}
示例14: testJS_FloatingPointLiterals
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testJS_FloatingPointLiterals() {
    // Exhaustive float-literal grammar: optional leading digits, optional
    // fraction, optional signed exponent (e/E), optional f/F/d/D suffix.
    // All should lex as LITERAL_NUMBER_FLOAT.
    String code =
        // Basic doubles
        "3.0 4.2 3.0 4.2 .111 " +
        // Basic floats ending in f, F, d, or D
        "3f 3F 3d 3D 3.f 3.F 3.d 3.D 3.0f 3.0F 3.0d 3.0D .111f .111F .111d .111D " +
        // lower-case exponent, no sign
        "3e7f 3e7F 3e7d 3e7D 3.e7f 3.e7F 3.e7d 3.e7D 3.0e7f 3.0e7F 3.0e7d 3.0e7D .111e7f .111e7F .111e7d .111e7D " +
        // Upper-case exponent, no sign
        "3E7f 3E7F 3E7d 3E7D 3.E7f 3.E7F 3.E7d 3.E7D 3.0E7f 3.0E7F 3.0E7d 3.0E7D .111E7f .111E7F .111E7d .111E7D " +
        // Lower-case exponent, positive
        "3e+7f 3e+7F 3e+7d 3e+7D 3.e+7f 3.e+7F 3.e+7d 3.e+7D 3.0e+7f 3.0e+7F 3.0e+7d 3.0e+7D .111e+7f .111e+7F .111e+7d .111e+7D " +
        // Upper-case exponent, positive
        "3E+7f 3E+7F 3E+7d 3E+7D 3.E+7f 3.E+7F 3.E+7d 3.E+7D 3.0E+7f 3.0E+7F 3.0E+7d 3.0E+7D .111E+7f .111E+7F .111E+7d .111E+7D " +
        // Lower-case exponent, negative
        "3e-7f 3e-7F 3e-7d 3e-7D 3.e-7f 3.e-7F 3.e-7d 3.e-7D 3.0e-7f 3.0e-7F 3.0e-7d 3.0e-7D .111e-7f .111e-7F .111e-7d .111e-7D " +
        // Upper-case exponent, negative
        "3E-7f 3E-7F 3E-7d 3E-7D 3.E-7f 3.E-7F 3.E-7d 3.E-7D 3.0E-7f 3.0E-7F 3.0E-7d 3.0E-7D .111E-7f .111E-7F .111E-7d .111E-7D";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, HTMLTokenMaker.INTERNAL_IN_JS, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.LITERAL_NUMBER_FLOAT, token.getType());
        if (i < keywords.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue(token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    // Added for consistency with the sibling tests: the token list must
    // end with a NULL-type terminator.
    Assert.assertEquals(TokenTypes.NULL, token.getType());
}
示例15: testHexLiterals
import org.fife.ui.rsyntaxtextarea.TokenMaker; //导入方法依赖的package包/类
@Test
public void testHexLiterals() {
    // Hex literals in 0x/0X forms with every combination of the C-style
    // l/L (long) and u/U (unsigned) suffixes, in both orders. All should
    // lex as LITERAL_NUMBER_HEXADECIMAL, space-separated, with a
    // NULL-type terminator.
    String code = "0x1 0xfe 0x333333333333 0X1 0Xfe 0X33333333333 0xFE 0XFE " +
        "0x1l 0xfel 0x333333333333l 0X1l 0Xfel 0X33333333333l 0xFEl 0XFEl " +
        "0x1L 0xfeL 0x333333333333L 0X1L 0XfeL 0X33333333333L 0xFEL 0XFEL " +
        "0x1u 0xfeu 0x333333333333u 0X1u 0Xfeu 0X33333333333u 0xFEu 0XFEu " +
        "0x1U 0xfeU 0x333333333333U 0X1U 0XfeU 0X33333333333U 0xFEU 0XFEU " +
        "0x1lu 0xfelu 0x333333333333lu 0X1lu 0Xfelu 0X33333333333lu 0xFElu 0XFElu " +
        "0x1LU 0xfeLU 0x333333333333LU 0X1LU 0XfeLU 0X33333333333LU 0xFELU 0XFELU " +
        "0x1ul 0xfeul 0x333333333333ul 0X1ul 0Xfeul 0X33333333333ul 0xFEul 0XFEul " +
        "0x1UL 0xfeUL 0x333333333333UL 0X1UL 0XfeUL 0X33333333333UL 0xFEUL 0XFEUL";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals("Invalid hex literal: " + token, TokenTypes.LITERAL_NUMBER_HEXADECIMAL, token.getType());
        if (i < keywords.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue(token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    // assertEquals reports expected/actual on failure, unlike assertTrue(x == y).
    Assert.assertEquals(TokenTypes.NULL, token.getType());
}