This article collects typical usage examples of the Java class org.fife.ui.rsyntaxtextarea.TokenMaker. If you are wondering what the TokenMaker class does, how to use it, or what working code that uses it looks like, the curated examples below should help.
The TokenMaker class belongs to the org.fife.ui.rsyntaxtextarea package. Fifteen code examples of the class are shown below, ordered by popularity.
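All fifteen examples follow the same pattern: wrap the text to be lexed in a javax.swing.text.Segment, obtain a TokenMaker, and walk the linked list of Token objects returned by getTokenList(). The following minimal sketch shows that pattern outside of a test harness; JavaTokenMaker is only an assumed concrete implementation here, and any other TokenMaker from org.fife.ui.rsyntaxtextarea.modes would work the same way.

import javax.swing.text.Segment;
import org.fife.ui.rsyntaxtextarea.Token;
import org.fife.ui.rsyntaxtextarea.TokenMaker;
import org.fife.ui.rsyntaxtextarea.TokenTypes;
import org.fife.ui.rsyntaxtextarea.modes.JavaTokenMaker;

public class TokenMakerDemo {
    public static void main(String[] args) {
        String code = "int x = 42; // the answer";
        // Wrap the source text in a Segment, as the tests below do via createSegment().
        Segment segment = new Segment(code.toCharArray(), 0, code.length());
        // JavaTokenMaker is an assumed concrete TokenMaker; the tests obtain theirs via createTokenMaker().
        TokenMaker tm = new JavaTokenMaker();
        // The second argument is the lexer state carried over from the previous line;
        // TokenTypes.NULL means the line does not start inside a multi-line construct.
        Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
        // Walk the singly linked token list until the terminating non-paintable token.
        while (token != null && token.isPaintable()) {
            System.out.println(token.getType() + "\t\"" + token.getLexeme() + "\"");
            token = token.getNextToken();
        }
    }
}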
Example 1: testEolComments_URL
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testEolComments_URL() {
    String[] eolCommentLiterals = {
        "// Hello world http://www.sas.com",
    };
    for (String code : eolCommentLiterals) {
        Segment segment = createSegment(code);
        TokenMaker tm = createTokenMaker();
        Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
        Assert.assertEquals(TokenTypes.COMMENT_EOL, token.getType());
        token = token.getNextToken();
        Assert.assertTrue(token.isHyperlink());
        Assert.assertEquals(TokenTypes.COMMENT_EOL, token.getType());
        Assert.assertEquals("http://www.sas.com", token.getLexeme());
    }
}
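The createSegment() and createTokenMaker() helpers used in Example 1 and throughout the remaining examples belong to the test fixture and are not shown in these snippets. A minimal sketch of what such helpers typically look like, assuming a shared base test class (the class name and the choice of JavaTokenMaker are illustrative, not taken from the original tests):

import javax.swing.text.Segment;
import org.fife.ui.rsyntaxtextarea.TokenMaker;
import org.fife.ui.rsyntaxtextarea.modes.JavaTokenMaker;

public abstract class TokenMakerTestBase {

    // Wraps the code under test in the Segment expected by getTokenList().
    protected Segment createSegment(String code) {
        return new Segment(code.toCharArray(), 0, code.length());
    }

    // Each concrete test class would return the TokenMaker for the language it exercises.
    protected TokenMaker createTokenMaker() {
        return new JavaTokenMaker();
    }
}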
Example 2: testOperators
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testOperators() {
    String code = "^ @ : = < > + - / *";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] operators = code.split(" +");
    for (int i = 0; i < operators.length; i++) {
        Assert.assertEquals(operators[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.OPERATOR, token.getType());
        if (i < operators.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue("Not a single space: " + token, token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    Assert.assertTrue(token.getType() == TokenTypes.NULL);
}
Example 3: testJS_StringLiterals_valid
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testJS_StringLiterals_valid() {
    String[] stringLiterals = {
        "\"\"", "\"hi\"", "\"\\x77\"", "\"\\u00fe\"", "\"\\\"\"",
        "\"My name is Robert and I \\", // String continued on another line
    };
    for (String code : stringLiterals) {
        Segment segment = createSegment(code);
        TokenMaker tm = createTokenMaker();
        Token token = tm.getTokenList(segment, HTMLTokenMaker.INTERNAL_IN_JS, 0);
        Assert.assertEquals(TokenTypes.LITERAL_STRING_DOUBLE_QUOTE, token.getType());
    }
}
Example 4: testOperators
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testOperators() {
    String assignmentOperators = "+ - <= ^ ++ < * >= % -- > / != ? >> ! & == : >> ~ | && >>>";
    String nonAssignmentOperators = "= -= *= /= |= &= ^= += %= <<= >>= >>>=";
    String code = assignmentOperators + " " + nonAssignmentOperators;
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.OPERATOR, token.getType());
        if (i < keywords.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue("Not a single space: " + token, token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    Assert.assertTrue(token.getType() == TokenTypes.NULL);
}
Example 5: testJS_Functions
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testJS_Functions() {
    String code = "eval parseInt parseFloat escape unescape isNaN isFinite";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] functions = code.split(" +");
    for (int i = 0; i < functions.length; i++) {
        Assert.assertEquals(functions[i], token.getLexeme());
        Assert.assertEquals("Not a function token: " + token, TokenTypes.FUNCTION, token.getType());
        if (i < functions.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue(token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    Assert.assertTrue(token.getType() == TokenTypes.NULL);
}
Example 6: testJS_Operators
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testJS_Operators() {
    String assignmentOperators = "+ - <= ^ ++ < * >= % -- > / != ? >> ! & == : >> ~ && >>>";
    String nonAssignmentOperators = "= -= *= /= |= &= ^= += %= <<= >>= >>>=";
    String code = assignmentOperators + " " + nonAssignmentOperators;
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals("Not an operator: " + token, TokenTypes.OPERATOR, token.getType());
        if (i < keywords.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue("Not a single space: " + token, token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    Assert.assertTrue(token.getType() == TokenTypes.NULL);
}
Example 7: testTS_CharLiterals_valid
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testTS_CharLiterals_valid() {
    String[] charLiterals = {
        "'a'", "'\\b'", "'\\t'", "'\\r'", "'\\f'", "'\\n'", "'\\u00fe'",
        "'\\u00FE'", "'\\111'", "'\\222'", "'\\333'",
        "'\\x77'",
        "'\\11'", "'\\22'", "'\\33'",
        "'\\1'",
        "'My name is Robert and I \\", // Continued onto another line
    };
    for (String code : charLiterals) {
        Segment segment = createSegment(code);
        TokenMaker tm = createTokenMaker();
        Token token = tm.getTokenList(segment, TS_PREV_TOKEN_TYPE, 0);
        Assert.assertEquals(TokenTypes.LITERAL_CHAR, token.getType());
    }
}
Example 8: testJS_StringLiterals_valid
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testJS_StringLiterals_valid() {
    String[] stringLiterals = {
        "\"\"", "\"hi\"", "\"\\u00fe\"", "\"\\\"\"",
    };
    for (String code : stringLiterals) {
        Segment segment = createSegment(code);
        TokenMaker tm = createTokenMaker();
        Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
        Assert.assertEquals("Not identified as string literal: " + token,
            TokenTypes.LITERAL_STRING_DOUBLE_QUOTE, token.getType());
    }
}
Example 9: testHexLiterals
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testHexLiterals() {
    String[] hexLiterals = {
        "0x1", "0xfe", "0x333333333333",
        "0xf_e", "0x333_33_3", // Underscores
    };
    for (String code : hexLiterals) {
        Segment segment = createSegment(code);
        TokenMaker tm = createTokenMaker();
        Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
        Assert.assertEquals("Invalid hex literal: " + token, TokenTypes.LITERAL_NUMBER_HEXADECIMAL, token.getType());
    }
}
Example 10: testHeredoc_EOT
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testHeredoc_EOT() {
    // Note that the terminating "EOT" should be on another line in real
    // Ruby scripts, but our lexer does not discern that.
    String[] eofs = {
        "<<EOT Hello world EOT",
        "<< \"EOT\" Hello world EOT",
        "<< \t\"EOT\" Hello world EOT",
        "<< 'EOT' Hello world EOT",
        "<< \t'EOT' Hello world EOT",
    };
    for (String code : eofs) {
        Segment segment = createSegment(code);
        TokenMaker tm = createTokenMaker();
        Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
        Assert.assertTrue(token.is(TokenTypes.PREPROCESSOR, code));
    }
}
Example 11: testSeparators
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testSeparators() {
    String code = "( ) [ ] { }";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] separators = code.split(" +");
    for (int i = 0; i < separators.length; i++) {
        Assert.assertEquals(separators[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.SEPARATOR, token.getType());
        // Just one extra test here
        Assert.assertTrue(token.isSingleChar(TokenTypes.SEPARATOR, separators[i].charAt(0)));
        if (i < separators.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue("Not a single space: " + token, token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    Assert.assertTrue(token.getType() == TokenTypes.NULL);
}
Example 12: testBinaryLiterals
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testBinaryLiterals() {
    String code = "0b0 0b1 0B0 0B1 0b010 0B010 0b0_10 0B0_10";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.LITERAL_NUMBER_DECIMAL_INT, token.getType());
        if (i < keywords.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue(token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    Assert.assertTrue(token.getType() == TokenTypes.NULL);
}
Example 13: testTS_StringLiterals_valid
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testTS_StringLiterals_valid() {
    String[] stringLiterals = {
        "\"\"", "\"hi\"", "\"\\x77\"", "\"\\u00fe\"", "\"\\\"\"",
        "\"My name is Robert and I \\", // String continued on another line
    };
    for (String code : stringLiterals) {
        Segment segment = createSegment(code);
        TokenMaker tm = createTokenMaker();
        Token token = tm.getTokenList(segment, TS_PREV_TOKEN_TYPE, 0);
        Assert.assertEquals(TokenTypes.LITERAL_STRING_DOUBLE_QUOTE, token.getType());
    }
}
Example 14: testJS_DataTypes
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testJS_DataTypes() {
    String code = "String Number int uint Boolean Null";
    Segment segment = createSegment(code);
    TokenMaker tm = createTokenMaker();
    Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
    String[] keywords = code.split(" +");
    for (int i = 0; i < keywords.length; i++) {
        Assert.assertEquals(keywords[i], token.getLexeme());
        Assert.assertEquals(TokenTypes.DATA_TYPE, token.getType());
        if (i < keywords.length - 1) {
            token = token.getNextToken();
            Assert.assertTrue("Not a whitespace token: " + token, token.isWhitespace());
            Assert.assertTrue(token.is(TokenTypes.WHITESPACE, " "));
        }
        token = token.getNextToken();
    }
    Assert.assertTrue(token.getType() == TokenTypes.NULL);
}
Example 15: testLess_EolComments_URL
import org.fife.ui.rsyntaxtextarea.TokenMaker; // import the required package/class
@Test
public void testLess_EolComments_URL() {
    String[] eolCommentLiterals = {
        "// Hello world http://www.sas.com",
    };
    for (String code : eolCommentLiterals) {
        Segment segment = createSegment(code);
        TokenMaker tm = createTokenMaker();
        Token token = tm.getTokenList(segment, TokenTypes.NULL, 0);
        Assert.assertEquals(TokenTypes.COMMENT_EOL, token.getType());
        token = token.getNextToken();
        Assert.assertTrue(token.isHyperlink());
        Assert.assertEquals(TokenTypes.COMMENT_EOL, token.getType());
        Assert.assertEquals("http://www.sas.com", token.getLexeme());
    }
}