This article collects typical usage examples of the Java class com.sonar.sslr.api.GenericTokenType. If you are wondering what GenericTokenType is for, how to use it, or want to see it in real code, the hand-picked examples below should help.
GenericTokenType belongs to the com.sonar.sslr.api package. Twelve code examples of the class are shown below, sorted by popularity by default.
Example 1: visitToken
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
@Override
public void visitToken(Token token) {
if (token.getType().equals(GenericTokenType.EOF)) {
return;
}
if (token.getType() == GenericTokenType.UNKNOWN_CHAR) {
linesOfCode.add(token.getLine());
}
if (token.getType() == GenericTokenType.COMMENT) {
linesOfComments.add(token.getLine());
}
}
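Note that the example compares token types with both equals() and ==. Since GenericTokenType is a Java enum, the two comparisons behave identically; the following minimal standalone sketch (not part of the original visitor) illustrates this:
import com.sonar.sslr.api.GenericTokenType;
import com.sonar.sslr.api.TokenType;

public class TokenTypeComparisonDemo {
    public static void main(String[] args) {
        TokenType type = GenericTokenType.EOF;
        // Enum constants are singletons, so reference equality and equals() agree.
        System.out.println(type == GenericTokenType.EOF);      // true
        System.out.println(type.equals(GenericTokenType.EOF)); // true
    }
}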
Example 2: startElement
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
public void startElement(String uri, String name, String qName, Attributes atts) {
    int line = locator.getLineNumber();
    int column = locator.getColumnNumber();
    if (FlowLexer.FlowTypes.isInEnum("START_" + name.toUpperCase())) {
        logger.debug("Start element: " + qName + "[" + line + "," + column + "]" + "[START_" + name.toUpperCase() + "]");
        Token token = tokenBuilder.setType(FlowLexer.FlowTypes.valueOf("START_" + name.toUpperCase()))
                .setValueAndOriginalValue(name.toUpperCase(), name)
                .setURI(lex.getURI()).setLine(line).setColumn(0).build();
        lex.addToken(token);
        // Check the attributes
        for (int i = 0; i < atts.getLength(); i++) {
            if (FlowLexer.FlowAttTypes.isInEnum(atts.getQName(i).toUpperCase())) {
                token = tokenBuilder.setType(FlowLexer.FlowAttTypes.getEnum(atts.getQName(i).toUpperCase()))
                        .setValueAndOriginalValue(atts.getValue(i).toUpperCase(), atts.getValue(i))
                        .setURI(lex.getURI()).setLine(line).setColumn(0).build();
                lex.addToken(token);
                logger.debug("TOKEN " + token.getValue() + "[" + token.getLine() + "," + token.getColumn() + "]");
            } else {
                // Unknown attribute: emit a generic IDENTIFIER token for its name...
                token = tokenBuilder.setType(GenericTokenType.IDENTIFIER).setValueAndOriginalValue(atts.getQName(i))
                        .setURI(lex.getURI()).setLine(line).setColumn(0).build();
                lex.addToken(token);
                logger.debug("IDENTIFIER " + token.getValue() + "[" + token.getLine() + "," + token.getColumn() + "]");
                // ...followed by a LITERAL token for its value.
                token = tokenBuilder.setType(GenericTokenType.LITERAL).setValueAndOriginalValue(atts.getValue(i))
                        .setURI(lex.getURI()).setLine(line).setColumn(0).build();
                lex.addToken(token);
                logger.debug("LITERAL " + token.getValue() + "[" + token.getLine() + "," + token.getColumn() + "]");
            }
        }
    }
}
Example 3: afterParse
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
@Override
public void afterParse(final PostParseEvent<Token.Builder> event)
{
final int length = reader.length();
/*
* We want a match
*/
final ParsingResult<Token.Builder> result = event.getResult();
if (!result.isSuccess())
throw new GrappaException("match failure (consumed: "
+ consumed + " out of " + length + ')');
/*
* Check that we did consume all the text
*/
if (consumed != length)
throw new GrappaException("was expecting to fully match, but only "
+ consumed + " chars were matched out of " + length);
final ValueStack<Token.Builder> stack = result.getValueStack();
final URI uri = lexer.getURI();
final int size = stack.size();
Token token;
// Walk the value stack from the bottom (first pushed) to the top so that
// tokens are handed to the lexer in the order they were matched.
for (int index = size - 1; index >= 0; index--) {
token = stack.peek(index).setURI(uri).build();
if (token.getType() == GenericTokenType.COMMENT)
lexer.addTrivia(Trivia.createComment(token));
else
lexer.addToken(token);
}
}
Example 4: createTerminal
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
@Override
public Object createTerminal(Input input, int startIndex, int endIndex, List<Trivia> trivias, TokenType type) {
char[] fileChars = input.input();
boolean hasByteOrderMark = fileChars.length > 0 && fileChars[0] == BYTE_ORDER_MARK;
boolean isEof = GenericTokenType.EOF.equals(type);
LineColumnValue lineColumnValue = tokenPosition(input, startIndex, endIndex);
return new InternalSyntaxToken(
lineColumnValue.line,
column(hasByteOrderMark, lineColumnValue.line, lineColumnValue.column),
lineColumnValue.value,
isEof,
isByteOrderMark(input, startIndex, endIndex));
}
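Design note (inferred from the snippet rather than stated in the original): the byte order mark occupies index 0 of the character buffer but is not visible source text, so the column(...) helper (not shown here) presumably shifts first-line columns to compensate, while the GenericTokenType.EOF check lets InternalSyntaxToken mark the synthetic end-of-file token.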
Example 5: visitToken
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
public void visitToken(Token token) {
if (token.getType().equals(GenericTokenType.EOF)) {
return;
}
linesOfCode.add(token.getLine());
List<Trivia> trivias = token.getTrivia();
for (Trivia trivia : trivias) {
if (trivia.isComment()) {
linesOfComments.add(trivia.getToken().getLine());
}
}
}
Example 6: visitToken
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
@Override
public void visitToken(Token token) {
if (token.getType().equals(GenericTokenType.EOF)) {
return;
}
linesOfCode.add(token.getLine());
List<Trivia> trivias = token.getTrivia();
for (Trivia trivia : trivias) {
if (trivia.isComment()) {
linesOfComments.add(trivia.getToken().getLine());
}
}
}
Example 7: create
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
public static Lexer create(LuaConfiguration conf) {
return Lexer.builder()
.withCharset(conf.getCharset())
.withFailIfNoChannelToConsumeOneCharacter(true)
.withChannel(new BomCharacterChannel())
.withChannel(new BlackHoleChannel("\\s++"))
// Comments
.withChannel(commentRegexp("//[^\\n\\r]*+"))
.withChannel(commentRegexp("/\\*[\\s\\S]*?\\*/"))
// String Literals
.withChannel(regexp(GenericTokenType.LITERAL, "\"([^\"\\\\]*+(\\\\[\\s\\S])?+)*+\""))
.withChannel(regexp(GenericTokenType.LITERAL, "\'([^\'\\\\]*+(\\\\[\\s\\S])?+)*+\'"))
// Regular Expression Literal
.withChannel(new LuaRegularExpressionLiteralChannel())
// Numbers
.withChannel(regexp(LuaTokenType.NUMERIC_LITERAL, "0[xX][0-9a-fA-F]++"))
.withChannel(regexp(LuaTokenType.NUMERIC_LITERAL, "[0-9]++\\.([0-9]++)?+" + EXP + "?+"))
.withChannel(regexp(LuaTokenType.NUMERIC_LITERAL, "\\.[0-9]++" + EXP + "?+"))
.withChannel(regexp(LuaTokenType.NUMERIC_LITERAL, "[0-9]++" + EXP + "?+"))
.withChannel(new IdentifierAndKeywordChannel("\\p{javaJavaIdentifierStart}++\\p{javaJavaIdentifierPart}*+", true, LuaKeyword.values()))
.withChannel(new PunctuatorChannel(LuaPunctuator.values()))
.withChannel(new UnknownCharacterChannel())
.build();
}
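A minimal usage sketch for the lexer built above. The enclosing class name LuaLexer and the LuaConfiguration charset constructor are assumptions (neither is shown in the original snippet); Lexer.lex(String) is the standard SSLR entry point.
import java.nio.charset.StandardCharsets;
import java.util.List;

import com.sonar.sslr.api.Token;
import com.sonar.sslr.impl.Lexer;

public class LuaLexerDemo {
    public static void main(String[] args) {
        // Hypothetical: the class and constructor names below are assumptions.
        Lexer lexer = LuaLexer.create(new LuaConfiguration(StandardCharsets.UTF_8));
        List<Token> tokens = lexer.lex("local answer = 42");
        for (Token token : tokens) {
            // Exact token types depend on the channels and keyword set registered above.
            System.out.println(token.getType() + " -> " + token.getValue());
        }
    }
}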
Example 8: visitFile
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
@Override
public void visitFile(@Nullable AstNode astNode) {
NewHighlighting highlighting = context.newHighlighting();
File file = getContext().getFile();
InputFile inputFile = context.fileSystem().inputFile(context.fileSystem().predicates().is(file));
highlighting.onFile(inputFile);
NewCpdTokens cpdTokens = context.newCpdTokens();
cpdTokens.onFile(inputFile);
Iterator<Token> iterator = lexer.lex(file).iterator();
while (iterator.hasNext()) {
Token token = iterator.next();
TokenType tokenType = token.getType();
if (!tokenType.equals(GenericTokenType.EOF)) {
TokenLocation tokenLocation = new TokenLocation(token);
cpdTokens.addToken(tokenLocation.startLine(), tokenLocation.startCharacter(), tokenLocation.endLine(), tokenLocation.endCharacter(), getTokenImage(token));
}
if (tokenType.equals(LuaTokenType.NUMBER)) {
highlight(highlighting, token, TypeOfText.CONSTANT);
} else if (tokenType.equals(GenericTokenType.LITERAL)) {
highlight(highlighting, token, TypeOfText.STRING);
} else if (KEYWORDS.contains(tokenType)) {
highlight(highlighting, token, TypeOfText.KEYWORD);
}
for (Trivia trivia : token.getTrivia()) {
highlight(highlighting, trivia.getToken(), TypeOfText.COMMENT);
}
}
highlighting.save();
cpdTokens.save();
}
Example 9: getTokenImage
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
private static String getTokenImage(Token token) {
if (token.getType().equals(GenericTokenType.LITERAL)) {
return NORMALIZED_CHARACTER_LITERAL;
} else if (token.getType().equals(LuaTokenType.NUMBER)) {
return NORMALIZED_NUMERIC_LITERAL;
}
return token.getValue();
}
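Design note: returning fixed placeholder images for string and numeric literals makes the copy-paste detection engine treat code that differs only in literal values as identical, while identifiers and keywords keep their real value. The constants NORMALIZED_CHARACTER_LITERAL and NORMALIZED_NUMERIC_LITERAL are defined elsewhere in the class and are not shown here.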
Example 10: getTokenImage
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
private String getTokenImage(Token token) {
if (token.getType() == GenericTokenType.LITERAL) {
return GenericTokenType.LITERAL.getValue();
}
return token.getValue();
}
Example 11: init
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
@Override
public void init() {
subscribeTo(GenericTokenType.EOF);
}
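Token types also act as AST node types in SSLR, so subscribing to GenericTokenType.EOF makes the visitor's visitNode callback fire once per file, on the EOF leaf, provided the grammar's root rule matches EOF as SSLR grammars typically do. A sketch of how such a check might continue, assuming the sslr-squid-bridge SquidCheck base class (an assumption; the rest of the original class is not shown):
import com.sonar.sslr.api.AstNode;
import com.sonar.sslr.api.GenericTokenType;
import com.sonar.sslr.api.Grammar;
import org.sonar.squidbridge.checks.SquidCheck;

public class EofCheckSketch extends SquidCheck<Grammar> {

    @Override
    public void init() {
        subscribeTo(GenericTokenType.EOF);
    }

    @Override
    public void visitNode(AstNode astNode) {
        // The EOF token carries the last line number of the file.
        int lastLine = astNode.getToken().getLine();
        System.out.println("End of file reached at line " + lastLine);
    }
}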
Example 12: create
import com.sonar.sslr.api.GenericTokenType; // import the required package/class
public static Lexer create(PlSQLConfiguration conf) {
return Lexer.builder()
.withCharset(conf.getCharset())
.withFailIfNoChannelToConsumeOneCharacter(true)
// Channels that consume more frequently should come first.
// Whitespace characters occur more often than any other, and thus their channel comes first:
.withChannel(new BlackHoleChannel("[" + LINE_TERMINATOR + WHITESPACE + "]++"))
// Comments
.withChannel(commentRegexp(COMMENT))
// String Literals
.withChannel(regexp(GenericTokenType.LITERAL, LITERAL))
// Regular Expression Literals
//TODO: check if required .withChannel(new PlSQLRegexpChannel())
.withChannel(regexp(PlSQLTokenType.NUMERIC_LITERAL, NUMERIC_LITERAL))
.withChannel(new IdentifierAndKeywordChannel(IDENTIFIER, false, PlSQLKeyword.values()))
.withChannel(new PunctuatorChannel(PlSQLPunctuator.values()))
.withChannel(new UnknownCharacterChannel(true))
.build();
}
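Design note: unlike the Lua lexer in Example 7, this lexer passes false as the second argument of IdentifierAndKeywordChannel, which in SSLR disables case-sensitive keyword matching, so SELECT and select resolve to the same PlSQLKeyword token type. This matches PL/SQL's case-insensitive keywords; the comparison is an observation about the two snippets rather than something stated in the original text.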