本文整理汇总了Java中org.antlr.v4.runtime.Lexer类的典型用法代码示例。如果您正苦于以下问题:Java Lexer类的具体用法?Java Lexer怎么用?Java Lexer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Lexer类属于org.antlr.v4.runtime包,在下文中一共展示了Lexer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: nextToken
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
@Override
public Token nextToken() {
    // A synthetic semicolon was queued on the previous call: deliver it now.
    Token pending = stashedNext;
    if (pending != null) {
        stashedNext = null;
        previous = pending;
        return pending;
    }
    Token token = super.nextToken();
    if (!insertSemicolon(previous, token)) {
        previous = token;
        return token;
    }
    // Hold back the real token and emit a synthesized ';' in its place,
    // copying the real token's position info so diagnostics stay accurate.
    stashedNext = token;
    previous = _factory.create(new Pair<TokenSource, CharStream>(this, _input), PainlessLexer.SEMICOLON, ";",
            Lexer.DEFAULT_TOKEN_CHANNEL, token.getStartIndex(), token.getStopIndex(),
            token.getLine(), token.getCharPositionInLine());
    return previous;
}
示例2: makeBasicParser
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Builds the lexer/parser pair matching {@code parserClass}, wires both to a
 * collecting error listener, and returns the parser cast to the requested type.
 *
 * @throws IllegalArgumentException if {@code parserClass} is not a known parser type
 */
private static <T extends Parser> T makeBasicParser(Class<T> parserClass,
        ANTLRInputStream stream, String parsedObjectName, List<AntlrError> errors) {
    final Lexer lexer;
    final Parser parser;
    if (parserClass.isAssignableFrom(SQLParser.class)) {
        lexer = new SQLLexer(stream);
        parser = new SQLParser(new CommonTokenStream(lexer));
    } else if (parserClass.isAssignableFrom(IgnoreListParser.class)) {
        lexer = new IgnoreListLexer(stream);
        parser = new IgnoreListParser(new CommonTokenStream(lexer));
    } else {
        throw new IllegalArgumentException("Unknown parser class: " + parserClass);
    }
    // Replace the default console listeners with one that records errors
    // (tagged with the parsed object's name) into the supplied list.
    CustomAntlrErrorListener listener = new CustomAntlrErrorListener(parsedObjectName, errors);
    lexer.removeErrorListeners();
    lexer.addErrorListener(listener);
    parser.removeErrorListeners();
    parser.addErrorListener(listener);
    return parserClass.cast(parser);
}
示例3: newParser
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Creates a parser over {@code input} using the supplied lexer and parser
 * factories, optionally stripping default error listeners and installing a
 * bail-out error strategy.
 */
public static <L extends Lexer, P extends Parser> P newParser(
        Function<CharStream, L> lexerFactory,
        Function<TokenStream, P> parserFactory,
        String input,
        boolean useBailErrorStrategy,
        boolean removeErrorListeners) {
    // Lex the raw character stream.
    L lexer = lexerFactory.apply(new ANTLRInputStream(input));
    if (removeErrorListeners) {
        lexer.removeErrorListeners();
    }
    // Feed buffered tokens into the parser.
    P parser = parserFactory.apply(new CommonTokenStream(lexer));
    if (removeErrorListeners) {
        parser.removeErrorListeners();
    }
    if (useBailErrorStrategy) {
        // Abort at the first syntax error instead of attempting recovery.
        parser.setErrorHandler(new BailErrorStrategy());
    }
    return parser;
}
示例4: LangDescriptor
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Captures everything needed to locate and parse a language's corpus: the
 * corpus directory, a file-name pattern, the ANTLR lexer/parser classes and
 * start rule, plus simple formatting facts (indent size and the token type
 * used for single-line comments).
 */
public LangDescriptor(String name,
        String corpusDir,
        String fileRegex,
        Class<? extends Lexer> lexerClass,
        Class<? extends Parser> parserClass,
        String startRuleName,
        int indentSize,
        int singleLineCommentType)
{
    this.name = name;
    this.corpusDir = corpusDir;
    this.fileRegex = fileRegex;
    this.lexerClass = lexerClass;
    this.parserClass = parserClass;
    this.startRuleName = startRuleName;
    this.indentSize = indentSize;
    this.singleLineCommentType = singleLineCommentType;
}
示例5: getTokenName
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Gets the name by which a token can be referenced in the generated code.
 * For tokens defined in a {@code tokens{}} block or via a lexer rule, this
 * is the declared name of the token. For token types generated by the use
 * of a string literal within a parser rule of a combined grammar, this is
 * the automatically generated token type which includes the
 * {@link #AUTO_GENERATED_TOKEN_NAME_PREFIX} prefix. For types which are not
 * associated with a defined token, this method returns
 * {@link #INVALID_TOKEN_NAME}.
 *
 * @param ttype The token type.
 * @return The name of the token with the specified type.
 */
public String getTokenName(int ttype) {
    // In a lexer grammar, a type inside the target's char range denotes a
    // raw character literal rather than a declared token.
    boolean isCharLiteral = isLexer()
            && ttype >= Lexer.MIN_CHAR_VALUE
            && ttype <= Lexer.MAX_CHAR_VALUE;
    if (isCharLiteral) {
        return CharSupport.getANTLRCharLiteralForChar(ttype);
    }
    if (ttype == Token.EOF) {
        return "EOF";
    }
    if (ttype >= 0 && ttype < typeToTokenList.size()) {
        String declared = typeToTokenList.get(ttype);
        if (declared != null) {
            return declared;
        }
    }
    return INVALID_TOKEN_NAME;
}
示例6: getANTLRCharLiteralForChar
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/** Return a string representing the escaped char for code c. E.g., if c
 * has value 0x100, you will get "\u0100". ASCII gets the usual
 * char (non-hex) representation; control characters come out as unicode
 * escapes. Set up for Java string syntax, but usable by any target with
 * the same escape rules.
 */
public static String getANTLRCharLiteralForChar(int c) {
    if (c < Lexer.MIN_CHAR_VALUE) {
        return "'<INVALID>'";
    }
    // Table-driven escapes (e.g. '\n', '\t') take precedence.
    if (c < ANTLRLiteralCharValueEscape.length && ANTLRLiteralCharValueEscape[c] != null) {
        return "'" + ANTLRLiteralCharValueEscape[c] + "'";
    }
    boolean printableAscii =
            Character.UnicodeBlock.of((char) c) == Character.UnicodeBlock.BASIC_LATIN
            && !Character.isISOControl((char) c);
    if (printableAscii) {
        switch (c) {
            case '\\':
                return "'\\\\'";
            case '\'':
                return "'\\''";
            default:
                return "'" + (char) c + "'";
        }
    }
    // Turn on the bit above the max "\uFFFF" value so toHexString pads with
    // zeros, then keep only the last 4 hex digits.
    String hex = Integer.toHexString(c | 0x10000).toUpperCase().substring(1, 5);
    return "'\\u" + hex + "'";
}
示例7: getLeadingComments
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Collects hidden-channel comment tokens to the left of {@code token},
 * skipping any already consumed as trailing documentation.
 */
private List<Token> getLeadingComments(Token token) {
    List<Token> hidden = tokenStream.getHiddenTokensToLeft(token.getTokenIndex(), Lexer.HIDDEN);
    if (hidden == null || hidden.isEmpty()) {
        return Collections.emptyList();
    }
    List<Token> comments = new ArrayList<>(hidden.size());
    for (Token candidate : hidden) {
        // Tokens marked in trailingDocTokenIndexes were already attached to a
        // previous element as trailing doc; do not reuse them as leading doc.
        boolean usedAsTrailing = trailingDocTokenIndexes.get(candidate.getTokenIndex());
        if (isComment(candidate) && !usedAsTrailing) {
            comments.add(candidate);
        }
    }
    return comments;
}
示例8: getTrailingComments
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Read comments following the given token, until the first newline is encountered.
 *
 * INVARIANT:
 * Assumes that the parse tree is being walked top-down, left to right!
 *
 * Trailing-doc tokens are marked as such, so that subsequent searches for "leading"
 * doc don't grab tokens already used as "trailing" doc. If the walk order is *not*
 * top-down, left-to-right, then the assumption underpinning the separation of leading
 * and trailing comments is broken.
 *
 * @param endToken the token from which to search for trailing comment tokens.
 * @return a list, possibly empty, of all trailing comment tokens.
 */
private List<Token> getTrailingComments(Token endToken) {
    List<Token> hidden = tokenStream.getHiddenTokensToRight(endToken.getTokenIndex(), Lexer.HIDDEN);
    if (hidden == null || hidden.isEmpty()) {
        return Collections.emptyList();
    }
    // Only the first hidden token can be a trailing comment.
    Token candidate = hidden.get(0);
    if (!isComment(candidate)) {
        return Collections.emptyList();
    }
    // Mark it consumed so it is never also reported as leading doc.
    trailingDocTokenIndexes.set(candidate.getTokenIndex());
    return Collections.singletonList(candidate);
}
示例9: parsePHP
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Lexes and parses the PHP file at {@code filePath} case-insensitively.
 *
 * NOTE(review): on an I/O failure this prints the stack trace and returns
 * {@code null}; callers must be prepared for a null pair.
 */
public static Pair<Parser, Lexer> parsePHP(String filePath) {
    AntlrCaseInsensitiveFileStream charStream;
    try {
        charStream = new AntlrCaseInsensitiveFileStream(filePath);
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
    PHPLexer lexer = new PHPLexer(charStream);
    PHPParser parser = new InterruptablePHPParser(new CommonTokenStream(lexer), filePath);
    /* turn on SLL prediction mode to speed up parsing */
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    return new Pair<Parser, Lexer>(parser, lexer);
}
示例10: processFile
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Parses the PHP file at {@code filePath} and serializes its parse tree into
 * a DOM {@link Document}.
 *
 * @return the DOM document, or {@code null} if the file could not be read
 */
public static Document processFile(String filePath) {
    Pair<Parser, Lexer> pl = parsePHP(filePath);
    // parsePHP returns null when the file cannot be read; propagate that
    // instead of failing with a NullPointerException on pl.a below.
    if (pl == null) {
        return null;
    }
    PHPParser parser = (PHPParser) pl.a;
    parser.setBuildParseTree(true);
    /*
     * htmlDocument is the start rule (the top-level rule)
     * for the PHP grammar
     */
    ParserRuleContext tree = parser.htmlDocument();
    List<String> ruleNames = Arrays.asList(parser.getRuleNames());
    Map<Integer, String> invTokenMap = getInvTokenMap(parser);
    TokenStream tokenStream = parser.getTokenStream();
    ParseTreeDOMSerializer ptSerializer = new ParseTreeDOMSerializer(ruleNames, invTokenMap, tokenStream);
    ParseTreeWalker.DEFAULT.walk(ptSerializer, tree);
    return ptSerializer.getDOMDocument();
}
示例11: parseFile
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Parses the supplied input and walks the resulting tree with an OTLDListener.
 * If any syntax errors are reported, the tree is not walked; the errors are
 * copied into the returned listener instead.
 *
 * @param reader input to parse
 * @return the walked (or error-carrying) OTLDListener
 * @throws IOException if the input stream cannot be read
 */
public static OTLDListener parseFile(InputStream reader) throws IOException {
    OTLDErrorListener errorListener = new OTLDErrorListener();
    Lexer lexer = new otldLexer(new ANTLRInputStream(reader));
    lexer.removeErrorListeners();
    lexer.addErrorListener(errorListener);
    otldParser parser = new otldParser(new CommonTokenStream(lexer));
    parser.removeErrorListeners();
    parser.addErrorListener(errorListener);
    ParseTree tree = parser.program();
    OTLDListener railroad = new OTLDListener();
    if (errorListener.getErrors().isEmpty()) {
        new ParseTreeWalker().walk(railroad, tree);
    } else {
        railroad.errors.addAll(errorListener.getErrors());
    }
    return railroad;
}
示例12: parse
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
@Override
public void parse(Snapshot snapshot, Task task, SourceModificationEvent event) throws ParseException {
    this.snapshot = snapshot;
    // Lex the full snapshot text; default console listeners are removed so
    // syntax errors are only collected via EditorConfigErrorListener.
    String text = snapshot.getText().toString();
    Lexer lexer = new EditorConfigLexer(new ANTLRInputStream(text));
    lexer.removeErrorListeners();
    parser = new EditorConfigParser(new CommonTokenStream(lexer));
    parser.removeErrorListeners();
    syntaxErrors = new ArrayList<>();
    parser.addErrorListener(new EditorConfigErrorListener(syntaxErrors));
    // "file" is the grammar's start rule.
    EditorConfigParser.FileContext root = parser.file();
    result = new ECParserResult(snapshot, parser, root);
}
示例13: syntaxError
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
@Override
public void syntaxError(Recognizer<?, ?> recognizer,
        Object offendingSymbol,
        int line,
        int charPositionInLine,
        String msg,
        RecognitionException e)
{
    // Lexer errors carry no offending token; synthesize one at the lexer's
    // current char position so every Issue has a Token to point at.
    if (offendingSymbol == null) {
        final Lexer lexer = (Lexer) recognizer;
        final int size = lexer.getInputStream().size();
        int index = lexer.getCharIndex();
        if (index >= size) {
            index = size - 1; // clamp to the last character at EOF
        }
        final String text = lexer.getInputStream().getText(new Interval(index, index));
        CommonToken token = (CommonToken) lexer.getTokenFactory().create(Token.INVALID_TYPE, text);
        token.setStartIndex(index);
        token.setStopIndex(index);
        token.setLine(line);
        token.setCharPositionInLine(charPositionInLine);
        offendingSymbol = token;
    }
    issues.add(new Issue(msg, (Token) offendingSymbol));
}
示例14: isContext
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Decides whether {@code token} counts as a completion context, optionally
 * permitting string-literal and action positions.
 */
boolean isContext(Token token, int offset, boolean allowInStrings, boolean allowInActions) {
    if (token == null) {
        return false;
    }
    int type = token.getType();
    // Character sets and action comments never count as context.
    if (type == GrammarLexer.LEXER_CHAR_SET || type == GrammarLexer.ACTION_COMMENT) {
        return false;
    }
    if (type == GrammarLexer.STRING_LITERAL || type == GrammarLexer.DOUBLE_QUOTE_STRING_LITERAL) {
        return allowInStrings;
    }
    if (type == GrammarLexer.ARG_ACTION_WORD || type == GrammarLexer.ACTION_WORD) {
        return allowInActions;
    }
    if (type == GrammarLexer.WS) {
        return true;
    }
    // Anything else qualifies only on the default channel.
    return token.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
}
示例15: isGoContext
import org.antlr.v4.runtime.Lexer; //导入依赖的package包/类
/**
 * Decides whether {@code token} counts as a completion context in Go source,
 * optionally permitting string/char-literal positions.
 */
static boolean isGoContext(Token token, int offset, boolean allowInStrings) {
    if (token == null) {
        return false;
    }
    int type = token.getType();
    // Comments never count as context.
    if (type == GoLexer.COMMENT) {
        return false;
    }
    if (type == GoLexer.CharLiteral || type == GoLexer.StringLiteral) {
        return allowInStrings;
    }
    if (type == GoLexer.WS || type == GoLexer.NEWLINE) {
        return true;
    }
    // Anything else qualifies only on the default channel.
    return token.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
}