This article collects typical usage examples of the Java class org.antlr.v4.runtime.TokenSource. If you are wondering what the TokenSource class does and how to use it, the selected code examples below may help.
The TokenSource class belongs to the org.antlr.v4.runtime package. Fifteen code examples of the TokenSource class are shown below, ordered by popularity.
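A quick orientation before the examples: in ANTLR 4, every generated Lexer implements TokenSource, and a CommonTokenStream pulls tokens from it by calling nextToken() until EOF. The sketch below assumes a hypothetical generated lexer named HelloLexer; it is not taken from any of the examples.
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenSource;

public class TokenSourceDemo {
    public static void main(String[] args) {
        // HelloLexer is a placeholder for whatever lexer ANTLR generated for your grammar.
        TokenSource source = new HelloLexer(CharStreams.fromString("hello world"));
        CommonTokenStream tokens = new CommonTokenStream(source);
        tokens.fill(); // drain the TokenSource up to EOF
        for (Token t : tokens.getTokens()) {
            System.out.println(t);
        }
    }
}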
Example 1: nextToken
import org.antlr.v4.runtime.TokenSource; // import the required package/class
@Override
public Token nextToken() {
    if (stashedNext != null) {
        previous = stashedNext;
        stashedNext = null;
        return previous;
    }
    Token next = super.nextToken();
    if (insertSemicolon(previous, next)) {
        stashedNext = next;
        previous = _factory.create(new Pair<TokenSource, CharStream>(this, _input), PainlessLexer.SEMICOLON, ";",
                Lexer.DEFAULT_TOKEN_CHANNEL, next.getStartIndex(), next.getStopIndex(), next.getLine(), next.getCharPositionInLine());
        return previous;
    } else {
        previous = next;
        return next;
    }
}
Example 2: StatementSplitter
import org.antlr.v4.runtime.TokenSource; // import the required package/class
public StatementSplitter(String sql, Set<String> delimiters)
{
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
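The getLexer(sql, delimiters) helper is not shown in this snippet; it is assumed to return a TokenSource over the SQL text whose DELIMITER tokens match the given delimiter strings. A hypothetical usage sketch follows; the accessor names are assumptions, not part of the snippet above.
// Split a script on ";" into complete statements plus any trailing partial statement.
StatementSplitter splitter = new StatementSplitter("SELECT 1; SELECT 2; SELECT", ImmutableSet.of(";"));
for (Statement statement : splitter.getCompleteStatements()) { // accessor name assumed
    System.out.println(statement);
}
System.out.println("partial: " + splitter.getPartialStatement()); // accessor name assumed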
Example 3: squeezeStatement
import org.antlr.v4.runtime.TokenSource; // import the required package/class
public static String squeezeStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
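A brief usage sketch; the exact result depends on how the grammar's WS rule lexes newlines and indentation, so the expected output here is an assumption.
// Each WS token (assumed to cover runs of spaces and newlines) collapses to a single space.
String squeezed = squeezeStatement("SELECT *\n    FROM t\n");
// expected: "SELECT * FROM t"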
Example 4: squeezeStatement
import org.antlr.v4.runtime.TokenSource; // import the required package/class
public static String squeezeStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.<String>of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
Example 5: underlineError
import org.antlr.v4.runtime.TokenSource; // import the required package/class
private void underlineError(final TokenSource tokens, final Token offendingToken, final int line, final int charPositionInLine, final int length) {
    final String input = tokens.getInputStream().toString() + "\n ";
    final String[] lines = input.split("\n");
    final String errorLine = lines[line - 1];
    // a tab width of four spaces is assumed here, matching the "^^^^" marker below
    System.err.println(errorLine.replaceAll("\t", "    "));
    int stop = Math.min(charPositionInLine, errorLine.length());
    for (int i = 0; i < stop; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("    ");
        else
            System.err.print(" ");
    int stop2 = Math.min(stop + length, errorLine.length());
    for (int i = stop; i < stop2; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("^^^^");
        else
            System.err.print("^");
    System.err.println();
}
Example 6: nextToken
import org.antlr.v4.runtime.TokenSource; // import the required package/class
/**
 * Create an ANTLR Token from the current token type of the builder,
 * then advance the builder to the next token (which ultimately calls an
 * ANTLR lexer). The {@link ANTLRLexerAdaptor} creates tokens via
 * an ANTLR lexer but converts them to {@link TokenIElementType}, and here
 * we have to convert back to an ANTLR token using what info we
 * can get from the builder. We lose info such as the original channel,
 * so whitespace and comments (typically hidden channel) will look like
 * real tokens. JetBrains uses {@link ParserDefinition#getWhitespaceTokens()}
 * and {@link ParserDefinition#getCommentTokens()} to strip these before
 * our ANTLR parser sees them.
 */
@Override
public Token nextToken() {
    ProgressIndicatorProvider.checkCanceled();
    TokenIElementType ideaTType = (TokenIElementType) builder.getTokenType();
    int type = ideaTType != null ? ideaTType.getANTLRTokenType() : Token.EOF;
    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line, column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = tokenFactory.create(source, type, text, channel, start, stop, line, charPositionInLine);
    builder.advanceLexer();
    // System.out.println("TOKEN: "+t);
    return t;
}
Example 7: error
import org.antlr.v4.runtime.TokenSource; // import the required package/class
public void error(final String kind, final TokenSource tokens, final Object offendingSymbol, final int line, final int charPositionInLine, final int length, final String msg, final Exception e) {
    hasError = true;
    final String filename = tokens.getSourceName();
    System.err.print(filename.substring(filename.lastIndexOf(File.separator) + 1) + ": compilation failed: ");
    System.err.print("Encountered " + kind + " error ");
    if (offendingSymbol != null)
        System.err.print("\"" + offendingSymbol + "\" ");
    System.err.println("at line " + line + ", column " + charPositionInLine + ". " + msg);
    underlineError(tokens, (Token) offendingSymbol, line, charPositionInLine, length);
    if (e != null)
        for (final StackTraceElement st : e.getStackTrace())
            System.err.println("\tat " + st);
    else
        System.err.println("\tat unknown stack");
}
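A sketch of how a reporting method like error(...) is usually driven from an ANTLR error listener; the listener registration and the fixed length of 1 are assumptions about the surrounding compiler, not part of this snippet.
lexer.removeErrorListeners();
lexer.addErrorListener(new BaseErrorListener() {
    @Override
    public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,
                            int line, int charPositionInLine, String msg, RecognitionException e) {
        // forward to the reporting method shown above; the underline length is assumed to be 1
        error("lexer", lexer, offendingSymbol, line, charPositionInLine, 1, msg, e);
    }
});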
Example 8: underlineError
import org.antlr.v4.runtime.TokenSource; // import the required package/class
private void underlineError(final TokenSource tokens, final Token offendingToken, final int line, final int charPositionInLine, final int length) {
    final String input = tokens.getInputStream().toString() + "\n ";
    final String[] lines = input.split("\n");
    final String errorLine = lines[line - 1];
    // a tab width of four spaces is assumed here, matching the "^^^^" marker below
    System.err.println(errorLine.replaceAll("\t", "    "));
    int stop = Math.min(charPositionInLine, errorLine.length());
    for (int i = 0; i < stop; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("    ");
        else
            System.err.print(" ");
    int stop2 = Math.min(stop + length, errorLine.length());
    for (int i = stop; i < stop2; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("^^^^");
        else
            System.err.print("^");
    System.err.println();
}
Example 9: nextToken
import org.antlr.v4.runtime.TokenSource; // import the required package/class
public Token nextToken() {
    Token t = null;
    consumeUnknown();
    int c = input.LA(1);
    int i = input.index();
    if ( c == CharStream.EOF ) {
        t = getTokenFactory().create(Token.EOF, "<EOF>");
    }
    else {
        Integer ttypeI = charToTokenType.get(c);
        t = getTokenFactory().create(
            new Pair<TokenSource, CharStream>(this, input),
            ttypeI, String.valueOf((char) c), Token.DEFAULT_CHANNEL, i, i,
            line, charPosInLine);
    }
    // System.out.println(t.getText());
    consume();
    return t;
}
Example 10: nextToken
import org.antlr.v4.runtime.TokenSource; // import the required package/class
@Override
public Token nextToken() {
    if (previousTag != null && previousTag.getToken().getType() == Token.EOF) {
        return previousTag.getToken();
    }
    if (tagIterator.hasNext()) {
        previousTag = tagIterator.next().getTag();
    } else {
        TokenSource source = this;
        String text = null;
        int channel = Token.DEFAULT_CHANNEL;
        int start = snapshot.length();
        int stop = start - 1;
        int lineCount = snapshot.getLineCount();
        int lineLength = snapshot.findLineFromLineNumber(lineCount - 1).getLength();
        previousTag = new TokenTag<>(tokenFactory.create(getTokenFactorySourcePair(), Token.EOF, text, channel, start, stop, lineCount, lineLength));
    }
    line = -1;
    charPositionInLine = -1;
    return previousTag.getToken();
}
Example 11: getSourceInterval
import org.antlr.v4.runtime.TokenSource; // import the required package/class
public static Interval getSourceInterval(@NonNull ParserRuleContext context) {
    Parameters.notNull("context", context);
    int startIndex = context.start.getStartIndex();
    Token stopSymbol = getStopSymbol(context);
    if (stopSymbol == null) {
        return new Interval(startIndex, startIndex - 1);
    }
    int stopIndex;
    if (stopSymbol.getType() != Token.EOF) {
        stopIndex = stopSymbol.getStopIndex();
    } else {
        TokenSource tokenSource = context.getStart().getTokenSource();
        CharStream inputStream = tokenSource != null ? tokenSource.getInputStream() : null;
        if (inputStream != null) {
            stopIndex = inputStream.size() - 1;
        } else {
            stopIndex = context.start.getStartIndex() - 1;
        }
    }
    stopIndex = Math.max(stopIndex, startIndex - 1);
    return new Interval(startIndex, stopIndex);
}
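A short usage sketch: the interval can be fed back to the character stream to recover the exact source text covered by a parse-tree node. Here ctx stands for any ParserRuleContext produced by a parse.
Interval span = getSourceInterval(ctx);
String original = ctx.getStart().getInputStream().getText(span); // exact source slice for the node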
Example 12: nextRealToken
import org.antlr.v4.runtime.TokenSource; // import the required package/class
public static Token nextRealToken(CommonTokenStream tokens, int i) {
    int n = tokens.size();
    i++; // search after current i token
    if ( i >= n || i < 0 ) return null;
    Token t = tokens.get(i);
    while ( t.getChannel() == Token.HIDDEN_CHANNEL ) {
        if ( t.getType() == Token.EOF ) {
            TokenSource tokenSource = tokens.getTokenSource();
            if ( tokenSource == null ) {
                return new CommonToken(Token.EOF, "EOF");
            }
            TokenFactory<?> tokenFactory = tokenSource.getTokenFactory();
            if ( tokenFactory == null ) {
                return new CommonToken(Token.EOF, "EOF");
            }
            return tokenFactory.create(Token.EOF, "EOF");
        }
        i++;
        if ( i >= n ) return null; // just in case no EOF
        t = tokens.get(i);
    }
    return t;
}
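A usage sketch: given a buffered token stream, find the next on-channel token after position i, skipping hidden-channel whitespace and comments. The lexer variable is a placeholder for any generated ANTLR lexer.
CommonTokenStream tokens = new CommonTokenStream(lexer);
tokens.fill(); // nextRealToken indexes into the buffered token list
Token next = nextRealToken(tokens, 0); // first non-hidden token after index 0, or EOF/null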
Example 13: nextToken
import org.antlr.v4.runtime.TokenSource; // import the required package/class
@Override
public Token nextToken() {
    TokenIElementType ideaTType = (TokenIElementType) builder.getTokenType();
    int type;
    if ( ideaTType == null ) {
        type = Token.EOF;
    }
    else {
        type = ideaTType.getType();
    }
    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line, column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = factory.create(source, type, text, channel, start, stop, line, charPositionInLine);
    builder.advanceLexer();
    // System.out.println("TOKEN: "+t);
    return t;
}
Example 14: isEmptyStatement
import org.antlr.v4.runtime.TokenSource; // import the required package/class
public static boolean isEmptyStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}
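A brief usage sketch; it assumes the SQL grammar sends whitespace and comments to the hidden channel, so both calls below would return true.
boolean blank = isEmptyStatement("   \n");
boolean comment = isEmptyStatement("-- just a comment\n");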
Example 15: AntlrDocument
import org.antlr.v4.runtime.TokenSource; // import the required package/class
/**
 * @requires text != null && tokenSource != null
 * @effects Makes this be a new Document d with d.text = text and d.tokens
 *          set to the tokens produced by tokenSource
 */
protected AntlrDocument(String text, TokenSource tokenSource) {
    Assert.notNull(text);
    Assert.notNull(tokenSource);
    this.text = text;
    this.tokens = new LinkedList<>();
    initTokens(tokenSource);
}