This article collects typical usage examples of the Java constant org.antlr.v4.runtime.Token.EOF. If you are wondering what Token.EOF is, what it is used for, or how to use it in practice, the curated examples below should help. You can also read further about the enclosing interface, org.antlr.v4.runtime.Token.
The sections below present 15 code examples of the Token.EOF constant, ordered by popularity.
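Token.EOF is the sentinel token type (value -1) that every ANTLR TokenSource returns once its input is exhausted; nearly all of the examples below use it as a loop-termination condition. As a minimal sketch of that canonical pattern (MyLexer stands in for any ANTLR-generated lexer class and is not taken from the examples):

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.Token;

public class EofLoopDemo {
    public static void main(String[] args) {
        // MyLexer is a placeholder for an ANTLR-generated lexer.
        MyLexer lexer = new MyLexer(CharStreams.fromString("select 1"));
        // Pull tokens until the lexer emits the EOF sentinel.
        for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
            System.out.println(t.getType() + " " + t.getText());
        }
    }
}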
Example 1: reportNoViableAlternative
@Override
protected void reportNoViableAlternative(Parser recognizer, NoViableAltException e) {
    // change error message from default implementation
    TokenStream tokens = recognizer.getInputStream();
    String input;
    if (tokens != null) {
        if (e.getStartToken().getType() == Token.EOF) {
            input = "the end";
        } else {
            input = escapeWSAndQuote(tokens.getText(e.getStartToken(), e.getOffendingToken()));
        }
    } else {
        input = escapeWSAndQuote("<unknown input>");
    }
    String msg = "inadmissible input at " + input;
    recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e);
}
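Overrides like Example 1 (and Example 2 below) belong in a subclass of ANTLR's DefaultErrorStrategy. A minimal sketch of installing such a strategy on a parser, where FriendlyErrorStrategy and MyParser are hypothetical names used only for illustration:

// Swap the parser's default error handling for the custom strategy above.
static MyParser newParser(CommonTokenStream tokens) {
    MyParser parser = new MyParser(tokens);
    parser.setErrorHandler(new FriendlyErrorStrategy()); // extends DefaultErrorStrategy
    return parser;
}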
Example 2: getTokenErrorDisplay
@Override
protected String getTokenErrorDisplay(Token t) {
    // overwrite standard behavior to use "the end" instead of <EOF>
    if (t == null) {
        return "<no token>";
    }
    // Fetch the symbol text first and null-check it; calling replace()
    // on a null result would throw a NullPointerException.
    String s = getSymbolText(t);
    if (s == null) {
        if (getSymbolType(t) == Token.EOF) {
            s = "the end";
        } else {
            s = escapeWSAndQuote("<" + getSymbolType(t) + ">");
        }
    } else {
        s = s.replace("<EOF>", "the end");
    }
    return s;
}
Example 3: StatementSplitter
public StatementSplitter(String sql, Set<String> delimiters)
{
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
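Example 3 is the constructor of Presto's StatementSplitter. A hedged usage sketch, assuming the class also exposes getCompleteStatements(), getPartialStatement(), and Statement.statement() as accessors for the values assigned above:

StatementSplitter splitter = new StatementSplitter("select 1; select 2; sel", ImmutableSet.of(";"));
for (StatementSplitter.Statement s : splitter.getCompleteStatements()) {
    System.out.println(s.statement()); // "select 1", then "select 2"
}
System.out.println(splitter.getPartialStatement()); // "sel"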
Example 4: squeezeStatement
public static String squeezeStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
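A quick illustration of the intended behavior, assuming SqlBaseLexer tokenizes each whitespace run as a single WS token (so every run collapses to one space):

String squeezed = squeezeStatement("select *\n    from dbo.test");
System.out.println(squeezed); // "select * from dbo.test"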
Example 5: computeFeatureVectorForToken
public void computeFeatureVectorForToken(int i) {
    Token curToken = tokens.get(i);
    if ( curToken.getType()==Token.EOF ) return;

    int[] features = getFeatures(i);
    int injectNL_WS = getInjectWSCategory(tokens, i);

    int aligned = -1; // "don't care"
    if ( (injectNL_WS&0xFF)==CAT_INJECT_NL ) {
        TerminalNode node = tokenToNodeMap.get(curToken);
        aligned = getAlignmentCategory(doc, node, indentSize);
    }

    // track feature -> injectws, align decisions for token i
    corpus.addExemplar(doc, features, injectNL_WS, aligned);
}
Example 6: getMatchingSymbolEndsLine
public static int getMatchingSymbolEndsLine(Corpus corpus,
                                            InputDocument doc,
                                            TerminalNode node)
{
    TerminalNode matchingLeftNode = getMatchingLeftSymbol(corpus, doc, node);
    if ( matchingLeftNode != null ) {
        Token matchingLeftToken = matchingLeftNode.getSymbol();
        int i = matchingLeftToken.getTokenIndex();
        Token tokenAfterMatchingToken = doc.tokens.getNextRealToken(i);
//      System.out.printf("doc=%s node=%s, pair=%s, after=%s\n",
//                        new File(doc.fileName).getName(), node.getSymbol(), matchingLeftToken, tokenAfterMatchingToken);
        if ( tokenAfterMatchingToken!=null ) {
            if ( tokenAfterMatchingToken.getType()==Token.EOF ) {
                return 1;
            }
            return tokenAfterMatchingToken.getLine()>matchingLeftToken.getLine() ? 1 : 0;
        }
    }
    return NOT_PAIR;
}
Example 7: getTokenName
/**
 * Gets the name by which a token can be referenced in the generated code.
 * For tokens defined in a {@code tokens{}} block or via a lexer rule, this
 * is the declared name of the token. For token types generated by the use
 * of a string literal within a parser rule of a combined grammar, this is
 * the automatically generated token type which includes the
 * {@link #AUTO_GENERATED_TOKEN_NAME_PREFIX} prefix. For types which are not
 * associated with a defined token, this method returns
 * {@link #INVALID_TOKEN_NAME}.
 *
 * @param ttype The token type.
 * @return The name of the token with the specified type.
 */
public String getTokenName(int ttype) {
    // inside any target's char range and is lexer grammar?
    if ( isLexer() &&
         ttype >= Lexer.MIN_CHAR_VALUE && ttype <= Lexer.MAX_CHAR_VALUE )
    {
        return CharSupport.getANTLRCharLiteralForChar(ttype);
    }

    if ( ttype==Token.EOF ) {
        return "EOF";
    }

    if (ttype >= 0 && ttype < typeToTokenList.size() && typeToTokenList.get(ttype) != null) {
        return typeToTokenList.get(ttype);
    }

    return INVALID_TOKEN_NAME;
}
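The generated runtime exposes the same special case through its Vocabulary: the EOF sentinel always maps to the symbolic name "EOF". A small sketch, where parser stands in for any generated recognizer instance:

import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.Vocabulary;

Vocabulary vocab = parser.getVocabulary();
System.out.println(vocab.getSymbolicName(Token.EOF)); // prints "EOF"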
Example 8: nextToken
public Token nextToken() {
    initIfFirstRun();
    Token t = dentsBuffer.isEmpty()
            ? pullToken()
            : dentsBuffer.remove();
    if (reachedEof) {
        return t;
    }
    final Token r;
    if (t.getType() == nlToken) {
        r = handleNewlineToken(t);
    } else if (t.getType() == Token.EOF) {
        r = eofHandler.apply(t);
    } else {
        r = t;
    }
    return r;
}
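Example 8 follows the pattern of indentation-aware lexer helpers such as antlr-denter: the EOF branch hands off to an eofHandler so pending dedents are flushed before the stream ends. A speculative sketch of what such a handler might look like; DEDENT, indentations, and createToken are assumptions for illustration, not the library's actual API:

// Before surrendering EOF, enqueue one synthetic DEDENT per still-open
// indentation level so the parser sees balanced INDENT/DEDENT pairs.
private Token onEof(Token eof) {
    while (!indentations.isEmpty()) {
        dentsBuffer.add(createToken(DEDENT, eof));
        indentations.pop();
    }
    dentsBuffer.add(eof); // EOF itself comes out last
    return dentsBuffer.remove();
}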
Example 9: compareWithAntrl
@Test
public void compareWithAntrl() {
    String s = "select " + "*" + "from dbo.test";
    AntrlResult result = Antlr4Utils.getFull(s);
    SourceLinesProvider p = new SourceLinesProvider();
    SourceLine[] lines = p.getLines(new StringBufferInputStream(s), Charset.defaultCharset());
    FillerRequest file = new FillerRequest(null, null, result.getTree(), lines);
    for (Token t : result.getStream().getTokens()) {
        if (t.getType() == Token.EOF) {
            continue;
        }
        int[] start = file.getLineAndColumn(t.getStartIndex());
        int[] end = file.getLineAndColumn(t.getStopIndex());
        Assert.assertNotNull(start);
        Assert.assertNotNull(end);
        Assert.assertEquals(t.getLine(), start[0]);
        System.out.println(t.getText() + Arrays.toString(start) + " " + t.getCharPositionInLine() + " "
                + t.getLine() + " " + Arrays.toString(end));
        Assert.assertEquals(t.getCharPositionInLine(), start[1]);
    }
}
Example 10: circularInclude
@Test
public void circularInclude() {
    final URL url = getClass().getResource("/includes/circular-include.raml");
    final RAMLCustomLexer lexer = lexer(url);
    // Drain the lexer; the test passes as long as the circular include
    // does not make nextToken() spin forever before reaching EOF.
    for (Token token = lexer.nextToken(); token.getType() != Token.EOF; token = lexer.nextToken()) {
        // intentionally empty
    }
}
Example 11: isEmptyStatement
public static boolean isEmptyStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}
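The contract, illustrated: a statement is "empty" when every token before EOF sits on the hidden channel (whitespace and comments in Presto's SQL lexer). A hedged example:

System.out.println(isEmptyStatement("   -- just a comment\n")); // true
System.out.println(isEmptyStatement("select 1"));               // false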
Example 12: initTokens
/**
 * @requires the tokens of this are not initialized yet && source != null
 * @modifies this
 * @effects Initializes the tokens of this with the given token source.
 */
private void initTokens(TokenSource source) {
    Assert.isTrue(tokens.isEmpty());
    Token token;
    do {
        token = source.nextToken();
        tokens.add(token);
    } while (token.getType() != Token.EOF);
    // note: because this is a do-while, the EOF token itself is stored as the last element
}
Example 13: noViableAlt
public static String noViableAlt(Parser recognizer, NoViableAltException e) {
    TokenStream tokens = recognizer.getInputStream();
    String input = "<unknown input>"; // avoids printing "null" when no stream is attached
    if (tokens != null) {
        Token startToken = e.getStartToken();
        if (startToken.getType() == Token.EOF) {
            input = "<EOF>";
        } else {
            input = tokens.getText(startToken, e.getOffendingToken());
        }
    }
    return "syntax error at input: " + input;
}
Example 14: docDiff
/** Compute a document difference metric 0-1.0 between two documents that
 *  are identical other than (likely) the whitespace and comments.
 *
 *  1.0 means the docs are maximally different and 0 means docs are identical.
 *
 *  The Levenshtein distance between the docs counts only
 *  whitespace diffs, as the non-WS content is identical.
 *  Levenshtein distance is bounded by 0..max(len(doc1),len(doc2)), so
 *  we normalize the distance by dividing by the max WS count.
 *
 *  TODO: can we simplify this to a simple walk with two
 *  cursors through the original vs formatted, counting
 *  mismatched whitespace? real text acts like anchors.
 */
public static double docDiff(String original,
                             String formatted,
                             Class<? extends Lexer> lexerClass)
    throws Exception
{
    // Grammar must strip all but real tokens and whitespace (and put that on hidden channel)
    CodeBuffTokenStream original_tokens = Tool.tokenize(original, lexerClass);
    CodeBuffTokenStream formatted_tokens = Tool.tokenize(formatted, lexerClass);

    // walk token streams and examine whitespace in between tokens
    int i = -1;
    int ws_distance = 0;
    int original_ws = 0;
    int formatted_ws = 0;
    while ( true ) {
        Token ot = original_tokens.LT(i); // TODO: FIX THIS! can't use LT()
        if ( ot==null || ot.getType()==Token.EOF ) break;
        List<Token> ows = original_tokens.getHiddenTokensToLeft(ot.getTokenIndex());
        original_ws += tokenText(ows).length();

        Token ft = formatted_tokens.LT(i); // TODO: FIX THIS! can't use LT()
        if ( ft==null || ft.getType()==Token.EOF ) break;
        List<Token> fws = formatted_tokens.getHiddenTokensToLeft(ft.getTokenIndex());
        formatted_ws += tokenText(fws).length();

        ws_distance += whitespaceEditDistance(tokenText(ows), tokenText(fws));
        i++;
    }

    // it's probably ok to ignore ws diffs after last real token
    int max_ws = Math.max(original_ws, formatted_ws);
    double normalized_ws_distance = ((float) ws_distance)/max_ws;
    return normalized_ws_distance;
}
Example 15: getRealTokens
public static List<Token> getRealTokens(CommonTokenStream tokens) {
    List<Token> real = new ArrayList<>();
    for (int i = 0; i < tokens.size(); i++) {
        Token t = tokens.get(i);
        if ( t.getType()!=Token.EOF &&
             t.getChannel()==Lexer.DEFAULT_TOKEN_CHANNEL )
        {
            real.add(t);
        }
    }
    return real;
}
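A usage sketch; CommonTokenStream buffers lazily, so fill() is called first to make size() and get() cover the whole input (MyLexer is again a placeholder for any generated lexer):

CommonTokenStream tokens = new CommonTokenStream(new MyLexer(CharStreams.fromString("a b c")));
tokens.fill(); // buffer every token up to and including EOF
List<Token> real = getRealTokens(tokens); // default-channel tokens, EOF excluded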