This article collects typical usage examples of the Java method org.antlr.v4.runtime.CommonToken.getText. If you are unsure what CommonToken.getText does, how to call it, or where to find examples, the curated code samples below may help. You can also explore further usage of the enclosing class org.antlr.v4.runtime.CommonToken.
The following presents 4 code examples of CommonToken.getText, sorted by popularity by default. Examples you find useful can be upvoted to help surface better Java samples.
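Before the collected examples, here is a minimal, self-contained sketch written for this article (not taken from the examples below; the class name and token-type values are made up for illustration) showing what CommonToken.getText returns:

import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.runtime.Token;

public class GetTextDemo {
    public static void main(String[] args) {
        // Token type 1 is arbitrary here; only the text matters for getText().
        CommonToken token = new CommonToken(1, "hello");
        System.out.println(token.getText());   // prints "hello"

        // Explicitly set text takes precedence over the start/stop range
        // that a stream-backed token would otherwise read from its CharStream.
        token.setText("world");
        System.out.println(token.getText());   // prints "world"

        // EOF tokens produced by a lexer conventionally report "<EOF>";
        // here the text is simply set explicitly.
        CommonToken eof = new CommonToken(Token.EOF, "<EOF>");
        System.out.println(eof.getText());     // prints "<EOF>"
    }
}

In short, getText() returns the explicitly set text if there is one, otherwise the slice of the input stream between the token's start and stop indexes.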
Example 1: syntaxError
import org.antlr.v4.runtime.CommonToken; // import the dependent package/class for this method

/**
 * @see BaseErrorListener#syntaxError
 */
@Override
public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
        int charPositionInLine, String msg, RecognitionException e) {
    List<String> stack = ((Parser) recognizer).getRuleInvocationStack();
    Collections.reverse(stack);
    String logMsg = "Parser ERROR: line " + line + ":" + charPositionInLine + " at "
            + offendingSymbol + ": " + msg;
    // The offending symbol is the token the parser stumbled on; getText() yields its source text.
    CommonToken tok = (CommonToken) offendingSymbol;
    String s = tok.getText();
    logMsg += ": offending token " + s;
    if (s.equals("<EOF>")) {
        logMsg += ". Look for tag=(null or empty).";
    } else {
        try {
            Integer.parseInt(s);
        } catch (NumberFormatException ex) {
            logMsg += " not a number. ";
        }
    }
    FixRulesParserErrorListener.logger.error(logMsg + " Tree = {}", stack);
    throw new RuntimeException(logMsg);
}
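For context, a listener like the one in Example 1 only fires if it is registered on the parser before parsing starts. The fragment below is a hypothetical wiring sketch: FixLexer, FixParser, and the fixRules start rule are placeholder names for a generated grammar, not part of the example above.

CharStream input = CharStreams.fromString("8=FIX.4.4");          // placeholder input
FixLexer lexer = new FixLexer(input);                             // hypothetical generated lexer
FixParser parser = new FixParser(new CommonTokenStream(lexer));   // hypothetical generated parser
parser.removeErrorListeners();                                    // drop ANTLR's default console listener
parser.addErrorListener(new FixRulesParserErrorListener());       // install the listener from Example 1
parser.fixRules();                                                // hypothetical start rule; throws on syntax errors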
Example 2: run
import org.antlr.v4.runtime.CommonToken; // import the dependent package/class for this method

@Override
public void run(Parser.Result result, SchedulerEvent event) {
    SilverstripeParserResult parserResult = (SilverstripeParserResult) result;
    List<SilverstripeParserException> exceptions = parserResult.getParserExceptions();
    Document document = result.getSnapshot().getSource().getDocument(false);
    List<ErrorDescription> errors = new ArrayList<>();
    for (SilverstripeParserException ex : exceptions) {
        CommonToken token = ex.getToken();
        int start = NbDocument.findLineOffset((StyledDocument) document, token.getLine() - 1) + token.getCharPositionInLine();
        // Measure the token via getText(); skip EOF (type -1), which has no source text to underline.
        int tokenLength = 0;
        if (token.getText() != null && token.getType() != -1) {
            tokenLength = token.getText().length();
        }
        int end = start + tokenLength;
        try {
            ErrorDescription errorDescription = ErrorDescriptionFactory.createErrorDescription(
                    Severity.ERROR,
                    ex.getMessage() != null ? ex.getMessage() : "Non-parseable source",
                    document,
                    document.createPosition(start),
                    document.createPosition(end)
            );
            errors.add(errorDescription);
        } catch (BadLocationException ex1) {
            LOG.log(Level.WARNING, ex1.getMessage(), ex1);
        }
    }
    HintsController.setErrors(document, "ss-template", errors);
}
Example 3: exitUnary
import org.antlr.v4.runtime.CommonToken; // import the dependent package/class for this method

@Override
public void exitUnary(UnaryContext ctx) {
    if (ctx.getChildCount() == 1) {
        return;
    }
    final Token token;
    if (ctx.cast() != null) {
        // Copy the cast's type token; the copy constructor preserves its text,
        // so getText() can be used to choose the new token type below.
        CommonToken ct = new CommonToken(ctx.cast().type);
        switch (ct.getText()) {
            case "long":
                ct.setType(CAST_LONG);
                break;
            case "bool":
                ct.setType(CAST_BOOL);
                break;
            case "string":
                ct.setType(CAST_STRING);
                break;
            case "double":
                ct.setType(CAST_DOUBLE);
                break;
        }
        token = ct;
    } else {
        token = ctx.getStart();
    }
    Expression arg = stack.pop();
    if (token.getType() != COUNT) {
        arg = dereference(token, arg, null);
    }
    stack.push(ExpressionFactory.createOpExpr(token, arg));
}
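The copy-and-retype pattern in Example 3 works because CommonToken's copy constructor preserves the source token's text. A small self-contained illustration (the class name, the type value 5, and the CAST_LONG constant are placeholders):

import org.antlr.v4.runtime.CommonToken;

public class CopyTokenDemo {
    // Placeholder token-type constant, standing in for the CAST_LONG used in Example 3.
    static final int CAST_LONG = 1001;

    public static void main(String[] args) {
        CommonToken source = new CommonToken(5, "long");  // 5 is an arbitrary original token type
        CommonToken copy = new CommonToken(source);       // copy constructor keeps type, text, channel, indexes
        copy.setType(CAST_LONG);                          // retag the copy, as exitUnary does
        System.out.println(copy.getText());               // still prints "long"
        System.out.println(copy.getType());               // prints 1001
    }
}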
Example 4: processToken
import org.antlr.v4.runtime.CommonToken; // import the dependent package/class for this method

public void processToken(int indexIntoRealTokens, int tokenIndexInStream, boolean collectAnalysis) {
    CommonToken curToken = (CommonToken) testDoc.tokens.get(tokenIndexInStream);
    String tokText = curToken.getText();
    TerminalNode node = tokenToNodeMap.get(curToken);
    int[] features = getFeatures(testDoc, tokenIndexInStream);
    int[] featuresForAlign = new int[features.length];
    System.arraycopy(features, 0, featuresForAlign, 0, features.length);
    int injectNL_WS = wsClassifier.classify(k, features, Trainer.MAX_WS_CONTEXT_DIFF_THRESHOLD);
    injectNL_WS = emitCommentsToTheLeft(tokenIndexInStream, injectNL_WS);
    int newlines = 0;
    int ws = 0;
    if ( (injectNL_WS&0xFF)==CAT_INJECT_NL ) {
        newlines = Trainer.unnlcat(injectNL_WS);
    }
    else if ( (injectNL_WS&0xFF)==CAT_INJECT_WS ) {
        ws = Trainer.unwscat(injectNL_WS);
    }
    if ( newlines==0 && ws==0 && cannotJoin(realTokens.get(indexIntoRealTokens-1), curToken) ) { // failsafe!
        ws = 1;
    }
    int alignOrIndent = CAT_ALIGN;
    if ( newlines>0 ) {
        output.append(Tool.newlines(newlines));
        line += newlines;
        charPosInLine = 0;
        // getFeatures() doesn't know what line curToken is on. If \n, we need to find exemplars that start a line
        featuresForAlign[INDEX_FIRST_ON_LINE] = 1; // use \n prediction to match exemplars for alignment
        alignOrIndent = hposClassifier.classify(k, featuresForAlign, MAX_ALIGN_CONTEXT_DIFF_THRESHOLD);
        if ( (alignOrIndent&0xFF)==CAT_ALIGN_WITH_ANCESTOR_CHILD ) {
            align(alignOrIndent, node);
        }
        else if ( (alignOrIndent&0xFF)==CAT_INDENT_FROM_ANCESTOR_CHILD ) {
            indent(alignOrIndent, node);
        }
        else if ( (alignOrIndent&0xFF)==CAT_ALIGN ) {
            List<Token> tokensOnPreviousLine = getTokensOnPreviousLine(testDoc.tokens, tokenIndexInStream, line);
            if ( tokensOnPreviousLine.size()>0 ) {
                Token firstTokenOnPrevLine = tokensOnPreviousLine.get(0);
                int indentCol = firstTokenOnPrevLine.getCharPositionInLine();
                charPosInLine = indentCol;
                output.append(Tool.spaces(indentCol));
            }
        }
        else if ( (alignOrIndent&0xFF)==CAT_INDENT ) {
            indent(alignOrIndent, node);
        }
    }
    else {
        // inject whitespace instead of \n?
        output.append(Tool.spaces(ws));
        charPosInLine += ws;
    }
    // update Token object with position information now that we are about
    // to emit it.
    curToken.setLine(line);
    curToken.setCharPositionInLine(charPosInLine);
    TokenPositionAnalysis tokenPositionAnalysis =
            getTokenAnalysis(features, featuresForAlign, tokenIndexInStream, injectNL_WS, alignOrIndent, collectAnalysis);
    analysis.set(tokenIndexInStream, tokenPositionAnalysis);
    int n = tokText.length();
    tokenPositionAnalysis.charIndexStart = output.length();
    tokenPositionAnalysis.charIndexStop = tokenPositionAnalysis.charIndexStart + n - 1;
    // emit
    output.append(tokText);
    charPosInLine += n;
}