This article collects typical usage examples of the Java class org.antlr.v4.runtime.Vocabulary. If you have been wondering what the Vocabulary class is for or how to use it, the curated code examples below may help.
The Vocabulary class belongs to the org.antlr.v4.runtime package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
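Before diving into the examples, here is a minimal sketch of the core Vocabulary query API. It is an illustrative snippet rather than code from any of the projects below; SqlBaseLexer stands in for any ANTLR-generated lexer (the same one Example 1 uses), and the fallback behavior described in the comments is that of VocabularyImpl, the default implementation.

import org.antlr.v4.runtime.Vocabulary;

public class VocabularyDemo {
    public static void main(String[] args) {
        // Every ANTLR-generated recognizer exposes a static VOCABULARY constant;
        // SqlBaseLexer is assumed here to be such a generated lexer.
        Vocabulary vocabulary = SqlBaseLexer.VOCABULARY;
        for (int type = 0; type <= vocabulary.getMaxTokenType(); type++) {
            // getLiteralName: the quoted literal (e.g. "'SELECT'"), or null if the token has none
            // getSymbolicName: the token name from the grammar (e.g. "IDENTIFIER"), or null
            // getDisplayName: literal name if present, else symbolic name, else the numeric type
            System.out.printf("%d: literal=%s symbolic=%s display=%s%n",
                    type,
                    vocabulary.getLiteralName(type),
                    vocabulary.getSymbolicName(type),
                    vocabulary.getDisplayName(type));
        }
    }
}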
Example 1: possibleIdentifiers
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
private static Set<String> possibleIdentifiers()
{
    ImmutableSet.Builder<String> names = ImmutableSet.builder();
    Vocabulary vocabulary = SqlBaseLexer.VOCABULARY;
    for (int i = 0; i <= vocabulary.getMaxTokenType(); i++) {
        String name = nullToEmpty(vocabulary.getLiteralName(i));
        Matcher matcher = IDENTIFIER.matcher(name);
        if (matcher.matches()) {
            names.add(matcher.group(1));
        }
    }
    return names.build();
}
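The snippet above references an IDENTIFIER pattern and a nullToEmpty helper that are not part of the excerpt. A plausible, hypothetical reconstruction is sketched below so the example is self-contained; the actual project (this method comes from a Presto/Trino-style SQL parser) may declare them differently, and nullToEmpty is most likely Guava's Strings.nullToEmpty imported statically.

// Hypothetical declarations assumed by Example 1; the real source may differ.
private static final Pattern IDENTIFIER = Pattern.compile("'([A-Z_]+)'"); // quoted keyword literals such as 'SELECT'

private static String nullToEmpty(String s) {
    return s == null ? "" : s; // Guava's Strings.nullToEmpty behaves the same way
}

With these in place, possibleIdentifiers() returns the set of unquoted keyword names (SELECT, FROM, ...) that the lexer defines as literal tokens.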
Example 2: getMatchingLeftSymbol
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
public static TerminalNode getMatchingLeftSymbol(Corpus corpus,
                                                 InputDocument doc,
                                                 TerminalNode node)
{
    ParserRuleContext parent = (ParserRuleContext) node.getParent();
    int curTokensParentRuleIndex = parent.getRuleIndex();
    Token curToken = node.getSymbol();
    if (corpus.ruleToPairsBag != null) {
        String ruleName = doc.parser.getRuleNames()[curTokensParentRuleIndex];
        RuleAltKey ruleAltKey = new RuleAltKey(ruleName, parent.getAltNumber());
        List<Pair<Integer, Integer>> pairs = corpus.ruleToPairsBag.get(ruleAltKey);
        if ( pairs!=null ) {
            // Find the appropriate pair given the current token.
            // If more than one pair (a,b) has b = current token, pick the first one,
            // or give preference to a common pair like ({,}),
            // or if b is punctuation, prefer an a that is also punctuation.
            List<Integer> viableMatchingLeftTokenTypes = viableLeftTokenTypes(parent, curToken, pairs);
            Vocabulary vocab = doc.parser.getVocabulary();
            if ( !viableMatchingLeftTokenTypes.isEmpty() ) {
                int matchingLeftTokenType =
                    CollectTokenPairs.getMatchingLeftTokenType(curToken, viableMatchingLeftTokenTypes, vocab);
                List<TerminalNode> matchingLeftNodes = parent.getTokens(matchingLeftTokenType);
                // Get the matching left node: the last such node to the left of the current token.
                List<TerminalNode> nodesToLeftOfCurrentToken =
                    filter(matchingLeftNodes, n -> n.getSymbol().getTokenIndex() < curToken.getTokenIndex());
                // Guard against an empty list so the null check below is meaningful.
                TerminalNode matchingLeftNode = null;
                if ( !nodesToLeftOfCurrentToken.isEmpty() ) {
                    matchingLeftNode = nodesToLeftOfCurrentToken.get(nodesToLeftOfCurrentToken.size() - 1);
                }
                if ( matchingLeftNode==null ) {
                    System.err.println("can't find matching node for " + node.getSymbol());
                }
                return matchingLeftNode;
            }
        }
    }
    return null;
}
Example 3: GrammarParserInterpreter
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
public GrammarParserInterpreter(Grammar g,
                                String grammarFileName,
                                Vocabulary vocabulary,
                                Collection<String> ruleNames,
                                ATN atn,
                                TokenStream input) {
    super(grammarFileName, vocabulary, ruleNames, atn, input);
    this.g = g;
}
Example 4: createLexer
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
@Override
protected TokenSourceWithStateV4<SimpleLexerState> createLexer(CharStream input, SimpleLexerState startState) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    ParserDebuggerLexerWrapper lexer = new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
    startState.apply(lexer);
    return lexer;
}
Example 5: getEffectiveTokenSource
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    return new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, lexer.getInputStream());
}
Example 6: getVocabulary
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Example 7: getVocabulary
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Example 8: _toString
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
public static String _toString(FeatureMetaData[] FEATURES, InputDocument doc, int[] features,
                               boolean showInfo) {
    Vocabulary v = doc.parser.getVocabulary();
    String[] ruleNames = doc.parser.getRuleNames();
    StringBuilder buf = new StringBuilder();
    for (int i=0; i<FEATURES.length; i++) {
        if ( FEATURES[i].type.equals(UNUSED) ) continue;
        if ( i>0 ) buf.append(" ");
        if ( i==INDEX_CUR_TOKEN_TYPE ) {
            buf.append("| "); // separate prev from current tokens
        }
        int displayWidth = FEATURES[i].type.displayWidth;
        switch ( FEATURES[i].type ) {
            case TOKEN :
                String tokenName = v.getDisplayName(features[i]);
                String abbrev = StringUtils.abbreviateMiddle(tokenName, "*", displayWidth);
                String centered = StringUtils.center(abbrev, displayWidth);
                buf.append(String.format("%"+displayWidth+"s", centered));
                break;
            case RULE :
                if ( features[i]>=0 ) {
                    String ruleName = ruleNames[unrulealt(features[i])[0]];
                    int ruleAltNum = unrulealt(features[i])[1];
                    ruleName += ":"+ruleAltNum;
                    abbrev = StringUtils.abbreviateMiddle(ruleName, "*", displayWidth);
                    buf.append(String.format("%"+displayWidth+"s", abbrev));
                }
                else {
                    buf.append(Tool.sequence(displayWidth, " "));
                }
                break;
            case INT :
            case INFO_LINE:
            case INFO_CHARPOS:
                if ( showInfo ) {
                    if ( features[i]>=0 ) {
                        buf.append(String.format("%"+displayWidth+"s", StringUtils.center(String.valueOf(features[i]), displayWidth)));
                    }
                    else {
                        buf.append(Tool.sequence(displayWidth, " "));
                    }
                }
                break;
            case INFO_FILE:
                if ( showInfo ) {
                    String fname = new File(doc.fileName).getName();
                    fname = StringUtils.abbreviate(fname, displayWidth);
                    buf.append(String.format("%"+displayWidth+"s", fname));
                }
                break;
            case BOOL :
                if ( features[i]!=-1 ) {
                    buf.append(features[i] == 1 ? "true " : "false");
                }
                else {
                    buf.append(Tool.sequence(displayWidth, " "));
                }
                break;
            default :
                System.err.println("NO STRING FOR FEATURE TYPE: "+ FEATURES[i].type);
        }
    }
    return buf.toString();
}
Example 9: CollectTokenPairs
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
public CollectTokenPairs(Vocabulary vocab, String[] ruleNames) {
    this.vocab = vocab;
    this.ruleNames = ruleNames;
}
Example 10: getVocabulary
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
/**
 * Gets a {@link Vocabulary} instance describing the vocabulary used by the
 * grammar.
 */
public Vocabulary getVocabulary() {
    return new VocabularyImpl(getTokenLiteralNames(), getTokenSymbolicNames());
}
Example 11: getVocabulary
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Example 12: getVocabulary
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Example 13: parse
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
        throws InterruptedException, ExecutionException {
    //ParserDebuggerEditorKit.LEX
    synchronized (lock) {
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;
            ParserInterpreterData parserInterpreterData = (ParserInterpreterData) snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);
            long startTime = System.nanoTime();
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);
            String sourceName = (String) snapshot.getVersionedDocument().getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }
        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}
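Example 13 is embedded in a NetBeans parser-task framework, but its ANTLR-specific core is the standard "deserialize an ATN, then drive a ParserInterpreter with a Vocabulary" pattern. The distilled sketch below uses only runtime classes; serializedAtn, vocabulary, ruleNames, tokenStream, and startRuleIndex are assumed to be available from elsewhere, "MyGrammar.g4" is a placeholder, and the char[] overload of ATNDeserializer.deserialize reflects the pre-4.10 runtime this code was written against.

// Distilled version of the interpreter setup in Example 13 (runtime classes only).
ATN atn = new ATNDeserializer().deserialize(serializedAtn.toCharArray());
ParserInterpreter parser = new ParserInterpreter(
        "MyGrammar.g4",   // placeholder grammar file name
        vocabulary,       // Vocabulary shipped alongside the serialized ATN
        ruleNames,
        atn,
        tokenStream);
parser.removeErrorListeners();
parser.setBuildParseTree(true);
ParserRuleContext tree = parser.parse(startRuleIndex);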
Example 14: TracingParserInterpreter
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
public TracingParserInterpreter(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, ATN atn, TokenStream input) {
    super(grammarFileName, vocabulary, ruleNames, atn, input);
}
Example 15: ParserDebuggerLexerWrapper
import org.antlr.v4.runtime.Vocabulary; // import the required package/class
public ParserDebuggerLexerWrapper(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, Collection<String> modeNames, ATN atn, CharStream input) {
    super(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
}