本文整理汇总了Java中org.antlr.v4.tool.Grammar.isTokenName方法的典型用法代码示例。如果您正苦于以下问题:Java Grammar.isTokenName方法的具体用法?Java Grammar.isTokenName怎么用?Java Grammar.isTokenName使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.antlr.v4.tool.Grammar
的用法示例。
在下文中一共展示了Grammar.isTokenName方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: checkInvalidRuleDef
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Reports a grammar error when a rule definition does not fit the grammar
 * type: lower-case-initial (parser) rules inside a lexer grammar, or
 * token-style (upper-case-initial) rules inside a parser grammar.
 *
 * @param ruleID the token carrying the defined rule's name
 */
void checkInvalidRuleDef(Token ruleID) {
    String ruleName = ruleID.getText();
    String fileName = (ruleID.getInputStream() != null)
            ? ruleID.getInputStream().getSourceName()
            : null;
    // Lexer grammars may not define parser rules (lower-case first letter).
    if ( g.isLexer() && Character.isLowerCase(ruleName.charAt(0)) ) {
        g.tool.errMgr.grammarError(ErrorType.PARSER_RULES_NOT_ALLOWED,
                fileName, ruleID, ruleName);
    }
    // Parser grammars may not define lexer (token) rules.
    if ( g.isParser() && Grammar.isTokenName(ruleName) ) {
        g.tool.errMgr.grammarError(ErrorType.LEXER_RULES_NOT_ALLOWED,
                fileName, ruleID, ruleName);
    }
}
示例2: emit
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Classifies an ID token as TOKEN_REF (upper-case first character) or
 * RULE_REF before emitting it, and remembers the type of the rule
 * currently being lexed; a SEMI resets that memory at the end of a rule.
 */
@Override
public Token emit() {
    if (_type == SEMI) {
        // end of rule: forget which kind of rule we were in
        _ruleType = Token.INVALID_TYPE;
    } else if (_type == ID) {
        String startChar = _input.getText(Interval.of(_tokenStartCharIndex, _tokenStartCharIndex));
        _type = Grammar.isTokenName(startChar) ? TOKEN_REF : RULE_REF;
        if (_ruleType == Token.INVALID_TYPE) {
            // the first identifier after ';' names the rule being defined
            _ruleType = _type;
        }
    }
    return super.emit();
}
示例3: getRuleDefNameNode
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Finds the terminal node that defines {@code ruleName} in a grammar
 * parse tree: the TOKEN_REF of a lexer rule when the name looks like a
 * token name, otherwise the RULE_REF of a parser rule.
 *
 * @return the defining terminal node, or {@code null} when no rule with
 *         that name is defined in the tree
 */
public static TerminalNode getRuleDefNameNode(Parser parser, ParseTree tree, String ruleName) {
    String xpath = Grammar.isTokenName(ruleName)
            ? "//lexerRule/TOKEN_REF"
            : "//parserRuleSpec/RULE_REF";
    for (ParseTree node : XPath.findAll(tree, xpath, parser)) {
        // every match is a TerminalNode whose text is a defined rule name
        if ( ruleName.equals(node.getText()) ) {
            return (TerminalNode)node;
        }
    }
    return null;
}
示例4: getAllRuleRefNodes
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Collects every reference to {@code ruleName} inside rule bodies:
 * TOKEN_REFs within lexer rule blocks for token names, RULE_REFs within
 * parser rule blocks otherwise.
 *
 * @return the referencing terminal nodes, or {@code null} when there are
 *         none (historical contract preserved: never an empty list)
 */
public static List<TerminalNode> getAllRuleRefNodes(Parser parser, ParseTree tree, String ruleName) {
    String xpath = Grammar.isTokenName(ruleName)
            ? "//lexerRuleBlock//TOKEN_REF"
            : "//ruleBlock//RULE_REF";
    List<TerminalNode> matches = new ArrayList<TerminalNode>();
    for (ParseTree node : XPath.findAll(tree, xpath, parser)) {
        TerminalNode terminal = (TerminalNode)node;
        if ( ruleName.equals(terminal.getSymbol().getText()) ) {
            matches.add(terminal);
        }
    }
    return matches.isEmpty() ? null : matches;
}
示例5: checkTokenDefinition
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Emits TOKEN_NAMES_MUST_START_UPPER when a token definition's name does
 * not follow the token naming convention (upper-case first letter).
 *
 * @param tokenID the token holding the declared token's name
 */
void checkTokenDefinition(Token tokenID) {
    String fileName = tokenID.getInputStream().getSourceName();
    String name = tokenID.getText();
    if ( !Grammar.isTokenName(name) ) {
        g.tool.errMgr.grammarError(ErrorType.TOKEN_NAMES_MUST_START_UPPER,
                fileName,
                tokenID,
                name);
    }
}
示例6: getIdentifierAttributes
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Chooses highlight attributes for an identifier: identifiers inside an
 * options block use the generic style; elsewhere, token-style names get
 * the lexer-rule style and everything else the parser-rule style.
 */
private AttributeSet getIdentifierAttributes(GrammarHighlighterLexer lexer, String text) {
    if (lexer.isInOptions()) {
        return identifierAttributes;
    }
    return Grammar.isTokenName(text) ? lexerRuleAttributes : parserRuleAttributes;
}
示例7: getCategory
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Maps a transition/edge label to its coloring category: quoted labels
 * are string literals, token-style names are lexer rules, everything
 * else is a parser rule.
 *
 * @param label the label text; must not be null
 */
protected static String getCategory(String label) {
    Parameters.notNull("label", label);
    if (label.startsWith("'")) {
        return "stringliteral";
    }
    if (!label.isEmpty() && Grammar.isTokenName(label)) {
        return "lexerrule";
    }
    return "parserrule";
}
示例8: getIcon
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Picks the icon for this rule node. An explicit declaration kind on the
 * description wins; for UNKNOWN/UNDEFINED kinds (or plain descriptions)
 * fall back to the naming convention: token names get the lexer icon.
 */
@Override
protected ImageIcon getIcon() {
    if (rule instanceof GrammarNodeDescription) {
        switch (((GrammarNodeDescription)rule).getDeclarationKind()) {
        case PARSER_RULE:
            return PARSER_ICON;
        case LEXER_RULE:
            return LEXER_ICON;
        case FRAGMENT_RULE:
            return FRAGMENT_ICON;
        case TOKEN:
            return TOKEN_ICON;
        case MODE:
            return MODE_ICON;
        case CHANNEL:
            return CHANNEL_ICON;
        default:
            // UNKNOWN/UNDEFINED: fall through to the name-based choice
            break;
        }
    }
    return Grammar.isTokenName(rule.getName()) ? LEXER_ICON : PARSER_ICON;
}
示例9: processRules
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Builds outline descriptions for each top-level rule AST and sorts them
 * into the parser-rule and lexer-rule buckets.
 *
 * @param snapshot    snapshot the offsets refer to; {@code null} for
 *                    imported files (marks descriptions as inherited)
 * @param result      compiled model the rules must belong to
 * @param rules       the rule ASTs to process
 * @param parserRules receives descriptions of parser rules
 * @param lexerRules  receives descriptions of lexer rules
 */
private void processRules(DocumentSnapshot snapshot, CompiledFileModelV4 result, Collection<? extends GrammarAST> rules, Collection<Description> parserRules, Collection<Description> lexerRules) {
    for (GrammarAST child : rules) {
        if (child.getChild(0) instanceof GrammarASTErrorNode) {
            continue; // malformed rule header; nothing to describe
        }
        if (((GrammarAST)child.getChild(0)).g != result.getGrammar()) {
            continue; // rule belongs to a different (imported) grammar
        }
        String ruleName = child.getChild(0).getText();
        if ("Tokens".equals(ruleName)) {
            continue; // synthetic rule backing the tokens{} section
        }
        // Classify once; the original evaluated Grammar.isTokenName twice
        // per rule (once for the kind, once for the bucket).
        boolean isLexerRule = Grammar.isTokenName(ruleName);
        DeclarationKind declarationKind =
                isLexerRule ? DeclarationKind.LEXER_RULE : DeclarationKind.PARSER_RULE;
        GrammarNode.GrammarNodeDescription ruleDescription = new GrammarNode.GrammarNodeDescription(declarationKind, ruleName);
        ruleDescription.setOffset(snapshot, result.getFileObject(), getElementOffset(child));
        ruleDescription.setSpan(getSpan(snapshot, result, child));
        ruleDescription.setInherited(snapshot == null); // for now, go on the fact that snapshots aren't available for imported files
        (isLexerRule ? lexerRules : parserRules).add(ruleDescription);
    }
}
示例10: getGoToElementToolTip
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Builds the tooltip shown for the go-to-element target at {@code offset}:
 * "lexer rule X" / "parser rule X" for rule references, "reference" for
 * {@code $}-prefixed action references, and "" when there is no
 * supported token under the offset.
 */
public static String getGoToElementToolTip(StyledDocument document, int offset, boolean goToSource, String key) {
    Token token = getContext(document, offset);
    if (token == null) {
        return "";
    }
    int type = token.getType();
    // Only rule refs, token refs, action references, and '$'-prefixed
    // argument-action words produce a tooltip.
    boolean supported =
            type == GrammarParser.RULE_REF
            || type == GrammarParser.TOKEN_REF
            || type == GrammarParser.ACTION_REFERENCE
            || (type == GrammarLexer.ARG_ACTION_WORD && token.getText().charAt(0) == '$');
    if (!supported) {
        return "";
    }
    String text = token.getText();
    if (text.isEmpty()) {
        return "";
    }
    if (text.charAt(0) == '$') {
        return "reference";
    }
    return Grammar.isTokenName(text) ? "lexer rule " + text : "parser rule " + text;
}
示例11: isLexerRule
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * @return {@code true} when this rule's name follows the token (lexer
 *         rule) naming convention; {@code false} for parser rules or
 *         when the name is unavailable
 */
public boolean isLexerRule() {
    String ruleName = getRuleName();
    if (ruleName == null) {
        return false;
    }
    return Grammar.isTokenName(ruleName);
}
示例12: assignLexerTokenTypes
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Defines token types for a lexer grammar {@code g} in the outermost
 * (root) grammar, drawing names from three sources:
 * explicit {@code tokens { ... }} entries, non-fragment rules that have
 * no {@code type(...)}/{@code more} lexer command, and string-literal
 * aliases of the form {@code X : 'xxx';}. A literal that appears in more
 * than one rule is considered conflicting and is removed from the
 * literal maps so a parser grammar cannot resolve it ambiguously.
 *
 * @param g          the lexer grammar being processed (possibly imported)
 * @param tokensDefs the ID nodes from the {@code tokens { ... }} block
 */
void assignLexerTokenTypes(Grammar g, List<GrammarAST> tokensDefs) {
Grammar G = g.getOutermostGrammar(); // put in root, even if imported
for (GrammarAST def : tokensDefs) {
// tokens { id (',' id)* } so must check IDs not TOKEN_REF
if ( Grammar.isTokenName(def.getText()) ) {
G.defineTokenName(def.getText());
}
}
/* Define token types for nonfragment rules which do not include a 'type(...)'
 * or 'more' lexer command.
 */
for (Rule r : g.rules.values()) {
if ( !r.isFragment() && !hasTypeOrMoreCommand(r) ) {
G.defineTokenName(r.name);
}
}
// FOR ALL X : 'xxx'; RULES, DEFINE 'xxx' AS TYPE X
List<Pair<GrammarAST,GrammarAST>> litAliases =
Grammar.getStringLiteralAliasesFromLexerRules(g.ast);
Set<String> conflictingLiterals = new HashSet<String>();
if ( litAliases!=null ) {
for (Pair<GrammarAST,GrammarAST> pair : litAliases) {
GrammarAST nameAST = pair.a;
GrammarAST litAST = pair.b;
// First definition wins; a second occurrence marks the literal conflicting.
if ( !G.stringLiteralToTypeMap.containsKey(litAST.getText()) ) {
G.defineTokenAlias(nameAST.getText(), litAST.getText());
}
else {
// oops two literal defs in two rules (within or across modes).
conflictingLiterals.add(litAST.getText());
}
}
for (String lit : conflictingLiterals) {
// Remove literal if repeated across rules so it's not
// found by parser grammar.
Integer value = G.stringLiteralToTypeMap.remove(lit);
// Also clear the reverse (type -> literal) mapping, but only when that
// slot still points at this exact literal.
if (value != null && value > 0 && value < G.typeToStringLiteralList.size() && lit.equals(G.typeToStringLiteralList.get(value))) {
G.typeToStringLiteralList.set(value, null);
}
}
}
}
示例13: emit
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Post-processes each token before emission, maintaining lexer state:
 * tracks whether we are inside an options{} or tokens{} block, rewrites
 * LABEL/IDENTIFIER types accordingly, and remembers whether the current
 * rule is a token rule or a parser rule (reset on SEMI).
 */
@Override
public Token emit() {
    if (_type == TOKENS) {
        handleAcceptPositionForKeyword("tokens");
        setInTokens(true);
    } else if (_type == OPTIONS) {
        handleAcceptPositionForKeyword("options");
        setInOptions(true);
    } else if (_type == CHANNELS) {
        handleAcceptPositionForKeyword("channels");
    } else if (_type == LABEL) {
        handleAcceptPositionForIdentifier();
        // inside options{} a label is really an option name; inside
        // tokens{} it is a plain identifier
        if (isInOptions()) {
            _type = ValidGrammarOption;
        } else if (isInTokens()) {
            _type = IDENTIFIER;
        }
    } else if (_type == RCURLY) {
        // closing either block ends both block states
        setInTokens(false);
        setInOptions(false);
    } else if (_type == SEMI) {
        setRuleType(Token.INVALID_TYPE);
    } else if (_type == IDENTIFIER && _ruleType == Token.INVALID_TYPE) {
        // the first identifier of a rule decides whether we are lexing a
        // token rule (upper-case initial) or a parser rule
        String firstChar = _input.getText(Interval.of(_tokenStartCharIndex, _tokenStartCharIndex));
        _ruleType = Grammar.isTokenName(firstChar) ? GrammarLexer.TOKEN_REF : GrammarLexer.RULE_REF;
    }
    return super.emit();
}
示例14: getIcon
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Selects the node image. An explicit declaration kind on the
 * description wins; otherwise the decision falls back to a name — the
 * first child's name when children exist, else the description's own —
 * with token-style names mapped to the lexer image.
 */
@Override
public Image getIcon(int type) {
    if (getDescription() instanceof GrammarNodeDescription) {
        switch (((GrammarNodeDescription)getDescription()).getDeclarationKind()) {
        case PARSER_RULE:
            return PARSER_IMAGE;
        case LEXER_RULE:
            return LEXER_IMAGE;
        case FRAGMENT_RULE:
            return FRAGMENT_IMAGE;
        case TOKEN:
            return TOKEN_IMAGE;
        case MODE:
            return MODE_IMAGE;
        case CHANNEL:
            return CHANNEL_IMAGE;
        default:
            // UNKNOWN/UNDEFINED: decide by name below
            break;
        }
    }
    String name = getDescription().getName();
    if (!getDescription().getChildren().isEmpty()) {
        // prefer the first child's name when this node groups children
        name = getDescription().getChildren().iterator().next().getName();
    }
    if (name == null) {
        return super.getIcon(type);
    }
    return Grammar.isTokenName(name) ? LEXER_IMAGE : PARSER_IMAGE;
}
示例15: IncrementRuleVersionAction
import org.antlr.v4.tool.Grammar; //导入方法依赖的package包/类
/**
 * Builds the "increment rule version" action for the given context and
 * resolves the parser rule under the caret, if any: lexer rules and
 * rules from other files are skipped, and a rule matches only when the
 * caret sits within its name region.
 *
 * @param context action context (may be {@code null}); supplies the
 *                grammar data object and editor cookie via lookup
 */
public IncrementRuleVersionAction(@NullAllowed Lookup context) {
    super(Bundle.CTL_IncrementRuleVersionAction());
    this._context = context;
    this._dataObject = context != null ? context.lookup(GrammarDataObject.class) : null;
    this._editorCookie = context != null ? context.lookup(EditorCookie.class) : null;
    if (_editorCookie != null) {
        JTextComponent focused = EditorRegistry.focusedComponent();
        // BUGFIX: focusedComponent() returns null when no editor has focus;
        // the original dereferenced it unconditionally (NPE). Fall back to
        // the cookie's first opened pane in that case too.
        if (focused == null || !_editorCookie.getDocument().equals(focused.getDocument())) {
            focused = _editorCookie.getOpenedPanes()[0];
        }
        Token token = GoToSupport.getContext(_editorCookie.getDocument(), focused.getCaretPosition());
        if (token != null && token.getType() == GrammarParser.RULE_REF) {
            ParserTaskManager parserTaskManager = Lookup.getDefault().lookup(ParserTaskManager.class);
            VersionedDocument versionedDocument = VersionedDocumentUtilities.getVersionedDocument(_editorCookie.getDocument());
            _snapshot = versionedDocument.getCurrentSnapshot();
            Collection<Description> rules = GrammarCompletionProvider.getRulesFromGrammar(parserTaskManager, _snapshot, false);
            SnapshotPosition caretPosition = new SnapshotPosition(_snapshot, focused.getCaretPosition());
            Description currentDescription = null;
            for (Description description : rules) {
                if (Grammar.isTokenName(description.getName())) {
                    continue; // only parser rules are versioned
                }
                // BUGFIX: _dataObject may be null (lookup can miss); guard
                // before dereferencing instead of throwing NPE.
                if (_dataObject == null || !_dataObject.getPrimaryFile().equals(description.getFileObject())) {
                    continue; // rule defined in a different file
                }
                SnapshotPositionRegion namePosition = new SnapshotPositionRegion(_snapshot, description.getOffset(), description.getName().length());
                if (caretPosition.compareTo(namePosition.getStart()) >= 0 && caretPosition.compareTo(namePosition.getEnd()) <= 0) {
                    currentDescription = description;
                    break;
                }
            }
            _description = currentDescription;
        } else {
            _snapshot = null;
            _description = null;
        }
    } else {
        _snapshot = null;
        _description = null;
    }
}