This article collects typical usage examples of the Java constant org.antlr.v4.runtime.Token.INVALID_TYPE. If you have been wondering what Token.INVALID_TYPE is for, how to use it, or what real code that uses it looks like, the hand-picked examples below should help. Token.INVALID_TYPE is the sentinel constant (value 0) that the ANTLR 4 runtime and tool use to mean "no valid token type" or "not found". You can also explore the type that declares it, org.antlr.v4.runtime.Token, for more usage examples.
The following presents 10 code examples of Token.INVALID_TYPE, sorted by popularity by default.
Example 1: importTokensFromTokensFile
/**
 * We want to touch as little ANTLR code as possible. We override this
 * method to stand in for the token vocab file parser, so the vocabulary
 * can be supplied from memory instead.
 */
@Override
public void importTokensFromTokensFile() {
    if (!tokenVocab.isEmpty()) {
        MemoryTokenVocabParser vparser = new MemoryTokenVocabParser(this, tokenVocab);
        Map<String, Integer> tokens = vparser.load();
        int ret;
        for (String t : tokens.keySet()) {
            if (t.charAt(0) == '\'') {
                ret = defineStringLiteral(t, tokens.get(t));
            } else {
                ret = defineTokenName(t, tokens.get(t));
            }
            if (ret == Token.INVALID_TYPE) {
                throw new IllegalArgumentException("Token must not be INVALID_TYPE");
            }
            LOGGER.debug("token {} {}", t, tokens.get(t));
        }
    }
}
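MemoryTokenVocabParser is a class specific to this project, so the example is not runnable on its own. As an assumption about the data it produces, the map returned by load() presumably follows ANTLR's .tokens convention: plain names for token types and single-quoted entries for string literals, which is exactly what the charAt(0) == '\'' check above distinguishes. A minimal, self-contained sketch of that shape (the names and type numbers are made up):

import java.util.LinkedHashMap;
import java.util.Map;

public class TokenVocabShapeSketch {
    public static void main(String[] args) {
        // Hypothetical vocabulary in the shape the example iterates over:
        // token names and string literals mapped to their token types.
        Map<String, Integer> tokens = new LinkedHashMap<>();
        tokens.put("ID", 1);
        tokens.put("INT", 2);
        tokens.put("'+'", 3);

        for (Map.Entry<String, Integer> e : tokens.entrySet()) {
            boolean isLiteral = e.getKey().charAt(0) == '\'';
            System.out.printf("%-5s -> type %d (%s)%n",
                    e.getKey(), e.getValue(), isLiteral ? "string literal" : "token name");
        }
    }
}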
Example 2: wipeCharPositionInfoAndWhitespaceTokens
public static void wipeCharPositionInfoAndWhitespaceTokens(CodeBuffTokenStream tokens) {
    tokens.fill();
    CommonToken dummy = new CommonToken(Token.INVALID_TYPE, "");
    dummy.setChannel(Token.HIDDEN_CHANNEL);
    Token firstRealToken = tokens.getNextRealToken(-1);
    for (int i = 0; i < tokens.size(); i++) {
        if ( i == firstRealToken.getTokenIndex() ) continue; // don't whack the first token
        CommonToken t = (CommonToken) tokens.get(i);
        if ( t.getText().matches("\\s+") ) {
            tokens.getTokens().set(i, dummy); // whack the whitespace token so it can't be used during prediction
        }
        else {
            t.setLine(0);
            t.setCharPositionInLine(-1);
        }
    }
}
Example 3: emit
@Override
public Token emit()
{
    if ( _type == ANTLRv4Lexer.ID )
    {
        String firstChar = _input.getText( Interval.of( _tokenStartCharIndex, _tokenStartCharIndex ) );
        if ( Character.isUpperCase( firstChar.charAt( 0 ) ) )
        {
            _type = ANTLRv4Lexer.TOKEN_REF;
        }
        else
        {
            _type = ANTLRv4Lexer.RULE_REF;
        }
        if ( _currentRuleType == Token.INVALID_TYPE )
        { // if outside of rule def
            _currentRuleType = _type; // set to inside lexer or parser rule
        }
    }
    else if ( _type == ANTLRv4Lexer.SEMI )
    { // exit rule def
        _currentRuleType = Token.INVALID_TYPE;
    }
    return super.emit();
}
Example 4: getTokenType
public int getTokenType(String token) {
    Integer I;
    if ( token.charAt(0) == '\'' ) {
        I = stringLiteralToTypeMap.get(token);
    }
    else { // must be a label like ID
        I = tokenNameToTypeMap.get(token);
    }
    int i = (I != null) ? I : Token.INVALID_TYPE;
    //tool.log("grammar", "grammar type "+type+" "+tokenName+"->"+i);
    return i;
}
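The method resolves a token name or string literal to its numeric type, with Token.INVALID_TYPE standing in for "not found". A stripped-down, self-contained sketch of the same lookup contract (the maps are filled by hand here rather than by a real grammar):

import java.util.HashMap;
import java.util.Map;
import org.antlr.v4.runtime.Token;

public class TokenTypeLookupSketch {
    // Hand-filled stand-ins for the grammar's name/literal indexes.
    static final Map<String, Integer> stringLiteralToTypeMap = new HashMap<>();
    static final Map<String, Integer> tokenNameToTypeMap = new HashMap<>();

    static int getTokenType(String token) {
        Integer type = token.charAt(0) == '\''
                ? stringLiteralToTypeMap.get(token)
                : tokenNameToTypeMap.get(token);
        return type != null ? type : Token.INVALID_TYPE;   // null lookup becomes the sentinel
    }

    public static void main(String[] args) {
        tokenNameToTypeMap.put("ID", 5);
        stringLiteralToTypeMap.put("'+'", 6);

        System.out.println(getTokenType("ID"));                            // 5
        System.out.println(getTokenType("'+'"));                           // 6
        System.out.println(getTokenType("UNKNOWN") == Token.INVALID_TYPE); // true
    }
}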
Example 5: getTokenDisplayName
/** Given a token type, get a meaningful name for it such as the ID
 *  or string literal. If this is a lexer and the ttype is in the
 *  char vocabulary, compute an ANTLR-valid (possibly escaped) char literal.
 */
public String getTokenDisplayName(int ttype) {
    // inside any target's char range and is lexer grammar?
    if ( isLexer() &&
         ttype >= Lexer.MIN_CHAR_VALUE && ttype <= Lexer.MAX_CHAR_VALUE )
    {
        return CharSupport.getANTLRCharLiteralForChar(ttype);
    }
    if ( ttype == Token.EOF ) {
        return "EOF";
    }
    if ( ttype == Token.INVALID_TYPE ) {
        return INVALID_TOKEN_NAME;
    }
    if ( ttype >= 0 && ttype < typeToStringLiteralList.size() && typeToStringLiteralList.get(ttype) != null ) {
        return typeToStringLiteralList.get(ttype);
    }
    if ( ttype >= 0 && ttype < typeToTokenList.size() && typeToTokenList.get(ttype) != null ) {
        return typeToTokenList.get(ttype);
    }
    return String.valueOf(ttype);
}
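The lookup order matters: string literal first, then token name, then the raw number. A self-contained sketch of just that fallback chain (the char-literal branch for lexer grammars is omitted, and INVALID_TOKEN_NAME is replaced with a hypothetical placeholder string, since that constant belongs to the surrounding class):

import java.util.Arrays;
import java.util.List;
import org.antlr.v4.runtime.Token;

public class DisplayNameSketch {
    // Hand-filled stand-ins for the grammar's reverse indexes (index = token type).
    static final List<String> typeToStringLiteralList = Arrays.asList(null, null, "'while'");
    static final List<String> typeToTokenList = Arrays.asList(null, "ID", "WHILE");

    static String displayName(int ttype) {
        if (ttype == Token.EOF) return "EOF";
        if (ttype == Token.INVALID_TYPE) return "<INVALID>";   // placeholder for INVALID_TOKEN_NAME
        if (ttype >= 0 && ttype < typeToStringLiteralList.size() && typeToStringLiteralList.get(ttype) != null) {
            return typeToStringLiteralList.get(ttype);          // prefer the string literal, e.g. 'while'
        }
        if (ttype >= 0 && ttype < typeToTokenList.size() && typeToTokenList.get(ttype) != null) {
            return typeToTokenList.get(ttype);                  // otherwise the token name
        }
        return String.valueOf(ttype);                           // last resort: the raw type number
    }

    public static void main(String[] args) {
        System.out.println(displayName(2));                     // 'while' (literal wins over the name WHILE)
        System.out.println(displayName(1));                     // ID
        System.out.println(displayName(Token.INVALID_TYPE));    // <INVALID>
        System.out.println(displayName(99));                    // 99
    }
}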
Example 6: defineStringLiteral
public int defineStringLiteral(String lit, int ttype) {
    if ( !stringLiteralToTypeMap.containsKey(lit) ) {
        stringLiteralToTypeMap.put(lit, ttype);
        // track in reverse index too
        if ( ttype >= typeToStringLiteralList.size() ) {
            Utils.setSize(typeToStringLiteralList, ttype + 1);
        }
        typeToStringLiteralList.set(ttype, lit);
        setTokenForType(ttype, lit);
        return ttype;
    }
    return Token.INVALID_TYPE;
}
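The return value is the contract here: the requested type comes back when the literal is newly registered, and Token.INVALID_TYPE comes back when the literal is already known, which is exactly what Example 1 turns into an exception. A minimal sketch of that contract with the reverse-index bookkeeping stripped out:

import java.util.HashMap;
import java.util.Map;
import org.antlr.v4.runtime.Token;

public class DefineLiteralSketch {
    static final Map<String, Integer> stringLiteralToTypeMap = new HashMap<>();

    static int defineStringLiteral(String lit, int ttype) {
        if (!stringLiteralToTypeMap.containsKey(lit)) {
            stringLiteralToTypeMap.put(lit, ttype);
            return ttype;              // newly registered: echo the assigned type
        }
        return Token.INVALID_TYPE;     // already defined: no new type assigned
    }

    public static void main(String[] args) {
        System.out.println(defineStringLiteral("'+'", 10));                        // 10
        System.out.println(defineStringLiteral("'+'", 11) == Token.INVALID_TYPE);  // true
    }
}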
Example 7: getElementName
public String getElementName(String name) {
    if (".".equals(name)) {
        return "_wild";
    }
    if ( getCodeGenerator().g.getRule(name) != null ) return name;
    int ttype = getCodeGenerator().g.getTokenType(name);
    if ( ttype == Token.INVALID_TYPE ) return name;
    return getTokenTypeAsTargetLabel(getCodeGenerator().g, ttype);
}
Example 8: assignTokenTypes
void assignTokenTypes(Grammar g, List<GrammarAST> tokensDefs,
                      List<GrammarAST> tokenIDs, List<GrammarAST> terminals)
{
    //Grammar G = g.getOutermostGrammar(); // put in root, even if imported

    // create token types for tokens { A, B, C } aliases
    for (GrammarAST alias : tokensDefs) {
        if (g.getTokenType(alias.getText()) != Token.INVALID_TYPE) {
            g.tool.errMgr.grammarError(ErrorType.TOKEN_NAME_REASSIGNMENT, g.fileName, alias.token, alias.getText());
        }
        g.defineTokenName(alias.getText());
    }

    // DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
    for (GrammarAST idAST : tokenIDs) {
        if (g.getTokenType(idAST.getText()) == Token.INVALID_TYPE) {
            g.tool.errMgr.grammarError(ErrorType.IMPLICIT_TOKEN_DEFINITION, g.fileName, idAST.token, idAST.getText());
        }
        g.defineTokenName(idAST.getText());
    }

    // VERIFY TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
    for (GrammarAST termAST : terminals) {
        if (termAST.getType() != ANTLRParser.STRING_LITERAL) {
            continue;
        }
        if (g.getTokenType(termAST.getText()) == Token.INVALID_TYPE) {
            g.tool.errMgr.grammarError(ErrorType.IMPLICIT_STRING_DEFINITION, g.fileName, termAST.token, termAST.getText());
        }
    }

    g.tool.log("semantics", "tokens=" + g.tokenNameToTypeMap);
    g.tool.log("semantics", "strings=" + g.stringLiteralToTypeMap);
}
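To make the middle check concrete: in a combined grammar like the illustrative fragment below (rule and token names made up), INT is referenced in a parser rule but never declared as a lexer rule or in a tokens block, so getTokenType("INT") returns Token.INVALID_TYPE during this pass and ANTLR reports an implicit-token-definition warning before defining the name anyway.

public class ImplicitTokenExample {
    // Illustrative grammar text only; it is not compiled here.
    static final String GRAMMAR =
            "grammar Demo;\n" +
            "stat : INT ;\n" +                     // INT is undeclared -> IMPLICIT_TOKEN_DEFINITION
            "WS   : [ \\t\\r\\n]+ -> skip ;\n";

    public static void main(String[] args) {
        System.out.println(GRAMMAR);
    }
}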
Example 9: assignChannelTypes
/**
 * Assign constant values to custom channels defined in a grammar.
 *
 * @param g The grammar.
 * @param channelDefs A collection of AST nodes defining individual channels
 * within a {@code channels{}} block in the grammar.
 */
void assignChannelTypes(Grammar g, List<GrammarAST> channelDefs) {
    Grammar outermost = g.getOutermostGrammar();
    for (GrammarAST channel : channelDefs) {
        String channelName = channel.getText();

        // Channel names can't alias tokens or modes, because constant
        // values are also assigned to them and the ->channel(NAME) lexer
        // command does not distinguish between the various ways a constant
        // can be declared. This method does not verify that channels do not
        // alias rules, because rule names are not associated with constant
        // values in ANTLR grammar semantics.

        if (g.getTokenType(channelName) != Token.INVALID_TYPE) {
            g.tool.errMgr.grammarError(ErrorType.CHANNEL_CONFLICTS_WITH_TOKEN, g.fileName, channel.token, channelName);
        }

        if (LexerATNFactory.COMMON_CONSTANTS.containsKey(channelName)) {
            g.tool.errMgr.grammarError(ErrorType.CHANNEL_CONFLICTS_WITH_COMMON_CONSTANTS, g.fileName, channel.token, channelName);
        }

        if (outermost instanceof LexerGrammar) {
            LexerGrammar lexerGrammar = (LexerGrammar) outermost;
            if (lexerGrammar.modes.containsKey(channelName)) {
                g.tool.errMgr.grammarError(ErrorType.CHANNEL_CONFLICTS_WITH_MODE, g.fileName, channel.token, channelName);
            }
        }

        outermost.defineChannelName(channel.getText());
    }
}
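LexerATNFactory.COMMON_CONSTANTS is the same reserved-name table consulted above. A small sketch of that one check in isolation; which names the map actually contains may vary by ANTLR version, so treat the expected output as an assumption:

import org.antlr.v4.automata.LexerATNFactory;

public class ChannelNameCheckSketch {
    public static void main(String[] args) {
        // MY_CHANNEL is a made-up name; HIDDEN is expected to be reserved.
        for (String name : new String[] { "HIDDEN", "MY_CHANNEL" }) {
            boolean reserved = LexerATNFactory.COMMON_CONSTANTS.containsKey(name);
            System.out.println(name + (reserved
                    ? " conflicts with a common constant"
                    : " looks usable as a custom channel name"));
        }
    }
}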
Example 10: nextToken
@Override
public Token nextToken()
{
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }

    // Mark the start location in the char stream so that unbuffered streams are
    // guaranteed to hold at least the text of the current token.
    int tokenStartMarker = _input.mark();
    try {
        outer:
        while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }

            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;

                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    }
                    catch (LexerNoViableAltException e) {
                        notifyListeners(e); // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }

                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            }
            while (_type == MORE);

            if (_token == null) {
                emit();
            }
            return _token;
        }
    }
    finally {
        // make sure we release the marker after the match, or an
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}
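The match(String) call in the delimiter loop is not a method of org.antlr.v4.runtime.Lexer; it is a helper defined elsewhere in this lexer subclass and not shown in the example. A plausible sketch of such a helper, purely as an assumption about its behavior: look ahead character by character and only consume input when the whole delimiter matches.

// Assumed helper belonging to the same lexer subclass (not part of the ANTLR runtime):
private boolean match(String delimiter) {
    for (int i = 0; i < delimiter.length(); i++) {
        if (_input.LA(i + 1) != delimiter.charAt(i)) {
            return false;                                  // mismatch: leave the input untouched
        }
    }
    _input.seek(_input.index() + delimiter.length());      // full match: consume the delimiter text
    return true;
}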