

Python tokens.Name usage examples

This article collects typical usage examples of the Python token type sqlparse.tokens.Name. If you are wondering what tokens.Name is for, how to use it, or what real-world usage looks like, the curated code examples below may help. You can also explore further examples from the sqlparse.tokens module it belongs to.


The following presents 15 code examples involving tokens.Name, sorted by popularity by default.
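Before the examples, here is a minimal sketch (the query string and names are only illustrative) of where tokens.Name shows up in a flattened token stream:

import sqlparse
from sqlparse import tokens as T

# Plain identifiers are lexed as Name tokens; flatten() exposes the leaf tokens.
parsed = sqlparse.parse("select col_a from my_table")[0]
names = [tok.value for tok in parsed.flatten() if tok.ttype is T.Name]
print(names)  # expected: ['col_a', 'my_table']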

Example 1: _get_primary_key

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def _get_primary_key(self, def_tokens):
        EXPECT_PRIMARY = 0
        EXPECT_KEY = 1
        EXPECT_COLUMN = 2
        state = EXPECT_PRIMARY
        for token in def_tokens:
            if state == EXPECT_PRIMARY and token.match(T.Keyword, 'PRIMARY'):
                state = EXPECT_KEY
            elif state == EXPECT_KEY and token.value.upper() == 'KEY':
                state = EXPECT_COLUMN
            elif state == EXPECT_COLUMN and isinstance(token, sql.Parenthesis):
                return [
                    self._clean_identifier_quotes(t.value)
                    for t in token.tokens[1:-1]
                    if t.ttype in (T.Name, T.Literal.String.Symbol)
                ]
        return [] 
Developer: Yelp, Project: schematizer, Lines of code: 19, Source file: mysql_handler.py

Example 2: group_period

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def group_period(tlist):
    def match(token):
        return token.match(T.Punctuation, '.')

    def valid_prev(token):
        sqlcls = sql.SquareBrackets, sql.Identifier
        ttypes = T.Name, T.String.Symbol
        return imt(token, i=sqlcls, t=ttypes)

    def valid_next(token):
        # issue261, allow invalid next token
        return True

    def post(tlist, pidx, tidx, nidx):
        # next_ validation is being performed here. issue261
        sqlcls = sql.SquareBrackets, sql.Function
        ttypes = T.Name, T.String.Symbol, T.Wildcard
        next_ = tlist[nidx] if nidx is not None else None
        valid_next = imt(next_, i=sqlcls, t=ttypes)

        return (pidx, nidx) if valid_next else (pidx, tidx)

    _group(tlist, sql.Identifier, match, valid_prev, valid_next, post) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 25, Source file: grouping.py
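The effect of group_period is observable through sqlparse's public API: a dotted reference such as tbl.col ends up grouped into a single Identifier. A minimal sketch, using an illustrative query:

import sqlparse
from sqlparse.sql import Identifier

parsed = sqlparse.parse("select tbl.col from tbl")[0]
# The period joins the Name tokens on both sides into one Identifier node.
dotted = next(t for t in parsed.tokens if isinstance(t, Identifier))
print(dotted.get_parent_name(), dotted.get_real_name())  # expected: tbl col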

Example 3: group_arrays

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def group_arrays(tlist):
    sqlcls = sql.SquareBrackets, sql.Identifier, sql.Function
    ttypes = T.Name, T.String.Symbol

    def match(token):
        return isinstance(token, sql.SquareBrackets)

    def valid_prev(token):
        return imt(token, i=sqlcls, t=ttypes)

    def valid_next(token):
        return True

    def post(tlist, pidx, tidx, nidx):
        return pidx, tidx

    _group(tlist, sql.Identifier, match,
           valid_prev, valid_next, post, extend=True, recurse=False) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 20, Source file: grouping.py

Example 4: group_functions

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def group_functions(tlist):
    has_create = False
    has_table = False
    for tmp_token in tlist.tokens:
        if tmp_token.value == 'CREATE':
            has_create = True
        if tmp_token.value == 'TABLE':
            has_table = True
    if has_create and has_table:
        return

    tidx, token = tlist.token_next_by(t=T.Name)
    while token:
        nidx, next_ = tlist.token_next(tidx)
        if isinstance(next_, sql.Parenthesis):
            tlist.group_tokens(sql.Function, tidx, nidx)
        tidx, token = tlist.token_next_by(t=T.Name, idx=tidx) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 19, Source file: grouping.py

Example 5: _get_first_name

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def _get_first_name(self, idx=None, reverse=False, keywords=False):
        """Returns the name of the first token with a name"""

        if idx and not isinstance(idx, int):
            idx = self.token_index(idx) + 1

        tokens = self.tokens[idx:] if idx else self.tokens
        tokens = reversed(tokens) if reverse else tokens
        types = [T.Name, T.Wildcard, T.String.Symbol]

        if keywords:
            types.append(T.Keyword)

        for tok in tokens:
            if tok.ttype in types:
                return self._remove_quotes(tok.value)
            elif isinstance(tok, Identifier) or isinstance(tok, Function):
                return tok.get_name()
        return None 
Developer: sriniiyer, Project: codenn, Lines of code: 21, Source file: sql.py

Example 6: test_placeholder

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def test_placeholder(self):
        def _get_tokens(sql):
            return sqlparse.parse(sql)[0].tokens[-1].tokens
        t = _get_tokens('select * from foo where user = ?')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, '?')
        t = _get_tokens('select * from foo where user = :1')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, ':1')
        t = _get_tokens('select * from foo where user = :name')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, ':name')
        t = _get_tokens('select * from foo where user = %s')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, '%s')
        t = _get_tokens('select * from foo where user = $a')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, '$a') 
Developer: sriniiyer, Project: codenn, Lines of code: 20, Source file: test_parse.py

Example 7: pop

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def pop(self):
        next_val = self.peek()
        self.index += 1
        # We need to handle three cases here where the next_val could be:
        # 1. <table_name> ('business')
        # 2. <database_name>.<table_name> ('yelp.business')
        # 3. <database_name>.<table_name> <extended_query>
        # ('yelp.business change col_one col_two')
        # In all the cases we should return a token consisting of only the table
        # name or if the database name is present then the database name and the
        # table name. Case #3 occurs because SQLParse incorrectly parses certain
        # queries.
        if isinstance(next_val, Identifier):
            tokens = next_val.tokens
            if len(tokens) > 1 and tokens[1].value == '.':
                str_token = "{db_name}{punctuation}{table_name}".format(
                    db_name=tokens[0].value,
                    punctuation=tokens[1].value,
                    table_name=tokens[2].value
                )
                return TK(Token.Name, str_token)
            else:
                return next_val.token_first()
        return next_val 
Developer: Yelp, Project: mysql_streamer, Lines of code: 26, Source file: sql_handler.py

Example 8: _get_alias

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def _get_alias(self, token):
        tkw = token.token_next_match(0, T.Keyword, 'AS')
        if tkw is not None:
            return tu.token_next_enable(token, tkw)

        left = tu.token_next_enable(token)
        if not left:
            return None

        def is_space(tkn):
            return tkn.is_whitespace() and tkn.value

        spl = token.token_matching(token.token_index(left), [is_space])
        if spl:
            return tu.token_next_enable(token, spl)

        if tu.is_parenthesis(left):
            tkn = tu.token_next_enable(token, left)
            if tkn and (tu.is_identifier(tkn) or (tkn.ttype in T.Name)):
                # case: "(...) ALIAS"
                space = sql.Token(T.Whitespace, "\t")  # append a whitespace separator
                token.insert_after(left, space)
                return tkn

        return None 
Developer: future-architect, Project: uroboroSQL-formatter, Lines of code: 27, Source file: filters.py

Example 9: _process_tokenlist

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def _process_tokenlist(self, token_list: TokenList) -> None:
        """
        Add table names to table set

        :param token_list: TokenList to be processed
        """
        # exclude subselects
        if "(" not in str(token_list):
            table = self._get_table(token_list)
            if table and not table.table.startswith(CTE_PREFIX):
                self._tables.add(table)
            return

        # store aliases
        if token_list.has_alias():
            self._alias_names.add(token_list.get_alias())

        # some aliases are not parsed properly
        if token_list.tokens[0].ttype == Name:
            self._alias_names.add(token_list.tokens[0].value)
        self._extract_from_token(token_list) 
Developer: apache, Project: incubator-superset, Lines of code: 23, Source file: sql_parse.py

Example 10: update_names

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def update_names(self, text):
        for parsed in sqlparse.parse(text):
            for token in parsed.flatten():
                if token.ttype in Name:
                    self.name_counts[token.value] += 1 
Developer: dbcli, Project: pgcli, Lines of code: 7, Source file: prioritization.py
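Note that the `token.ttype in Name` test above also matches subtypes such as Name.Placeholder and Name.Builtin. A standalone sketch of the same counting idea (the query string is illustrative):

import sqlparse
from collections import Counter
from sqlparse.tokens import Name

name_counts = Counter()
for parsed in sqlparse.parse("select col_a from my_table where col_a = :p"):
    for token in parsed.flatten():
        # 'in' matches Name and its subtypes, so the :p placeholder is counted too.
        if token.ttype in Name:
            name_counts[token.value] += 1
print(name_counts)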

Example 11: _get_char_set_value

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def _get_char_set_value(self, attributes):
        """Currently the `character set` is not grouped as Attribute, so it
        is processed separately"""
        token = attributes.token_next_match(0, T.Name.Builtin, 'CHARACTER')
        if not token:
            return None

        index = attributes.token_index(token)
        token = attributes.token_next(index)
        if not token or token.value != u'SET':
            return None

        index = attributes.token_index(token)
        token = attributes.token_next(index)
        return token.value if token.ttype == T.Name else None 
Developer: Yelp, Project: schematizer, Lines of code: 17, Source file: mysql_handler.py

Example 12: group_identifier

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def group_identifier(tlist):
    ttypes = (T.String.Symbol, T.Name)

    tidx, token = tlist.token_next_by(t=ttypes)
    while token:
        tlist.group_tokens(sql.Identifier, tidx, tidx)
        tidx, token = tlist.token_next_by(t=ttypes, idx=tidx) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 9, Source file: grouping.py

Example 13: _get_first_name

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def _get_first_name(self, idx=None, reverse=False, keywords=False):
        """Returns the name of the first token with a name"""

        tokens = self.tokens[idx:] if idx else self.tokens
        tokens = reversed(tokens) if reverse else tokens
        types = [T.Name, T.Wildcard, T.String.Symbol]

        if keywords:
            types.append(T.Keyword)

        for token in tokens:
            if token.ttype in types:
                return remove_quotes(token.value)
            elif isinstance(token, (Identifier, Function)):
                return token.get_name() 
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 17, Source file: sql.py

Example 14: is_keyword

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def is_keyword(value):
    val = value.upper()
    return (KEYWORDS_COMMON.get(val) or
            KEYWORDS_ORACLE.get(val) or
            KEYWORDS.get(val, tokens.Name)), value 
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 7, Source file: keywords.py
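is_keyword is part of sqlparse's lexer configuration: values found in the keyword dictionaries resolve to a Keyword token type, and everything else falls back to tokens.Name. A rough usage sketch (the exact token types returned depend on the dictionaries in sqlparse.keywords):

from sqlparse import keywords

# A known keyword resolves to a Keyword token type...
print(keywords.is_keyword("SELECT"))    # e.g. (Token.Keyword.DML, 'SELECT')
# ...while an unknown value falls back to a plain Name.
print(keywords.is_keyword("my_table"))  # (Token.Name, 'my_table')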

Example 15: _process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Name [as alias]
def _process(self, stream, varname, has_nl):
        # SQL query assignment to varname
        if self.count > 1:
            yield sql.Token(T.Whitespace, '\n')
        yield sql.Token(T.Name, varname)
        yield sql.Token(T.Whitespace, ' ')
        yield sql.Token(T.Operator, '=')
        yield sql.Token(T.Whitespace, ' ')
        if has_nl:
            yield sql.Token(T.Operator, '(')
        yield sql.Token(T.Text, "'")

        # Emit the tokens inside the quoted string
        for token in stream:
            # Token is a new line separator
            if token.is_whitespace() and '\n' in token.value:
                # Close quote and add a new line
                yield sql.Token(T.Text, " '")
                yield sql.Token(T.Whitespace, '\n')

                # Quote header on secondary lines
                yield sql.Token(T.Whitespace, ' ' * (len(varname) + 4))
                yield sql.Token(T.Text, "'")

                # Indentation
                after_lb = token.value.split('\n', 1)[1]
                if after_lb:
                    yield sql.Token(T.Whitespace, after_lb)
                continue

            # Token has escape chars
            elif "'" in token.value:
                token.value = token.value.replace("'", "\\'")

            # Put the token
            yield sql.Token(T.Text, token.value)

        # Close quote
        yield sql.Token(T.Text, "'")
        if has_nl:
            yield sql.Token(T.Operator, ')') 
Developer: sriniiyer, Project: codenn, Lines of code: 43, Source file: filters.py
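This filter implements sqlparse's python output format, which emits a statement as a Python string assignment. A minimal usage sketch through the public API (the default variable name is sql):

import sqlparse

# The formatter wraps the statement in a Python assignment, roughly: sql = 'select * from foo;'
print(sqlparse.format("select * from foo;", output_format="python"))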


Note: The sqlparse.tokens.Name examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please refer to each project's license before redistributing or using the code. Do not republish without permission.