

Python sql.TokenList Code Examples

This article collects typical usage examples of sqlparse.sql.TokenList in Python (strictly speaking a class in the sqlparse.sql module, although this index files it under methods). If you are wondering what sql.TokenList is for, how to use it, or what real-world code built on it looks like, the curated examples below should help. You can also browse further usage examples from the containing module, sqlparse.sql.


Below are 14 code examples of sql.TokenList, sorted by popularity.

Example 1: _extract_limit_from_query

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _extract_limit_from_query(statement: TokenList) -> Optional[int]:
    """
    Extract limit clause from SQL statement.

    :param statement: SQL statement
    :return: Limit extracted from query, None if no limit present in statement
    """
    idx, _ = statement.token_next_by(m=(Keyword, "LIMIT"))
    if idx is not None:
        _, token = statement.token_next(idx=idx)
        if token:
            if isinstance(token, IdentifierList):
                # In case of "LIMIT <offset>, <limit>", find comma and extract
                # first succeeding non-whitespace token
                idx, _ = token.token_next_by(m=(sqlparse.tokens.Punctuation, ","))
                _, token = token.token_next(idx=idx)
            if token and token.ttype == sqlparse.tokens.Literal.Number.Integer:
                return int(token.value)
    return None 
Developer: apache, Project: incubator-superset, Lines: 21, Source: sql_parse.py
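A quick usage sketch (the query is my own illustration; it also assumes the snippet's other imports, from sqlparse.tokens import Keyword, from sqlparse.sql import IdentifierList, and from typing import Optional):

import sqlparse

statement = sqlparse.parse("SELECT name FROM users LIMIT 100")[0]
print(_extract_limit_from_query(statement))  # 100

# The "LIMIT <offset>, <limit>" form resolves to the second number:
offset_stmt = sqlparse.parse("SELECT name FROM users LIMIT 10, 100")[0]
print(_extract_limit_from_query(offset_stmt))  # 100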

Example 2: _process_tokenlist

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _process_tokenlist(self, token_list: TokenList) -> None:
    """
    Add table names to the table set.

    :param token_list: TokenList to be processed
    """
    # exclude subselects
    if "(" not in str(token_list):
        table = self._get_table(token_list)
        if table and not table.table.startswith(CTE_PREFIX):
            self._tables.add(table)
        return

    # store aliases
    if token_list.has_alias():
        self._alias_names.add(token_list.get_alias())

    # some aliases are not parsed properly
    if token_list.tokens[0].ttype == Name:
        self._alias_names.add(token_list.tokens[0].value)
    self._extract_from_token(token_list)
Developer: apache, Project: incubator-superset, Lines: 23, Source: sql_parse.py
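A sketch of the kind of TokenList this method receives; in Superset the caller is _extract_from_token (Example 14 below), and the query here is made up:

import sqlparse
from sqlparse.sql import Identifier

stmt = sqlparse.parse("SELECT * FROM schema.my_table AS t")[0]
# The Identifier group "schema.my_table AS t" is what would be handed
# to _process_tokenlist.
token_list = next(tok for tok in stmt.tokens if isinstance(tok, Identifier))
print(token_list.has_alias(), token_list.get_alias())  # True t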

Example 3: get_query_tokens

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def get_query_tokens(query: str) -> List[sqlparse.sql.Token]:
    """
    :type query: str
    :rtype: list[sqlparse.sql.Token]
    """
    query = preprocess_query(query)
    parsed = sqlparse.parse(query)

    # handle empty queries (#12)
    if not parsed:
        return []

    tokens = TokenList(parsed[0].tokens).flatten()
    # print([(token.value, token.ttype) for token in tokens])

    return [token for token in tokens if token.ttype is not Whitespace] 
Developer: macbre, Project: sql-metadata, Lines: 18, Source: sql_metadata.py
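The flatten-and-filter core runs standalone; a minimal sketch (illustrative query; preprocess_query is sql-metadata's own normalization step and is skipped here):

import sqlparse
from sqlparse.sql import TokenList
from sqlparse.tokens import Whitespace

parsed = sqlparse.parse("SELECT id, name FROM users")
flat = TokenList(parsed[0].tokens).flatten()
print([tok.value for tok in flat if tok.ttype is not Whitespace])
# ['SELECT', 'id', ',', 'name', 'FROM', 'users']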

Example 4: align_comments

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def align_comments(tlist):
    tidx, token = tlist.token_next_by(i=sql.Comment)
    while token:
        pidx, prev_ = tlist.token_prev(tidx)
        if isinstance(prev_, sql.TokenList):
            tlist.group_tokens(sql.TokenList, pidx, tidx, extend=True)
            tidx = pidx
        tidx, token = tlist.token_next_by(i=sql.Comment, idx=tidx) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines: 10, Source: grouping.py
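Because sqlparse.parse() already runs the full grouping pipeline (align_comments is one of its steps), the effect is easiest to see on a hand-built token list; a sketch against the sqlparse 0.2-style API this snippet targets:

from sqlparse import sql
from sqlparse import tokens as T

ident = sql.Identifier([sql.Token(T.Name, "a")])
comment = sql.Comment([sql.Token(T.Comment.Single, "-- first col\n")])
tlist = sql.TokenList([ident, sql.Token(T.Whitespace, " "), comment])

align_comments(tlist)
# The comment is absorbed into the preceding group:
print(type(tlist.tokens[0]).__name__, repr(str(tlist.tokens[0])))
# Identifier 'a -- first col\n'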

Example 5: _process_statement

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _process_statement(self, tlist):
    if tlist.tokens[0].is_whitespace and self.indent == 0:
        tlist.tokens.pop(0)

    # process the main query body
    self._process(sql.TokenList(tlist.tokens))
Developer: mtxr, Project: SublimeText-SQLTools, Lines: 8, Source: aligned_indent.py

Example 6: align_comments

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def align_comments(tlist):
    [align_comments(sgroup) for sgroup in tlist.get_sublists()]
    idx = 0
    token = tlist.token_next_by_instance(idx, sql.Comment)
    while token:
        before = tlist.token_prev(tlist.token_index(token))
        if isinstance(before, sql.TokenList):
            grp = tlist.tokens_between(before, token)[1:]
            before.tokens.extend(grp)
            for t in grp:
                tlist.tokens.remove(t)
            idx = tlist.token_index(before) + 1
        else:
            idx = tlist.token_index(token) + 1
        token = tlist.token_next_by_instance(idx, sql.Comment) 
Developer: sriniiyer, Project: codenn, Lines: 17, Source: grouping.py
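Note: this variant targets the pre-0.2 sqlparse API (token_next_by_instance, tokens_between, and a token_prev that returns the token rather than an index); Example 4 above is the same pass rewritten for the newer token_next_by/group_tokens interface.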

Example 7: test_token_first

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def test_token_first(self):
    p = sqlparse.parse(' select foo')[0]
    first = p.token_first()
    self.assertEqual(first.value, 'select')
    self.assertEqual(p.token_first(ignore_whitespace=False).value, ' ')
    self.assertEqual(sql.TokenList([]).token_first(), None)
Developer: sriniiyer, Project: codenn, Lines: 8, Source: test_tokenize.py

Example 8: test_token_matching

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def test_token_matching(self):
    t1 = sql.Token(Keyword, 'foo')
    t2 = sql.Token(Punctuation, ',')
    x = sql.TokenList([t1, t2])
    self.assertEqual(x.token_matching(0, [lambda t: t.ttype is Keyword]),
                     t1)
    self.assertEqual(x.token_matching(0,
                                      [lambda t: t.ttype is Punctuation]),
                     t2)
    self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
                     None)
Developer: sriniiyer, Project: codenn, Lines: 13, Source: test_tokenize.py

Example 9: get_roots

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def get_roots(parent, token):
    """
        ルートTokenリスト
    """
    for tkn in parent.tokens:
        if tkn == token:
            return [token, parent]
        if isinstance(tkn, sql.TokenList):
            ret = get_roots(tkn, token)
            if ret:
                ret.append(parent)
                return ret
    return [] 
Developer: future-architect, Project: uroboroSQL-formatter, Lines: 15, Source: tokenutils.py
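A usage sketch (made-up query; assumes recent sqlparse grouping, where "x = 1" becomes a Comparison inside the Where clause):

import sqlparse
from sqlparse import tokens as T

stmt = sqlparse.parse("SELECT a FROM t WHERE x = 1")[0]
leaf = next(tok for tok in stmt.flatten() if tok.ttype is T.Name and tok.value == "x")
for node in get_roots(stmt, leaf):
    print(type(node).__name__)
# Token, Identifier, Comparison, Where, Statement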

Example 10: get_parent

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def get_parent(top_parent, token):
    """
        ルートを指定した親Token取得
    """
    for tkn in top_parent.tokens:
        tkn.parent = top_parent
        if tkn == token:
            return top_parent
        if isinstance(tkn, sql.TokenList):
            ret = get_parent(tkn, token)
            if ret:
                return ret
    return None 
Developer: future-architect, Project: uroboroSQL-formatter, Lines: 15, Source: tokenutils.py
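A matching sketch for get_parent (illustrative query):

import sqlparse

stmt = sqlparse.parse("SELECT t.col FROM t")[0]
name_tok = next(tok for tok in stmt.flatten() if tok.value == "col")
print(type(get_parent(stmt, name_tok)).__name__)  # Identifier -- the "t.col" group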

Example 11: flatten

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def flatten(token):
    """
        フラット化したgenerator
        ※処理中にparentを再設定する。sql.TokenList#flattenとはここが違う
    """
    if isinstance(token, sql.TokenList):
        for tkn in token.tokens:
            tkn.parent = token
            if isinstance(tkn, sql.TokenList):
                for item in flatten(tkn):
                    yield item
            else:
                yield tkn
    else:
        yield token 
Developer: future-architect, Project: uroboroSQL-formatter, Lines: 17, Source: tokenutils.py
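A sketch showing the parent re-linking side effect (illustrative query):

import sqlparse

stmt = sqlparse.parse("SELECT a FROM t")[0]
for tok in flatten(stmt):
    # each yielded leaf now has .parent pointing at its enclosing group
    print(repr(tok.value), "->", type(tok.parent).__name__)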

Example 12: process

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def process(self, _, stmt):
    def custom_flatten(token):
        """
            A flatten() that does not expand comment tokens.
        """
        if isinstance(token, sql.TokenList) and not tu.is_comment(token):
            for tkn in token.tokens:
                for item in custom_flatten(tkn):
                    yield item
        else:
            yield token
    is_prev_cr = True
    for token in custom_flatten(stmt):
        if tu.is_plain_line_comment(token, self.local_config.comment_syntax):
            # swap the plain comment for the line-description comment class
            parent = token.parent
            index = parent.tokens.index(token)
            comment = LineDescriptionLineCommentFilter.Comment(token.tokens)
            for tkn in token.tokens:
                tkn.parent = comment
            comment.parent = parent
            parent.tokens[index] = comment
            # flag whether the comment trails code on the same line
            comment.is_line_description = not is_prev_cr # pylint: disable=attribute-defined-outside-init
        elif token.is_whitespace():
            if is_inc_cr(token):
                is_prev_cr = True
        else:
            is_prev_cr = False
Developer: future-architect, Project: uroboroSQL-formatter, Lines: 31, Source: filters.py
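The filter leans on project-local helpers (tu, local_config, is_inc_cr), but the custom_flatten idea stands alone; a minimal sketch substituting sqlparse.sql.Comment for tu.is_comment:

import sqlparse
from sqlparse import sql

def flatten_keeping_comments(token):
    # flatten the token tree, but yield Comment groups unexpanded
    if isinstance(token, sql.TokenList) and not isinstance(token, sql.Comment):
        for child in token.tokens:
            yield from flatten_keeping_comments(child)
    else:
        yield token

stmt = sqlparse.parse("SELECT a -- trailing note\nFROM t")[0]
for tok in flatten_keeping_comments(stmt):
    print(type(tok).__name__, repr(str(tok)))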

Example 13: _get_table

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _get_table(tlist: TokenList) -> Optional[Table]:
    """
    Return the table if valid, i.e., conforms to the [[catalog.]schema.]table
    construct.

    :param tlist: The SQL tokens
    :returns: The table if the name conforms
    """

    # Strip the alias if present.
    idx = len(tlist.tokens)

    if tlist.has_alias():
        ws_idx, _ = tlist.token_next_by(t=Whitespace)

        if ws_idx != -1:
            idx = ws_idx

    tokens = tlist.tokens[:idx]

    if (
        len(tokens) in (1, 3, 5)
        and all(imt(token, t=[Name, String]) for token in tokens[::2])
        and all(imt(token, m=(Punctuation, ".")) for token in tokens[1::2])
    ):
        return Table(*[remove_quotes(token.value) for token in tokens[::-2]])

    return None
Developer: apache, Project: incubator-superset, Lines: 30, Source: sql_parse.py
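Table, remove_quotes, and imt are helpers from Superset's sql_parse module; what the length-and-alternation check accepts is easiest to see on a dotted name (made-up query):

import sqlparse
from sqlparse.sql import Identifier

stmt = sqlparse.parse("SELECT * FROM catalog.schema.tbl")[0]
tlist = next(tok for tok in stmt.tokens if isinstance(tok, Identifier))
print([(tok.ttype, tok.value) for tok in tlist.tokens])
# Five tokens alternating Name and Punctuation -- the [[catalog.]schema.]table
# shape that the (1, 3, 5) length check accepts.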

Example 14: _extract_from_token

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _extract_from_token(  # pylint: disable=too-many-branches
    self, token: Token
) -> None:
    """
    An <Identifier> stores a list of subtokens and an <IdentifierList>
    stores lists of subtoken lists.

    This method extracts <IdentifierList> and <Identifier> from the given
    token and loops through all subtokens recursively. It finds
    table_name_preceding_token and passes <IdentifierList> and <Identifier>
    to self._process_tokenlist to populate self._tables.

    :param token: instance of Token or child class, e.g. TokenList, to be processed
    """
    if not hasattr(token, "tokens"):
        return

    table_name_preceding_token = False

    for item in token.tokens:
        if item.is_group and not self._is_identifier(item):
            self._extract_from_token(item)

        if item.ttype in Keyword and (
            item.normalized in PRECEDES_TABLE_NAME
            or item.normalized.endswith(" JOIN")
        ):
            table_name_preceding_token = True
            continue

        if item.ttype in Keyword:
            table_name_preceding_token = False
            continue

        if table_name_preceding_token:
            if isinstance(item, Identifier):
                self._process_tokenlist(item)
            elif isinstance(item, IdentifierList):
                for token2 in item.get_identifiers():
                    if isinstance(token2, TokenList):
                        self._process_tokenlist(token2)
        elif isinstance(item, IdentifierList):
            if any(not self._is_identifier(token2) for token2 in item.tokens):
                self._extract_from_token(item)
Developer: apache, Project: incubator-superset, Lines: 46, Source: sql_parse.py
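A self-contained toy version of the same FROM/JOIN scan (my own simplification: no CTE, alias-set, or subquery handling, unlike Superset's full implementation):

import sqlparse
from sqlparse.sql import Identifier, IdentifierList
from sqlparse.tokens import Keyword

def naive_tables(query):
    # collect identifiers that directly follow FROM or a *JOIN keyword
    tables, preceding = [], False
    for tok in sqlparse.parse(query)[0].tokens:
        if tok.ttype is Keyword and (
            tok.normalized == "FROM" or tok.normalized.endswith("JOIN")
        ):
            preceding = True
        elif preceding and isinstance(tok, Identifier):
            tables.append(tok.get_real_name())
            preceding = False
        elif preceding and isinstance(tok, IdentifierList):
            tables.extend(i.get_real_name() for i in tok.get_identifiers())
            preceding = False
    return tables

print(naive_tables("SELECT * FROM a JOIN b ON a.id = b.id"))  # ['a', 'b']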


Note: the sqlparse.sql.TokenList examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from community-contributed code, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not republish this compilation without permission.