

Python sql.TokenList Code Examples

This article collects typical usage examples of sqlparse.sql.TokenList in Python. If you are wondering what sql.TokenList does or how to use it in practice, the curated code examples below should help. You can also explore further usage examples from the sqlparse.sql module.


The following presents 14 code examples of sql.TokenList, sorted by popularity by default.

Example 1: _extract_limit_from_query

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _extract_limit_from_query(statement: TokenList) -> Optional[int]:
    """
    Extract limit clause from SQL statement.

    :param statement: SQL statement
    :return: Limit extracted from query, None if no limit present in statement
    """
    idx, _ = statement.token_next_by(m=(Keyword, "LIMIT"))
    if idx is not None:
        _, token = statement.token_next(idx=idx)
        if token:
            if isinstance(token, IdentifierList):
                # In case of "LIMIT <offset>, <limit>", find comma and extract
                # first succeeding non-whitespace token
                idx, _ = token.token_next_by(m=(sqlparse.tokens.Punctuation, ","))
                _, token = token.token_next(idx=idx)
            if token and token.ttype == sqlparse.tokens.Literal.Number.Integer:
                return int(token.value)
    return None 
Author: apache, Project: incubator-superset, Lines: 21, Source: sql_parse.py
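
A minimal way to try this helper on its own (the sample query below is ours, not part of the Superset sources; it assumes the snippet's imports, i.e. Optional from typing, Keyword from sqlparse.tokens, and IdentifierList from sqlparse.sql, are in scope):

import sqlparse

statement = sqlparse.parse("SELECT * FROM logs LIMIT 100")[0]
print(_extract_limit_from_query(statement))  # expected output: 100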

Example 2: _process_tokenlist

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _process_tokenlist(self, token_list: TokenList) -> None:
        """
        Add table names to table set

        :param token_list: TokenList to be processed
        """
        # exclude subselects
        if "(" not in str(token_list):
            table = self._get_table(token_list)
            if table and not table.table.startswith(CTE_PREFIX):
                self._tables.add(table)
            return

        # store aliases
        if token_list.has_alias():
            self._alias_names.add(token_list.get_alias())

        # some aliases are not parsed properly
        if token_list.tokens[0].ttype == Name:
            self._alias_names.add(token_list.tokens[0].value)
        self._extract_from_token(token_list) 
Author: apache, Project: incubator-superset, Lines: 23, Source: sql_parse.py

Example 3: get_query_tokens

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def get_query_tokens(query: str) -> List[sqlparse.sql.Token]:
    """
    :type query: str
    :rtype: list[sqlparse.sql.Token]
    """
    query = preprocess_query(query)
    parsed = sqlparse.parse(query)

    # handle empty queries (#12)
    if not parsed:
        return []

    tokens = TokenList(parsed[0].tokens).flatten()
    # print([(token.value, token.ttype) for token in tokens])

    return [token for token in tokens if token.ttype is not Whitespace] 
Author: macbre, Project: sql-metadata, Lines: 18, Source: sql_metadata.py
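
The preprocess_query helper is project-specific and not shown here, but the flattening step at the core of this example can be sketched in isolation:

import sqlparse
from sqlparse.sql import TokenList
from sqlparse.tokens import Whitespace

parsed = sqlparse.parse("SELECT id FROM users")
tokens = TokenList(parsed[0].tokens).flatten()
print([t.value for t in tokens if t.ttype is not Whitespace])
# expected: ['SELECT', 'id', 'FROM', 'users']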

Example 4: align_comments

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def align_comments(tlist):
    tidx, token = tlist.token_next_by(i=sql.Comment)
    while token:
        pidx, prev_ = tlist.token_prev(tidx)
        if isinstance(prev_, sql.TokenList):
            tlist.group_tokens(sql.TokenList, pidx, tidx, extend=True)
            tidx = pidx
        tidx, token = tlist.token_next_by(i=sql.Comment, idx=tidx) 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 10, Source: grouping.py
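
A rough sketch of the effect on a statement with a trailing line comment; the SQL text is unchanged, only the token tree is regrouped, and the exact layout can vary between sqlparse versions, so treat this as illustrative:

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT a FROM t\n-- trailing note\n")[0]
print([type(t).__name__ for t in stmt.tokens])  # the comment typically starts as its own top-level node
align_comments(stmt)
print([type(t).__name__ for t in stmt.tokens])  # after grouping it is absorbed into the preceding group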

Example 5: _process_statement

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _process_statement(self, tlist):
        if tlist.tokens[0].is_whitespace and self.indent == 0:
            tlist.tokens.pop(0)

        # process the main query body
        self._process(sql.TokenList(tlist.tokens)) 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 8, Source: aligned_indent.py

Example 6: align_comments

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def align_comments(tlist):
    [align_comments(sgroup) for sgroup in tlist.get_sublists()]
    idx = 0
    token = tlist.token_next_by_instance(idx, sql.Comment)
    while token:
        before = tlist.token_prev(tlist.token_index(token))
        if isinstance(before, sql.TokenList):
            grp = tlist.tokens_between(before, token)[1:]
            before.tokens.extend(grp)
            for t in grp:
                tlist.tokens.remove(t)
            idx = tlist.token_index(before) + 1
        else:
            idx = tlist.token_index(token) + 1
        token = tlist.token_next_by_instance(idx, sql.Comment) 
Author: sriniiyer, Project: codenn, Lines: 17, Source: grouping.py

Example 7: test_token_first

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def test_token_first(self):
        p = sqlparse.parse(' select foo')[0]
        first = p.token_first()
        self.assertEqual(first.value, 'select')
        self.assertEqual(p.token_first(ignore_whitespace=False).value, ' ')
        self.assertEqual(sql.TokenList([]).token_first(), None) 
Author: sriniiyer, Project: codenn, Lines: 8, Source: test_tokenize.py
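
This test targets the older sqlparse API; recent releases (0.2+) renamed the ignore_whitespace keyword to skip_ws. A version-neutral sketch of the same behavior:

import sqlparse
from sqlparse import sql

p = sqlparse.parse(' select foo')[0]
print(p.token_first().value)            # 'select' (whitespace is skipped by default)
print(sql.TokenList([]).token_first())  # None for an empty token list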

Example 8: test_token_matching

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def test_token_matching(self):
        t1 = sql.Token(Keyword, 'foo')
        t2 = sql.Token(Punctuation, ',')
        x = sql.TokenList([t1, t2])
        self.assertEqual(x.token_matching(0, [lambda t: t.ttype is Keyword]),
                         t1)
        self.assertEqual(x.token_matching(0,
                                          [lambda t: t.ttype is Punctuation]),
                         t2)
        self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
                         None) 
Author: sriniiyer, Project: codenn, Lines: 13, Source: test_tokenize.py
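
Note that recent sqlparse releases swapped the argument order to token_matching(funcs, idx); a small sketch against the modern API (verify against your installed version):

from sqlparse import sql
from sqlparse.tokens import Keyword, Punctuation

t1 = sql.Token(Keyword, 'foo')
t2 = sql.Token(Punctuation, ',')
x = sql.TokenList([t1, t2])
print(x.token_matching([lambda t: t.ttype is Punctuation], 0) is t2)  # True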

Example 9: get_roots

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def get_roots(parent, token):
    """
        Return the chain from the given token up to the root: [token, parent, ..., root]
    """
    for tkn in parent.tokens:
        if tkn == token:
            return [token, parent]
        if isinstance(tkn, sql.TokenList):
            ret = get_roots(tkn, token)
            if ret:
                ret.append(parent)
                return ret
    return [] 
Author: future-architect, Project: uroboroSQL-formatter, Lines: 15, Source: tokenutils.py
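
Since the comparison relies on token identity, any token taken from the same tree can be traced back to the root; a minimal sketch:

import sqlparse

stmt = sqlparse.parse("SELECT a FROM t")[0]
leaf = stmt.token_first()  # the SELECT keyword token
print([type(t).__name__ for t in get_roots(stmt, leaf)])
# expected: ['Token', 'Statement']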

Example 10: get_parent

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def get_parent(top_parent, token):
    """
        Get the direct parent of the given token, searching down from the specified root
    """
    for tkn in top_parent.tokens:
        tkn.parent = top_parent
        if tkn == token:
            return top_parent
        if isinstance(tkn, sql.TokenList):
            ret = get_parent(tkn, token)
            if ret:
                return ret
    return None 
Author: future-architect, Project: uroboroSQL-formatter, Lines: 15, Source: tokenutils.py
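
Usage mirrors get_roots; note that the helper also stamps parent pointers as it walks. A minimal sketch:

import sqlparse

stmt = sqlparse.parse("SELECT a FROM t")[0]
first = stmt.token_first()
print(get_parent(stmt, first) is stmt)  # True: the keyword sits directly under the statement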

Example 11: flatten

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def flatten(token):
    """
        Generator that flattens the token tree.
        Note: parent is re-assigned during processing; this is what differs from sql.TokenList#flatten
    """
    if isinstance(token, sql.TokenList):
        for tkn in token.tokens:
            tkn.parent = token
            if isinstance(tkn, sql.TokenList):
                for item in flatten(tkn):
                    yield item
            else:
                yield tkn
    else:
        yield token 
Author: future-architect, Project: uroboroSQL-formatter, Lines: 17, Source: tokenutils.py
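
A quick check that the generator yields only leaf tokens and repairs parent links along the way (our own sample query):

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT a, b FROM t")[0]
for tkn in flatten(stmt):
    assert not isinstance(tkn, sql.TokenList)  # leaves only
    assert tkn.parent is not None              # parent was re-assigned during the walk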

Example 12: process

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def process(self, _, stmt):
        def custom_flaten(token):
            """
                A custom flatten that does not descend into comments
            """
            if isinstance(token, sql.TokenList) and not tu.is_comment(token):
                for tkn in token.tokens:
                    for item in custom_flaten(tkn):
                        yield item
            else:
                yield token
        is_prev_cr = True
        for token in custom_flaten(stmt):
            if tu.is_plain_line_comment(token, self.local_config.comment_syntax):
                # replace the token with the custom Comment class
                parent = token.parent
                index = parent.tokens.index(token)
                comment = LineDescriptionLineCommentFilter.Comment(token.tokens)
                for tkn in token.tokens:
                    tkn.parent = comment
                comment.parent = parent
                parent.tokens[index] = comment
                # set the line-description flag
                comment.is_line_description = not is_prev_cr # pylint: disable=attribute-defined-outside-init
            elif token.is_whitespace():
                if is_inc_cr(token):
                    is_prev_cr = True
            else:
                is_prev_cr = False 
Author: future-architect, Project: uroboroSQL-formatter, Lines: 31, Source: filters.py

Example 13: _get_table

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _get_table(tlist: TokenList) -> Optional[Table]:
        """
        Return the table if valid, i.e., conforms to the [[catalog.]schema.]table
        construct.

        :param tlist: The SQL tokens
        :returns: The table if the name conforms
        """

        # Strip the alias if present.
        idx = len(tlist.tokens)

        if tlist.has_alias():
            ws_idx, _ = tlist.token_next_by(t=Whitespace)

            if ws_idx != -1:
                idx = ws_idx

        tokens = tlist.tokens[:idx]

        if (
            len(tokens) in (1, 3, 5)
            and all(imt(token, t=[Name, String]) for token in tokens[::2])
            and all(imt(token, m=(Punctuation, ".")) for token in tokens[1::2])
        ):
            return Table(*[remove_quotes(token.value) for token in tokens[::-2]])

        return None 
Author: apache, Project: incubator-superset, Lines: 30, Source: sql_parse.py
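
Superset defines Table elsewhere; a hypothetical stand-in with the same field order lets the helper be tried outside the project. This also assumes the snippet's own imports (imt and remove_quotes from sqlparse.utils, and the token types from sqlparse.tokens) are in scope:

from typing import NamedTuple, Optional

import sqlparse
from sqlparse.sql import Identifier

class Table(NamedTuple):  # hypothetical stand-in for Superset's Table
    table: str
    schema: Optional[str] = None
    catalog: Optional[str] = None

stmt = sqlparse.parse("SELECT * FROM cat.sch.tbl AS t")[0]
ident = next(t for t in stmt.tokens if isinstance(t, Identifier))
print(_get_table(ident))
# expected: Table(table='tbl', schema='sch', catalog='cat')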

Example 14: _extract_from_token

# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import TokenList [as alias]
def _extract_from_token(  # pylint: disable=too-many-branches
        self, token: Token
    ) -> None:
        """
        <Identifier> stores a list of subtokens and <IdentifierList> stores lists of
        subtoken lists.

        It extracts <IdentifierList> and <Identifier> from :param token: and loops
        through all subtokens recursively. When table_name_preceding_token is set, it
        passes the <IdentifierList> and <Identifier> tokens to self._process_tokenlist
        to populate self._tables.

        :param token: instance of Token or child class, e.g. TokenList, to be processed
        """
        if not hasattr(token, "tokens"):
            return

        table_name_preceding_token = False

        for item in token.tokens:
            if item.is_group and not self._is_identifier(item):
                self._extract_from_token(item)

            if item.ttype in Keyword and (
                item.normalized in PRECEDES_TABLE_NAME
                or item.normalized.endswith(" JOIN")
            ):
                table_name_preceding_token = True
                continue

            if item.ttype in Keyword:
                table_name_preceding_token = False
                continue

            if table_name_preceding_token:
                if isinstance(item, Identifier):
                    self._process_tokenlist(item)
                elif isinstance(item, IdentifierList):
                    for token2 in item.get_identifiers():
                        if isinstance(token2, TokenList):
                            self._process_tokenlist(token2)
            elif isinstance(item, IdentifierList):
                if any(not self._is_identifier(token2) for token2 in item.tokens):
                    self._extract_from_token(item) 
Author: apache, Project: incubator-superset, Lines: 46, Source: sql_parse.py


Note: the sqlparse.sql.TokenList examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this compilation without permission.