

Python tokens.Wildcard Code Examples

This article collects and summarizes typical usage examples of sqlparse.tokens.Wildcard in Python. If you are asking yourself what sqlparse.tokens.Wildcard does, how to use it, or where to find working examples, the curated code samples below should help. Note that, strictly speaking, Wildcard is a token type defined in sqlparse.tokens (it marks the "*" token), not a method. You can also explore further usage examples of the sqlparse.tokens module.


Ten code examples involving tokens.Wildcard are presented below, ordered by popularity by default.
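
As a quick orientation, here is a minimal sketch (not taken from the projects below) of where the Wildcard token type shows up when sqlparse tokenizes a query; the output noted in the comments may vary slightly between sqlparse versions:

import sqlparse
from sqlparse import tokens as T

stmt = sqlparse.parse("SELECT * FROM users")[0]
for tok in stmt.flatten():           # walk the flattened token stream
    if tok.ttype is T.Wildcard:      # the lexer marks "*" as tokens.Wildcard
        print(repr(tok))             # e.g. <Wildcard '*' at 0x...>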

Example 1: group_period

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def group_period(tlist):
    def match(token):
        return token.match(T.Punctuation, '.')

    def valid_prev(token):
        sqlcls = sql.SquareBrackets, sql.Identifier
        ttypes = T.Name, T.String.Symbol
        return imt(token, i=sqlcls, t=ttypes)

    def valid_next(token):
        # issue261, allow invalid next token
        return True

    def post(tlist, pidx, tidx, nidx):
        # next_ validation is being performed here. issue261
        sqlcls = sql.SquareBrackets, sql.Function
        ttypes = T.Name, T.String.Symbol, T.Wildcard
        next_ = tlist[nidx] if nidx is not None else None
        valid_next = imt(next_, i=sqlcls, t=ttypes)

        return (pidx, nidx) if valid_next else (pidx, tidx)

    _group(tlist, sql.Identifier, match, valid_prev, valid_next, post) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 25, Source file: grouping.py

Example 2: group_operator

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def group_operator(tlist):
    ttypes = T_NUMERICAL + T_STRING + T_NAME
    sqlcls = (sql.SquareBrackets, sql.Parenthesis, sql.Function,
              sql.Identifier, sql.Operation)

    def match(token):
        return imt(token, t=(T.Operator, T.Wildcard))

    def valid(token):
        return imt(token, i=sqlcls, t=ttypes)

    def post(tlist, pidx, tidx, nidx):
        tlist[tidx].ttype = T.Operator
        return pidx, nidx

    valid_prev = valid_next = valid
    _group(tlist, sql.Operation, match,
           valid_prev, valid_next, post, extend=False) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 20, Source file: grouping.py
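
To make the effect of group_operator concrete, here is a small illustrative sketch (my own example, assuming a recent sqlparse release): inside an arithmetic expression the "*" is lexed as Wildcard but re-typed to Operator by the post hook above, while a bare SELECT * keeps its Wildcard ttype.

import sqlparse
from sqlparse import tokens as T

mul = sqlparse.parse("SELECT a * b FROM t")[0]
star = next(t for t in mul.flatten() if t.value == "*")
print(star.ttype)          # Token.Operator: grouped into an Operation and re-typed

select_all = sqlparse.parse("SELECT * FROM t")[0]
star = next(t for t in select_all.flatten() if t.value == "*")
print(star.ttype)          # Token.Wildcard: no valid operands around it, left ungrouped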

Example 3: group_identifier_list

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def group_identifier_list(tlist):
    m_role = T.Keyword, ('null', 'role')
    sqlcls = (sql.Function, sql.Case, sql.Identifier, sql.Comparison,
              sql.IdentifierList, sql.Operation)
    ttypes = (T_NUMERICAL + T_STRING + T_NAME +
              (T.Keyword, T.Comment, T.Wildcard))

    def match(token):
        return token.match(T.Punctuation, ',')

    def valid(token):
        return imt(token, i=sqlcls, m=m_role, t=ttypes)

    def post(tlist, pidx, tidx, nidx):
        return pidx, nidx

    valid_prev = valid_next = valid
    _group(tlist, sql.IdentifierList, match,
           valid_prev, valid_next, post, extend=True) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 21, Source file: grouping.py
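
For illustration (my own sketch, assuming a recent sqlparse release): because T.Wildcard is listed in ttypes above, a bare "*" can become a member of the IdentifierList that this function builds.

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT *, id FROM t")[0]
idlist = next(t for t in stmt.tokens if isinstance(t, sql.IdentifierList))
print([str(t) for t in idlist.get_identifiers()])   # expected: ['*', 'id']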

Example 4: _get_first_name

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def _get_first_name(self, idx=None, reverse=False, keywords=False):
    """Returns the name of the first token with a name"""

    if idx and not isinstance(idx, int):
        idx = self.token_index(idx) + 1

    tokens = self.tokens[idx:] if idx else self.tokens
    tokens = reversed(tokens) if reverse else tokens
    types = [T.Name, T.Wildcard, T.String.Symbol]

    if keywords:
        types.append(T.Keyword)

    for tok in tokens:
        if tok.ttype in types:
            return self._remove_quotes(tok.value)
        elif isinstance(tok, Identifier) or isinstance(tok, Function):
            return tok.get_name()
    return None
Developer: sriniiyer, Project: codenn, Lines of code: 21, Source file: sql.py

Example 5: _get_first_name

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def _get_first_name(self, idx=None, reverse=False, keywords=False):
    """Returns the name of the first token with a name"""

    tokens = self.tokens[idx:] if idx else self.tokens
    tokens = reversed(tokens) if reverse else tokens
    types = [T.Name, T.Wildcard, T.String.Symbol]

    if keywords:
        types.append(T.Keyword)

    for token in tokens:
        if token.ttype in types:
            return remove_quotes(token.value)
        elif isinstance(token, (Identifier, Function)):
            return token.get_name()
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 17, Source file: sql.py

Example 6: is_wildcard

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def is_wildcard(self):
    """Return ``True`` if this identifier contains a wildcard."""
    _, token = self.token_next_by(t=T.Wildcard)
    return token is not None
Developer: mtxr, Project: SublimeText-SQLTools, Lines of code: 6, Source file: sql.py
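
A short usage sketch of this method (my own example, assuming a recent sqlparse release, where a qualified wildcard such as a.* is grouped into a single Identifier):

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT a.* FROM a")[0]
ident = next(t for t in stmt.tokens if isinstance(t, sql.Identifier))
print(ident.is_wildcard())   # True: the identifier a.* contains a Wildcard token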

Example 7: is_wildcard

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def is_wildcard(self):
    """Return ``True`` if this identifier contains a wildcard."""
    token = self.token_next_by_type(0, T.Wildcard)
    return token is not None
Developer: sriniiyer, Project: codenn, Lines of code: 6, Source file: sql.py

Example 8: group_identifier_list

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def group_identifier_list(tlist):
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    idx = 0
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
    start = None
    while tcomma is not None:
        before = tlist.token_prev(tcomma)
        after = tlist.token_next(tcomma)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                start = before
            next_ = tlist.token_next(after)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                tcomma = next_ 
Developer: sriniiyer, Project: codenn, Lines of code: 52, Source file: grouping.py

Example 9: group_identifier_list

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def group_identifier_list(tlist):
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(0, T.Punctuation, ',')
    start = None
    while tcomma is not None:
        # Go back one idx to make sure to find the correct tcomma
        idx = tlist.token_index(tcomma)
        before = tlist.token_prev(idx)
        after = tlist.token_next(idx)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(idx + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                start = before
            after_idx = tlist.token_index(after, start=idx)
            next_ = tlist.token_next(after_idx)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                tcomma = next_ 
Developer: future-architect, Project: uroboroSQL-formatter, Lines of code: 54, Source file: grouping.py

Example 10: get_query_columns

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Wildcard [as alias]
def get_query_columns(query: str) -> List[str]:
    """
    :type query: str
    :rtype: list[str]
    """
    columns = []
    last_keyword = None
    last_token = None

    # print(preprocess_query(query))

    # these keywords should not change the state of a parser
    # and not "reset" previously found SELECT keyword
    keywords_ignored = ['AS', 'AND', 'OR', 'IN', 'IS', 'NOT', 'NOT NULL', 'LIKE', 'CASE', 'WHEN']

    # these functions should be ignored
    # and not "reset" previously found SELECT keyword
    functions_ignored = ['COUNT', 'MIN', 'MAX', 'FROM_UNIXTIME', 'DATE_FORMAT', 'CAST', 'CONVERT']

    for token in get_query_tokens(query):
        if token.is_keyword and token.value.upper() not in keywords_ignored:
            # keep the name of the last keyword, e.g. SELECT, FROM, WHERE, (ORDER) BY
            last_keyword = token.value.upper()
            # print('keyword', last_keyword)
        elif token.ttype is Name:
            # analyze the name tokens, column names and where condition values
            if last_keyword in ['SELECT', 'WHERE', 'ORDER BY', 'ON'] \
                    and last_token.value.upper() not in ['AS']:
                # print(last_keyword, last_token, token.value)

                if token.value.upper() not in functions_ignored:
                    if str(last_token) == '.':
                        # print('DOT', last_token, columns[-1])

                        # we have table.column notation example
                        # append column name to the last entry of columns
                        # as it is a table name in fact
                        table_name = columns[-1]
                        columns[-1] = '{}.{}'.format(table_name, token)
                    else:
                        columns.append(str(token.value))
            elif last_keyword in ['INTO'] and last_token.ttype is Punctuation:
                # INSERT INTO `foo` (col1, `col2`) VALUES (..)
                #  print(last_keyword, token, last_token)
                columns.append(str(token.value).strip('`'))
        elif token.ttype is Wildcard:
            # handle * wildcard in SELECT part, but ignore count(*)
            # print(last_keyword, last_token, token.value)
            if last_keyword == 'SELECT' and last_token.value != '(':

                if str(last_token) == '.':
                    # handle SELECT foo.*
                    table_name = columns[-1]
                    columns[-1] = '{}.{}'.format(table_name, str(token))
                else:
                    columns.append(str(token.value))

        last_token = token

    return unique(columns) 
Developer: macbre, Project: sql-metadata, Lines of code: 62, Source file: sql_metadata.py
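
For context, a brief usage sketch of the function above (my own example, assuming the module-level API of an older sql-metadata release where get_query_columns and its helpers such as get_query_tokens and unique are importable from sql_metadata; newer releases expose a Parser class instead). The expected values follow from tracing the logic above and may differ in current library versions:

from sql_metadata import get_query_columns

print(get_query_columns("SELECT test, id FROM foo WHERE id = 3"))
# expected, per the logic above: ['test', 'id']

print(get_query_columns("SELECT foo.* FROM foo"))
# expected, per the logic above: ['foo.*']  (the table.* branch of the Wildcard handling)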


Note: The sqlparse.tokens.Wildcard examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before using or redistributing the code; do not republish without permission.