

Python tokens.Punctuation Code Examples

This article collects typical usage examples of the sqlparse.tokens.Punctuation token type in Python. If you are wondering what tokens.Punctuation is for, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from its containing module, sqlparse.tokens.


Fifteen code examples of tokens.Punctuation are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
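Before the examples, a minimal sketch of what tokens.Punctuation is: the token type sqlparse assigns to characters such as '.', ',', parentheses, and ';'. The query below is only illustrative.

import sqlparse
from sqlparse import tokens

# Print every punctuation token in a parsed statement.
statement = sqlparse.parse("SELECT a.b, c FROM t;")[0]
for token in statement.flatten():
    if token.ttype is tokens.Punctuation:
        print(repr(token.value))  # '.', ',' and ';'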

Example 1: extract_tables

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def extract_tables(sql):
    """Extract the table names from an SQL statement.

    Returns a list of TableReference namedtuples

    """
    parsed = sqlparse.parse(sql)
    if not parsed:
        return ()

    # INSERT statements must stop looking for tables at the first
    # Punctuation token. e.g.: INSERT INTO abc (col1, col2) VALUES (1, 2)
    # abc is the table name, but if we don't stop at the first lparen, then
    # we'll identify abc, col1 and col2 as table names.
    insert_stmt = parsed[0].token_first().value.lower() == "insert"
    stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt)

    # Kludge: sqlparse mistakenly identifies insert statements as
    # function calls due to the parenthesized column list, e.g. interprets
    # "insert into foo (bar, baz)" as a function call to foo with arguments
    # (bar, baz). So don't allow any identifiers in insert statements
    # to have is_function=True
    identifiers = extract_table_identifiers(stream, allow_functions=not insert_stmt)
    # In the case 'sche.<cursor>', we get an empty TableReference; remove that
    return tuple(i for i in identifiers if i.name) 
Developer: dbcli, Project: pgcli, Lines: 27, Source: tables.py
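To see why the early stop matters, a hedged usage sketch (it assumes extract_from_part and extract_table_identifiers from the same pgcli module are in scope; the query is illustrative):

print(extract_tables("INSERT INTO abc (col1, col2) VALUES (1, 2)"))
# expected: a single TableReference for "abc", not col1/col2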

Example 2: strip_whitespace_front

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def strip_whitespace_front(token_list):
    new_token_list = []
    found_valid = False

    for token in token_list:
        if not (token.is_whitespace or token.ttype ==
                token_types.Punctuation) or found_valid:
            found_valid = True
            new_token_list.append(token)

    return new_token_list

# strip_whitespace
# Strips whitespace from a token list.
#
# Inputs:
#    token_list: the token list.
#
# Outputs:
#    new token list with no surrounding whitespace/punctuation.
Developer: lil-lab, Project: atis, Lines: 22, Source: sql_util.py
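A minimal usage sketch, assuming token_types is the alias used above (i.e. from sqlparse import tokens as token_types); the input is illustrative:

import sqlparse

token_list = list(sqlparse.parse("  , col1")[0].flatten())
print([t.value for t in strip_whitespace_front(token_list)])
# expected: ['col1'] -- leading whitespace and ',' are dropped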

Example 3: extract_tables

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def extract_tables(sql):
    """Extract the table names from an SQL statement.

    Returns a list of (schema, table, alias) tuples

    """
    parsed = sqlparse.parse(sql)
    if not parsed:
        return []

    # INSERT statements must stop looking for tables at the first
    # Punctuation token. e.g.: INSERT INTO abc (col1, col2) VALUES (1, 2)
    # abc is the table name, but if we don't stop at the first lparen, then
    # we'll identify abc, col1 and col2 as table names.
    insert_stmt = parsed[0].token_first().value.lower() == "insert"
    stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt)
    return list(extract_table_identifiers(stream)) 
Developer: dbcli, Project: litecli, Lines: 19, Source: parseutils.py
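A hedged usage sketch (it assumes the module's extract_from_part and extract_table_identifiers helpers are in scope; the query and expected output are illustrative):

print(extract_tables("SELECT * FROM main.abc a JOIN def d"))
# expected: [('main', 'abc', 'a'), (None, 'def', 'd')]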

Example 4: _get_create_definition_tokens

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def _get_create_definition_tokens(self, stmt):
        lparen_token = stmt.token_next_by_type(0, T.Punctuation)
        if not lparen_token or lparen_token.value != '(':
            return  # no parenthesized definition list; stop the generator

        index = stmt.token_index(lparen_token)
        def_tokens = []
        for token in stmt.tokens[index + 1:]:
            if token.value == ')':
                break

            if isinstance(token, sql.ColumnsDefinition):
                yield token
            elif token.match(T.Punctuation, ','):
                yield def_tokens
                def_tokens = []
            elif not token.is_whitespace():
                def_tokens.append(token)

        if def_tokens:
            yield def_tokens 
Developer: Yelp, Project: schematizer, Lines: 23, Source: mysql_handler.py

Example 5: group_period

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def group_period(tlist):
    def match(token):
        return token.match(T.Punctuation, '.')

    def valid_prev(token):
        sqlcls = sql.SquareBrackets, sql.Identifier
        ttypes = T.Name, T.String.Symbol
        return imt(token, i=sqlcls, t=ttypes)

    def valid_next(token):
        # issue261, allow invalid next token
        return True

    def post(tlist, pidx, tidx, nidx):
        # next_ validation is being performed here. issue261
        sqlcls = sql.SquareBrackets, sql.Function
        ttypes = T.Name, T.String.Symbol, T.Wildcard
        next_ = tlist[nidx] if nidx is not None else None
        valid_next = imt(next_, i=sqlcls, t=ttypes)

        return (pidx, nidx) if valid_next else (pidx, tidx)

    _group(tlist, sql.Identifier, match, valid_prev, valid_next, post) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines: 25, Source: grouping.py
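The effect of this grouping is visible through sqlparse's public API: once the '.' Punctuation is grouped, schema and name form a single Identifier. A minimal sketch:

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT myschema.mytable FROM t")[0]
_, ident = stmt.token_next_by(i=sql.Identifier)
print(type(ident).__name__, ident.value)  # Identifier myschema.mytable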

Example 6: group_identifier_list

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def group_identifier_list(tlist):
    m_role = T.Keyword, ('null', 'role')
    sqlcls = (sql.Function, sql.Case, sql.Identifier, sql.Comparison,
              sql.IdentifierList, sql.Operation)
    ttypes = (T_NUMERICAL + T_STRING + T_NAME +
              (T.Keyword, T.Comment, T.Wildcard))

    def match(token):
        return token.match(T.Punctuation, ',')

    def valid(token):
        return imt(token, i=sqlcls, m=m_role, t=ttypes)

    def post(tlist, pidx, tidx, nidx):
        return pidx, nidx

    valid_prev = valid_next = valid
    _group(tlist, sql.IdentifierList, match,
           valid_prev, valid_next, post, extend=True) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines: 21, Source: grouping.py
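Likewise, the comma-based grouping above is what produces IdentifierList nodes in a parsed statement. A minimal sketch using the public API:

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT a, b, c FROM t")[0]
_, ilist = stmt.token_next_by(i=sql.IdentifierList)
print([i.value for i in ilist.get_identifiers()])  # ['a', 'b', 'c']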

Example 7: _process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def _process(tlist):
        def get_next_comment():
            # TODO(andi) Comment types should be unified, see related issue38
            return tlist.token_next_by(i=sql.Comment, t=T.Comment)

        tidx, token = get_next_comment()
        while token:
            pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
            nidx, next_ = tlist.token_next(tidx, skip_ws=False)
            # Replace by whitespace if prev and next exist and if they're not
            # whitespaces. This doesn't apply if prev or next is a parenthesis.
            if (prev_ is None or next_ is None or
                    prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
                    next_.is_whitespace or next_.match(T.Punctuation, ')')):
                tlist.tokens.remove(token)
            else:
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')

            tidx, token = get_next_comment() 
Developer: mtxr, Project: SublimeText-SQLTools, Lines: 21, Source: others.py
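The effect of this comment-stripping filter is exposed through sqlparse's public formatting API. A minimal sketch (exact whitespace handling may vary between sqlparse versions):

import sqlparse

print(sqlparse.format("SELECT a /* note */, b FROM t", strip_comments=True))
# the comment is removed from the formatted statement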

Example 8: _process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def _process(self, tlist):
        token = self._get_next_comment(tlist)
        while token:
            tidx = tlist.token_index(token)
            prev = tlist.token_prev(tidx, False)
            next_ = tlist.token_next(tidx, False)
            # Replace by whitespace if prev and next exist and if they're not
            # whitespaces. This doesn't apply if prev or next is a parenthesis.
            if (prev is not None and next_ is not None
                and not prev.is_whitespace() and not next_.is_whitespace()
                and not (prev.match(T.Punctuation, '(')
                         or next_.match(T.Punctuation, ')'))):
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
            else:
                tlist.tokens.pop(tidx)
            token = self._get_next_comment(tlist) 
Developer: sriniiyer, Project: codenn, Lines: 18, Source: filters.py

Example 9: is_dmlddl_parenthesis

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def is_dmlddl_parenthesis(token):
    """
        Determine whether a parenthesis encloses a DML or DDL statement.
    """
    if not is_parenthesis(token):
        return False

    open_punc = token.token_next_match(0, T.Punctuation, '(')
    first = token_next_enable(token, open_punc)
    if first and first.ttype in (T.Keyword.DML, T.Keyword.DDL):
        return True

    if is_with(first):
        return True

    if is_parenthesis(first):
        return is_dmlddl_parenthesis(first)

    return False 
Developer: future-architect, Project: uroboroSQL-formatter, Lines: 22, Source: tokenutils.py

Example 10: _stripws_default

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def _stripws_default(self, tlist):
        last_was_ws = False
        last_ws_token = None
        last_was_punctuation = False
        for token in tlist.tokens[:]:
            if token.is_whitespace():
                if last_was_ws or last_was_punctuation:  # drop this whitespace if the previous token was whitespace or Punctuation
                    tlist.tokens.remove(token)
                    continue
                else:
                    token.value = "\t"
            if tu.is_punctuation(token):
                if last_ws_token:
                    tlist.tokens.remove(last_ws_token)  # drop the whitespace that precedes the Punctuation
            last_was_ws = token.is_whitespace()
            last_ws_token = token if last_was_ws else None
            last_was_punctuation = tu.is_punctuation(token)

        self.__custom_stripws_tokenlist(tlist) 
Developer: future-architect, Project: uroboroSQL-formatter, Lines: 21, Source: filters.py

Example 11: __custom_process_parenthesis_order

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def __custom_process_parenthesis_order(self, parenthesis):
        open_punc = parenthesis.token_next_match(0, T.Punctuation, '(')
        close_punc = parenthesis.token_next_match(open_punc, T.Punctuation, ')')

        self.indent += 2
        parenthesis.insert_after(open_punc, self.nl())

        for token in parenthesis.tokens_between(open_punc, close_punc)[1:-1]:
            if isinstance(token, Phrase):
                parenthesis.insert_before(token, self.nl())
                self._process_phrase(token, kwds=False)
                parenthesis.insert_after(token, self.nl_with_indent(1))
            elif isinstance(token, sql.Identifier) and len(token.tokens) == 1 and isinstance(token.tokens[0], Phrase):
                # an Identifier whose only child is a Phrase
                child_token = token.tokens[0]
                parenthesis.insert_before(token, self.nl())
                self._process_phrase(child_token, kwds=False)
                parenthesis.insert_after(token, self.nl_with_indent(1))
            elif token.is_group():
                self._process(token)

        self.indent -= 1
        parenthesis.insert_before(close_punc, self.nl())
        self.indent -= 1 
Developer: future-architect, Project: uroboroSQL-formatter, Lines: 26, Source: filters.py

Example 12: _extract_limit_from_query

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def _extract_limit_from_query(statement: TokenList) -> Optional[int]:
    """
    Extract limit clause from SQL statement.

    :param statement: SQL statement
    :return: Limit extracted from query, None if no limit present in statement
    """
    idx, _ = statement.token_next_by(m=(Keyword, "LIMIT"))
    if idx is not None:
        _, token = statement.token_next(idx=idx)
        if token:
            if isinstance(token, IdentifierList):
                # In case of "LIMIT <offset>, <limit>", find comma and extract
                # first succeeding non-whitespace token
                idx, _ = token.token_next_by(m=(sqlparse.tokens.Punctuation, ","))
                _, token = token.token_next(idx=idx)
            if token and token.ttype == sqlparse.tokens.Literal.Number.Integer:
                return int(token.value)
    return None 
Developer: apache, Project: incubator-superset, Lines: 21, Source: sql_parse.py
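A hedged usage sketch (it assumes the snippet's own imports: TokenList and IdentifierList from sqlparse.sql, Keyword from sqlparse.tokens, and Optional from typing):

import sqlparse

stmt = sqlparse.parse("SELECT * FROM t LIMIT 10, 100")[0]
print(_extract_limit_from_query(stmt))  # 100 -- the limit, not the offset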

Example 13: extract_tables

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def extract_tables(sql):
    """Extract the table names from an SQL statement.
    Returns a list of TableReference namedtuples
    """
    parsed = sqlparse.parse(sql)
    if not parsed:
        return ()

    # INSERT statements must stop looking for tables at the first
    # Punctuation token. e.g.: INSERT INTO abc (col1, col2) VALUES (1, 2)
    # abc is the table name, but if we don't stop at the first lparen, then
    # we'll identify abc, col1 and col2 as table names.
    insert_stmt = parsed[0].token_first().value.lower() == "insert"
    stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt)

    # Kludge: sqlparse mistakenly identifies insert statements as
    # function calls due to the parenthesized column list, e.g. interprets
    # "insert into foo (bar, baz)" as a function call to foo with arguments
    # (bar, baz). So don't allow any identifiers in insert statements
    # to have is_function=True
    identifiers = extract_table_identifiers(stream, allow_functions=not insert_stmt)
    # In the case 'sche.<cursor>', we get an empty TableReference; remove that
    return tuple(i for i in identifiers if i.name) 
Developer: hhyo, Project: Archery, Lines: 25, Source: extract_tables.py

Example 14: extract_from_part

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def extract_from_part(parsed, stop_at_punctuation=True):
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if tbl_prefix_seen:
            if is_subselect(item):
                for x in extract_from_part(item, stop_at_punctuation):
                    yield x
            elif stop_at_punctuation and item.ttype is Punctuation:
                return
            # An incomplete nested select won't be recognized correctly as a
            # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
            # the second FROM to trigger this elif condition resulting in a
            # `return`. So we need to ignore the keyword if it is the FROM
            # keyword.
            # Also 'SELECT * FROM abc JOIN def' will trigger this elif
            # condition. So we need to ignore the keyword JOIN and its variants
            # INNER JOIN, FULL OUTER JOIN, etc.
            elif (
                item.ttype is Keyword
                and (not item.value.upper() == "FROM")
                and (not item.value.upper().endswith("JOIN"))
            ):
                return
            else:
                yield item
        elif (
            item.ttype is Keyword or item.ttype is Keyword.DML
        ) and item.value.upper() in ("COPY", "FROM", "INTO", "UPDATE", "TABLE", "JOIN"):
            tbl_prefix_seen = True
        # 'SELECT a, FROM abc' will detect FROM as part of the column list.
        # So this check here is necessary.
        elif isinstance(item, IdentifierList):
            for identifier in item.get_identifiers():
                if identifier.ttype is Keyword and identifier.value.upper() == "FROM":
                    tbl_prefix_seen = True
                    break 
Developer: dbcli, Project: litecli, Lines: 38, Source: parseutils.py
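A hedged usage sketch (it assumes the module's is_subselect helper and its imports, Punctuation, Keyword and IdentifierList, are in scope; the query is illustrative):

import sqlparse

stmt = sqlparse.parse("SELECT * FROM abc JOIN def")[0]
print([item.value for item in extract_from_part(stmt)])
# yields the tokens that follow FROM, from which "abc" and "def"
# are later extracted as table identifiers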

Example 15: process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Punctuation [as alias]
def process(self, stream):
        """Process the stream"""
        EOS_TTYPE = T.Whitespace, T.Comment.Single

        # Run over all stream tokens
        for ttype, value in stream:
            # Yield the pending statement once it is finished and the current
            # token is not trailing whitespace. Note that in this context a
            # newline token counts as non-whitespace (EOS_TTYPE contains only
            # T.Whitespace and T.Comment.Single), and a multiline comment
            # likewise ends the statement.
            if self.consume_ws and ttype not in EOS_TTYPE:
                yield sql.Statement(self.tokens)

                # Reset filter and prepare to process next statement
                self._reset()

            # Change current split level (increase, decrease or remain equal)
            self.level += self._change_splitlevel(ttype, value)

            # Append the token to the current statement
            self.tokens.append(sql.Token(ttype, value))

            # Check if we get the end of a statement
            if self.level <= 0 and ttype is T.Punctuation and value == ';':
                self.consume_ws = True

        # Yield pending statement (if any)
        if self.tokens:
            yield sql.Statement(self.tokens) 
Developer: mtxr, Project: SublimeText-SQLTools, Lines: 31, Source: statement_splitter.py
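This filter underlies sqlparse's public statement-splitting API; a minimal sketch of the same end result:

import sqlparse

print(sqlparse.split("select 1; select 2"))
# ['select 1;', 'select 2']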


Note: The sqlparse.tokens.Punctuation examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open source code and documentation platforms. The snippets were selected from open source projects contributed by many developers; copyright remains with the original authors. Consult each project's License before distributing or reusing the code, and do not reproduce without permission.