

Python tokens.Whitespace Code Examples

This article collects typical usage examples of sqlparse.tokens.Whitespace in Python (strictly speaking, Whitespace is a token type defined in sqlparse.tokens rather than a method). If you are wondering what tokens.Whitespace does, how to use it, or what real-world usage looks like, the hand-picked code examples below may help. You can also explore further usage examples from the module it belongs to, sqlparse.tokens.


The following presents 15 code examples of tokens.Whitespace, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
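
Before the examples, a minimal sketch (not taken from any of the projects below) of what Whitespace actually is may help: it is the token type that sqlparse assigns to spaces, tabs and similar characters, and it is normally checked against a token's ttype attribute.

import sqlparse
from sqlparse import tokens as T

parsed = sqlparse.parse("SELECT  id ,  name FROM users")[0]
for token in parsed.flatten():
    if token.ttype in T.Whitespace:
        # spaces, tabs and newlines land here
        print(repr(token.value))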

Example 1: _process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def _process(tlist):
        def get_next_comment():
            # TODO(andi) Comment types should be unified, see related issue38
            return tlist.token_next_by(i=sql.Comment, t=T.Comment)

        tidx, token = get_next_comment()
        while token:
            pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
            nidx, next_ = tlist.token_next(tidx, skip_ws=False)
            # Replace with whitespace if prev and next exist and they're not
            # whitespace. This doesn't apply if prev or next is a parenthesis.
            if (prev_ is None or next_ is None or
                    prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
                    next_.is_whitespace or next_.match(T.Punctuation, ')')):
                tlist.tokens.remove(token)
            else:
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')

            tidx, token = get_next_comment() 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 21, Source file: others.py
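
This _process is the internal worker of the comment-stripping filter; in everyday use the same behaviour is reached through sqlparse's public API. A minimal usage sketch (the exact spacing in the output may vary between sqlparse versions):

import sqlparse

query = "SELECT id /* pick the id */ FROM users -- trailing comment"
print(sqlparse.format(query, strip_comments=True))
# The comments are dropped and, where needed, replaced by a single space.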

Example 2: _process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def _process(self, tlist):
        token = self._get_next_comment(tlist)
        while token:
            tidx = tlist.token_index(token)
            prev = tlist.token_prev(tidx, False)
            next_ = tlist.token_next(tidx, False)
            # Replace with whitespace if prev and next exist and they're not
            # whitespace. This doesn't apply if prev or next is a parenthesis.
            if (prev is not None and next_ is not None
                and not prev.is_whitespace() and not next_.is_whitespace()
                and not (prev.match(T.Punctuation, '(')
                         or next_.match(T.Punctuation, ')'))):
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
            else:
                tlist.tokens.pop(tidx)
            token = self._get_next_comment(tlist) 
Author: sriniiyer, Project: codenn, Lines: 18, Source file: filters.py

Example 3: process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def process(self, stack, stmt):
        if isinstance(stmt, sql.Statement):
            self._curr_stmt = stmt
        self._process(stmt)
        if isinstance(stmt, sql.Statement):
            if self._last_stmt is not None:
                if unicode(self._last_stmt).endswith('\n'):
                    nl = '\n'
                else:
                    nl = '\n\n'
                stmt.tokens.insert(
                    0, sql.Token(T.Whitespace, nl))
            if self._last_stmt != stmt:
                self._last_stmt = stmt


# FIXME: Doesn't work ;) 
Author: sriniiyer, Project: codenn, Lines: 19, Source file: filters.py

Example 4: process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def process(self, stack, stmt):
        if isinstance(stmt, sql.Statement):
            self._curr_stmt = stmt
        self._process(stmt)
        if isinstance(stmt, sql.Statement):
            if self._last_stmt is not None:
                if str(self._last_stmt).endswith('\n'):
                    nl = '\n'
                else:
                    nl = '\n\n'
                stmt.tokens.insert(
                    0, sql.Token(T.Whitespace, nl))
            if self._last_stmt != stmt:
                self._last_stmt = stmt


# FIXME: Doesn't work ;) 
Author: future-architect, Project: uroboroSQL-formatter, Lines: 19, Source file: filters.py

Example 5: __custom_process_insert_values_lr

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def __custom_process_insert_values_lr(self, tlist):
        # For INSERT statements, set exactly one space before and after VALUES
        values_token = tlist.token_next_match(0, T.Keyword, "VALUES")
        if values_token:
            prv = tlist.token_prev(values_token, skip_ws=False)
            if prv and prv.is_whitespace():
                prv.value = " "
                prv = tlist.token_prev(prv, skip_ws=False)
                while prv and prv.is_whitespace():
                    prv.value = ""
                    prv = tlist.token_prev(prv, skip_ws=False)
            else:
                tlist.insert_before(values_token, sql.Token(T.Whitespace, " "))

            nxt = tlist.token_next(values_token, skip_ws=False)
            if nxt and nxt.is_whitespace():
                nxt.value = " "
                nxt = tlist.token_next(nxt, skip_ws=False)
                while nxt and nxt.is_whitespace():
                    nxt.value = ""
                    nxt = tlist.token_next(nxt, skip_ws=False)
            else:
                tlist.insert_after(values_token, sql.Token(T.Whitespace, " ")) 
Author: future-architect, Project: uroboroSQL-formatter, Lines: 25, Source file: filters.py

Example 6: get_query_tokens

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def get_query_tokens(query: str) -> List[sqlparse.sql.Token]:
    """
    :type query str
    :rtype: list[sqlparse.sql.Token]
    """
    query = preprocess_query(query)
    parsed = sqlparse.parse(query)

    # handle empty queries (#12)
    if not parsed:
        return []

    tokens = TokenList(parsed[0].tokens).flatten()
    # print([(token.value, token.ttype) for token in tokens])

    return [token for token in tokens if token.ttype is not Whitespace] 
Author: macbre, Project: sql-metadata, Lines: 18, Source file: sql_metadata.py
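
get_query_tokens above depends on sql-metadata's own preprocess_query helper, so it is not directly runnable in isolation; the core filtering step, however, only needs sqlparse. A rough standalone sketch of the same idea:

import sqlparse
from sqlparse.sql import TokenList
from sqlparse.tokens import Whitespace

parsed = sqlparse.parse("SELECT id, name FROM users")
tokens = TokenList(parsed[0].tokens).flatten()
# keep everything except plain whitespace tokens, as the function above does
print([token.value for token in tokens if token.ttype is not Whitespace])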

Example 7: process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def process(self, stream):
        """Process the stream"""
        EOS_TTYPE = T.Whitespace, T.Comment.Single

        # Run over all stream tokens
        for ttype, value in stream:
            # Yield a statement if one was finished and this token is not
            # whitespace. A newline token counts as non-whitespace here, since
            # in this context "whitespace" excludes newlines.
            # why don't multi-line comments also count?
            if self.consume_ws and ttype not in EOS_TTYPE:
                yield sql.Statement(self.tokens)

                # Reset filter and prepare to process next statement
                self._reset()

            # Change current split level (increase, decrease or remain equal)
            self.level += self._change_splitlevel(ttype, value)

            # Append the token to the current statement
            self.tokens.append(sql.Token(ttype, value))

            # Check if we get the end of a statement
            if self.level <= 0 and ttype is T.Punctuation and value == ';':
                self.consume_ws = True

        # Yield pending statement (if any)
        if self.tokens:
            yield sql.Statement(self.tokens) 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 31, Source file: statement_splitter.py
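
This process generator (from sqlparse's statement splitter) is the machinery behind the library's public statement splitting. A minimal usage sketch of the corresponding API:

import sqlparse

for stmt in sqlparse.split("SELECT 1; SELECT 2"):
    print(repr(stmt))
# Expected: 'SELECT 1;' and 'SELECT 2' as separate statements.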

Example 8: __init__

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def __init__(self, ttype, value):
        value = text_type(value)
        self.value = value
        self.ttype = ttype
        self.parent = None
        self.is_group = False
        self.is_keyword = ttype in T.Keyword
        self.is_whitespace = self.ttype in T.Whitespace
        self.normalized = value.upper() if self.is_keyword else value 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 11, Source file: sql.py
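
The line self.is_whitespace = self.ttype in T.Whitespace relies on sqlparse token types forming a hierarchy: the in operator matches a type and its subtypes, while is only matches the exact type. A small sketch of the difference (T.Newline is assumed to be the newline subtype of T.Whitespace, as in standard sqlparse):

from sqlparse import tokens as T

print(T.Newline in T.Whitespace)   # True  -- Newline is a subtype of Whitespace
print(T.Newline is T.Whitespace)   # False -- not the exact same type
print(T.Keyword in T.Whitespace)   # False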

Example 9: get_alias

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def get_alias(self):
        """Returns the alias for this identifier or ``None``."""

        # "name AS alias"
        kw_idx, kw = self.token_next_by(m=(T.Keyword, 'AS'))
        if kw is not None:
            return self._get_first_name(kw_idx + 1, keywords=True)

        # "name alias" or "complicated column expression alias"
        _, ws = self.token_next_by(t=T.Whitespace)
        if len(self.tokens) > 2 and ws is not None:
            return self._get_first_name(reverse=True) 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 14, Source file: sql.py
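
A usage sketch for get_alias; the exact grouping can differ between sqlparse versions, but a simple aliased column normally parses as a sql.Identifier:

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT first_name AS fname FROM users")[0]
ident = next(tok for tok in stmt.tokens if isinstance(tok, sql.Identifier))
print(ident.get_alias())  # expected: 'fname'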

Example 10: nl

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def nl(self, offset=1):
        # offset = 1 represents a single space after SELECT
        offset = -len(offset) if not isinstance(offset, int) else offset
        # add two for the space and parens
        indent = self.indent * (2 + self._max_kwd_len)

        return sql.Token(T.Whitespace, self.n + self.char * (
            self._max_kwd_len + offset + indent + self.offset)) 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 10, Source file: aligned_indent.py
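
This nl helper belongs to the aligned-indent filter. Through the public API the filter is reached via the reindent_aligned option of sqlparse.format (assumed to be available in sqlparse 0.2 and later); a hedged usage sketch:

import sqlparse

query = "select id, name from users where id = 1 and name is not null"
print(sqlparse.format(query, reindent_aligned=True, keyword_case='upper'))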

Example 11: _process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def _process(self, stream, varname, has_nl):
        # SQL query assignment to varname
        if self.count > 1:
            yield sql.Token(T.Whitespace, '\n')
        yield sql.Token(T.Name, varname)
        yield sql.Token(T.Whitespace, ' ')
        yield sql.Token(T.Operator, '=')
        yield sql.Token(T.Whitespace, ' ')
        if has_nl:
            yield sql.Token(T.Operator, '(')
        yield sql.Token(T.Text, "'")

        # Print the tokens inside the quoted string
        for token in stream:
            # Token is a new line separator
            if token.is_whitespace and '\n' in token.value:
                # Close quote and add a new line
                yield sql.Token(T.Text, " '")
                yield sql.Token(T.Whitespace, '\n')

                # Quote header on secondary lines
                yield sql.Token(T.Whitespace, ' ' * (len(varname) + 4))
                yield sql.Token(T.Text, "'")

                # Indentation
                after_lb = token.value.split('\n', 1)[1]
                if after_lb:
                    yield sql.Token(T.Whitespace, after_lb)
                continue

            # Token has escape chars
            elif "'" in token.value:
                token.value = token.value.replace("'", "\\'")

            # Put the token
            yield sql.Token(T.Text, token.value)

        # Close quote
        yield sql.Token(T.Text, "'")
        if has_nl:
            yield sql.Token(T.Operator, ')') 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 43, Source file: output.py
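
This generator implements the Python output format: the statement is wrapped in a quoted string and assigned to a variable. It is normally reached through sqlparse.format with output_format='python'. A usage sketch:

import sqlparse

print(sqlparse.format("SELECT * FROM users WHERE id = 1", output_format='python'))
# Expected output roughly: sql = 'SELECT * FROM users WHERE id = 1'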

Example 12: nl

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def nl(self, offset=0):
        return sql.Token(
            T.Whitespace,
            self.n + self.char * max(0, self.leading_ws + offset)) 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 6, Source file: reindent.py

Example 13: _process_identifierlist

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def _process_identifierlist(self, tlist):
        identifiers = list(tlist.get_identifiers())
        first = next(identifiers.pop(0).flatten())
        num_offset = 1 if self.char == '\t' else self._get_offset(first)
        if not tlist.within(sql.Function):
            with offset(self, num_offset):
                position = 0
                for token in identifiers:
                    # Add 1 for the "," separator
                    position += len(token.value) + 1
                    if position > (self.wrap_after - self.offset):
                        adjust = 0
                        if self.comma_first:
                            adjust = -2
                            _, comma = tlist.token_prev(
                                tlist.token_index(token))
                            if comma is None:
                                continue
                            token = comma
                        tlist.insert_before(token, self.nl(offset=adjust))
                        if self.comma_first:
                            _, ws = tlist.token_next(
                                tlist.token_index(token), skip_ws=False)
                            if (ws is not None
                                    and ws.ttype is not T.Text.Whitespace):
                                tlist.insert_after(
                                    token, sql.Token(T.Whitespace, ' '))
                        position = 0
        self._process_default(tlist) 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 31, Source file: reindent.py
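
The wrap_after, comma_first and offset attributes used above correspond to options of sqlparse.format when reindenting is enabled. A hedged usage sketch (the exact line breaks depend on the sqlparse version and the wrap_after setting):

import sqlparse

query = "select id, first_name, last_name, email, created_at from users"
print(sqlparse.format(query, reindent=True, comma_first=True))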

Example 14: process

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def process(self, stmt):
        self._curr_stmt = stmt
        self._process(stmt)

        if self._last_stmt is not None:
            nl = '\n' if text_type(self._last_stmt).endswith('\n') else '\n\n'
            stmt.tokens.insert(0, sql.Token(T.Whitespace, nl))

        self._last_stmt = stmt
        return stmt 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 12, Source file: reindent.py
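
process here prepends a newline (or a blank line) between consecutive reindented statements; through the public API this corresponds to sqlparse.format with reindent=True. A usage sketch:

import sqlparse

queries = "select id from users; select name from roles;"
print(sqlparse.format(queries, reindent=True, keyword_case='upper'))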

Example 15: _stripws_identifierlist

# Required import: from sqlparse import tokens [as alias]
# Or: from sqlparse.tokens import Whitespace [as alias]
def _stripws_identifierlist(self, tlist):
        # Removes newlines before commas, see issue140
        last_nl = None
        for token in list(tlist.tokens):
            if last_nl and token.ttype is T.Punctuation and token.value == ',':
                tlist.tokens.remove(last_nl)
            last_nl = token if token.is_whitespace else None

            # next_ = tlist.token_next(token, skip_ws=False)
            # if (next_ and not next_.is_whitespace and
            #             token.ttype is T.Punctuation and token.value == ','):
            #     tlist.insert_after(token, sql.Token(T.Whitespace, ' '))
        return self._stripws_default(tlist) 
Author: mtxr, Project: SublimeText-SQLTools, Lines: 15, Source file: others.py
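
_stripws_identifierlist is part of the whitespace-stripping filter; via the public API its behaviour corresponds to the strip_whitespace option. A hedged usage sketch:

import sqlparse

messy = "select   id ,\n    name   from users"
print(sqlparse.format(messy, strip_whitespace=True))
# Expected: redundant whitespace collapsed, e.g. 'select id, name from users'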


Note: the sqlparse.tokens.Whitespace examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.