This article collects typical usage examples of sqlparse.sql.Token in Python. If you are wondering what sql.Token is for, how to use it, or what real-world code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the containing module, sqlparse.sql.
The following shows 15 code examples of sql.Token, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
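Before the examples, a quick sketch of what sql.Token is: a leaf node holding a token type (ttype) and its string value; a parsed statement is a tree of Token and TokenList nodes. The snippet below is a minimal illustration of these basics and is not taken from the examples that follow.

import sqlparse
from sqlparse import sql
from sqlparse import tokens as T

# A token can be built directly from a token type and a string value.
ws = sql.Token(T.Whitespace, ' ')
print(ws.ttype, repr(ws.value))

# Parsing yields a statement tree whose leaves are Token instances.
stmt = sqlparse.parse('SELECT 1;')[0]
for tok in stmt.flatten():
    print(tok.ttype, repr(tok.value))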
Example 1: _process_case
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def _process_case(self, tlist):
offset_ = len('case ') + len('when ')
cases = tlist.get_cases(skip_ws=True)
# align the end as well
end_token = tlist.token_next_by(m=(T.Keyword, 'END'))[1]
cases.append((None, [end_token]))
condition_width = [len(' '.join(map(text_type, cond))) if cond else 0
for cond, _ in cases]
max_cond_width = max(condition_width)
for i, (cond, value) in enumerate(cases):
# cond is None for the ELSE branch and for the closing END token
stmt = cond[0] if cond else value[0]
if i > 0:
tlist.insert_before(stmt, self.nl(
offset_ - len(text_type(stmt))))
if cond:
ws = sql.Token(T.Whitespace, self.char * (
max_cond_width - condition_width[i]))
tlist.insert_after(cond[-1], ws)
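This helper appears to come from sqlparse's aligned-indent filter; if so, it is normally reached through sqlparse.format() with reindent_aligned=True rather than being called directly. A small usage sketch (the query is just an illustration):

import sqlparse

query = ("select case when x > 10 then 'big' "
         "when x > 1 then 'medium' else 'small' end as size from t")
print(sqlparse.format(query, reindent_aligned=True, keyword_case='upper'))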
Example 2: _process
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def _process(self, group, stream):
for token in stream:
if token.is_whitespace and '\n' in token.value:
if token.value.endswith('\n'):
self.line = ''
else:
self.line = token.value.splitlines()[-1]
elif token.is_group and type(token) not in self.keep_together:
token.tokens = self._process(token, token.tokens)
else:
val = text_type(token)
if len(self.line) + len(val) > self.width:
match = re.search(r'^ +', self.line)
if match is not None:
indent = match.group()
else:
indent = ''
yield sql.Token(T.Whitespace, '\n{0}'.format(indent))
self.line = indent
self.line += val
yield token
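This generator looks like sqlparse's right-margin filter, which re-wraps lines longer than a configured width. Assuming that is the case, it is usually enabled through the wrap_after option of sqlparse.format():

import sqlparse

long_query = ("select col_a, col_b, col_c, col_d, col_e, col_f "
              "from some_table where col_a = 1")
# Re-wrap output lines that would grow past roughly 40 characters.
print(sqlparse.format(long_query, wrap_after=40))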
Example 3: _process
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def _process(tlist):
def get_next_comment():
# TODO(andi) Comment types should be unified, see related issue38
return tlist.token_next_by(i=sql.Comment, t=T.Comment)
tidx, token = get_next_comment()
while token:
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
nidx, next_ = tlist.token_next(tidx, skip_ws=False)
# Replace the comment with a single space only if both prev and next exist,
# neither is whitespace, and neither is an adjacent parenthesis;
# otherwise the comment is removed entirely.
if (prev_ is None or next_ is None or
prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
next_.is_whitespace or next_.match(T.Punctuation, ')')):
tlist.tokens.remove(token)
else:
tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
tidx, token = get_next_comment()
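In sqlparse itself this logic sits behind the strip_comments formatting option, so the usual entry point is sqlparse.format():

import sqlparse

query = "select id /* primary key */, name from users -- trailing comment"
# Prints the statement with both comments removed.
print(sqlparse.format(query, strip_comments=True))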
Example 4: _process
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def _process(self, tlist):
token = self._get_next_comment(tlist)
while token:
tidx = tlist.token_index(token)
prev = tlist.token_prev(tidx, False)
next_ = tlist.token_next(tidx, False)
# Replace the comment with a single space if both prev and next exist,
# neither is whitespace, and neither is an adjacent parenthesis;
# otherwise the comment is removed entirely.
if (prev is not None and next_ is not None
and not prev.is_whitespace() and not next_.is_whitespace()
and not (prev.match(T.Punctuation, '(')
or next_.match(T.Punctuation, ')'))):
tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
else:
tlist.tokens.pop(tidx)
token = self._get_next_comment(tlist)
Example 5: process
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def process(self, stack, stmt):
if isinstance(stmt, sql.Statement):
self._curr_stmt = stmt
self._process(stmt)
if isinstance(stmt, sql.Statement):
if self._last_stmt is not None:
if unicode(self._last_stmt).endswith('\n'):
nl = '\n'
else:
nl = '\n\n'
stmt.tokens.insert(
0, sql.Token(T.Whitespace, nl))
if self._last_stmt != stmt:
self._last_stmt = stmt
# FIXME: Doesn't work ;)
Example 6: pop
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def pop(self):
next_val = self.peek()
self.index += 1
# We need to handle three cases here where the next_val could be:
# 1. <table_name> ('business')
# 2. <database_name>.<table_name> ('yelp.business')
# 3. <database_name>.<table_name> <extended_query>
# ('yelp.business change col_one col_two')
# In all cases we should return a token containing just the table name,
# or the database name plus the table name when a database name is
# present. Case #3 occurs because sqlparse incorrectly parses certain
# queries.
if isinstance(next_val, Identifier):
tokens = next_val.tokens
if len(tokens) > 1 and tokens[1].value == '.':
str_token = "{db_name}{punctuation}{table_name}".format(
db_name=tokens[0].value,
punctuation=tokens[1].value,
table_name=tokens[2].value
)
return TK(Token.Name, str_token)
else:
return next_val.token_first()
return next_val
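The branching above relies on how sqlparse groups a dotted name into an Identifier. A rough, hypothetical illustration of that grouping (the exact token layout can vary between sqlparse versions and inputs):

import sqlparse

parsed = sqlparse.parse('yelp.business')[0]
ident = parsed.tokens[0]                 # expected: an Identifier group
print(type(ident).__name__)              # e.g. 'Identifier'
print([t.value for t in ident.tokens])   # e.g. ['yelp', '.', 'business']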
Example 7: process
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def process(self, stack, stmt):
if isinstance(stmt, sql.Statement):
self._curr_stmt = stmt
self._process(stmt)
if isinstance(stmt, sql.Statement):
if self._last_stmt is not None:
if str(self._last_stmt).endswith('\n'):
nl = '\n'
else:
nl = '\n\n'
stmt.tokens.insert(
0, sql.Token(T.Whitespace, nl))
if self._last_stmt != stmt:
self._last_stmt = stmt
# FIXME: Doesn't work ;)
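Examples 5 and 7 look like the Python 2 and Python 3 variants of the process() entry point of sqlparse's reindent filter. Assuming so, the filter is normally driven through sqlparse.format() rather than instantiated by hand:

import sqlparse

raw = "select a, b from t where a = 1 and b = 2; select 1;"
print(sqlparse.format(raw, reindent=True, keyword_case='upper'))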
Example 8: _get_alias
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def _get_alias(self, token):
tkw = token.token_next_match(0, T.Keyword, 'AS')
if tkw is not None:
return tu.token_next_enable(token, tkw)
left = tu.token_next_enable(token)
if not left:
return None
def is_space(tkn):
return tkn.is_whitespace() and tkn.value
spl = token.token_matching(token.token_index(left), [is_space])
if spl:
return tu.token_next_enable(token, spl)
if tu.is_parenthesis(left):
tkn = tu.token_next_enable(token, left)
if tkn and (tu.is_identifier(tkn) or (tkn.ttype in T.Name)):
# Handle the '(...) ALIAS' case
space = sql.Token(T.Whitespace, "\t")  # add a whitespace token after the parenthesis
token.insert_after(left, space)
return tkn
return None
Example 9: __custom_process_insert_values_lr
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def __custom_process_insert_values_lr(self, tlist):
# For INSERT statements, force exactly one space before and after VALUES
values_token = tlist.token_next_match(0, T.Keyword, "VALUES")
if values_token:
prv = tlist.token_prev(values_token, skip_ws=False)
if prv and prv.is_whitespace():
prv.value = " "
prv = tlist.token_prev(prv, skip_ws=False)
while prv and prv.is_whitespace():
prv.value = ""
prv = tlist.token_prev(prv, skip_ws=False)
else:
tlist.insert_before(values_token, sql.Token(T.Whitespace, " "))
nxt = tlist.token_next(values_token, skip_ws=False)
if nxt and nxt.is_whitespace():
nxt.value = " "
nxt = tlist.token_next(nxt, skip_ws=False)
while nxt and nxt.is_whitespace():
nxt.value = ""
nxt = tlist.token_next(nxt, skip_ws=False)
else:
tlist.insert_after(values_token, sql.Token(T.Whitespace, " "))
Example 10: _validate_comparison
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def _validate_comparison(cls, tokens):
base_error_string = "Invalid comparison clause"
if len(tokens) != 3:
raise MlflowException("{}. Expected 3 tokens found {}".format(base_error_string,
len(tokens)),
error_code=INVALID_PARAMETER_VALUE)
if not isinstance(tokens[0], Identifier):
raise MlflowException("{}. Expected 'Identifier' found '{}'".format(base_error_string,
str(tokens[0])),
error_code=INVALID_PARAMETER_VALUE)
if not isinstance(tokens[1], Token) and tokens[1].ttype != TokenType.Operator.Comparison:
raise MlflowException("{}. Expected comparison found '{}'".format(base_error_string,
str(tokens[1])),
error_code=INVALID_PARAMETER_VALUE)
if not isinstance(tokens[2], Token) and \
(tokens[2].ttype not in cls.STRING_VALUE_TYPES.union(cls.NUMERIC_VALUE_TYPES) or
isinstance(tokens[2], Identifier)):
raise MlflowException("{}. Expected value token found '{}'".format(base_error_string,
str(tokens[2])),
error_code=INVALID_PARAMETER_VALUE)
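The three tokens validated here typically come from a filter expression parsed with sqlparse. A rough, hypothetical sketch of how such a triple might be produced (this is an illustration, not MLflow's actual parsing code):

import sqlparse

parsed = sqlparse.parse("metrics.accuracy > 0.9")[0]
comparison = parsed.tokens[0]    # expected: a sqlparse.sql.Comparison group
tokens = [t for t in comparison.tokens if not t.is_whitespace]
# tokens[0] should be an Identifier ('metrics.accuracy'),
# tokens[1] a comparison operator ('>'),
# tokens[2] a numeric literal token ('0.9').
print([t.value for t in tokens])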
Example 11: statement2col_defs
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def statement2col_defs(cls, token: Token):
from djongo.base import DatabaseWrapper
supported_data_types = set(DatabaseWrapper.data_types.values())
defs = token.value.strip('()').split(',')
for col in defs:
col = col.strip()
name, other = col.split(' ', 1)
if name == 'CONSTRAINT':
yield SQLColumnConstraint()
else:
if col[0] != '"':
raise SQLDecodeError('Column identifier not quoted')
name, other = col[1:].split('"', 1)
other = other.strip()
data_type, constraint_sql = other.split(' ', 1)
if data_type not in supported_data_types:
raise NotSupportedError(f'Data of type: {data_type}')
col_constraints = set(SQLColumnDef._get_constraints(constraint_sql))
yield SQLColumnDef(name=name,
data_type=data_type,
col_constraints=col_constraints)
Example 12: is_tokens
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def is_tokens(x):
return isinstance(x, list) and len(x) > 0 and isinstance(x[0], S.Token)
Example 13: process
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def process(self, stream):
"""Process the stream"""
EOS_TTYPE = T.Whitespace, T.Comment.Single
# Run over all stream tokens
for ttype, value in stream:
# Yield the pending statement once the terminating ';' has been consumed
# and a token arrives that is neither plain whitespace nor a single-line
# comment. Note that a newline token counts as non-whitespace here, i.e.
# "whitespace" in this check ignores newlines.
# (Why multi-line comments aren't treated the same way is an open question.)
if self.consume_ws and ttype not in EOS_TTYPE:
yield sql.Statement(self.tokens)
# Reset filter and prepare to process next statement
self._reset()
# Change current split level (increase, decrease or remain equal)
self.level += self._change_splitlevel(ttype, value)
# Append the token to the current statement
self.tokens.append(sql.Token(ttype, value))
# Check if we get the end of a statement
if self.level <= 0 and ttype is T.Punctuation and value == ';':
self.consume_ws = True
# Yield pending statement (if any)
if self.tokens:
yield sql.Statement(self.tokens)
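This splitter appears to be what sqlparse uses internally to break a script into statements; from user code the same behaviour is reached via sqlparse.split() or sqlparse.parse():

import sqlparse

script = "insert into t values (1); select * from t;"
print(sqlparse.split(script))
# ['insert into t values (1);', 'select * from t;']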
Example 14: nl
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def nl(self, offset=1):
# offset = 1 represents a single space after SELECT
offset = -len(offset) if not isinstance(offset, int) else offset
# add two for the space and parens
indent = self.indent * (2 + self._max_kwd_len)
return sql.Token(T.Whitespace, self.n + self.char * (
self._max_kwd_len + offset + indent + self.offset))
Example 15: _process
# Required import: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Token [as alias]
def _process(self, stream, varname, has_nl):
# Assignment of the SQL query to varname
if self.count > 1:
yield sql.Token(T.Whitespace, '\n')
yield sql.Token(T.Name, varname)
yield sql.Token(T.Whitespace, ' ')
yield sql.Token(T.Operator, '=')
yield sql.Token(T.Whitespace, ' ')
if has_nl:
yield sql.Token(T.Operator, '(')
yield sql.Token(T.Text, "'")
# Emit the tokens inside the quoted string
for token in stream:
# Token is a new line separator
if token.is_whitespace and '\n' in token.value:
# Close quote and add a new line
yield sql.Token(T.Text, " '")
yield sql.Token(T.Whitespace, '\n')
# Quote header on secondary lines
yield sql.Token(T.Whitespace, ' ' * (len(varname) + 4))
yield sql.Token(T.Text, "'")
# Indentation
after_lb = token.value.split('\n', 1)[1]
if after_lb:
yield sql.Token(T.Whitespace, after_lb)
continue
# Token contains quote characters that need escaping
elif "'" in token.value:
token.value = token.value.replace("'", "\\'")
# Put the token
yield sql.Token(T.Text, token.value)
# Close quote
yield sql.Token(T.Text, "'")
if has_nl:
yield sql.Token(T.Operator, ')')
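In sqlparse this generator backs the output_format='python' option, which rewrites a statement as a Python string assignment:

import sqlparse

print(sqlparse.format('select * from foo;', output_format='python'))
# sql = 'select * from foo;'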