This article collects typical usage examples of the Python class sqlparse.sql.Comment. If you have been wondering what sql.Comment does and how to use it, the curated code examples below may help. You can also explore the containing module, sqlparse.sql, for further usage examples.
The following shows 15 code examples involving sql.Comment, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
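Before the examples, here is a minimal sketch (my own illustration, not taken from the examples below) of where sqlparse.sql.Comment shows up after parsing: the lexer emits tokens whose ttype falls under T.Comment, and the default grouping pass wraps runs of them into sql.Comment token lists. Exact grouping details may vary slightly between sqlparse versions.

import sqlparse
from sqlparse import sql
from sqlparse import tokens as T

def walk(token_list):
    """Yield every token, descending into grouped token lists (demo helper)."""
    for token in token_list.tokens:
        yield token
        if isinstance(token, sql.TokenList):
            yield from walk(token)

stmt = sqlparse.parse("SELECT a, b  -- pick two columns\nFROM t /* demo */")[0]

# Grouped comments are sql.Comment instances somewhere in the token tree ...
for token in walk(stmt):
    if isinstance(token, sql.Comment):
        print("grouped comment:", token.value.strip())

# ... while the flat token stream carries the raw T.Comment ttypes.
for token in stmt.flatten():
    if token.ttype in T.Comment:
        print("comment token:", repr(token.value))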
Example 1: group_identifier_list
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def group_identifier_list(tlist):
    m_role = T.Keyword, ('null', 'role')
    sqlcls = (sql.Function, sql.Case, sql.Identifier, sql.Comparison,
              sql.IdentifierList, sql.Operation)
    ttypes = (T_NUMERICAL + T_STRING + T_NAME +
              (T.Keyword, T.Comment, T.Wildcard))

    def match(token):
        return token.match(T.Punctuation, ',')

    def valid(token):
        return imt(token, i=sqlcls, m=m_role, t=ttypes)

    def post(tlist, pidx, tidx, nidx):
        return pidx, nidx

    valid_prev = valid_next = valid
    _group(tlist, sql.IdentifierList, match,
           valid_prev, valid_next, post, extend=True)
Example 2: _process
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def _process(tlist):
    def get_next_comment():
        # TODO(andi) Comment types should be unified, see related issue38
        return tlist.token_next_by(i=sql.Comment, t=T.Comment)

    tidx, token = get_next_comment()
    while token:
        pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
        nidx, next_ = tlist.token_next(tidx, skip_ws=False)
        # Replace by whitespace if prev and next exist and if they're not
        # whitespaces. This doesn't apply if prev or next is a parenthesis.
        if (prev_ is None or next_ is None or
                prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
                next_.is_whitespace or next_.match(T.Punctuation, ')')):
            tlist.tokens.remove(token)
        else:
            tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')

        tidx, token = get_next_comment()
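The _process above strips comments at the token-list level (it appears to mirror sqlparse's own StripCommentsFilter). For comparison, the public entry point for the same effect is sqlparse.format with strip_comments=True; the snippet below is only an illustration, and the exact whitespace in the output depends on the sqlparse version.

import sqlparse

raw = "SELECT a, /* columns */ b FROM t  -- trailing note"
print(sqlparse.format(raw, strip_comments=True))
# Roughly: SELECT a, b FROM t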
Example 3: group_comments
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def group_comments(tlist):
    tidx, token = tlist.token_next_by(t=T.Comment)
    while token:
        eidx, end = tlist.token_not_matching(
            lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace, idx=tidx)
        if end is not None:
            eidx, end = tlist.token_prev(eidx, skip_ws=False)
            tlist.group_tokens(sql.Comment, tidx, eidx)

        tidx, token = tlist.token_next_by(t=T.Comment, idx=tidx)
Example 4: align_comments
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def align_comments(tlist):
    tidx, token = tlist.token_next_by(i=sql.Comment)
    while token:
        pidx, prev_ = tlist.token_prev(tidx)
        if isinstance(prev_, sql.TokenList):
            tlist.group_tokens(sql.TokenList, pidx, tidx, extend=True)
            tidx = pidx
        tidx, token = tlist.token_next_by(i=sql.Comment, idx=tidx)
Example 5: process
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def process(self, stack, stream):
    for token_type, value in stream:
        if token_type in Comment:
            yield token_type, value
Example 6: _get_next_comment
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def _get_next_comment(self, tlist):
    # TODO(andi) Comment types should be unified, see related issue38
    token = tlist.token_next_by_instance(0, sql.Comment)
    if token is None:
        token = tlist.token_next_by_type(0, T.Comment)
    return token
Example 7: align_comments
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def align_comments(tlist):
    [align_comments(sgroup) for sgroup in tlist.get_sublists()]
    idx = 0
    token = tlist.token_next_by_instance(idx, sql.Comment)
    while token:
        before = tlist.token_prev(tlist.token_index(token))
        if isinstance(before, sql.TokenList):
            grp = tlist.tokens_between(before, token)[1:]
            before.tokens.extend(grp)
            for t in grp:
                tlist.tokens.remove(t)
            idx = tlist.token_index(before) + 1
        else:
            idx = tlist.token_index(token) + 1
        token = tlist.token_next_by_instance(idx, sql.Comment)
Example 8: tokens
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def tokens(self):
    return [
        token for token in self.statement.tokens
        if not token.is_whitespace() and not isinstance(token, Comment)
    ]
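Example 8 is a method of some wrapper class that keeps a parsed statement in self.statement. The sketch below is a minimal, hypothetical wrapper (the ParsedStatement name is my own) showing how such a property could be wired up; note that recent sqlparse versions expose is_whitespace as a property rather than a method, which is why the sketch drops the parentheses used above.

import sqlparse
from sqlparse.sql import Comment


class ParsedStatement:
    """Hypothetical container for a single parsed statement (illustration only)."""

    def __init__(self, raw_sql):
        self.statement = sqlparse.parse(raw_sql)[0]

    @property
    def tokens(self):
        # Same filtering idea as Example 8: drop whitespace and grouped comments.
        return [
            token for token in self.statement.tokens
            if not token.is_whitespace and not isinstance(token, Comment)
        ]


print(ParsedStatement("SELECT a  -- note\nFROM t").tokens)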
Example 9: is_block_comment
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def is_block_comment(token):
    """
    Return True if the token is a block comment (/* ... */).
    """
    if is_comment(token):
        comment = token.token_next_by_type(0, T.Comment)
        return comment.value in ["/*", "*/"]
    return False
Example 10: is_line_comment
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def is_line_comment(token):
    """
    Return True if the token is a line comment.
    """
    if is_comment(token):
        comment = token.token_next_by_type(0, T.Comment)
        return comment.value not in ["/*", "*/"]
    return False
Example 11: is_comment
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def is_comment(token):
    """
    Return True if the token is a comment.
    """
    return isinstance(token, sql.Comment)
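A short usage sketch for the helper above, assuming is_comment from Example 11 is defined in the same module; the SQL string is just an illustration. (Examples 9 and 10 additionally rely on token_next_by_type, which only exists in older sqlparse releases, so they are not exercised here.)

import sqlparse

# Assumes the is_comment helper from Example 11 is in scope.
stmt = sqlparse.parse("SELECT 1 /* the answer */")[0]
for token in stmt.tokens:
    print(repr(token.value), "->", is_comment(token))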
Example 12: process
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def process(self, _, stmt):
    def custom_flaten(token):
        """
        A flatten that leaves comment groups intact.
        """
        if isinstance(token, sql.TokenList) and not tu.is_comment(token):
            for tkn in token.tokens:
                for item in custom_flaten(tkn):
                    yield item
        else:
            yield token

    is_prev_cr = True
    for token in custom_flaten(stmt):
        if tu.is_plain_line_comment(token, self.local_config.comment_syntax):
            # Swap the plain comment group for the custom Comment class.
            parent = token.parent
            index = parent.tokens.index(token)
            comment = LineDescriptionLineCommentFilter.Comment(token.tokens)
            for tkn in token.tokens:
                tkn.parent = comment
            comment.parent = parent
            parent.tokens[index] = comment

            # Set the is_line_description flag.
            comment.is_line_description = not is_prev_cr  # pylint: disable=attribute-defined-outside-init
        elif token.is_whitespace():
            if is_inc_cr(token):
                is_prev_cr = True
            else:
                is_prev_cr = False
Example 13: sql_recursively_strip
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def sql_recursively_strip(node):
    for sub_node in node.get_sublists():
        sql_recursively_strip(sub_node)

    if isinstance(node, Comment):
        return node

    sql_strip(node)

    # strip duplicate whitespaces between parentheses
    if isinstance(node, Parenthesis):
        sql_trim(node, 1)
        sql_trim(node, -2)

    return node
Example 14: group_identifier_list
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def group_identifier_list(tlist):
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    idx = 0
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
    start = None
    while tcomma is not None:
        before = tlist.token_prev(tcomma)
        after = tlist.token_next(tcomma)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                start = before
            next_ = tlist.token_next(after)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                tcomma = next_
Example 15: group_identifier_list
# Module required: from sqlparse import sql [as alias]
# Or: from sqlparse.sql import Comment [as alias]
def group_identifier_list(tlist):
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(0, T.Punctuation, ',')
    start = None
    while tcomma is not None:
        # Go back one idx to make sure to find the correct tcomma
        idx = tlist.token_index(tcomma)
        before = tlist.token_prev(idx)
        after = tlist.token_next(idx)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(idx + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                start = before
            after_idx = tlist.token_index(after, start=idx)
            next_ = tlist.token_next(after_idx)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                tcomma = next_