本文整理匯總了Python中sqlparse.engine.filter.StatementFilter類的典型用法代碼示例。如果您正苦於以下問題:Python StatementFilter類的具體用法?Python StatementFilter怎麽用?Python StatementFilter使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
在下文中一共展示了StatementFilter類的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: split
def split(sql, encoding=None):
    """Split *sql* into a list of single SQL statements.

    :param sql: A string containing one or more SQL statements.
    :param encoding: The encoding of the statement (optional).
    :returns: A list of strings, one per statement, stripped of
        surrounding whitespace.
    """
    token_stream = lexer.tokenize(sql, encoding)
    stmt_filter = StatementFilter()
    statements = stmt_filter.process(None, token_stream)
    return [unicode(statement).strip() for statement in statements]
示例2: run
def run(self, sql, encoding=None):
    """Tokenize *sql* and push it through the configured filter pipeline.

    Stages (each optional, applied in order): preprocess filters on the
    raw token stream, statement splitting, grouping, statement filters,
    and post-process filters.

    :param sql: A string containing one or more SQL statements.
    :param encoding: Optional encoding forwarded to the lexer.
    :returns: An iterable of statement objects; may be a generator or a
        list depending on which stages are active.
    """
    stream = lexer.tokenize(sql, encoding)
    # Process token stream
    if self.preprocess:
        for filter_ in self.preprocess:
            stream = filter_.process(self, stream)
    # Every later stage works per-statement, so splitting is required
    # whenever any of them is configured.
    if (self.stmtprocess or self.postprocess or self.split_statements
            or self._grouping):
        splitter = StatementFilter()
        stream = splitter.process(self, stream)
    # import StripCommentsFilter in the run() method to avoid a circular dependency.
    # For stripping comments, the only grouping method we want to invoke is
    # grouping.group(), this considerably improves performance.
    strip_comments_only = False
    if self.stmtprocess and len(self.stmtprocess) == 1:
        from sqlparse.filters import StripCommentsFilter
        strip_comments_only = isinstance(self.stmtprocess[0], StripCommentsFilter)
    if self._grouping:
        def _group(stream):
            # Lazily group each statement's tokens into a parse tree.
            for stmt in stream:
                if strip_comments_only:
                    # Cheaper partial grouping: only comments are grouped.
                    grouping.group_comments(stmt)
                else:
                    grouping.group(stmt)
                yield stmt
        stream = _group(stream)
    if self.stmtprocess:
        def _run1(stream):
            # Eager stage: statement filters mutate statements in place,
            # so the stream is materialized into a list here.
            ret = []
            for stmt in stream:
                for filter_ in self.stmtprocess:
                    filter_.process(self, stmt)
                ret.append(stmt)
            return ret
        stream = _run1(stream)
    if self.postprocess:
        def _run2(stream):
            for stmt in stream:
                # Flatten the grouped token tree back to a flat token list
                # before the post-process filters run.
                stmt.tokens = list(self._flatten(stmt.tokens))
                for filter_ in self.postprocess:
                    stmt = filter_.process(self, stmt)
                yield stmt
        stream = _run2(stream)
    return stream
示例3: run
def run(self, sql):
    """Tokenize *sql* and push it through the configured filter pipeline.

    Stages (each optional, applied in order): preprocess filters on the
    raw token stream, statement splitting, grouping, statement filters,
    and post-process filters.

    :param sql: A string containing one or more SQL statements.
    :returns: An iterable of statement objects; may be a generator or a
        list depending on which stages are active.
    """
    stream = lexer.tokenize(sql)
    # Pre-processing filters operate on the raw token stream.
    if self.preprocess:
        for filter_ in self.preprocess:
            stream = filter_.process(self, stream)
    # Every later stage works per-statement, so splitting is required
    # whenever any of them is configured.
    if (self.stmtprocess or self.postprocess or self.split_statements
            or self._grouping):
        splitter = StatementFilter()
        stream = splitter.process(self, stream)
    if self._grouping:
        # Note: a stray dead `pass` statement and a stale editor comment
        # ("modified by rrana") were removed from this inner generator.
        def _group(stream):
            # Lazily group each statement's tokens into a parse tree.
            for stmt in stream:
                grouping.group(stmt)
                yield stmt
        stream = _group(stream)
    if self.stmtprocess:
        def _run1(stream):
            # Eager stage: statement filters mutate statements in place,
            # so the stream is materialized into a list here.
            ret = []
            for stmt in stream:
                for filter_ in self.stmtprocess:
                    filter_.process(self, stmt)
                ret.append(stmt)
            return ret
        stream = _run1(stream)
    if self.postprocess:
        def _run2(stream):
            for stmt in stream:
                # Flatten the grouped token tree back to a flat token list
                # before the post-process filters run.
                stmt.tokens = list(self._flatten(stmt.tokens))
                for filter_ in self.postprocess:
                    stmt = filter_.process(self, stmt)
                yield stmt
        stream = _run2(stream)
    return stream
示例4: split2
def split2(stream):
    """Split a token *stream* into statements and return them as a list."""
    stmt_filter = StatementFilter()
    statements = stmt_filter.process(None, stream)
    return list(statements)
示例5: split2
def split2(stream):
    """Split a token *stream* into statements and return them as a list."""
    # Imported locally to avoid a module-level circular dependency.
    from sqlparse.engine.filter import StatementFilter
    stmt_filter = StatementFilter()
    statements = stmt_filter.process(None, stream)
    return list(statements)