

Python sqlparse.parse Method Code Examples

This article compiles typical usage examples of the sqlparse.parse method in Python. If you are wondering how exactly sqlparse.parse is used, how to call it, or what real-world code that uses it looks like, the curated code examples below may help. You can also explore further how this method is used elsewhere in sqlparse.


A total of 15 code examples of the sqlparse.parse method are shown below, sorted by popularity by default.
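Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the query strings are illustrative only) of what sqlparse.parse returns: a tuple of Statement objects, one per semicolon-separated statement, each of which is a tree of tokens.

import sqlparse

# parse() returns a tuple of sqlparse.sql.Statement objects,
# one per semicolon-separated statement in the input string.
statements = sqlparse.parse("SELECT id, name FROM users; DELETE FROM users WHERE id = 1")
print(len(statements))           # 2
print(statements[0].get_type())  # 'SELECT'

# Each Statement is a token tree; flatten() yields the leaf tokens.
for token in statements[0].flatten():
    if not token.is_whitespace:
        print(token.ttype, repr(token.value))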

Example 1: parse_partial_identifier

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def parse_partial_identifier(word):
    """Attempt to parse a (partially typed) word as an identifier

    word may include a schema qualification, like `schema_name.partial_name`
    or `schema_name.` There may also be unclosed quotation marks, like
    `"schema`, or `schema."partial_name`

    :param word: string representing a (partially complete) identifier
    :return: sqlparse.sql.Identifier, or None
    """

    p = sqlparse.parse(word)[0]
    n_tok = len(p.tokens)
    if n_tok == 1 and isinstance(p.tokens[0], Identifier):
        return p.tokens[0]
    elif p.token_next_by(m=(Error, '"'))[1]:
        # An unmatched double quote, e.g. '"foo', 'foo."', or 'foo."bar'
        # Close the double quote, then reparse
        return parse_partial_identifier(word + '"')
    else:
        return None 
Author: dbcli, Project: pgcli, Lines: 23, Source: utils.py
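As a usage note (my own illustration, not part of pgcli): a fully qualified name parses as a single Identifier, while an unclosed double quote surfaces as an Error token, which is exactly the case the function above repairs by appending a closing quote and reparsing.

import sqlparse
from sqlparse.sql import Identifier
from sqlparse.tokens import Error

# A complete qualified name comes back as a single Identifier token.
p = sqlparse.parse("schema_name.partial_name")[0]
print(len(p.tokens), isinstance(p.tokens[0], Identifier))   # expected: 1 True

# An unclosed double quote shows up as an Error token ...
p = sqlparse.parse('schema_name."partial')[0]
print(p.token_next_by(m=(Error, '"')))
# ... so parse_partial_identifier (above) closes the quote, reparses,
# and returns the resulting Identifier.
print(parse_partial_identifier('schema_name."partial'))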

Example 2: _run

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def _run(self, sql, sqla, exception):
        LOG.debug("sql: '%s'", sql)

        x = sqlparse.parse(sql)[0]
        for x in x.tokens:
            if not x.is_whitespace:
                LOG.debug("  %r %s", x, type(x))

        try:
            actual_sqla = to_sqla(sql)
        except Exception as e:
            if not exception:
                raise
            self.assertRegex(str(e), exception.strip())
        else:
            self.assertEqual(actual_sqla, sqla) 
Author: pglass, Project: sqlitis, Lines: 18, Source: test_to_sqla.py

Example 3: tableFromCreateStatement

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def tableFromCreateStatement(schema, stmt):
    """
    Add a table from a CREATE TABLE sqlparse statement object.

    @param schema: The schema to add the table statement to.
    @type schema: L{Schema}

    @param stmt: The C{CREATE TABLE} statement object.
    @type stmt: L{Statement}
    """
    i = iterSignificant(stmt)
    expect(i, ttype=Keyword.DDL, value="CREATE")
    expect(i, ttype=Keyword, value="TABLE")
    function = expect(i, cls=Function)
    i = iterSignificant(function)
    name = expect(i, cls=Identifier).get_name().encode("utf-8")
    self = Table(schema, name)
    parens = expect(i, cls=Parenthesis)
    cp = _ColumnParser(self, iterSignificant(parens), parens)
    cp.parse()
    return self 
Author: apple, Project: ccs-twistedextensions, Lines: 23, Source: parseschema.py
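For orientation (a sketch of my own, not from the project): sqlparse groups a CREATE TABLE statement into the DDL keyword, the TABLE keyword, and a grouped name/parenthesis node containing the table Identifier and the Parenthesis with the column definitions, which is the structure the parser above walks with expect() and iterSignificant().

import sqlparse

stmt = sqlparse.parse("CREATE TABLE person (id integer, name text)")[0]
for tok in stmt.tokens:
    if not tok.is_whitespace:
        # Prints the token class, ttype and raw text, e.g. the Keyword.DDL
        # "CREATE", the Keyword "TABLE", and the grouped name/parenthesis.
        print(type(tok).__name__, tok.ttype, repr(str(tok)))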

Example 4: extract_tables

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def extract_tables(sql):
    """Extract the table names from an SQL statment.

    Returns a list of (schema, table, alias) tuples

    """
    parsed = sqlparse.parse(sql)
    if not parsed:
        return []

    # INSERT statements must stop looking for tables at the first
    # Punctuation token. e.g.: INSERT INTO abc (col1, col2) VALUES (1, 2)
    # abc is the table name, but if we don't stop at the first lparen, then
    # we'll identify abc, col1 and col2 as table names.
    insert_stmt = parsed[0].token_first().value.lower() == "insert"
    stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt)
    return list(extract_table_identifiers(stream)) 
Author: dbcli, Project: litecli, Lines: 19, Source: parseutils.py
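A small self-contained illustration (with a sample query of my own) of the token_first() check used above to recognize INSERT statements:

import sqlparse

# For INSERT the very first token is the DML keyword "INSERT", so table
# extraction must stop at the first parenthesis (the column list).
parsed = sqlparse.parse("INSERT INTO abc (col1, col2) VALUES (1, 2)")
print(parsed[0].token_first().value.lower() == "insert")   # expected: True

parsed = sqlparse.parse("SELECT col1, col2 FROM abc")
print(parsed[0].token_first().value.lower() == "insert")   # expected: False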

Example 5: test_placeholder

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def test_placeholder(self):
        def _get_tokens(sql):
            return sqlparse.parse(sql)[0].tokens[-1].tokens
        t = _get_tokens('select * from foo where user = ?')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, '?')
        t = _get_tokens('select * from foo where user = :1')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, ':1')
        t = _get_tokens('select * from foo where user = :name')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, ':name')
        t = _get_tokens('select * from foo where user = %s')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, '%s')
        t = _get_tokens('select * from foo where user = $a')
        self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
        self.assertEqual(t[-1].value, '$a') 
Author: sriniiyer, Project: codenn, Lines: 20, Source: test_parse.py

Example 6: test_issue26

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def test_issue26(self):
        # parse stand-alone comments
        p = sqlparse.parse('--hello')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single)
        p = sqlparse.parse('-- hello')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single)
        p = sqlparse.parse('--hello\n')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single)
        p = sqlparse.parse('--')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single)
        p = sqlparse.parse('--\n')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single) 
Author: sriniiyer, Project: codenn, Lines: 19, Source: test_regressions.py

Example 7: update_names

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def update_names(self, text):
        for parsed in sqlparse.parse(text):
            for token in parsed.flatten():
                if token.ttype in Name:
                    self.name_counts[token.value] += 1 
Author: dbcli, Project: pgcli, Lines: 7, Source: prioritization.py

Example 8: find_prev_keyword

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def find_prev_keyword(sql, n_skip=0):
    """ Find the last sql keyword in an SQL statement

    Returns the value of the last keyword, and the text of the query with
    everything after the last keyword stripped
    """
    if not sql.strip():
        return None, ""

    parsed = sqlparse.parse(sql)[0]
    flattened = list(parsed.flatten())
    flattened = flattened[: len(flattened) - n_skip]

    logical_operators = ("AND", "OR", "NOT", "BETWEEN")

    for t in reversed(flattened):
        if t.value == "(" or (
            t.is_keyword and (t.value.upper() not in logical_operators)
        ):
            # Find the location of token t in the original parsed statement
            # We can't use parsed.token_index(t) because t may be a child token
            # inside a TokenList, in which case token_index throws an error
            # Minimal example:
            #   p = sqlparse.parse('select * from foo where bar')
            #   t = list(p.flatten())[-3]  # The "Where" token
            #   p.token_index(t)  # Throws ValueError: not in list
            idx = flattened.index(t)

            # Combine the string values of all tokens in the original list
            # up to and including the target keyword token t, to produce a
            # query string with everything after the keyword token removed
            text = "".join(tok.value for tok in flattened[: idx + 1])
            return t, text

    return None, ""


# Postgresql dollar quote signs look like `$$` or `$tag$` 
Author: dbcli, Project: pgcli, Lines: 40, Source: utils.py
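A quick usage sketch for the function above (my own sample input, assuming the usual sqlparse tokenization where WHERE is a keyword):

# The last keyword in the statement is WHERE, and the returned text is
# the query truncated right after that keyword.
keyword, text = find_prev_keyword("select * from foo where ")
print(keyword.value, repr(text))   # expected: where 'select * from foo where'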

Example 9: is_open_quote

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def is_open_quote(sql):
    """Returns true if the query contains an unclosed quote"""

    # parsed can contain one or more semi-colon separated commands
    parsed = sqlparse.parse(sql)
    return any(_parsed_is_open_quote(p) for p in parsed) 
Author: dbcli, Project: pgcli, Lines: 8, Source: utils.py

Example 10: __init__

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def __init__(self, full_text, text_before_cursor):
        self.identifier = None
        self.word_before_cursor = word_before_cursor = last_word(
            text_before_cursor, include="many_punctuations"
        )
        full_text = _strip_named_query(full_text)
        text_before_cursor = _strip_named_query(text_before_cursor)

        full_text, text_before_cursor, self.local_tables = isolate_query_ctes(
            full_text, text_before_cursor
        )

        self.text_before_cursor_including_last_word = text_before_cursor

        # If we've partially typed a word then word_before_cursor won't be an
        # empty string. In that case we want to remove the partially typed
        # string before sending it to the sqlparser. Otherwise the last token
        # will always be the partially typed string which renders the smart
        # completion useless because it will always return the list of
        # keywords as completion.
        if self.word_before_cursor:
            if word_before_cursor[-1] == "(" or word_before_cursor[0] == "\\":
                parsed = sqlparse.parse(text_before_cursor)
            else:
                text_before_cursor = text_before_cursor[: -len(word_before_cursor)]
                parsed = sqlparse.parse(text_before_cursor)
                self.identifier = parse_partial_identifier(word_before_cursor)
        else:
            parsed = sqlparse.parse(text_before_cursor)

        full_text, text_before_cursor, parsed = _split_multiple_statements(
            full_text, text_before_cursor, parsed
        )

        self.full_text = full_text
        self.text_before_cursor = text_before_cursor
        self.parsed = parsed

        self.last_token = parsed and parsed.token_prev(len(parsed.tokens))[1] or "" 
Author: dbcli, Project: pgcli, Lines: 41, Source: sqlcompletion.py
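To illustrate the comment above about stripping the partially typed word (an illustration of my own, not pgcli code): if the fragment is left in, the last token sqlparse sees is just that fragment, which gives the completer no context.

import sqlparse

# With the half-typed word still present, the last token is the fragment.
parsed = sqlparse.parse("SELECT col_na")[0]
print(repr(parsed.tokens[-1]))

# With the fragment stripped, the remaining tokens end at the SELECT
# keyword, so context-aware completion can take over.
parsed = sqlparse.parse("SELECT ")[0]
print([tok for tok in parsed.tokens if not tok.is_whitespace])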

Example 11: _statement_from_function

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def _statement_from_function(full_text, text_before_cursor, statement):
    current_pos = len(text_before_cursor)
    body_start, body_end = _find_function_body(full_text)
    if body_start is None:
        return full_text, text_before_cursor, statement
    if not body_start <= current_pos < body_end:
        return full_text, text_before_cursor, statement
    full_text = full_text[body_start:body_end]
    text_before_cursor = text_before_cursor[body_start:]
    parsed = sqlparse.parse(text_before_cursor)
    return _split_multiple_statements(full_text, text_before_cursor, parsed) 
Author: dbcli, Project: pgcli, Lines: 13, Source: sqlcompletion.py

Example 12: extract_column_names

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def extract_column_names(sql):
    p = parse(sql)[0]
    return _extract_column_names(p) 
Author: dbcli, Project: pgcli, Lines: 5, Source: test_ctes.py

Example 13: test_token_str_pos

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def test_token_str_pos():
    sql = "SELECT * FROM xxx"
    p = parse(sql)[0]
    idx = p.token_index(p.tokens[-1])
    assert token_start_pos(p.tokens, idx) == len("SELECT * FROM ")

    sql = "SELECT * FROM \nxxx"
    p = parse(sql)[0]
    idx = p.token_index(p.tokens[-1])
    assert token_start_pos(p.tokens, idx) == len("SELECT * FROM \n") 
Author: dbcli, Project: pgcli, Lines: 12, Source: test_ctes.py

Example 14: sql_tokenize

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def sql_tokenize(string):
    """ Tokenizes a SQL statement into tokens.

    Inputs:
       string: string to tokenize.

    Outputs:
       a list of tokens.
    """
    tokens = []
    statements = sqlparse.parse(string)

    # SQLparse gives you a list of statements.
    for statement in statements:
        # Flatten the tokens in each statement and add to the tokens list.
        flat_tokens = sqlparse.sql.TokenList(statement.tokens).flatten()
        for token in flat_tokens:
            strip_token = str(token).strip()
            if len(strip_token) > 0:
                tokens.append(strip_token)

    newtokens = []
    keep = True
    for i, token in enumerate(tokens):
        if token == ".":
            newtoken = newtokens[-1] + "." + tokens[i + 1]
            newtokens = newtokens[:-1] + [newtoken]
            keep = False
        elif keep:
            newtokens.append(token)
        else:
            keep = True

    return newtokens 
Author: lil-lab, Project: atis, Lines: 36, Source: tokenizers.py
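A usage sketch with a made-up query, showing the "."-merge pass rejoining a qualified column name (assuming sqlparse splits it into 't', '.', 'name'):

# Using sql_tokenize from the example above:
print(sql_tokenize("SELECT t.name FROM table_a AS t"))
# expected: ['SELECT', 't.name', 'FROM', 'table_a', 'AS', 't']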

Example 15: collect_mysql_metrics

# Required import: import sqlparse [as alias]
# Or: from sqlparse import parse [as alias]
def collect_mysql_metrics(context, trace, instance, args):
    connection = instance.connection_proxy

    db = connection.extract_db
    hostname = connection.extract_hostname
    port = connection.extract_port

    command, table = None, None

    query = sqlparse.parse(args[0])
    if query:
        query = query[0]
        command = query.get_type()
        table = query.get_name()

    request = Request(
        command=ensure_utf8(command),
        key=None,
        hostname=ensure_utf8(hostname),
        port=ensure_utf8(port),
        connectionName=None,
        db=ensure_utf8(db),
        table=ensure_utf8(table),
    )
    request = request._asdict()
    context.iopipe.mark.db_trace(trace, "mysql", request) 
Author: iopipe, Project: iopipe-python, Lines: 28, Source: auto_db.py
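For reference, a minimal sketch (my own sample queries) of the two sqlparse calls the collector above relies on: get_type() classifies the statement by its leading keyword, and get_name() is what the example uses to recover the table name.

import sqlparse

q = sqlparse.parse("SELECT * FROM users WHERE id = %s")[0]
print(q.get_type())   # 'SELECT'

q = sqlparse.parse("INSERT INTO events (name) VALUES (%s)")[0]
print(q.get_type())   # 'INSERT'
# q.get_name() is used in the example above to pull out the table name.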


Note: The sqlparse.parse method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not republish without permission.