本文整理汇总了Python中sqlalchemy.sql.sqltext函数的典型用法代码示例。如果您正苦于以下问题:Python sqltext函数的具体用法?Python sqltext怎么用?Python sqltext使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sqltext函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _get_bso_columns
def _get_bso_columns(table_name):
    """Return column and index definitions for a sharded BSO table.

    Produces a tuple of Column and Index objects intended for use in a
    sqlalchemy Table() definition.  The table has a composite primary
    key of (id, userid, collection).
    """
    return (
        Column("id", String(64), primary_key=True, autoincrement=False),
        Column("userid", Integer, primary_key=True, nullable=False,
               autoincrement=False),
        Column("collection", Integer, primary_key=True, nullable=False,
               autoincrement=False),
        Column("sortindex", Integer),
        Column("version", BigInteger),
        Column("timestamp", BigInteger),
        Column("payload", Text, nullable=False, server_default=""),
        Column("payload_size", Integer, nullable=False,
               server_default=sqltext("0")),
        # Items default to expiring after MAX_TTL seconds.
        Column("ttl", Integer, server_default=sqltext(str(MAX_TTL))),
        # Declare indexes.
        # We need to include the tablename in the index name due to sharding,
        # because index names in sqlite are global, not per-table.
        # Index on "ttl" for easy pruning of expired items.
        Index("%s_ttl_idx" % (table_name,), "ttl"),
        # Index on "version" for easy filtering by older/newer.
        Index("%s_usr_col_ver_idx" % (table_name,),
              "userid", "collection", "version"),
        # There is intentionally no index on "sortindex".
        # Clients almost always filter on "version" using the above index,
        # and cannot take advantage of a separate index for sorting.
    )
示例2: _get_bso_columns
def _get_bso_columns(table_name):
    """Return column and index definitions for a sharded BSO table.

    Produces a tuple of Column and Index objects intended for use in a
    sqlalchemy Table() definition.  The table has a composite primary
    key of (userid, collection, id).
    """
    return (
        Column("userid", Integer, primary_key=True, nullable=False,
               autoincrement=False),
        Column("collection", Integer, primary_key=True, nullable=False,
               autoincrement=False),
        Column("id", String(64), primary_key=True, autoincrement=False),
        Column("sortindex", Integer),
        Column("modified", BigInteger, nullable=False),
        # I'd like to default this to the empty string, but
        # MySQL doesn't let you set a default on a TEXT column.
        Column("payload", PAYLOAD_TYPE, nullable=False),
        Column("payload_size", Integer, nullable=False,
               server_default=sqltext("0")),
        Column("ttl", Integer, nullable=False,
               server_default=sqltext(str(MAX_TTL))),
        # Declare indexes.
        # We need to include the tablename in the index name due to sharding,
        # because index names in sqlite are global, not per-table.
        # Index on "ttl" for easy pruning of expired items.
        Index("%s_ttl_idx" % (table_name,), "ttl"),
        # Index on "modified" for easy filtering by timestamp.
        Index("%s_usr_col_mod_idx" % (table_name,),
              "userid", "collection", "modified"),
        # There is intentionally no index on "sortindex".
        # Clients almost always filter on "modified" using the above index,
        # and cannot take advantage of a separate index for sorting.
    )
示例3: execute
def execute(self, query, params=None, annotations=None):
    """Execute a database query, with retry and exception-catching logic.

    This method executes the given query against the database, lazily
    establishing an actual live connection as required. It catches
    operational database errors and normalizes them into a BackendError
    exception.

    :param query: the query to execute; rendered to a string via
                  self._render_query() before execution.
    :param params: optional dict of bind parameters for the query.
    :param annotations: optional dict of annotations added to the
                        rendered query (e.g. a "retry" marker).
    """
    # NOTE: mutable-default pitfall avoided by using None sentinels.
    if params is None:
        params = {}
    if annotations is None:
        annotations = {}
    # If there is no active connection, create a fresh one.
    # This will affect the control flow below.
    connection = self._connection
    session_was_active = True
    if connection is None:
        connection = self._connector.engine.connect()
        transaction = connection.begin()
        session_was_active = False
    try:
        # It's possible for the backend to fail in a way that the query
        # can be retried, e.g. the server timed out the connection we
        # got from the pool. If so then we can retry the query with a
        # new connection, but only if the failed connection was never
        # successfully used as part of this transaction.
        try:
            query_str = self._render_query(query, params, annotations)
            return connection.execute(sqltext(query_str), **params)
        except DBAPIError, exc:
            if not is_retryable_db_error(self._connector.engine, exc):
                raise
            # A connection already in use this session may hold partial
            # transaction state; retrying on a new one would lose it.
            if session_was_active:
                raise
            # Don't try to close the connection if it's already dead.
            if not exc.connection_invalidated:
                transaction.rollback()
                connection.close()
            connection = self._connector.engine.connect()
            transaction = connection.begin()
            # Mark the retried query so it can be identified in logs.
            annotations["retry"] = "1"
            query_str = self._render_query(query, params, annotations)
            return connection.execute(sqltext(query_str), **params)
    finally:
        # Now that the underlying connection has been used, remember it
        # so that all subsequent queries are part of the same transaction.
        if not session_was_active:
            self._connection = connection
            self._transaction = transaction
示例4: visit_conditional_insert
def visit_conditional_insert(element, compiler, **kwargs):
    """Compile a conditional-insert construct into raw SQL.

    Renders an ``INSERT INTO ... SELECT ... WHERE NOT EXISTS (...)``
    statement so that the row is only inserted when no existing row
    matches element.unique_condition.  Returns the SQL text.
    """
    # magic copied from sqlalchemy.sql.compiler.SQLCompiler.visit_insert
    compiler.isinsert = True
    try:
        # pylint: disable=E0611
        # Newer SQLAlchemy moved column/param handling into sql.crud.
        from sqlalchemy.sql import crud
        colparams = crud._get_crud_params(compiler, element)
    except ImportError:  # SQLAlchemy <= 1.0
        colparams = compiler._get_colparams(element)
    text = 'INSERT INTO %s' % compiler.process(element.table, asfrom=True)
    text += ' (%s)\n' % ', '.join(compiler.preparer.format_column(c[0])
                                  for c in colparams)
    text += 'SELECT %s\n' % ', '.join(c[1] for c in colparams)
    text += compiler.default_from()
    # default_from() returns '' for MySQL but that's wrong, MySQL requires
    # FROM DUAL if there is a following WHERE clause.
    if isinstance(compiler.dialect, MySQLDialect):
        text += 'FROM DUAL\n'
    # We need FOR UPDATE in the inner SELECT for MySQL, to ensure we acquire an
    # exclusive lock immediately, instead of acquiring a shared lock and then
    # subsequently upgrading it to an exclusive lock, which is subject to
    # deadlocks if another transaction is doing the same thing.
    nonexistence_clause = not_(exists(Select(
        columns=[sqltext('1')], from_obj=[element.table],
        whereclause=element.unique_condition, for_update=True)))
    text += 'WHERE ' + compiler.process(nonexistence_clause)
    return text
示例5: add_service
def add_service(self, service, pattern, **kwds):
    """Add definition for a new service.

    Inserts a (service, pattern) row into the services table and
    returns the id of the newly-created row.
    """
    res = self._safe_execute(sqltext("""
        insert into services (service, pattern)
        values (:servicename, :pattern)
    """), servicename=service, pattern=pattern, **kwds)
    # Read the generated row id *before* closing the result; accessing
    # lastrowid on a closed result touches a closed cursor on some
    # drivers/SQLAlchemy versions.
    rowid = res.lastrowid
    res.close()
    return rowid
示例6: _exec_with_cleanup
def _exec_with_cleanup(self, connection, query_str, **params):
    """Execution wrapper that kills queries if it is interrupted.

    This is a wrapper around connection.execute() that will clean up
    any running query if the execution is interrupted by a control-flow
    exception such as KeyboardInterrupt or gevent.Timeout.

    The cleanup currently works only for the PyMySQL driver. Other
    drivers will still execute fine, they just won't get the cleanup.

    :param connection: the live connection to execute against.
    :param query_str: the raw SQL string (checked against
                      SAFE_TO_KILL_QUERY before any KILL is issued).
    :param params: bind parameters for the query.
    """
    try:
        return connection.execute(sqltext(query_str), **params)
    except Exception:
        # Normal exceptions are passed straight through.
        raise
    except BaseException:
        # Control-flow exceptions trigger the cleanup logic.
        exc, val, tb = sys.exc_info()
        logger.warn("query was interrupted by %s", val)
        # Only cleanup SELECT, INSERT or UPDATE statements.
        # There are concerns that rolling back DELETEs is too costly.
        if not SAFE_TO_KILL_QUERY.match(query_str):
            msg = " refusing to kill unsafe query: %s"
            logger.warn(msg, query_str[:100])
            raise
        try:
            # The KILL command is specific to MySQL, and this method of
            # getting the threadid is specific to the PyMySQL driver.
            # Other drivers will cause an AttributeError, failing through
            # to the "finally" clause at the end of this block.
            thread_id = connection.connection.server_thread_id[0]
            logger.warn(" killing connection %d", thread_id)
            cleanup_query = "KILL %d" % (thread_id,)
            # Use a freshly-created connection so that we don't block
            # waiting for something from the pool. Unfortunately this
            # requires use of a private API and raw cursor access.
            cleanup_conn = self._connector.engine.pool._create_connection()
            try:
                cleanup_cursor = cleanup_conn.connection.cursor()
                try:
                    cleanup_cursor.execute(cleanup_query)
                except Exception:
                    msg = " failed to kill %d"
                    logger.exception(msg, thread_id)
                    raise
                finally:
                    cleanup_cursor.close()
                msg = " successfully killed %d"
                logger.warn(msg, thread_id)
            finally:
                cleanup_conn.close()
        finally:
            try:
                # Don't return this connection to the pool.
                connection.invalidate()
            finally:
                # Always re-raise the original error (py2 3-arg raise
                # preserves the original traceback).
                raise exc, val, tb
示例7: test_default_node_available_capacity
def test_default_node_available_capacity(self):
    """A freshly-added node should only release part of its capacity."""
    node_url = "https://phx13"
    self.backend.add_node("sync-1.0", node_url, capacity=100)
    # Only capacity_release_rate of the raw capacity is made available,
    # rounded up to a whole number of slots.
    expected = int(math.ceil(100 * self.backend.capacity_release_rate))
    result = self.backend._safe_execute(
        sqltext("SELECT * FROM nodes WHERE node=:node"), node=node_url)
    record = result.fetchone()
    result.close()
    self.assertEqual(record["available"], expected)
示例8: remove_node
def remove_node(self, service, node, timestamp=None):
    """Remove definition for a node.

    Deletes the node's row for the given service and then clears any
    user assignments pointing at it.
    """
    query = sqltext("""
        delete from nodes
        where service=:service and node=:node
    """)
    result = self._safe_execute(query, service=service, node=node)
    result.close()
    self.unassign_node(service, node, timestamp)
示例9: remove_node
def remove_node(self, service, node, timestamp=None):
    """Remove definition for a node.

    Resolves the node's numeric id, deletes its row, then clears any
    user assignments pointing at it.
    """
    nodeid = self.get_node_id(service, node)
    query = sqltext("""
        delete from nodes where id=:nodeid
    """)
    result = self._safe_execute(query, service=service, nodeid=nodeid)
    result.close()
    self.unassign_node(service, node, timestamp, nodeid=nodeid)
示例10: unassign_node
def unassign_node(self, service, node, timestamp=None):
    """Clear any assignments to a node.

    Marks all user rows for the node as replaced at the given (or
    current) timestamp.
    """
    # Default to "now" when the caller gives no explicit timestamp.
    timestamp = get_timestamp() if timestamp is None else timestamp
    query = sqltext("""
        update users
        set replaced_at=:timestamp
        where service=:service and node=:node
    """)
    result = self._safe_execute(query, service=service, node=node,
                                timestamp=timestamp)
    result.close()
示例11: get_node_id
def get_node_id(self, service, node):
    """Get numeric id for a node.

    Looks up the node's row by (service, node) and returns its id.

    :raises ValueError: if no such node exists.
    """
    query = sqltext("""
        select id from nodes
        where service=:service and node=:node
    """)
    result = self._safe_execute(query, service=service, node=node)
    record = result.fetchone()
    result.close()
    if record is None:
        raise ValueError("unknown node: " + node)
    return record[0]
示例12: unassign_node
def unassign_node(self, service, node, timestamp=None, nodeid=None):
    """Clear any assignments to a node.

    Marks all user rows for the node as replaced at the given (or
    current) timestamp.  The node's numeric id is looked up unless
    supplied by the caller.
    """
    timestamp = get_timestamp() if timestamp is None else timestamp
    nodeid = self.get_node_id(service, node) if nodeid is None else nodeid
    query = sqltext("""
        update users
        set replaced_at=:timestamp
        where nodeid=:nodeid
    """)
    result = self._safe_execute(query, nodeid=nodeid, timestamp=timestamp)
    result.close()
示例13: add_node
def add_node(self, service, node, capacity, **kwds):
    """Add definition for a new node.

    Inserts a row for the node; available defaults to the full
    capacity, and load/downed/backoff default to zero unless given
    in keyword arguments.
    """
    params = {
        "service": service,
        "node": node,
        "capacity": capacity,
        "available": kwds.get('available', capacity),
        "current_load": kwds.get('current_load', 0),
        "downed": kwds.get('downed', 0),
        "backoff": kwds.get('backoff', 0),
    }
    result = self._safe_execute(sqltext("""
        insert into nodes (service, node, available, capacity,
                           current_load, downed, backoff)
        values (:service, :node, :available, :capacity,
                :current_load, :downed, :backoff)
    """), **params)
    result.close()
示例14: add_node
def add_node(self, service, node, capacity, **kwds):
    """Add definition for a new node.

    Inserts a row for the node.  Unless explicitly given, the amount
    of available capacity starts at a fraction of the total, governed
    by self.capacity_release_rate.
    """
    available = kwds.get('available')
    # We release only a fraction of the node's capacity to start.
    if available is None:
        available = math.ceil(capacity * self.capacity_release_rate)
    params = {
        "service": service,
        "node": node,
        "capacity": capacity,
        "available": available,
        "current_load": kwds.get('current_load', 0),
        "downed": kwds.get('downed', 0),
        "backoff": kwds.get('backoff', 0),
    }
    result = self._safe_execute(sqltext("""
        insert into nodes (service, node, available, capacity,
                           current_load, downed, backoff)
        values (:service, :node, :available, :capacity,
                :current_load, :downed, :backoff)
    """), **params)
    result.close()
示例15: sqltext
import traceback
from sqlalchemy import String, Index, Boolean
from sqlalchemy import Table, MetaData, create_engine, Column
from sqlalchemy.exc import OperationalError, TimeoutError
from sqlalchemy.pool import NullPool
from sqlalchemy.sql import text as sqltext, select, or_, and_
from addonreg import logger
# Fetch the registration record for a single (addonid, sha256) pair.
_GET = sqltext("""\
SELECT addonid, sha256, registered
FROM hashes
WHERE addonid = :addonid
AND sha256 = :sha256
""")

# Insert a new (addonid, sha256) pair with registered set to 1.
_INSERT = sqltext("""\
INSERT INTO hashes
(addonid, sha256, registered)
VALUES (:addonid, :sha256, 1)
""")

# Query prefix for bulk lookups; the caller is expected to append
# the WHERE conditions dynamically.
_MULTIPLE_GET = """\
SELECT addonid, sha256, registered
FROM hashes
WHERE
"""

# Shared MetaData instance for table definitions in this module.
metadata = MetaData()