本文整理汇总了Python中sqlalchemy.pool.QueuePool方法的典型用法代码示例。如果您正苦于以下问题:Python pool.QueuePool方法的具体用法?Python pool.QueuePool怎么用?Python pool.QueuePool使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块sqlalchemy.pool的用法示例。
在下文中一共展示了pool.QueuePool方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def __init__(self, sqluri, node_url, **kw):
    """Set up the SQL backend: parse the URI, build engine keyword
    arguments, create the engine, and ensure the users table exists."""
    self.sqluri = sqluri
    self.node_url = node_url
    parsed = urlparse(sqluri)
    self.driver = parsed.scheme.lower()
    sqlkw = {
        "logging_name": "syncserver",
        "connect_args": {},
        "poolclass": QueuePool,
        "pool_reset_on_return": True,
    }
    if self.driver == "sqlite":
        # Sharing sqlite connections between threads is safe here;
        # the pool serializes access and prevents races.
        sqlkw["connect_args"]["check_same_thread"] = False
        # An in-memory database lives entirely on one connection, so
        # pin the pool to a single connection shared by all threads.
        if parsed.path.lower() in ("/", "/:memory:"):
            sqlkw["pool_size"] = 1
            sqlkw["max_overflow"] = 0
    if "mysql" in self.driver:
        # Recycle connections so the server's idle timeout never
        # hands us a dead one.
        sqlkw["pool_recycle"] = kw.get("pool_recycle", 3600)
    self._engine = create_engine(sqluri, **sqlkw)
    users.create(self._engine, checkfirst=True)
示例2: test_no_connect_on_recreate
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def test_no_connect_on_recreate(self):
    """recreate() must never open a connection on its own, even right
    after dispose(), for every pool implementation."""

    def creator():
        raise Exception("no creates allowed")

    pool_classes = (
        pool.SingletonThreadPool,
        pool.StaticPool,
        pool.QueuePool,
        pool.NullPool,
        pool.AssertionPool,
    )
    for cls in pool_classes:
        # With a creator that always raises, recreate() succeeding
        # proves no connection was attempted.
        p = cls(creator=creator)
        p.dispose()
        clone = p.recreate()
        assert clone.__class__ is cls

        # Same check after one real connect/close cycle: once the
        # creator starts failing, dispose()/recreate() must still work.
        mock_dbapi = MockDBAPI()
        p = cls(creator=mock_dbapi.connect)
        conn = p.connect()
        conn.close()
        mock_dbapi.connect.side_effect = Exception("error!")
        p.dispose()
        p.recreate()
示例3: test_overflow_reset_on_failed_connect
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def test_overflow_reset_on_failed_connect(self):
    """A failed overflow connect must not leak the overflow slot:
    ``_overflow`` stays where it was before the failing attempt."""
    dbapi = Mock()

    def failing_dbapi():
        time.sleep(2)
        raise Exception("connection failed")

    creator = dbapi.connect

    # ``create`` looks up ``creator`` late (at call time), so rebinding
    # the name below swaps the pool's connection factory mid-test.
    def create():
        return creator()

    p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
    c1 = self._with_teardown(p.connect())  # noqa
    c2 = self._with_teardown(p.connect())  # noqa
    c3 = self._with_teardown(p.connect())  # noqa
    # Three checkouts against pool_size=2 -> exactly one overflow.
    eq_(p._overflow, 1)
    creator = failing_dbapi
    assert_raises(Exception, p.connect)
    # The failed connect must have released its overflow slot.
    eq_(p._overflow, 1)
示例4: _assert_cleanup_on_pooled_reconnect
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
    """Assert that a checkout which fails mid-reconnect leaves no
    stale checkout or overflow accounting behind."""
    # p is QueuePool with size=1, max_overflow=2,
    # and one connection in the pool that will need to
    # reconnect when next used (either due to recycle or invalidate)
    with self._no_wr_finalize():
        eq_(p.checkedout(), 0)
        eq_(p._overflow, 0)
        # Make the (mock) database refuse connections, so the
        # reconnect attempt on next checkout raises.
        dbapi.shutdown(True)
        assert_raises_context_ok(Exception, p.connect)
        # The failed checkout must not count as checked out or
        # as overflow.
        eq_(p._overflow, 0)
        eq_(p.checkedout(), 0)  # and not 1
        dbapi.shutdown(False)
        c1 = self._with_teardown(p.connect())  # noqa
        assert p._pool.empty()  # poolsize is one, so we're empty OK
        c2 = self._with_teardown(p.connect())  # noqa
        eq_(p._overflow, 1)  # and not 2
        # this hangs if p._overflow is 2
        c3 = self._with_teardown(p.connect())
        c3.close()
示例5: _pool_fixture
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def _pool_fixture(self, pre_ping, pool_kw=None):
    """Build a QueuePool wired to the mock DBAPI, with a postgresql
    dialect whose disconnect detection recognizes MockDisconnect."""
    dialect = url.make_url(
        "postgresql://foo:bar@localhost/test"
    ).get_dialect()()
    dialect.dbapi = self.dbapi
    extra_kw = pool_kw or {}
    _pool = pool.QueuePool(
        creator=lambda: self.dbapi.connect("foo.db"),
        pre_ping=pre_ping,
        dialect=dialect,
        **extra_kw
    )

    def _is_disconnect(e, conn, cursor):
        return isinstance(e, MockDisconnect)

    dialect.is_disconnect = _is_disconnect
    return _pool
示例6: create_database_engine
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def create_database_engine(user, password, database, host, port):
    """Create a database engine"""
    driver = 'mysql+pymysql'
    url = URL(driver, user, password, host, port, database,
              query={'charset': 'utf8mb4'})
    # Generic parameters for the engine.
    #
    # The SSL parameter needs a non-empty dict to be activated in
    # pymysql, so a fake 'activate' key is passed; the library never
    # actually reads it.
    engine_params = {
        'poolclass': QueuePool,
        'pool_size': 25,
        'pool_pre_ping': True,
        'echo': False,
        'connect_args': {'ssl': {'activate': True}},
    }
    engine = create_engine(url, **engine_params)
    try:
        # Probe the connection once to find out whether SSL works.
        engine.connect().close()
    except InternalError:
        # SSL handshake rejected — retry with a non-SSL connection.
        engine_params['connect_args'].pop('ssl')
        engine = create_engine(url, **engine_params)
        engine.connect().close()
    return engine
示例7: setUp
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def setUp(self):
    """Build the config fixture and a throwaway sqlite database file
    seeded with a single ``user`` row."""
    import os
    self.test_db_file = "/tmp/gftest.db"
    # Remove any stale database left over from a previous (crashed)
    # run, otherwise "create table user" below raises
    # "table user already exists".
    if os.path.exists(self.test_db_file):
        os.remove(self.test_db_file)
    connection_url = "sqlite:///{}".format(self.test_db_file)
    self.config = Config({
        "db_test": {
            "connect_url": "sqlite:///:memory:",
            "pool_policy": "test"
        },
        "db_test2": {
            "connect_url": connection_url
        },
        "dbpool_test": {
            "poolclass": "QueuePool",
            "pool_size": 10,
            "pool_recycle": 3600,
            "pool_timeout": 20
        }
    })
    conn = sqlite3.connect(self.test_db_file)
    try:
        create_table_sql = """
        create table user (
        id integer primary key,
        name varchar(10) unique,
        grade int not null,
        description text not null
        )
        """
        conn.execute(create_table_sql)
        conn.execute((
            "insert into user (id, name, grade, description) values "
            "(1, 'SamChi', 1, 'I am SamChi')"
        ))
        conn.commit()
    finally:
        # Always close the raw connection so the db file isn't held
        # open by the fixture (leaks block file removal on Windows).
        conn.close()
示例8: test_init_all
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def test_init_all(self):
    """init_all is idempotent on one manager; engines come up with the
    configured pool settings and can actually run queries."""
    engine_manager = EngineManager()
    # Repeatedly initializing the same manager object must be harmless.
    for _ in xrange(0, 100):
        engine_manager.validate_config(self.config)
        engine_manager.init_all(self.config)

    test_engine = engine_manager.engine("test")
    self.assertIsNotNone(test_engine)
    # Pool-related parameters from the config must be applied.
    engine_pool = test_engine.engine.pool
    self.assertIsInstance(engine_pool, QueuePool)
    self.assertEquals(engine_pool.size(), 10)
    self.assertEquals(engine_pool._recycle, 3600)
    self.assertEquals(engine_pool._timeout, 20)
    # The engine is usable for queries.
    connection = test_engine.engine.connect()
    result = connection.execute("select 1 + 1")
    self.assertEquals(tuple(result)[0][0], 2)

    test_engine2 = engine_manager.engine("test2")
    self.assertIsNotNone(test_engine2)
    connection = test_engine2.engine.connect()
    result = connection.execute("select * from user")
    self.assertEquals(tuple(result)[0],
                      (1, "SamChi", 1, "I am SamChi"))
示例9: test_init_poolclass_set
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def test_init_poolclass_set(self, Database, request):
    """An explicit ``poolclass`` kwarg must reach the engine's pool."""
    from sqlalchemy.pool import QueuePool
    db_path = '/tmp/palladium.testing-{}.sqlite'.format(os.getpid())
    # Clean up the temp database file when the test finishes.
    request.addfinalizer(lambda: os.remove(db_path))
    database = Database('sqlite:///{}'.format(db_path), poolclass=QueuePool)
    assert isinstance(database.engine.pool, QueuePool)
示例10: _init_connection_args
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def _init_connection_args(
        url, engine_args,
        max_pool_size=None, max_overflow=None, pool_timeout=None, **kw):
    """Copy the generic pool options into ``engine_args``, but only for
    dialects whose default pool class is queue-based; other pool classes
    do not accept these keywords."""
    pool_class = url.get_dialect().get_pool_class(url)
    if not issubclass(pool_class, pool.QueuePool):
        return
    options = (
        ('pool_size', max_pool_size),
        ('max_overflow', max_overflow),
        ('pool_timeout', pool_timeout),
    )
    for key, value in options:
        if value is not None:
            engine_args[key] = value
示例11: _queuepool_dbapi_fixture
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def _queuepool_dbapi_fixture(self, **kw):
    """Return a (mock dbapi, QueuePool built on it) pair for tests."""
    dbapi = MockDBAPI()
    queue_pool = pool.QueuePool(
        creator=lambda: dbapi.connect("foo.db"),
        **kw
    )
    return dbapi, queue_pool
示例12: test_queue_pool
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def test_queue_pool(self):
    """Run the shared reset-event scenario against QueuePool."""
    expected_events = ["R", "CL", "R"]
    self._do_test(pool.QueuePool, expected_events)
示例13: test_listen_targets_per_subclass
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def test_listen_targets_per_subclass(self):
    """test that listen() called on a subclass remains specific to
    that subclass."""
    canary = []

    def make_listener(tag):
        # Each listener records its tag so call order can be asserted.
        def listener(*args):
            canary.append(tag)
        return listener

    listen_one = make_listener("listen_one")
    listen_two = make_listener("listen_two")
    listen_three = make_listener("listen_three")

    event.listen(pool.Pool, "connect", listen_one)
    event.listen(pool.QueuePool, "connect", listen_two)
    event.listen(pool.SingletonThreadPool, "connect", listen_three)

    p1 = pool.QueuePool(creator=MockDBAPI().connect)
    p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)

    # p1 (QueuePool) hears the base Pool and QueuePool listeners only.
    assert listen_one in p1.dispatch.connect
    assert listen_two in p1.dispatch.connect
    assert listen_three not in p1.dispatch.connect
    # p2 (SingletonThreadPool) hears Pool and its own listener only.
    assert listen_one in p2.dispatch.connect
    assert listen_two not in p2.dispatch.connect
    assert listen_three in p2.dispatch.connect

    p1.connect()
    eq_(canary, ["listen_one", "listen_two"])
    p2.connect()
    eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
示例14: _test_overflow
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def _test_overflow(self, thread_count, max_overflow):
    """Hammer a size-3 QueuePool from ``thread_count`` threads and
    verify the observed overflow never exceeds ``max_overflow``."""
    reaper = testing.engines.ConnectionKiller()
    dbapi = MockDBAPI()
    mutex = threading.Lock()

    # Deliberately slow, serialized creator so threads pile up waiting
    # and overflow pressure actually builds.
    def creator():
        time.sleep(0.05)
        with mutex:
            return dbapi.connect()

    p = pool.QueuePool(
        creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
    )
    reaper.add_pool(p)
    peaks = []  # overflow values sampled while holding a connection

    def whammy():
        for i in range(10):
            try:
                con = p.connect()
                time.sleep(0.005)
                peaks.append(p.overflow())
                con.close()
                del con
            except tsa.exc.TimeoutError:
                # Checkout timeouts are expected under contention.
                pass

    threads = []
    for i in range(thread_count):
        th = threading.Thread(target=whammy)
        th.start()
        threads.append(th)
    for th in threads:
        th.join(join_timeout)

    # No sampled overflow may exceed the configured maximum.
    self.assert_(max(peaks) <= max_overflow)
    reaper.assert_all_closed()
示例15: test_notify_waiters
# 需要导入模块: from sqlalchemy import pool [as 别名]
# 或者: from sqlalchemy.pool import QueuePool [as 别名]
def test_notify_waiters(self):
    """Invalidating the sole pooled connection must wake every thread
    blocked in connect() (timeout=None means they would wait forever)."""
    dbapi = MockDBAPI()
    canary = []

    # Records a 1 each time a real connection is created.
    def creator():
        canary.append(1)
        return dbapi.connect()

    # Single-slot pool, no overflow: at most one live connection.
    p1 = pool.QueuePool(
        creator=creator, pool_size=1, timeout=None, max_overflow=0
    )

    # Records a 2 once the waiter actually obtains a connection.
    def waiter(p):
        conn = p.connect()
        canary.append(2)
        time.sleep(0.5)
        conn.close()

    c1 = p1.connect()
    threads = []
    for i in range(5):
        t = threading.Thread(target=waiter, args=(p1,))
        t.start()
        threads.append(t)
    time.sleep(0.5)
    # Only the initial connect has run; all five waiters are blocked.
    eq_(canary, [1])
    # this also calls invalidate()
    # on c1
    p1._invalidate(c1)
    for t in threads:
        t.join(join_timeout)
    # One replacement connection was created (second 1), then each of
    # the five waiters got its turn (five 2s).
    eq_(canary, [1, 1, 2, 2, 2, 2, 2])