本文整理汇总了Python中taskflow.persistence.backends.fetch函数的典型用法代码示例。如果您正苦于以下问题:Python fetch函数的具体用法?Python fetch怎么用?Python fetch使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了fetch函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_backend
@contextlib.contextmanager
def get_backend(backend_uri=None):
    """Yield a persistence backend for the given URI (context manager).

    If no ``backend_uri`` is given (or the requested backend cannot be
    found) this falls back to a file backend rooted in a fresh temporary
    directory, which is removed again on exit.

    :param backend_uri: optional backend URI string; defaults to a
        file backend in a temporary directory
    """
    # NOTE: the ``@contextlib.contextmanager`` decorator is required for the
    # yield + finally cleanup pattern below to work with ``with`` statements.
    tmp_dir = None
    if not backend_uri:
        # Fixed: the condition was duplicated (two identical nested
        # ``if not backend_uri`` checks); one check is sufficient.
        tmp_dir = tempfile.mkdtemp()
        backend_uri = "file:///%s" % tmp_dir
    try:
        backend = backends.fetch(_make_conf(backend_uri))
    except exceptions.NotFound as e:
        # Fallback to one that will work if the provided backend is not found.
        if not tmp_dir:
            tmp_dir = tempfile.mkdtemp()
            backend_uri = "file:///%s" % tmp_dir
            LOG.exception("Falling back to file backend using temporary"
                          " directory located at: %s", tmp_dir)
            backend = backends.fetch(_make_conf(backend_uri))
        else:
            raise e
    try:
        # Ensure schema upgraded before we continue working.
        with contextlib.closing(backend.get_connection()) as conn:
            conn.upgrade()
        yield backend
    finally:
        # Make sure to cleanup the temporary path if one was created for us.
        if tmp_dir:
            rm_path(tmp_dir)
示例2: save_factory_details
def save_factory_details(flow_detail, flow_factory, factory_args, factory_kwargs, backend=None):
    """Record a flow factory's re-importable attributes on a flow detail.

    The factory name plus its positional and keyword arguments are merged
    into the metadata of ``flow_detail`` so the flow can later be rebuilt.
    When a backend (or backend configuration dict) is supplied the updated
    flow detail is also persisted through it.

    :param flow_detail: FlowDetail that holds state of the flow to load
    :param flow_factory: function or string: function that creates the flow
    :param factory_args: list or tuple of factory positional arguments
    :param factory_kwargs: dict of factory keyword arguments
    :param backend: storage backend to use or configuration
    """
    factory_name, _factory_fun = _fetch_validate_factory(flow_factory)
    factory_data = {
        "factory": {
            "name": factory_name,
            "args": factory_args or [],
            "kwargs": factory_kwargs or {},
        },
    }
    if flow_detail.meta:
        flow_detail.meta.update(factory_data)
    else:
        flow_detail.meta = factory_data
    if backend is not None:
        # Allow a plain configuration dict to stand in for a backend.
        if isinstance(backend, dict):
            backend = p_backends.fetch(backend)
        with contextlib.closing(backend.get_connection()) as conn:
            conn.update_flow_details(flow_detail)
示例3: _create_engine
def _create_engine(**kwargs):
    """Build a worker-based engine around a one-task test flow.

    Any keyword arguments are passed through (copied) as engine options.
    """
    test_flow = lf.Flow('test-flow').add(utils.DummyTask())
    # An in-memory backend is enough for test purposes.
    storage_backend = backends.fetch({'connection': 'memory'})
    detail = pu.create_flow_detail(test_flow, backend=storage_backend)
    return engine.WorkerBasedActionEngine(test_flow, detail,
                                          storage_backend, kwargs.copy())
示例4: load
def load(flow, store=None, flow_detail=None, book=None,
         engine_conf=None, backend=None,
         namespace=ENGINES_NAMESPACE, engine=ENGINE_DEFAULT, **kwargs):
    """Load a flow into an engine.

    Creates and prepares an engine to run the provided flow; afterwards the
    caller only needs to invoke the engines
    :py:meth:`~taskflow.engines.base.Engine.run` method.

    The ``engine`` parameter selects the engine type: either a plain type
    name, or a URI whose scheme names the type and whose host, port, and
    query parameters carry further options.

    The ``backend`` parameter selects storage: either a backend object, or
    a dictionary handed to :py:func:`~taskflow.persistence.backends.fetch`
    to obtain one.

    :param flow: flow to load
    :param store: dict -- data to put to storage to satisfy flow requirements
    :param flow_detail: FlowDetail that holds the state of the flow (if one is
        not provided then one will be created for you in the provided backend)
    :param book: LogBook to create flow detail in if flow_detail is None
    :param engine_conf: engine type or URI and options (**deprecated**)
    :param backend: storage backend to use or configuration that defines it
    :param namespace: driver namespace for stevedore (or empty for default)
    :param engine: string engine type or URI string with scheme that contains
                   the engine type and any URI specific components that will
                   become part of the engine options.
    :param kwargs: arbitrary keyword arguments passed as options (merged with
                   any extracted ``engine`` and ``engine_conf`` options),
                   typically used for any engine specific options that do not
                   fit as any of the existing arguments.
    :returns: engine
    """
    kind, options = _extract_engine(engine_conf=engine_conf,
                                    engine=engine, **kwargs)
    if isinstance(backend, dict):
        # A configuration dict may stand in for a real backend.
        backend = p_backends.fetch(backend)
    if flow_detail is None:
        flow_detail = p_utils.create_flow_detail(flow, book=book,
                                                 backend=backend)
    LOG.debug('Looking for %r engine driver in %r', kind, namespace)
    try:
        mgr = stevedore.driver.DriverManager(
            namespace, kind,
            invoke_on_load=True,
            invoke_args=(flow, flow_detail, backend, options))
        engine = mgr.driver
    except RuntimeError as e:
        # Stevedore raises RuntimeError when no matching driver exists.
        raise exc.NotFound("Could not find engine '%s'" % (kind), e)
    else:
        if store:
            engine.storage.inject(store)
        return engine
示例5: upgrade_backend
def upgrade_backend(self, persistence_backend):
    """Fetch the persistence backend and upgrade its schema.

    :param persistence_backend: backend configuration passed to
        :py:func:`taskflow.persistence.backends.fetch`
    :raises: ``exceptions.NotFound`` if no matching backend exists
    """
    # Fixed: the previous ``except NotFound as e: raise e`` was a useless
    # catch-and-reraise; letting the exception propagate is equivalent
    # and keeps the original traceback cleaner.
    backend = backends.fetch(persistence_backend)
    with contextlib.closing(backend.get_connection()) as conn:
        conn.upgrade()
示例6: main
def main():
    """Run a reviewer and a conductor together for a fixed duration."""
    # A single shared in-memory backend lets both entities exchange data.
    saver = persistence.fetch({'connection': 'memory'})
    with contextlib.closing(saver.get_connection()) as conn:
        # Guarantee any backend setup/data directories/schema upgrades
        # happen before the backend is actually used.
        conn.upgrade()
    client_one = fake_client.FakeClient()
    # Share the same client storage location so the correct zookeeper
    # features work across both fake clients.
    client_two = fake_client.FakeClient(storage=client_one.storage)
    entities = [
        generate_reviewer(client_one, saver),
        generate_conductor(client_two, saver),
    ]
    for thread, _stopper in entities:
        thread.start()
    try:
        # Let everything run for RUN_TIME seconds.
        watch = timeutils.StopWatch(duration=RUN_TIME)
        watch.start()
        while not watch.expired():
            time.sleep(0.1)
    finally:
        # Stop and join in reverse start order.
        for thread, stopper in reversed(entities):
            stopper()
            thread.join()
示例7: load_from_factory
def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None,
                      store=None, book=None, engine_conf=None, backend=None,
                      namespace=ENGINES_NAMESPACE, engine=ENGINE_DEFAULT,
                      **kwargs):
    """Load a flow produced by a factory function into an engine.

    The factory (or its dotted name) is resolved and invoked to create the
    flow, the flow is loaded via :func:`load() <load>`, and the factory's
    fully qualified name is stored in the flow metadata so the flow can be
    resumed later.

    :param flow_factory: function or string: function that creates the flow
    :param factory_args: list or tuple of factory positional arguments
    :param factory_kwargs: dict of factory keyword arguments

    Further arguments are interpreted as for :func:`load() <load>`.

    :returns: engine
    """
    _factory_name, factory_fun = _fetch_validate_factory(flow_factory)
    factory_args = factory_args or []
    factory_kwargs = factory_kwargs or {}
    flow = factory_fun(*factory_args, **factory_kwargs)
    if isinstance(backend, dict):
        backend = p_backends.fetch(backend)
    flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend)
    # Persist how to recreate this flow so it can be resumed later.
    save_factory_details(flow_detail, flow_factory,
                         factory_args, factory_kwargs,
                         backend=backend)
    return load(flow=flow, store=store, flow_detail=flow_detail, book=book,
                engine_conf=engine_conf, backend=backend,
                namespace=namespace, engine=engine, **kwargs)
示例8: execute_flow
def execute_flow(flow):
    """Create all necessary prerequisites like task database and thread pool
    and execute a TaskFlow flow.

    :param flow: TaskFlow flow instance
    """
    backend = backends.fetch({
        'connection': 'sqlite:///' + TASK_DATABASE_FILE,
        'isolation_level': 'SERIALIZABLE'
    })
    executor = futurist.ThreadPoolExecutor(max_workers=MAX_WORKERS)
    conn = backend.get_connection()
    logbook, flow_detail = _ensure_db_initialized(conn, flow)
    engine = engines.load(
        flow, flow_detail=flow_detail, backend=backend, book=logbook,
        engine='parallel', executor=executor)
    engine.compile()
    _workaround_reverted_reset(flow_detail)
    try:
        engine.run()
    except exceptions.WrappedFailure as wf:
        # Report every wrapped failure, with a traceback when one exists.
        for failure in wf:
            if failure.exc_info is not None:
                traceback.print_exception(*failure.exc_info)
            else:
                # Fixed: was the Python 2 statement ``print failure``,
                # which is a SyntaxError on Python 3.
                print(failure)
示例9: run_poster
def run_poster():
    """Post a single job to the job board and then exit."""
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name, JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Record in the persistence backend the unit of work we want
            # completed, plus the factory that can be called to create the
            # tasks the work unit needs done.
            lb = models.LogBook("post-from-%s" % my_name)
            fd = models.FlowDetail("song-from-%s" % my_name,
                                   uuidutils.generate_uuid())
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            engines.save_factory_details(fd, make_bottles,
                                         [HOW_MANY_BOTTLES], {},
                                         backend=persist_backend)
            # Post the job, then we are finished.
            jb = job_backend.post("song-from-%s" % my_name, book=lb)
            print("Posted: %s" % jb)
            print("Goodbye...")
示例10: test_entrypoint
def test_entrypoint(self):
    # Verify that fetching through the entrypoint machinery also works
    # (even with dialects) using the same configuration setUp() used,
    # rather than instantiating the impl_sqlalchemy SQLAlchemyBackend
    # class directly.
    with contextlib.closing(backends.fetch(self.db_conf)) as backend:
        with contextlib.closing(backend.get_connection()):
            pass
示例11: test_file_persistence_entry_point
def test_file_persistence_entry_point(self):
    # A 'file:' connection should resolve to the directory backend.
    with contextlib.closing(
            backends.fetch({'connection': 'file:',
                            'path': self.path})) as be:
        self.assertIsInstance(be, impl_dir.DirBackend)
示例12: get_backend
def get_backend():
    """Fetch a backend from the first CLI argument (default: sqlite://).

    The backend's schema is upgraded before it is returned.

    :returns: a ready-to-use persistence backend
    """
    try:
        backend_uri = sys.argv[1]
    except IndexError:
        # Fixed: narrowed from a broad ``except Exception`` — only a
        # missing argv entry is expected here, and a blanket catch could
        # hide unrelated bugs.
        backend_uri = 'sqlite://'
    backend = backends.fetch({'connection': backend_uri})
    backend.get_connection().upgrade()
    return backend
示例13: test_dir_persistence_entry_point
def test_dir_persistence_entry_point(self):
    # A 'dir:' connection should resolve to the directory backend.
    fetched = backends.fetch({'connection': 'dir:', 'path': self.path})
    self.assertIsInstance(fetched, impl_dir.DirBackend)
    fetched.close()
示例14: load
def load(flow, store=None, flow_detail=None, book=None,
         engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE):
    """Load flow into engine.

    Creates and prepares an engine to run the flow; afterwards only the
    engine's 'run()' method remains to be called.

    The 'engine_conf' parameter selects the engine: either a string naming
    the engine type, or a dictionary holding the type (under the 'engine'
    key) plus additional engine-specific configuration (for example, an
    executor for the multithreaded engine).

    The 'backend' parameter selects storage: either a backend object, or a
    dictionary handed to taskflow.persistence.backends.fetch to obtain one.

    :param flow: flow to load
    :param store: dict -- data to put to storage to satisfy flow requirements
    :param flow_detail: FlowDetail that holds the state of the flow (if one is
        not provided then one will be created for you in the provided backend)
    :param book: LogBook to create flow detail in if flow_detail is None
    :param engine_conf: engine type and configuration configuration
    :param backend: storage backend to use or configuration
    :param namespace: driver namespace for stevedore (default is fine
        if you don't know what is it)
    :returns: engine
    """
    if engine_conf is None:
        engine_conf = {'engine': 'default'}
    if isinstance(engine_conf, six.string_types):
        # NOTE(imelnikov): this allows simpler syntax.
        engine_conf = {'engine': engine_conf}
    if isinstance(backend, dict):
        backend = p_backends.fetch(backend)
    if flow_detail is None:
        flow_detail = p_utils.create_flow_detail(flow, book=book,
                                                 backend=backend)
    mgr = stevedore.driver.DriverManager(
        namespace, engine_conf['engine'],
        invoke_on_load=True,
        invoke_kwds={
            'conf': engine_conf.copy(),
            'flow': flow,
            'flow_detail': flow_detail,
            'backend': backend,
        })
    loaded_engine = mgr.driver
    if store:
        loaded_engine.storage.inject(store)
    return loaded_engine
示例15: run_conductor
def run_conductor(only_run_once=False):
    """Consume and run jobs until stopped (ctrl-c or another kill signal)."""
    event_watches = {}

    # Triggered by the conductor's various engine activities; nice for
    # seeing the timing of each segment (useful when debugging, watching,
    # or figuring out where to optimize).
    def on_conductor_event(cond, event, details):
        print("Event '%s' has been received..." % event)
        print("Details = %s" % details)
        if event.endswith("_start"):
            stopwatch = timing.StopWatch()
            stopwatch.start()
            event_watches[event[0:-len("_start")]] = stopwatch
        if event.endswith("_end"):
            base_event = event[0:-len("_end")]
            try:
                stopwatch = event_watches.pop(base_event)
                stopwatch.stop()
                print("It took %0.3f seconds for event '%s' to finish"
                      % (stopwatch.elapsed(), base_event))
            except KeyError:
                # No matching *_start was recorded; nothing to report.
                pass
        if event == 'running_end' and only_run_once:
            cond.stop()

    print("Starting conductor with pid: %s" % ME)
    my_name = "conductor-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name, JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            cond = conductor_backends.fetch('blocking', my_name, job_backend,
                                            persistence=persist_backend)
            on_conductor_event = functools.partial(on_conductor_event, cond)
            cond.notifier.register(cond.notifier.ANY, on_conductor_event)
            # Run forever, and kill -9 or ctrl-c me...
            try:
                cond.run()
            finally:
                cond.stop()
                cond.wait()