This article collects typical usage examples of the Python attribute concurrent.futures.FIRST_EXCEPTION. If you are wondering what futures.FIRST_EXCEPTION is for, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the module the attribute belongs to, concurrent.futures.
The following presents 14 code examples of the futures.FIRST_EXCEPTION attribute, sorted by popularity by default.
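Before the project examples, here is a minimal self-contained sketch (the task names may_fail and slow_task are illustrative only, not taken from any project below) of what passing FIRST_EXCEPTION to futures.wait does:

import time
from concurrent import futures

def may_fail():
    raise ValueError("boom")

def slow_task():
    time.sleep(1.0)
    return "done"

with futures.ThreadPoolExecutor(max_workers=2) as executor:
    fs = [executor.submit(may_fail), executor.submit(slow_task)]
    # wait() returns as soon as any future finishes by raising an exception;
    # if nothing raises, FIRST_EXCEPTION behaves like ALL_COMPLETED.
    done, not_done = futures.wait(fs, return_when=futures.FIRST_EXCEPTION)
    print(len(done), len(not_done))  # typically prints "1 1" here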
Example 1: _create_and_install_waiters
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def _create_and_install_waiters(fs, return_when):
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    else:
        pending_count = sum(
                f._state not in [CANCELLED_AND_NOTIFIED, FINISHED]
                for f in fs)

        if return_when == FIRST_EXCEPTION:
            waiter = _AllCompletedWaiter(pending_count,
                                         stop_on_exception=True)
        elif return_when == ALL_COMPLETED:
            waiter = _AllCompletedWaiter(pending_count,
                                         stop_on_exception=False)
        else:
            raise ValueError("Invalid return condition: %r" % return_when)

    for f in fs:
        f._waiters.append(waiter)

    return waiter
Example 2: test_first_exception
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def test_first_exception(self):
    future1 = self.executor.submit(mul, 2, 21)
    future2 = self.executor.submit(self.wait_and_raise, 1.5)
    future3 = self.executor.submit(time.sleep, 3)

    def cb_done(f):
        _executor_mixin._test_event.set()

    future1.add_done_callback(cb_done)

    finished, pending = futures.wait([future1, future2, future3],
                                     return_when=futures.FIRST_EXCEPTION)
    assert _executor_mixin._test_event.is_set()
    assert set([future1, future2]) == finished
    assert set([future3]) == pending
    _executor_mixin._test_event.clear()
Example 3: wait_tasks_or_abort
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def wait_tasks_or_abort(futures, timeout=60, kill_switch_ev=None):
    try:
        LazySingletonTasksCoordinator.wait_tasks(futures, return_when=FIRST_EXCEPTION, raise_exceptions=True)
    except Exception as e:
        if kill_switch_ev is not None:
            # Used when we want to both raise the exception and wait for all tasks to finish
            kill_switch_ev.set()
            LazySingletonTasksCoordinator.wait_tasks(futures, return_when=ALL_COMPLETED,
                                                     raise_exceptions=False, timeout=timeout)
        raise e
Example 4: run
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def run(self, funcs):
    """Run a set of functions in parallel, returning their results.

    Make sure any function you pass exits with a reasonable timeout. If it
    doesn't return within the timeout, or the result is ignored due to an
    exception in a separate thread, it will continue to stick around until it
    finishes, including blocking process exit.

    Args:
        funcs: An iterable of functions or iterable of args to functools.partial.

    Returns:
        A list of return values with the values matching the order in funcs.

    Raises:
        Propagates the first exception encountered in one of the functions.
    """
    funcs = [f if callable(f) else functools.partial(*f) for f in funcs]
    if len(funcs) == 1:  # Ignore threads if it's not needed.
        return [funcs[0]()]
    if len(funcs) > self._workers:  # Lazy init and grow as needed.
        self.shutdown()
        self._workers = len(funcs)
        self._executor = futures.ThreadPoolExecutor(self._workers)
    futs = [self._executor.submit(f) for f in funcs]
    done, not_done = futures.wait(futs, self._timeout, futures.FIRST_EXCEPTION)
    # Make sure to propagate any exceptions.
    for f in done:
        if not f.cancelled() and f.exception() is not None:
            if not_done:
                # If there are some calls that haven't finished, cancel and
                # recreate the thread pool. Otherwise we may have a thread
                # running forever blocking parallel calls.
                for nd in not_done:
                    nd.cancel()
                self.shutdown(False)  # Don't wait, they may be deadlocked.
            raise f.exception()
    # Either done or timed out, so don't wait again.
    return [f.result(timeout=0) for f in futs]
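A hedged usage sketch for the run method above (the instance name parallel is hypothetical; the argument forms follow from the docstring: plain callables, or argument tuples that get wrapped in functools.partial):

results = parallel.run([
    lambda: 1 + 1,   # a plain callable
    (max, 3, 7),     # not callable as-is, so it becomes functools.partial(max, 3, 7)
])
# results preserves the order of funcs, e.g. [2, 7]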
Example 5: test_first_exception
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def test_first_exception(self):
    future1 = self.executor.submit(mul, 2, 21)
    future2 = self.executor.submit(sleep_and_raise, 1.5)
    future3 = self.executor.submit(time.sleep, 3)

    finished, pending = futures.wait(
            [future1, future2, future3],
            return_when=futures.FIRST_EXCEPTION)

    self.assertEqual(set([future1, future2]), finished)
    self.assertEqual(set([future3]), pending)
Example 6: test_first_exception_some_already_complete
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def test_first_exception_some_already_complete(self):
    future1 = self.executor.submit(divmod, 21, 0)
    future2 = self.executor.submit(time.sleep, 1.5)

    finished, pending = futures.wait(
            [SUCCESSFUL_FUTURE,
             CANCELLED_FUTURE,
             CANCELLED_AND_NOTIFIED_FUTURE,
             future1, future2],
            return_when=futures.FIRST_EXCEPTION)

    self.assertEqual(set([SUCCESSFUL_FUTURE,
                          CANCELLED_AND_NOTIFIED_FUTURE,
                          future1]), finished)
    self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
Example 7: test_first_exception_one_already_failed
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def test_first_exception_one_already_failed(self):
    future1 = self.executor.submit(time.sleep, 2)

    finished, pending = futures.wait(
            [EXCEPTION_FUTURE, future1],
            return_when=futures.FIRST_EXCEPTION)

    self.assertEqual(set([EXCEPTION_FUTURE]), finished)
    self.assertEqual(set([future1]), pending)
Example 8: run
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def run(self, funcs):
    """Run a set of functions in parallel, returning their results.

    Make sure any function you pass exits with a reasonable timeout. If it
    doesn't return within the timeout, or the result is ignored due to an
    exception in a separate thread, it will continue to stick around until it
    finishes, including blocking process exit.

    Args:
        funcs: An iterable of functions or iterable of args to functools.partial.

    Returns:
        A list of return values with the values matching the order in funcs.

    Raises:
        Propagates the first exception encountered in one of the functions.
    """
    funcs = [f if callable(f) else functools.partial(*f) for f in funcs]
    if len(funcs) == 1:  # Ignore threads if it's not needed.
        return [funcs[0]()]
    if len(funcs) > self._workers:  # Lazy init and grow as needed.
        self.shutdown()
        self._workers = len(funcs)
        self._executor = futures.ThreadPoolExecutor(self._workers)
    futs = [self._executor.submit(f) for f in funcs]
    done, not_done = futures.wait(futs, self._timeout, futures.FIRST_EXCEPTION)
    # Make sure to propagate any exceptions.
    for f in done:
        if not f.cancelled() and f.exception() is not None:
            if not_done:
                # If there are some calls that haven't finished, cancel and
                # recreate the thread pool. Otherwise we may have a thread
                # running forever blocking parallel calls.
                for nd in not_done:
                    nd.cancel()
                self.shutdown(False)  # Don't wait, they may be deadlocked.
            raise f.exception()
    # Either done or timed out, so don't wait again.
    return [f.result(timeout=0) for f in futs]
Example 9: test_first_exception_some_already_complete
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def test_first_exception_some_already_complete(self):
    future1 = self.executor.submit(divmod, 21, 0)
    future2 = self.executor.submit(time.sleep, 1.5)

    finished, pending = futures.wait([SUCCESSFUL_FUTURE, CANCELLED_FUTURE,
                                      CANCELLED_AND_NOTIFIED_FUTURE,
                                      future1, future2],
                                     return_when=futures.FIRST_EXCEPTION)
    assert set([SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE,
                future1]) == finished
    assert set([CANCELLED_FUTURE, future2]) == pending
Example 10: test_first_exception_one_already_failed
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def test_first_exception_one_already_failed(self):
    future1 = self.executor.submit(time.sleep, 2)

    finished, pending = futures.wait([EXCEPTION_FUTURE, future1],
                                     return_when=futures.FIRST_EXCEPTION)
    assert set([EXCEPTION_FUTURE]) == finished
    assert set([future1]) == pending
Example 11: count
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def count(self):
    total_partitions = self.__store._store_locator._total_partitions
    job_id = generate_job_id(self.__session_id, tag=RollPair.COUNT)
    job = ErJob(id=job_id,
                name=RollPair.COUNT,
                inputs=[self.ctx.populate_processor(self.__store)])
    args = list()
    for i in range(total_partitions):
        partition_input = job._inputs[0]._partitions[i]
        task = ErTask(id=generate_task_id(job_id, i),
                      name=job._name,
                      inputs=[partition_input],
                      job=job)
        args.append(([task], partition_input._processor._command_endpoint))
    futures = self.__command_client.async_call(
            args=args,
            output_types=[ErPair],
            command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
    done = wait(futures, return_when=FIRST_EXCEPTION).done
    result = 0
    for future in done:
        pair = future.result()[0]
        result += self.functor_serdes.deserialize(pair._value)
    return result

# todo:1: move to command channel to utilize batch command
Example 12: reduce
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def reduce(self, func, output=None, options: dict = None):
    total_partitions = self.__store._store_locator._total_partitions
    job_id = generate_job_id(self.__session_id, tag=RollPair.REDUCE)
    serialized_func = ErFunctor(name=RollPair.REDUCE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
    job = ErJob(id=job_id,
                name=RollPair.REDUCE,
                inputs=[self.ctx.populate_processor(self.__store)],
                functors=[serialized_func])
    args = list()
    for i in range(total_partitions):
        partition_input = job._inputs[0]._partitions[i]
        task = ErTask(id=generate_task_id(job_id, i),
                      name=job._name,
                      inputs=[partition_input],
                      job=job)
        args.append(([task], partition_input._processor._command_endpoint))
    futures = self.__command_client.async_call(
            args=args,
            output_types=[ErPair],
            command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
    done = wait(futures, return_when=FIRST_EXCEPTION).done
    result = None
    first = True
    for future in done:
        pair = future.result()[0]
        seq_op_result = self.functor_serdes.deserialize(pair._value)
        if seq_op_result is not None:
            if not first:
                result = func(result, seq_op_result)
            else:
                result = seq_op_result
                first = False
    return result
Example 13: aggregate
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def aggregate(self, zero_value, seq_op, comb_op, output=None, options: dict = None):
    total_partitions = self.__store._store_locator._total_partitions
    job_id = generate_job_id(self.__session_id, tag=RollPair.AGGREGATE)
    serialized_zero_value = ErFunctor(name=RollPair.AGGREGATE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(zero_value))
    serialized_seq_op = ErFunctor(name=RollPair.AGGREGATE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(seq_op))
    job = ErJob(id=job_id,
                name=RollPair.AGGREGATE,
                inputs=[self.ctx.populate_processor(self.__store)],
                functors=[serialized_zero_value, serialized_seq_op])
    args = list()
    for i in range(total_partitions):
        partition_input = job._inputs[0]._partitions[i]
        task = ErTask(id=generate_task_id(job_id, i),
                      name=job._name,
                      inputs=[partition_input],
                      job=job)
        args.append(([task], partition_input._processor._command_endpoint))
    futures = self.__command_client.async_call(
            args=args,
            output_types=[ErPair],
            command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
    done = wait(futures, return_when=FIRST_EXCEPTION).done
    result = None
    first = True
    for future in done:
        pair = future.result()[0]
        seq_op_result = self.functor_serdes.deserialize(pair._value)
        if not first:
            result = comb_op(result, seq_op_result)
        else:
            result = seq_op_result
            first = False
    return result
Example 14: wait
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import FIRST_EXCEPTION [as alias]
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.

    Args:
        fs: The sequence of Futures (possibly created by different
            Executors) to wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The
            options are:

            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED - Return when all futures finish or are
                            cancelled.

    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (is finished or cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done

        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)

        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)

        waiter = _create_and_install_waiters(fs, return_when)

    waiter.event.wait(timeout)
    for f in fs:
        with f._condition:
            f._waiters.remove(waiter)

    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
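As a closing usage note (not part of the source above): once wait returns under FIRST_EXCEPTION, callers commonly scan the done set for a failed future and re-raise its exception. A minimal sketch, assuming fs is a list of already-submitted futures:

done, not_done = wait(fs, return_when=FIRST_EXCEPTION)
for f in done:
    if not f.cancelled() and f.exception() is not None:
        raise f.exception()  # propagate the first observed failure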