This article collects typical usage examples of the Python class multiprocessing.queues.Queue. If you are unsure what exactly queues.Queue does, how to use it, or simply want to see it in real code, the hand-picked examples below may help. You can also explore the containing module, multiprocessing.queues, for more context.
Below are 13 code examples of queues.Queue, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: Manager
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def Manager():
    '''
    Returns a manager associated with a running server process.
    The manager's methods, such as `Lock()`, `Condition()` and `Queue()`,
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m

# brython fix me
#def Pipe(duplex=True):
#    '''
#    Returns two connection objects connected by a pipe
#    '''
#    from multiprocessing.connection import Pipe
#    return Pipe(duplex)
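A quick usage sketch for this helper (an addition for illustration, not part of the original snippet): the returned SyncManager hands out proxies to objects living in the manager's server process.

# Usage sketch (assumes the Manager() helper above; run under a main guard):
m = Manager()
shared_q = m.Queue()   # proxy to a queue hosted in the manager process
shared_q.put(42)
print(shared_q.get())  # -> 42
m.shutdown()           # stop the manager's server process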
Example 2: update_output
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def update_output(
    args,
    extra_state: Dict[str, Any],
    output_queue: Optional[mp_queues.Queue],
    num_updates: int,
    train_ppl: float,
    wps: Optional[float],
):
    if distributed_utils.is_master(args) and output_queue is not None:
        progress_output: Tuple[int, Dict] = (
            num_updates,
            {
                "train_ppl": train_ppl,
                "tune_loss": utils.item(extra_state["tune_eval"]["loss"]),
                "tune_ppl": extra_state["tune_eval"]["perplexity"],
                "wps": utils.item(wps),
                # translation_samples isn't currently used by the queue reader,
                # so just pass None for now until we start needing it.
                "translation_samples": None,
            },
        )
        output_queue.put_nowait(progress_output)
        extra_state["training_progress"].append(progress_output)
    return extra_state
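The matching queue reader lives elsewhere in pytorch_translate; purely as a hedged illustration, a minimal consumer of the (num_updates, stats) tuples produced above could look like this (drain_progress is a hypothetical name, not from the project):

import queue  # only for the queue.Empty exception

def drain_progress(output_queue):
    # Drain whatever progress tuples are currently buffered, without blocking.
    while True:
        try:
            num_updates, stats = output_queue.get_nowait()
        except queue.Empty:
            return
        print(f"update {num_updates}: train_ppl={stats['train_ppl']}")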
Example 3: yk_monitor
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def yk_monitor(self, mon_l):
    # forming command to run parallel monitoring processes
    mon_cmd = ' & '.join(["xinput test {}".format(y_id) for y_id in mon_l])
    monitor = subprocess.Popen(mon_cmd, shell=True, stdout=subprocess.PIPE)

    stdout_queue = Queue()
    stdout_reader = AsynchronousFileReader(monitor.stdout, stdout_queue)
    stdout_reader.start()

    triggered = False
    timestamp = time.time()
    while not stdout_reader.eof and time.time() - timestamp < TIMEOUT:
        while stdout_queue.qsize() > 0:
            stdout_queue.get()  # emptying queue
            triggered = True
            time.sleep(.04)
        if triggered:
            print('YubiKey triggered. Now disabling.')
            break
        time.sleep(.001)

    if not triggered:
        print('No YubiKey triggered. Timeout.')
Example 4: Queue
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def Queue(maxsize=0):
    '''
    Returns a queue object
    '''
    from multiprocessing import get_context
    from multiprocessing.queues import Queue
    # Note: since Python 3.4, multiprocessing.queues.Queue requires an
    # explicit context; the original snippet passed only maxsize.
    return Queue(maxsize, ctx=get_context())
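A minimal round trip through this wrapper, assuming it runs under a main guard so it also works on spawn-based platforms:

from multiprocessing import Process

def _child(q):
    q.put("hello from the child process")

if __name__ == "__main__":
    q = Queue()  # the wrapper defined above
    p = Process(target=_child, args=(q,))
    p.start()
    print(q.get())  # blocks until the child has put something
    p.join()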
Example 5: multi_process_main
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def multi_process_main(
    args: Any,
    start_rank: int = 0,
    init_fn: Optional[Callable[[], None]] = None,
    trainer_class=None,
    **train_step_kwargs,
):
    pytorch_translate_options.print_args(args)
    output_queue = torch.multiprocessing.get_context("spawn").Queue()
    # Train with multiprocessing.
    spawn_context = torch.multiprocessing.spawn(
        fn=multi_process_train,
        args=(
            args,
            output_queue,
            start_rank,
            init_fn,
            trainer_class,
            train_step_kwargs,
        ),
        nprocs=args.local_num_gpus,
        # We don't block here to allow caller to process output_queue in
        # parallel with training.
        join=False,
    )
    return (spawn_context, output_queue)
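Since join=False is passed, the caller is expected to pump output_queue itself. A sketch of that driver loop, assuming a fully populated args namespace (spawn_context.join(timeout=...) returns True once every worker process has exited, per torch.multiprocessing's documented behavior):

import queue

spawn_context, output_queue = multi_process_main(args)
while not spawn_context.join(timeout=0.1):
    try:
        num_updates, stats = output_queue.get_nowait()
        print(f"update {num_updates}: {stats}")
    except queue.Empty:
        pass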
Example 6: _logger
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def _logger(name, level, msg, exc_info=None):
    elapsed = time.monotonic() - start_time
    # the original named this variable "hours", but it computes whole minutes
    minutes = int(elapsed // 60)
    seconds = elapsed - (minutes * 60)
    logging.log(level, f'{minutes:3}:{seconds:06.3f} {name:20} {msg}', exc_info=exc_info)
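Here start_time is assumed to be a module-level time.monotonic() snapshot taken at startup; a call then renders elapsed time as minutes:seconds, for example:

import logging
import time

start_time = time.monotonic()  # module-level, as _logger expects
logging.basicConfig(level=logging.DEBUG)
_logger("worker-1", logging.INFO, "started")
# logs something like: "  0:00.001 worker-1             started"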
Example 7: _sleep_secs
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def _sleep_secs(max_sleep, end_time=999999999999999.9):
    # Calculate time left to sleep, no less than 0
    return max(0.0, min(end_time - time.time(), max_sleep))
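Typical use is throttling a polling loop against a deadline, e.g. (work_pending is a hypothetical stand-in for whatever the loop checks):

deadline = time.time() + 10.0
while work_pending() and time.time() < deadline:
    time.sleep(_sleep_secs(0.5, deadline))  # never oversleeps the deadline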
Example 8: _process_worker
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def _process_worker(call_queue, result_queue, initializer, initargs):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A ctx.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will be written
            to by the worker.
        initializer: A callable initializer, or None
        initargs: A tuple of args for the initializer
    """
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            _base.LOGGER.critical('Exception in initializer:', exc_info=True)
            # The parent will notice that the process stopped and
            # mark the pool broken
            return
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up queue management thread
            result_queue.put(os.getpid())
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as e:
            exc = _ExceptionWithTraceback(e, e.__traceback__)
            _sendback_result(result_queue, call_item.work_id, exception=exc)
        else:
            _sendback_result(result_queue, call_item.work_id, result=r)

        # Liberate the resource as soon as possible, to avoid holding onto
        # open files or shared memory that is not needed anymore
        del call_item
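For reference, the _sendback_result helper called above is essentially the following (paraphrased from CPython's concurrent.futures.process):

def _sendback_result(result_queue, work_id, result=None, exception=None):
    """Safely send back the given result or exception."""
    try:
        result_queue.put(_ResultItem(work_id, result=result, exception=exception))
    except BaseException as e:
        exc = _ExceptionWithTraceback(e, e.__traceback__)
        result_queue.put(_ResultItem(work_id, exception=exc))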
Example 9: _add_call_item_to_queue
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def _add_call_item_to_queue(pending_work_items,
                            work_ids,
                            call_queue):
    """Fills call_queue with _WorkItems from pending_work_items.

    This function never blocks.

    Args:
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
            are consumed and the corresponding _WorkItems from
            pending_work_items are transformed into _CallItems and put in
            call_queue.
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems.
    """
    while True:
        if call_queue.full():
            return
        try:
            work_id = work_ids.get(block=False)
        except queue.Empty:
            return
        else:
            work_item = pending_work_items[work_id]

            if work_item.future.set_running_or_notify_cancel():
                call_queue.put(_CallItem(work_id,
                                         work_item.fn,
                                         work_item.args,
                                         work_item.kwargs),
                               block=True)
            else:
                del pending_work_items[work_id]
                continue
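The _CallItem container used above is just a plain record, essentially as CPython defines it:

class _CallItem(object):
    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs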
Example 10: __init__
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def __init__(self, maxsize: int = 0, total_sources: int = 1):
    super().__init__(maxsize=maxsize, ctx=multiprocessing.get_context())
    self.lock = multiprocessing.Lock()
    self.namespace = utils.Namespace(
        remaining=total_sources, exception=False, force_stop=False
    )
    self.exception_queue: Queue[PipelineException] = Queue(
        ctx=multiprocessing.get_context()
    )
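How these bookkeeping fields are consumed is outside this snippet; purely as a hypothetical sketch (not the project's actual code), a producer finishing its stream might decrement remaining and signal consumers once all sources are done:

def on_source_done(q):
    # Hypothetical helper: the last producer to finish marks the queue
    # exhausted with a sentinel value.
    with q.lock:
        q.namespace.remaining -= 1
        if q.namespace.remaining == 0:
            q.put(None)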
Example 11: close
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def close(self):
    """
    Closes the actual logger; waits for the queue to close and sets
    the abort event
    """
    if hasattr(self, "_flush_queue"):
        if isinstance(self._flush_queue, MpQueue):
            self._flush_queue.close()
            self._flush_queue.join_thread()
    # the original checked hasattr(self, "abort_event"), but the attribute
    # actually used below is "_abort_event"
    if hasattr(self, "_abort_event"):
        self._abort_event.set()
Example 12: __init__
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def __init__(self, fd, queue):
    assert isinstance(queue, Queue)
    assert callable(fd.readline)
    Thread.__init__(self)
    self._fd = fd
    self._queue = queue
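Only __init__ is shown here; for context, a minimal rest-of-class might look like this. It is a sketch consistent with how Example 3 (yk_monitor) accesses .eof, not necessarily the project's actual implementation:

def run(self):
    # Read lines until EOF and hand each one to the queue.
    for line in iter(self._fd.readline, b''):
        self._queue.put(line)

@property
def eof(self):
    # True once the reader thread has finished and the queue is drained.
    return not self.is_alive() and self._queue.empty()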
Example 13: multi_process_train
# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def multi_process_train(
    device_id: int,
    args,
    output_queue: Optional[mp_queues.Queue],
    start_rank: int = 0,
    init_fn: Optional[Callable[[], None]] = None,
    trainer_class=None,
    train_step_kwargs=None,
):
    # Enable faulthandler for better Python tracebacks upon segfaults under
    # multiprocessing. Without this, the stack trace only shows the
    # SpawnContext.join() call, rather than the actual line where the child
    # process segfaulted.
    faulthandler.enable(all_threads=True)
    if init_fn:
        init_fn()
    args.device_id = device_id
    args.distributed_rank = start_rank + device_id
    if args.distributed_world_size > 1:
        args.distributed_rank = distributed_utils.distributed_init(args)
    if torch.cuda.is_available():
        torch.cuda.set_device(args.device_id)
    trainer, task, epoch_itr = setup_training(args, trainer_class)

    # distributed_init does initialization and works as a barrier.
    # Therefore, any expensive data preprocessing should happen before.
    extra_state, epoch_itr, checkpoint_manager = setup_training_state(
        args=args, trainer=trainer, task=task, epoch_itr=epoch_itr
    )

    # Replay previous training progress so the output_queue contains all
    # previous training progress even when we resume training from an existing
    # checkpoint.
    if distributed_utils.is_master(args) and output_queue is not None:
        for progress_output in extra_state["training_progress"]:
            output_queue.put_nowait(progress_output)

    train(
        args=args,
        extra_state=extra_state,
        trainer=trainer,
        task=task,
        epoch_itr=epoch_itr,
        checkpoint_manager=checkpoint_manager,
        output_queue=output_queue,
        **train_step_kwargs,
    )