Python queues.Queue code examples

This article collects typical usage examples of Python's multiprocessing.queues.Queue. If you have been wondering what queues.Queue does, how to use it, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples from its containing module, multiprocessing.queues.


The following presents 14 code examples of queues.Queue, sorted by popularity by default.
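As a quick orientation before the project excerpts, here is a minimal, self-contained sketch (not taken from any project below) of passing data between processes through a multiprocessing queue:

import multiprocessing

def worker(q):
    # Runs in a child process; the queue carries results back to the parent.
    q.put("hello from the child")

if __name__ == "__main__":
    # multiprocessing.Queue() is the usual factory; the object it returns
    # is an instance of multiprocessing.queues.Queue.
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    print(q.get())  # -> "hello from the child"
    p.join()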

Example 1: Manager

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def Manager():
    '''
    Returns a manager associated with a running server process

    The manager's methods, such as `Lock()`, `Condition()` and `Queue()`,
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m

#brython fix me
#def Pipe(duplex=True):
#    '''
#    Returns two connection objects connected by a pipe
#    '''
#    from multiprocessing.connection import Pipe
#    return Pipe(duplex) 
Developer: war-and-code, Project: jawfish, Lines: 21, Source: __init__.py
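As a usage sketch for this helper (assumed, not part of the jawfish sources), the returned manager hands out proxy objects that can be shared across processes:

m = Manager()
q = m.Queue()      # proxy to a queue living in the manager's server process
q.put(42)
print(q.get())     # -> 42
m.shutdown()       # stop the manager's server process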

Example 2: Manager

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def Manager():
    '''
    Returns a manager associated with a running server process

    The manager's methods, such as `Lock()`, `Condition()` and `Queue()`,
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m 
Developer: IronLanguages, Project: ironpython2, Lines: 13, Source: __init__.py

Example 3: update_output

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def update_output(
    args,
    extra_state: Dict[str, Any],
    output_queue: Optional[mp_queues.Queue],
    num_updates: int,
    train_ppl: float,
    wps: Optional[float],
):
    if distributed_utils.is_master(args) and output_queue is not None:
        progress_output: Tuple[int, Dict] = (
            num_updates,
            {
                "train_ppl": train_ppl,
                "tune_loss": utils.item(extra_state["tune_eval"]["loss"]),
                "tune_ppl": extra_state["tune_eval"]["perplexity"],
                "wps": utils.item(wps),
                # translation_samples isn't currently used by the queue reader,
                # so just pass None for now until we start needing it.
                "translation_samples": None,
            },
        )
        output_queue.put_nowait(progress_output)
        extra_state["training_progress"].append(progress_output)

    return extra_state 
Developer: pytorch, Project: translate, Lines: 27, Source: train.py

Example 4: yk_monitor

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def yk_monitor(self, mon_l):
        # forming command to run parallel monitoring processes
        mon_cmd = ' & '.join(["xinput test {}".format(y_id) for y_id in mon_l])
        monitor = subprocess.Popen(mon_cmd, shell=True, stdout=subprocess.PIPE)

        stdout_queue = Queue()
        stdout_reader = AsynchronousFileReader(monitor.stdout, stdout_queue)
        stdout_reader.start()

        triggered = False
        timestamp = time.time()
        while not stdout_reader.eof and time.time() - timestamp < TIMEOUT:
            while stdout_queue.qsize() > 0:
                stdout_queue.get()  # emptying queue
                triggered = True
                time.sleep(.04)
            if triggered:
                print('YubiKey triggered. Now disabling.')
                break

            time.sleep(.001)
        if not triggered:
            print('No YubiKey triggered. Timeout.')

# FIRING UP YUBIGUARD --------------------------------------------------------- 
Developer: pykong, Project: YubiGuard, Lines: 27, Source: YubiGuard.py

Example 5: Queue

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def Queue(maxsize=0):
    '''
    Returns a queue object
    '''
    from multiprocessing.queues import Queue
    return Queue(maxsize) 
Developer: war-and-code, Project: jawfish, Lines: 8, Source: __init__.py
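Note that this snippet reflects an older API. On modern CPython (3.4+), multiprocessing.queues.Queue takes a keyword-only ctx argument, so constructing it directly looks roughly like this (a hedged sketch, not from the jawfish sources):

import multiprocessing
from multiprocessing.queues import Queue

q = Queue(maxsize=0, ctx=multiprocessing.get_context())

In application code the usual entry point is simply multiprocessing.Queue(maxsize), which supplies the context for you.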

Example 6: multi_process_main

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def multi_process_main(
    args: Any,
    start_rank: int = 0,
    init_fn: Optional[Callable[[], None]] = None,
    trainer_class=None,
    **train_step_kwargs,
):
    pytorch_translate_options.print_args(args)
    output_queue = torch.multiprocessing.get_context("spawn").Queue()
    # Train with multiprocessing.
    spawn_context = torch.multiprocessing.spawn(
        fn=multi_process_train,
        args=(
            args,
            output_queue,
            start_rank,
            init_fn,
            trainer_class,
            train_step_kwargs,
        ),
        nprocs=args.local_num_gpus,
        # We don't block here to allow caller to process output_queue in
        # parallel with training.
        join=False,
    )
    return (spawn_context, output_queue) 
Developer: pytorch, Project: translate, Lines: 28, Source: train.py

Example 7: _logger

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def _logger(name, level, msg, exc_info=None):
    elapsed = time.monotonic() - start_time
    # elapsed // 60 yields whole minutes, so label the field accordingly
    minutes = int(elapsed // 60)
    seconds = elapsed - (minutes * 60)
    logging.log(level, f'{minutes:3}:{seconds:06.3f} {name:20} {msg}', exc_info=exc_info)


# -- Queue handling support 
Developer: PamelaM, Project: mptools, Lines: 10, Source: _mptools.py

Example 8: _sleep_secs

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def _sleep_secs(max_sleep, end_time=999999999999999.9):
    # Calculate time left to sleep, no less than 0
    return max(0.0, min(end_time - time.time(), max_sleep))


# -- Standard Event Queue manager 
Developer: PamelaM, Project: mptools, Lines: 8, Source: _mptools.py
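For illustration (an assumed usage pattern, not shown in the mptools excerpt), a polling loop would combine it with a deadline so each sleep never overshoots:

import time

end_time = time.time() + 10.0            # hypothetical 10-second deadline
while time.time() < end_time:
    poll_once()                          # hypothetical unit of work
    time.sleep(_sleep_secs(0.5, end_time))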

Example 9: _process_worker

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def _process_worker(call_queue, result_queue, initializer, initargs):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A ctx.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will be written
            to by the worker.
        initializer: A callable initializer, or None
        initargs: A tuple of args for the initializer
    """
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            _base.LOGGER.critical('Exception in initializer:', exc_info=True)
            # The parent will notice that the process stopped and
            # mark the pool broken
            return
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up queue management thread
            result_queue.put(os.getpid())
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as e:
            exc = _ExceptionWithTraceback(e, e.__traceback__)
            _sendback_result(result_queue, call_item.work_id, exception=exc)
        else:
            _sendback_result(result_queue, call_item.work_id, result=r)

        # Liberate the resource as soon as possible, to avoid holding onto
        # open files or shared memory that is not needed anymore
        del call_item 
Developer: CedricGuillemet, Project: Imogen, Lines: 40, Source: process.py

Example 10: _add_call_item_to_queue

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def _add_call_item_to_queue(pending_work_items,
                            work_ids,
                            call_queue):
    """Fills call_queue with _WorkItems from pending_work_items.

    This function never blocks.

    Args:
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
            are consumed and the corresponding _WorkItems from
            pending_work_items are transformed into _CallItems and put in
            call_queue.
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems.
    """
    while True:
        if call_queue.full():
            return
        try:
            work_id = work_ids.get(block=False)
        except queue.Empty:
            return
        else:
            work_item = pending_work_items[work_id]

            if work_item.future.set_running_or_notify_cancel():
                call_queue.put(_CallItem(work_id,
                                         work_item.fn,
                                         work_item.args,
                                         work_item.kwargs),
                               block=True)
            else:
                del pending_work_items[work_id]
                continue 
Developer: CedricGuillemet, Project: Imogen, Lines: 38, Source: process.py
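Examples 9 and 10 are internals of CPython's concurrent.futures.process module; application code reaches this machinery through the public ProcessPoolExecutor API. A minimal sketch (initializer/initargs require Python 3.7+):

import concurrent.futures

def init_worker(tag):
    # Hypothetical initializer, run once in each worker process.
    print(f"worker ready: {tag}")

def square(x):
    return x * x

if __name__ == "__main__":
    with concurrent.futures.ProcessPoolExecutor(
        max_workers=2, initializer=init_worker, initargs=("demo",)
    ) as executor:
        print(list(executor.map(square, range(5))))  # -> [0, 1, 4, 9, 16]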

Example 11: __init__

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def __init__(self, maxsize: int = 0, total_sources: int = 1):
        super().__init__(maxsize=maxsize, ctx=multiprocessing.get_context())

        self.lock = multiprocessing.Lock()
        self.namespace = utils.Namespace(
            remaining=total_sources, exception=False, force_stop=False
        )
        self.exception_queue: Queue[PipelineException] = Queue(
            ctx=multiprocessing.get_context()
        ) 
Developer: cgarciae, Project: pypeln, Lines: 12, Source: queue.py

Example 12: close

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def close(self):
        """
        Function to close the actual logger; Waits for queue closing and sets
        the abortion event

        """
        if hasattr(self, "_flush_queue"):
            if isinstance(self._flush_queue, MpQueue):
                self._flush_queue.close()
                self._flush_queue.join_thread()

        if hasattr(self, "abort_event"):
            self._abort_event.set() 
Developer: delira-dev, Project: delira, Lines: 15, Source: base_logger.py

Example 13: __init__

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def __init__(self, fd, queue):
        assert isinstance(queue, Queue)
        assert callable(fd.readline)
        Thread.__init__(self)
        self._fd = fd
        self._queue = queue 
Developer: pykong, Project: YubiGuard, Lines: 8, Source: YubiGuard.py
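The rest of the class is not shown in this excerpt; a typical run() for such a reader thread (a hypothetical sketch, not the YubiGuard source) pumps lines into the queue until EOF:

    def run(self):
        # Hypothetical: forward each line from the file descriptor to the queue.
        for line in iter(self._fd.readline, b''):
            self._queue.put(line)

    @property
    def eof(self):
        # Hypothetical: matches the attribute-style access in Example 4 above.
        return not self.is_alive() and self._queue.empty()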

Example 14: multi_process_train

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Queue [as alias]
def multi_process_train(
    device_id: int,
    args,
    output_queue: Optional[mp_queues.Queue],
    start_rank: int = 0,
    init_fn: Optional[Callable[[], None]] = None,
    trainer_class=None,
    train_step_kwargs=None,
):
    # Enable faulthandler for better Python tracebacks upon segfaults under
    # multiprocessing. Without this, the stack trace only shows the
    # SpawnContext.join() call, rather than the actual line where the child
    # process segfaulted.
    faulthandler.enable(all_threads=True)

    if init_fn:
        init_fn()
    args.device_id = device_id
    args.distributed_rank = start_rank + device_id
    if args.distributed_world_size > 1:
        args.distributed_rank = distributed_utils.distributed_init(args)

    if torch.cuda.is_available():
        torch.cuda.set_device(args.device_id)

    trainer, task, epoch_itr = setup_training(args, trainer_class)
    # Distributed_init does initialization and works as a barrier.
    # Therefore, any expensive data preprocessing should happen before.

    extra_state, epoch_itr, checkpoint_manager = setup_training_state(
        args=args, trainer=trainer, task=task, epoch_itr=epoch_itr
    )

    # Replay previous training progress so the output_queue contains all
    # previous training progress even when we resume training from an existing
    # checkpoint.
    if distributed_utils.is_master(args) and output_queue is not None:
        for progress_output in extra_state["training_progress"]:
            output_queue.put_nowait(progress_output)

    train(
        args=args,
        extra_state=extra_state,
        trainer=trainer,
        task=task,
        epoch_itr=epoch_itr,
        checkpoint_manager=checkpoint_manager,
        output_queue=output_queue,
        **train_step_kwargs,
    ) 
Developer: pytorch, Project: translate, Lines: 52, Source: train.py


Note: The multiprocessing.queues.Queue examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.