

Python multiprocessing.SimpleQueue Code Examples

This article collects and summarizes typical usage examples of multiprocessing.SimpleQueue in Python. If you are struggling with questions such as: What exactly is multiprocessing.SimpleQueue for? How is it used? What do real examples look like? Then the curated code examples here may help. You can also explore further usage examples from the multiprocessing module.


Below are 12 code examples of multiprocessing.SimpleQueue, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
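
Before the project examples, here is a minimal, self-contained sketch (not drawn from any project below) of the basic SimpleQueue workflow: the parent creates the queue, a child process puts a result, and the parent blocks on get().

import multiprocessing


def worker(queue: multiprocessing.SimpleQueue) -> None:
    # Child process: hand a single result back to the parent.
    queue.put("hello from the child")


if __name__ == "__main__":
    q = multiprocessing.SimpleQueue()
    proc = multiprocessing.Process(target=worker, args=(q,))
    proc.start()
    print(q.get())  # blocks until the child puts an item
    proc.join()

SimpleQueue deliberately has a smaller surface than multiprocessing.Queue: essentially put(), get(), and empty(), with no feeder thread, no maxsize, and no task_done()/join().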

Example 1: init_worker

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def init_worker(status_queue: multiprocessing.SimpleQueue,
                param_queue: multiprocessing.SimpleQueue,
                result_queue: multiprocessing.SimpleQueue) -> None:
    global result
    global coverage_run

    # Make sure the generator is re-seeded, as we have inherited
    # the seed from the parent process.
    random.seed()

    result = ChannelingTestResult(result_queue)
    if not param_queue.empty():
        server_addr = param_queue.get()

        if server_addr is not None:
            os.environ['EDGEDB_TEST_CLUSTER_ADDR'] = json.dumps(server_addr)

    coverage_run = devmode.CoverageConfig.start_coverage_if_requested()

    status_queue.put(True) 
Author: edgedb, Project: edgedb, Lines of code: 22, Source: runner.py

Example 2: multi_proc_run

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs=None):
    """Runs a function in a multi-proc setting (unless num_proc == 1)."""
    # There is no need for multi-proc in the single-proc case
    fun_kwargs = fun_kwargs if fun_kwargs else {}
    if num_proc == 1:
        fun(*fun_args, **fun_kwargs)
        return
    # Handle errors from training subprocesses
    error_queue = multiprocessing.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    # Get a random port to use (without using global random number generator)
    port = random.Random().randint(cfg.PORT_RANGE[0], cfg.PORT_RANGE[1])
    # Run each training subprocess
    ps = []
    for i in range(num_proc):
        p_i = multiprocessing.Process(
            target=run, args=(i, num_proc, port, error_queue, fun, fun_args, fun_kwargs)
        )
        ps.append(p_i)
        p_i.start()
        error_handler.add_child(p_i.pid)
    # Wait for each subprocess to finish
    for p in ps:
        p.join() 
Author: facebookresearch, Project: pycls, Lines of code: 26, Source: distributed.py
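
A hedged usage sketch of the function above (train and its arguments are hypothetical, not from pycls); any picklable function can be fanned out this way:

def train(dataset, epochs=10):
    ...  # hypothetical training entry point


multi_proc_run(num_proc=4, fun=train, fun_args=("imagenet",), fun_kwargs={"epochs": 90})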

Example 3: __init__

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def __init__(self):
        self.detection_queue = mp.SimpleQueue()
        self.avg_inference_speed = mp.Value('d', 0.01)
        self.detection_start = mp.Value('d', 0.0)
        self.detect_process = None
        self.start_or_restart() 
Author: blakeblackshear, Project: frigate, Lines of code: 8, Source: edgetpu.py

Example 4: __init__

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {} 
Author: Microvellum, Project: Fluid-Designer, Lines of code: 41, Source: process.py
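
The constructor above is internal plumbing from the standard library's concurrent.futures backport; from the caller's side, only the public API is needed. A minimal usage sketch:

from concurrent.futures import ProcessPoolExecutor


def square(x):
    return x * x


if __name__ == "__main__":
    # Fan work out across 4 worker processes and collect the results.
    with ProcessPoolExecutor(max_workers=4) as executor:
        print(list(executor.map(square, range(10))))  # [0, 1, 4, ..., 81]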

Example 5: __init__

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {} 
Author: IronLanguages, Project: ironpython3, Lines of code: 38, Source: process.py

Example 6: test_empty

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def test_empty(self):
        queue = multiprocessing.SimpleQueue()
        child_can_start = multiprocessing.Event()
        parent_can_continue = multiprocessing.Event()

        proc = multiprocessing.Process(
            target=self._test_empty,
            args=(queue, child_can_start, parent_can_continue)
        )
        proc.daemon = True
        proc.start()

        self.assertTrue(queue.empty())

        child_can_start.set()
        parent_can_continue.wait()

        self.assertFalse(queue.empty())
        self.assertEqual(queue.get(), True)
        self.assertEqual(queue.get(), False)
        self.assertTrue(queue.empty())

        proc.join()

Author: ShikyoKira, Project: Project-New-Reign---Nemesis-Main, Lines of code: 29, Source: _test_multiprocessing.py
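
The child-side helper _test_empty is not shown in this excerpt. Judging from the assertions above (get() returns True, then False), it plausibly looks like the following reconstruction, defined inside the same test class:

@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
    child_can_start.wait()
    queue.put(queue.empty())   # True: nothing has been put yet
    queue.put(queue.empty())   # False: the first item is now queued
    parent_can_continue.set()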

Example 7: __init__

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def __init__(
        self,
        setup_queue: multiprocessing.SimpleQueue,
        result_queue: multiprocessing.SimpleQueue,
    ) -> None:
        self.setup_queue = setup_queue
        self.result_queue = result_queue
        super().__init__() 
Author: PacktPublishing, Project: Mastering-Object-Oriented-Python-Second-Edition, Lines of code: 10, Source: ch13_ex6.py

Example 8: server_6

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def server_6() -> None:

    # Two queues
    setup_q: multiprocessing.SimpleQueue = multiprocessing.SimpleQueue()
    results_q: multiprocessing.SimpleQueue = multiprocessing.SimpleQueue()

    # The summarization process: waiting for work
    result = Summarize(results_q)
    result.start()

    # The simulation process: also waiting for work.
    # We might want to create a Pool of these so that
    # we can get even more done at one time.
    simulators = []
    for i in range(4):
        sim = Simulation(setup_q, results_q)
        sim.start()
        simulators.append(sim)

    # Queue up some objects to work on.
    table = Table(decks=6, limit=50, dealer=Hit17(), split=ReSplit(), payout=(3, 2))
    for bet in Flat, Martingale, OneThreeTwoSix:
        player = Player(SomeStrategy(), bet(), 100, 25)
        for sample in range(5):
            setup_q.put((table, player))

    # Queue a terminator for each simulator.
    for sim in simulators:
        setup_q.put((None, None))

    # Wait for the simulations to all finish.
    for sim in simulators:
        sim.join()

    # Queue up a results terminator.
    # Results processing done?
    results_q.put((None, None, None))
    result.join()
    del results_q
    del setup_q 
Author: PacktPublishing, Project: Mastering-Object-Oriented-Python-Second-Edition, Lines of code: 42, Source: ch13_ex6.py
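
Simulation and Summarize are defined elsewhere in ch13_ex6.py (Example 7 shows Simulation's __init__). The sentinel protocol visible above, (None, None) per simulator and (None, None, None) for the summarizer, implies a consumer loop like this hedged sketch; simulate_once is a hypothetical helper:

class Simulation(multiprocessing.Process):
    def __init__(self, setup_queue, result_queue):
        self.setup_queue = setup_queue
        self.result_queue = result_queue
        super().__init__()

    def run(self):
        while True:
            table, player = self.setup_queue.get()
            if table is None and player is None:
                break  # the (None, None) shutdown sentinel queued above
            # simulate_once is hypothetical; it plays one session
            self.result_queue.put((table, player, simulate_once(table, player)))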

Example 9: setUp

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def setUp(self):
        # Create the queues for logging and submission
        self.submission_queue = mp.SimpleQueue()
        self.fasta_out = "temporary.fasta"
        self.gtf_out = "temporary.gtf" 
Author: EI-CoreBioinformatics, Project: mikado, Lines of code: 7, Source: prepare_misc_test.py

Example 10: run

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def run(self, result):
        # We use SimpleQueues because they are more predictable.
        # They do the necessary IO directly, without using a
        # helper thread.
        result_queue = multiprocessing.SimpleQueue()
        status_queue = multiprocessing.SimpleQueue()
        worker_param_queue = multiprocessing.SimpleQueue()

        # Prepopulate the worker param queue with server connection
        # information.
        for _ in range(self.num_workers):
            worker_param_queue.put(self.server_conn)

        result_thread = threading.Thread(
            name='test-monitor', target=monitor_thread,
            args=(result_queue, result), daemon=True)
        result_thread.start()

        initargs = (status_queue, worker_param_queue, result_queue)

        pool = multiprocessing.Pool(
            self.num_workers,
            initializer=mproc_fixes.WorkerScope(init_worker, shutdown_worker),
            initargs=initargs)

        # Wait for all workers to initialize.
        for _ in range(self.num_workers):
            status_queue.get()

        with pool:
            ar = pool.map_async(_run_test, iter(self.tests), chunksize=1)

            while True:
                try:
                    ar.get(timeout=0.1)
                except multiprocessing.TimeoutError:
                    if self.stop_requested:
                        break
                    else:
                        continue
                else:
                    break

            # Post the terminal message to the queue so that
            # test-monitor can stop.
            result_queue.put((None, None, None))

            # Give the test-monitor thread some time to
            # process the queue messages.  If something
            # goes wrong, the thread will be forcibly
            # joined by a timeout.
            result_thread.join(timeout=3)

        # Wait for pool to shutdown, this includes test teardowns.
        pool.join()

        return result 
Author: edgedb, Project: edgedb, Lines of code: 59, Source: runner.py
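
monitor_thread is defined elsewhere in runner.py. Given the (None, None, None) terminator posted above and the ChannelingTestResult from Example 1, a plausible hedged sketch, assuming each queue item is a (method_name, args, kwargs) triple:

def monitor_thread(queue, result):
    while True:
        methname, args, kwargs = queue.get()
        if methname is None and args is None and kwargs is None:
            break  # the terminator posted by run() above
        # Replay the worker's TestResult call on the aggregate result.
        getattr(result, methname)(*args, **kwargs)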

Example 11: __init__

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def __init__(self, loader):
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory
        self.done_event = threading.Event()

        self.worker_init_fn = loader.worker_init_fn
        self.worker_init_args = loader.worker_init_args
        self.worker_init_kwargs = loader.worker_init_kwargs

        self.sample_iter = iter(self.batch_sampler)

        if self.num_workers > 0:
            self.index_queue = multiprocessing.SimpleQueue()
            self.data_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            self.seeds = loader.gen_seeds()
            self.workers = [
                multiprocessing.Process(
                    target=_worker_loop_seed,
                    args=(i, self.dataset, self.index_queue, self.data_queue, self.collate_fn, self.seeds[i],
                          self.worker_init_fn, self.worker_init_args[i], self.worker_init_kwargs[i]))
                for i in range(self.num_workers)]

            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()

            if self.pin_memory:
                in_data = self.data_queue
                self.data_queue = queue.Queue()
                self.pin_thread = threading.Thread(
                    target=_pin_memory_loop,
                    args=(in_data, self.data_queue, self.done_event))
                self.pin_thread.daemon = True
                self.pin_thread.start()

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
        else:
            if self.worker_init_fn is not None:
                self.worker_init_fn(-1, *self.worker_init_args, **self.worker_init_kwargs) 
Author: vacancy, Project: Jacinle, Lines of code: 52, Source: dataloader_torch030.py
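
_pin_memory_loop is not shown; from the wiring above (in_data, self.data_queue, self.done_event), its job is to move each batch into page-locked memory on a background thread so host-to-GPU copies can run asynchronously. A hedged sketch, assuming a hypothetical pin(batch) helper that copies the tensors in a batch into pinned memory:

def _pin_memory_loop(in_queue, out_queue, done_event):
    while not done_event.is_set():
        batch = in_queue.get()      # blocks until a worker produces a batch
        out_queue.put(pin(batch))   # pin() is hypothetical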

Example 12: launch_process_group

# Required module import: import multiprocessing [as alias]
# Or: from multiprocessing import SimpleQueue [as alias]
def launch_process_group(func: typing.Callable,
                         args: argparse.Namespace,
                         num_processes: int,
                         num_nodes: int = 1,
                         node_rank: int = 0,
                         master_addr: str = "127.0.0.1",
                         master_port: int = 29500,
                         join: bool = True,
                         daemon: bool = False):
    # world size in terms of number of processes
    dist_world_size = num_processes * num_nodes

    # set PyTorch distributed related environmental variables
    current_env = os.environ.copy()
    current_env["MASTER_ADDR"] = master_addr
    current_env["MASTER_PORT"] = str(master_port)
    current_env["WORLD_SIZE"] = str(dist_world_size)
    if 'OMP_NUM_THREADS' not in os.environ and num_processes > 1:
        current_env["OMP_NUM_THREADS"] = str(4)

    error_queues = []
    processes = []

    for local_rank in range(num_processes):
        # each process's rank
        dist_rank = num_processes * node_rank + local_rank
        current_env["RANK"] = str(dist_rank)
        current_env["LOCAL_RANK"] = str(local_rank)
        args.local_rank = local_rank

        error_queue: mp.SimpleQueue[Exception] = mp.SimpleQueue()
        kwargs = {'args': args, 'env': current_env}
        process = mp.Process(
            target=_wrap,
            args=(func, kwargs, error_queue),
            daemon=daemon)
        process.start()
        error_queues.append(error_queue)
        processes.append(process)

    process_context = ProcessContext(processes, error_queues)
    if not join:
        return process_context

    while not process_context.join():
        pass 
Author: songlab-cal, Project: tape, Lines of code: 48, Source: distributed_utils.py
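
_wrap is defined elsewhere in distributed_utils.py. The error_queue wiring suggests the usual spawn pattern: run the target and, on failure, hand the exception to the parent's ProcessContext through the queue. A hedged sketch:

def _wrap(func, kwargs, error_queue):
    try:
        func(**kwargs)
    except Exception as exc:
        error_queue.put(exc)  # matches the SimpleQueue[Exception] annotation above
        raise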


Note: The multiprocessing.SimpleQueue examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code; do not republish without permission.