

Python multiprocessing.Condition method code examples

This article collects typical usage examples of the multiprocessing.Condition method in Python. If you have been wondering exactly how multiprocessing.Condition is used, how to call it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the multiprocessing module, to which this method belongs.


The following presents 12 code examples of the multiprocessing.Condition method, sorted by popularity by default.
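Before the collected examples, here is a minimal, self-contained sketch of the basic wait/notify handshake with multiprocessing.Condition (written for this overview, not taken from any of the projects below): a child process updates shared state and notifies, while the parent waits on a predicate.

import multiprocessing

def _worker(cond, ready):
    # Acquire the condition, update the shared flag, then wake the waiter.
    with cond:
        ready.value = 1
        cond.notify()

if __name__ == '__main__':
    cond = multiprocessing.Condition()
    ready = multiprocessing.Value('i', 0)
    p = multiprocessing.Process(target=_worker, args=(cond, ready))
    with cond:
        p.start()
        # wait_for() releases the lock while sleeping and re-checks the
        # predicate each time the worker calls notify().
        cond.wait_for(lambda: ready.value == 1)
    p.join()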

Example 1: test_waitfor

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_waitfor(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)

        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()

        with cond:
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)

        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        p.join(5)
        self.assertFalse(p.is_alive())
        self.assertEqual(p.exitcode, 0) 
Developer: Microvellum, Project: Fluid-Designer, Lines: 25, Source: _test_multiprocessing.py

Example 2: test_copy_shared_mem

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_copy_shared_mem():
    csr = (spsp.random(num_nodes, num_nodes, density=0.1, format='csr') != 0).astype(np.int64)
    gidx = dgl.graph_index.create_graph_index(csr, True)

    cond_v = Condition()
    shared_v = Value('i', 0)
    p1 = Process(target=create_mem, args=(gidx, cond_v, shared_v))
    p2 = Process(target=check_mem, args=(gidx, cond_v, shared_v))
    p1.start()
    p2.start()
    p1.join()
    p2.join()

# Skip test this file
#if __name__ == '__main__':
#    test_copy_shared_mem()
#    test_init()
#    test_sync_barrier()
#    test_compute() 
Developer: dmlc, Project: dgl, Lines: 21, Source: test_shared_mem_store.py

Example 3: test_waitfor_timeout

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_waitfor_timeout(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)

        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=10))

        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        p.join(5)
        self.assertTrue(success.value) 
Developer: Microvellum, Project: Fluid-Designer, Lines: 24, Source: _test_multiprocessing.py

Example 4: test_wait_result

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_wait_result(self):
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            self.assertTrue(c.wait(10))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 10)

            p.join() 
Developer: Microvellum, Project: Fluid-Designer, Lines: 21, Source: _test_multiprocessing.py

Example 5: __init__

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def __init__(self, *args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None 
Developer: XiaoMi, Project: galaxy-sdk-python, Lines: 9, Source: TProcessPoolServer.py

Example 6: __init__

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def __init__(self):
        self.lock = multiprocessing.Lock()
        self.readers_condition = multiprocessing.Condition(self.lock)
        self.writer_condition = multiprocessing.Condition(self.lock)
        self.readers = multiprocessing.RawValue(ctypes.c_uint, 0)
        self.writer = multiprocessing.RawValue(ctypes.c_bool, False) 
Developer: bslatkin, Project: ringbuffer, Lines: 8, Source: ringbuffer.py
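The snippet above only shows construction. As a rough, hypothetical sketch (not the actual ringbuffer.py implementation), two Condition objects sharing one Lock are typically coordinated like this to let multiple readers proceed while a single writer is excluded:

import ctypes
import multiprocessing

class ReadWriteState:
    # Hypothetical reader/writer coordination built on the primitives above;
    # method names and logic are illustrative only.
    def __init__(self):
        self.lock = multiprocessing.Lock()
        self.readers_condition = multiprocessing.Condition(self.lock)
        self.writer_condition = multiprocessing.Condition(self.lock)
        self.readers = multiprocessing.RawValue(ctypes.c_uint, 0)
        self.writer = multiprocessing.RawValue(ctypes.c_bool, False)

    def start_read(self):
        with self.lock:
            # Readers block while a writer holds the resource.
            self.readers_condition.wait_for(lambda: not self.writer.value)
            self.readers.value += 1

    def finish_read(self):
        with self.lock:
            self.readers.value -= 1
            if self.readers.value == 0:
                # The last reader leaving wakes a waiting writer.
                self.writer_condition.notify()

    def start_write(self):
        with self.lock:
            self.writer.value = True
            # Wait until every active reader has finished.
            self.writer_condition.wait_for(lambda: self.readers.value == 0)

    def finish_write(self):
        with self.lock:
            self.writer.value = False
            # Wake all readers queued up behind the writer.
            self.readers_condition.notify_all()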

Example 7: test_timeout

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_timeout(self):
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        self.assertEqual(res, False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) 
Developer: Microvellum, Project: Fluid-Designer, Lines: 10, Source: _test_multiprocessing.py

Example 8: __init__

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def __init__(self, maxsize=0):
        '''initialize the queue'''
        self.mutex = multiprocessing.Lock()
        self.not_empty = multiprocessing.Condition(self.mutex)
        self.not_full = multiprocessing.Condition(self.mutex)
        self.maxsize = maxsize
        self._tags = {}  # list of refid's for each tag
        self._queue = {}  # the actual queue data
        self._refcount = {}  # how many tags refer to a given refid in the queue
        self.id_generator = id_generator() 
Developer: ActiveState, Project: code, Lines: 12, Source: recipe-576632.py
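For context, here is a hypothetical, stripped-down sketch of the classic handshake these two condition variables enable (not the recipe's actual put/get): put() blocks on not_full, get() blocks on not_empty, and each side notifies the other. The plain deque below is not shared across processes; the real recipe keeps its data in shareable structures.

import collections
import multiprocessing

class BoundedQueueSketch:
    def __init__(self, maxsize=0):
        self.mutex = multiprocessing.Lock()
        self.not_empty = multiprocessing.Condition(self.mutex)
        self.not_full = multiprocessing.Condition(self.mutex)
        self.maxsize = maxsize
        self._queue = collections.deque()  # illustration only, not shared memory

    def _is_full(self):
        return self.maxsize > 0 and len(self._queue) >= self.maxsize

    def put(self, item):
        with self.not_full:
            # Block while the queue is at capacity.
            self.not_full.wait_for(lambda: not self._is_full())
            self._queue.append(item)
            # Wake one consumer blocked in get().
            self.not_empty.notify()

    def get(self):
        with self.not_empty:
            # Block until at least one item is available.
            self.not_empty.wait_for(lambda: len(self._queue) > 0)
            item = self._queue.popleft()
            # Wake one producer blocked in put().
            self.not_full.notify()
            return item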

Example 9: __init__

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def __init__(self, target, num_workers, description=None):
        # type: (_MultiprocessOffload, function, int, str) -> None
        """Ctor for Multiprocess Offload
        :param _MultiprocessOffload self: this
        :param function target: target function for process
        :param int num_workers: number of worker processes
        :param str description: description
        """
        self._task_queue = multiprocessing.Queue()
        self._done_queue = multiprocessing.Queue()
        self._done_cv = multiprocessing.Condition()
        self._term_signal = multiprocessing.Value('i', 0)
        self._procs = []
        self._check_thread = None
        self._initialize_processes(target, num_workers, description) 
Developer: Azure, Project: blobxfer, Lines: 17, Source: offload.py

Example 10: done_cv

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def done_cv(self):
        # type: (_MultiprocessOffload) -> multiprocessing.Condition
        """Get Done condition variable
        :param _MultiprocessOffload self: this
        :rtype: multiprocessing.Condition
        :return: cv for download done
        """
        return self._done_cv 
Developer: Azure, Project: blobxfer, Lines: 10, Source: offload.py
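As a usage illustration only (a hypothetical consumer loop, not blobxfer's actual code), a "done" condition variable like the one returned above is typically paired with a done queue: the waiter sleeps on the condition until notified or a timeout elapses, then drains whatever results have accumulated.

import queue

def drain_done_items(done_cv, done_queue):
    # Hypothetical helper: block briefly on the condition variable, then
    # collect every result currently available on the done queue.
    results = []
    with done_cv:
        # Wakes on notify() from the producer side, or after the timeout.
        done_cv.wait(timeout=1)
    while True:
        try:
            results.append(done_queue.get_nowait())
        except queue.Empty:
            break
    return results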

Example 11: test_notify

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()

        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()

        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()

        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)

        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()

        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)
        p.join() 
Developer: Microvellum, Project: Fluid-Designer, Lines: 44, Source: _test_multiprocessing.py

Example 12: init_workers

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def init_workers(self):
        """
        Initialize all types of workers and start their worker processes.
        """

        actor_queues = [faster_fifo.Queue() for _ in range(self.cfg.num_workers)]

        policy_worker_queues = dict()
        for policy_id in range(self.cfg.num_policies):
            policy_worker_queues[policy_id] = []
            for i in range(self.cfg.policy_workers_per_policy):
                policy_worker_queues[policy_id].append(TorchJoinableQueue())

        log.info('Initializing learners...')
        policy_locks = [multiprocessing.Lock() for _ in range(self.cfg.num_policies)]
        resume_experience_collection_cv = [multiprocessing.Condition() for _ in range(self.cfg.num_policies)]

        learner_idx = 0
        for policy_id in range(self.cfg.num_policies):
            learner_worker = LearnerWorker(
                learner_idx, policy_id, self.cfg, self.obs_space, self.action_space,
                self.report_queue, policy_worker_queues[policy_id], self.traj_buffers,
                policy_locks[policy_id], resume_experience_collection_cv[policy_id],
            )
            learner_worker.start_process()
            learner_worker.init()

            self.learner_workers[policy_id] = learner_worker
            learner_idx += 1

        log.info('Initializing policy workers...')
        for policy_id in range(self.cfg.num_policies):
            self.policy_workers[policy_id] = []

            policy_queue = faster_fifo.Queue()
            self.policy_queues[policy_id] = policy_queue

            for i in range(self.cfg.policy_workers_per_policy):
                policy_worker = PolicyWorker(
                    i, policy_id, self.cfg, self.obs_space, self.action_space, self.traj_buffers,
                    policy_queue, actor_queues, self.report_queue, policy_worker_queues[policy_id][i],
                    policy_locks[policy_id], resume_experience_collection_cv[policy_id],
                )
                self.policy_workers[policy_id].append(policy_worker)
                policy_worker.start_process()

        log.info('Initializing actors...')

        # We support actor worker initialization in groups, which can be useful for some envs that
        # e.g. crash when too many environments are being initialized in parallel.
        # Currently the limit is not used since it is not required for any envs supported out of the box,
        # so we parallelize initialization as hard as we can.
        # If this is required for your environment, perhaps a better solution would be to use global locks,
        # like FileLock (see doom_gym.py)
        self.actor_workers = []
        max_parallel_init = int(1e9)  # might be useful to limit this for some envs
        worker_indices = list(range(self.cfg.num_workers))
        for i in range(0, self.cfg.num_workers, max_parallel_init):
            workers = self.init_subset(worker_indices[i:i + max_parallel_init], actor_queues)
            self.actor_workers.extend(workers) 
Developer: alex-petrenko, Project: sample-factory, Lines: 62, Source: appo.py


Note: The multiprocessing.Condition method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow each project's license when using or redistributing the code, and do not reproduce this article without permission.