

Python multiprocessing.Condition Code Examples

This article collects typical usage examples of Python's multiprocessing.Condition. If you are looking for concrete answers to questions such as what multiprocessing.Condition does or how to use it in your own code, the curated examples below should help. You can also explore further usage examples from the multiprocessing module itself.


The following shows 12 code examples of multiprocessing.Condition, sorted by popularity by default.
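
Before the collected examples, here is a minimal, self-contained sketch of the basic wait/notify pattern with multiprocessing.Condition. The function and variable names (waiter, flag) are illustrative and not taken from any example below.

import multiprocessing
import time

def waiter(cond, flag):
    # Block until the main process sets the flag and notifies.
    with cond:
        cond.wait_for(lambda: flag.value == 1)
        print('waiter: flag observed, proceeding')

if __name__ == '__main__':
    cond = multiprocessing.Condition()
    flag = multiprocessing.Value('i', 0)
    p = multiprocessing.Process(target=waiter, args=(cond, flag))
    p.start()
    time.sleep(0.1)            # give the child a moment to start (the predicate makes this optional)
    with cond:
        flag.value = 1         # change shared state while holding the lock
        cond.notify()          # wake the waiting process
    p.join()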

Example 1: test_waitfor

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_waitfor(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)

        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()

        with cond:
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)

        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        p.join(5)
        self.assertFalse(p.is_alive())
        self.assertEqual(p.exitcode, 0) 
Developer: Microvellum, Project: Fluid-Designer, Lines: 25, Source: _test_multiprocessing.py
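
The child-process target _test_waitfor_f is not part of this excerpt. A hedged sketch of the other side of the handshake, reconstructed from what the test asserts rather than copied from the CPython source:

# Hypothetical sketch; the real _test_waitfor_f (a classmethod in CPython's
# _test_multiprocessing.py) may differ in detail.
import sys

def _test_waitfor_f(cls, cond, state):
    with cond:
        state.value = 0          # tell the parent we are ready
        cond.notify()
        # wait until the parent has incremented state four times, to 4
        result = cond.wait_for(lambda: state.value == 4)
        if not result or state.value != 4:
            sys.exit(1)          # non-zero exitcode would fail the parent's assertEqual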

Example 2: test_copy_shared_mem

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_copy_shared_mem():
    csr = (spsp.random(num_nodes, num_nodes, density=0.1, format='csr') != 0).astype(np.int64)
    gidx = dgl.graph_index.create_graph_index(csr, True)

    cond_v = Condition()
    shared_v = Value('i', 0)
    p1 = Process(target=create_mem, args=(gidx, cond_v, shared_v))
    p2 = Process(target=check_mem, args=(gidx, cond_v, shared_v))
    p1.start()
    p2.start()
    p1.join()
    p2.join()

# Skip test this file
#if __name__ == '__main__':
#    test_copy_shared_mem()
#    test_init()
#    test_sync_barrier()
#    test_compute() 
Developer: dmlc, Project: dgl, Lines: 21, Source: test_shared_mem_store.py
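
The helpers create_mem and check_mem are defined elsewhere in test_shared_mem_store.py. Assuming cond_v and shared_v implement a simple ready-flag handshake, the coordination between the two processes roughly follows the pattern below; the bodies are illustrative only.

# Illustrative handshake only; the real create_mem/check_mem in the DGL test
# also copy the graph index into shared memory and verify its contents.
def create_mem(gidx, cond_v, shared_v):
    # ... publish gidx into shared memory here ...
    with cond_v:
        shared_v.value = 1       # signal that the shared copy is ready
        cond_v.notify()

def check_mem(gidx, cond_v, shared_v):
    with cond_v:
        cond_v.wait_for(lambda: shared_v.value == 1)
    # ... read the shared copy and compare it against gidx here ...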

Example 3: test_waitfor_timeout

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_waitfor_timeout(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)

        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=10))

        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        p.join(5)
        self.assertTrue(success.value) 
Developer: Microvellum, Project: Fluid-Designer, Lines: 24, Source: _test_multiprocessing.py

Example 4: test_wait_result

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_wait_result(self):
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            self.assertTrue(c.wait(10))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 10)

            p.join() 
Developer: Microvellum, Project: Fluid-Designer, Lines: 21, Source: _test_multiprocessing.py
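
The _test_wait_result helper is also not shown. Based on what the assertions require (a notify to satisfy c.wait(10), then a SIGINT to the parent on POSIX so the second wait raises KeyboardInterrupt), the worker plausibly looks like this sketch:

# Hypothetical sketch; the real _test_wait_result lives in _test_multiprocessing.py.
import os
import signal
import time

def _test_wait_result(cls, c, pid):
    with c:
        c.notify()                        # satisfies the parent's c.wait(10)
    time.sleep(1)
    if pid is not None:
        os.kill(pid, signal.SIGINT)       # makes the parent's second wait raise KeyboardInterrupt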

Example 5: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def __init__(self, *args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None 
Developer: XiaoMi, Project: galaxy-sdk-python, Lines: 9, Source: TProcessPoolServer.py
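
Elsewhere in TProcessPoolServer (not shown in this excerpt), stopCondition and isRunning drive the shutdown handshake: the serving process blocks on the condition until stop() flips the flag and notifies. A rough sketch, hedged since the surrounding methods are not part of this snippet:

# Sketch of the serve/stop pattern this constructor prepares; the actual
# TProcessPoolServer methods contain additional fork and cleanup logic.
def serve(self):
    self.isRunning.value = True
    # ... fork self.numWorkers worker processes here ...
    with self.stopCondition:
        while self.isRunning.value:
            self.stopCondition.wait()   # sleep until stop() is called

def stop(self):
    self.isRunning.value = False
    with self.stopCondition:
        self.stopCondition.notify()     # wake serve() so it can shut down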

Example 6: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def __init__(self):
        self.lock = multiprocessing.Lock()
        self.readers_condition = multiprocessing.Condition(self.lock)
        self.writer_condition = multiprocessing.Condition(self.lock)
        self.readers = multiprocessing.RawValue(ctypes.c_uint, 0)
        self.writer = multiprocessing.RawValue(ctypes.c_bool, False) 
Developer: bslatkin, Project: ringbuffer, Lines: 8, Source: ringbuffer.py
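
This constructor sets up the state for a classic one-writer/many-readers lock, with both conditions sharing the same underlying lock. A condensed sketch of how the two condition variables would typically be used; the real ringbuffer.py methods do more bookkeeping.

# Illustrative reader/writer methods for the state initialized above.
def start_read(self):
    with self.lock:
        while self.writer.value:              # readers wait while a writer holds the buffer
            self.readers_condition.wait()
        self.readers.value += 1

def finish_read(self):
    with self.lock:
        self.readers.value -= 1
        if self.readers.value == 0:
            self.writer_condition.notify()    # a waiting writer may proceed

def start_write(self):
    with self.lock:
        while self.writer.value or self.readers.value > 0:
            self.writer_condition.wait()
        self.writer.value = True

def finish_write(self):
    with self.lock:
        self.writer.value = False
        self.readers_condition.notify_all()   # wake all blocked readers
        self.writer_condition.notify()        # or the next writer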

Example 7: test_timeout

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_timeout(self):
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        self.assertEqual(res, False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) 
Developer: Microvellum, Project: Fluid-Designer, Lines: 10, Source: _test_multiprocessing.py

Example 8: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def __init__(self, maxsize=0):
        '''initialize the queue'''
        self.mutex = multiprocessing.Lock()
        self.not_empty = multiprocessing.Condition(self.mutex)
        self.not_full = multiprocessing.Condition(self.mutex)
        self.maxsize = maxsize
        self._tags = {}  # list of refid's for each tag
        self._queue = {}  # the actual queue data
        self._refcount = {}  # how many tags refer to a given refid in the queue
        self.id_generator = id_generator() 
Developer: ActiveState, Project: code, Lines: 12, Source: recipe-576632.py
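
Given the two conditions created above, put and get on this tagged queue would follow the standard Queue pattern: producers wait on not_full and notify not_empty, consumers do the reverse. A trimmed, hypothetical sketch (the real recipe also maintains _tags and _refcount, and id_generator() is assumed to yield fresh ids):

# Hypothetical put/get; the actual recipe 576632 also tracks tags and refcounts.
def put(self, item):
    with self.not_full:
        if self.maxsize > 0:
            while len(self._queue) >= self.maxsize:
                self.not_full.wait()          # block until a slot frees up
        refid = next(self.id_generator)
        self._queue[refid] = item
        self.not_empty.notify()               # wake one waiting consumer

def get(self):
    with self.not_empty:
        while not self._queue:
            self.not_empty.wait()             # block until an item arrives
        refid, item = self._queue.popitem()
        self.not_full.notify()                # a producer may now proceed
        return item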

Example 9: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def __init__(self, target, num_workers, description=None):
        # type: (_MultiprocessOffload, function, int, str) -> None
        """Ctor for Multiprocess Offload
        :param _MultiprocessOffload self: this
        :param function target: target function for process
        :param int num_workers: number of worker processes
        :param str description: description
        """
        self._task_queue = multiprocessing.Queue()
        self._done_queue = multiprocessing.Queue()
        self._done_cv = multiprocessing.Condition()
        self._term_signal = multiprocessing.Value('i', 0)
        self._procs = []
        self._check_thread = None
        self._initialize_processes(target, num_workers, description) 
Developer: Azure, Project: blobxfer, Lines: 17, Source: offload.py
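
The condition created here pairs with _done_queue: worker processes put finished items on the queue and notify _done_cv, while a checker thread waits on it. A hedged sketch of both sides; the helper names are hypothetical and the real blobxfer offload code adds termination handling via _term_signal.

# Illustrative use of _done_cv only.
def _worker_loop(task_queue, done_queue, done_cv, target):
    for task in iter(task_queue.get, None):   # None acts as a stop sentinel
        result = target(task)
        done_queue.put(result)
        with done_cv:
            done_cv.notify()                  # wake the checker thread

def _check_for_done(self):
    with self._done_cv:
        self._done_cv.wait(timeout=1)         # woken early when a result arrives
    while not self._done_queue.empty():
        result = self._done_queue.get()
        # ... hand the completed result back to the caller here ...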

Example 10: done_cv

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def done_cv(self):
        # type: (_MultiprocessOffload) -> multiprocessing.Condition
        """Get Done condition variable
        :param _MultiprocessOffload self: this
        :rtype: multiprocessing.Condition
        :return: cv for download done
        """
        return self._done_cv 
Developer: Azure, Project: blobxfer, Lines: 10, Source: offload.py

Example 11: test_notify

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()

        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()

        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()

        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)

        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()

        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)
        p.join() 
Developer: Microvellum, Project: Fluid-Designer, Lines: 44, Source: _test_multiprocessing.py
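
The shared target self.f is not part of the excerpt. For the test's accounting with the sleeping and woken semaphores to work, it has roughly this shape (a sketch, not the verbatim CPython helper, which is a classmethod on the test case):

# Sketch of the helper used by test_notify.
def f(cls, cond, sleeping, woken, timeout=None):
    cond.acquire()
    sleeping.release()      # tell the parent we are about to wait
    cond.wait(timeout)
    woken.release()         # tell the parent we were notified
    cond.release()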

Example 12: init_workers

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Condition [as alias]
def init_workers(self):
        """
        Initialize all types of workers and start their worker processes.
        """

        actor_queues = [faster_fifo.Queue() for _ in range(self.cfg.num_workers)]

        policy_worker_queues = dict()
        for policy_id in range(self.cfg.num_policies):
            policy_worker_queues[policy_id] = []
            for i in range(self.cfg.policy_workers_per_policy):
                policy_worker_queues[policy_id].append(TorchJoinableQueue())

        log.info('Initializing learners...')
        policy_locks = [multiprocessing.Lock() for _ in range(self.cfg.num_policies)]
        resume_experience_collection_cv = [multiprocessing.Condition() for _ in range(self.cfg.num_policies)]

        learner_idx = 0
        for policy_id in range(self.cfg.num_policies):
            learner_worker = LearnerWorker(
                learner_idx, policy_id, self.cfg, self.obs_space, self.action_space,
                self.report_queue, policy_worker_queues[policy_id], self.traj_buffers,
                policy_locks[policy_id], resume_experience_collection_cv[policy_id],
            )
            learner_worker.start_process()
            learner_worker.init()

            self.learner_workers[policy_id] = learner_worker
            learner_idx += 1

        log.info('Initializing policy workers...')
        for policy_id in range(self.cfg.num_policies):
            self.policy_workers[policy_id] = []

            policy_queue = faster_fifo.Queue()
            self.policy_queues[policy_id] = policy_queue

            for i in range(self.cfg.policy_workers_per_policy):
                policy_worker = PolicyWorker(
                    i, policy_id, self.cfg, self.obs_space, self.action_space, self.traj_buffers,
                    policy_queue, actor_queues, self.report_queue, policy_worker_queues[policy_id][i],
                    policy_locks[policy_id], resume_experience_collection_cv[policy_id],
                )
                self.policy_workers[policy_id].append(policy_worker)
                policy_worker.start_process()

        log.info('Initializing actors...')

        # We support actor worker initialization in groups, which can be useful for some envs that
        # e.g. crash when too many environments are being initialized in parallel.
        # Currently the limit is not used since it is not required for any envs supported out of the box,
        # so we parallelize initialization as hard as we can.
        # If this is required for your environment, perhaps a better solution would be to use global locks,
        # like FileLock (see doom_gym.py)
        self.actor_workers = []
        max_parallel_init = int(1e9)  # might be useful to limit this for some envs
        worker_indices = list(range(self.cfg.num_workers))
        for i in range(0, self.cfg.num_workers, max_parallel_init):
            workers = self.init_subset(worker_indices[i:i + max_parallel_init], actor_queues)
            self.actor_workers.extend(workers) 
Developer: alex-petrenko, Project: sample-factory, Lines: 62, Source: appo.py
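
How resume_experience_collection_cv is consumed is outside this excerpt. The name suggests a pause/resume handshake between each learner and its policy/actor workers; below is a generic, hypothetical sketch of that pattern, with collecting_flag standing in for whatever shared flag the real code uses.

# Hypothetical pause/resume pattern; the real LearnerWorker/PolicyWorker logic
# in sample-factory is more involved (it also checks shared state and timeouts).
def pause_collection(resume_cv, collecting_flag):
    with resume_cv:
        collecting_flag.value = 0     # workers should stop producing experience

def resume_collection(resume_cv, collecting_flag):
    with resume_cv:
        collecting_flag.value = 1
        resume_cv.notify_all()        # wake every worker waiting to resume

def worker_wait_if_paused(resume_cv, collecting_flag):
    with resume_cv:
        while collecting_flag.value == 0:
            resume_cv.wait()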


Note: The multiprocessing.Condition examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code. Do not reproduce this article without permission.