

Python multiprocessing.Value Code Examples

This article collects typical usage examples of the multiprocessing.Value method in Python. If you are unsure what multiprocessing.Value does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples from the multiprocessing module, where this method lives.


The sections below present 15 code examples of the multiprocessing.Value method, ordered by popularity by default.
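Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what multiprocessing.Value provides: a ctypes-backed scalar in shared memory that several processes can read and update, with get_lock() available to make compound operations such as increments atomic.

import multiprocessing

def add_many(counter, n):
    # get_lock() guards the read-modify-write; a bare `counter.value += 1`
    # from several processes at once could lose updates.
    for _ in range(n):
        with counter.get_lock():
            counter.value += 1

if __name__ == '__main__':
    counter = multiprocessing.Value('i', 0)   # 'i' = C int, initial value 0
    procs = [multiprocessing.Process(target=add_many, args=(counter, 1000))
             for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value)  # 4000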

Example 1: test_init

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def test_init():
    manager = Manager()
    return_dict = manager.dict()

    # make sure the server initializes before the workers start
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph1', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_init_func, args=(0, 'test_graph1', return_dict))
    work_p2 = Process(target=check_init_func, args=(1, 'test_graph1', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Developer: dmlc, Project: dgl, Lines: 21, Source: test_shared_mem_store.py
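The server_func referenced above is not included in this excerpt. A minimal, hypothetical sketch of the readiness-flag handshake these tests rely on (the server flips a shared Value once it is ready, and the parent busy-waits on the flag before starting workers) might look like the following; the real dgl server is considerably more involved.

import time
from multiprocessing import Process, Value

def toy_server(num_workers, graph_name, server_init):
    # ... build the shared-memory graph store here ...
    server_init.value = 1        # signal readiness to the waiting parent
    time.sleep(2)                # stand-in for serving until workers finish

if __name__ == '__main__':
    server_init = Value('i', 0)
    serv_p = Process(target=toy_server, args=(2, 'toy_graph', server_init))
    serv_p.start()
    while server_init.value == 0:    # workers would start only after this flips
        time.sleep(1)
    serv_p.join()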

Example 2: test_compute

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def test_compute():
    manager = Manager()
    return_dict = manager.dict()

    # make sure the server initializes before the workers start
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph3', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_compute_func, args=(0, 'test_graph3', return_dict))
    work_p2 = Process(target=check_compute_func, args=(1, 'test_graph3', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Developer: dmlc, Project: dgl, Lines: 21, Source: test_shared_mem_store.py

Example 3: test_sync_barrier

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def test_sync_barrier():
    manager = Manager()
    return_dict = manager.dict()

    # make sure the server initializes before the workers start
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph4', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_sync_barrier, args=(0, 'test_graph4', return_dict))
    work_p2 = Process(target=check_sync_barrier, args=(1, 'test_graph4', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Developer: dmlc, Project: dgl, Lines: 21, Source: test_shared_mem_store.py

Example 4: test_copy_shared_mem

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def test_copy_shared_mem():
    csr = (spsp.random(num_nodes, num_nodes, density=0.1, format='csr') != 0).astype(np.int64)
    gidx = dgl.graph_index.create_graph_index(csr, True)

    cond_v = Condition()
    shared_v = Value('i', 0)
    p1 = Process(target=create_mem, args=(gidx, cond_v, shared_v))
    p2 = Process(target=check_mem, args=(gidx, cond_v, shared_v))
    p1.start()
    p2.start()
    p1.join()
    p2.join()

# Skip running these tests when this file is executed directly
#if __name__ == '__main__':
#    test_copy_shared_mem()
#    test_init()
#    test_sync_barrier()
#    test_compute() 
Developer: dmlc, Project: dgl, Lines: 21, Source: test_shared_mem_store.py

Example 5: test_chain_sampling_multidim_model

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def test_chain_sampling_multidim_model(self):
        """Test that sampling from DREAM history for multi-dimensional model when the history is known matches with expected possible samples."""
        self.params, self.like = multidmodel()
        model = Model(likelihood=self.like, sampled_parameters=self.params)
        dream = Dream(model=model)
        history_arr = mp.Array('d', [0]*2*dream.total_var_dimension)
        n = mp.Value('i', 0)
        pydream.Dream_shared_vars.history = history_arr
        pydream.Dream_shared_vars.count = n
        chains_added_to_history = []
        for i in range(2):
            start = i*dream.total_var_dimension
            end = start+dream.total_var_dimension
            chain = dream.draw_from_prior(model.sampled_parameters)
            pydream.Dream_shared_vars.history[start:end] = chain
            chains_added_to_history.append(chain)       
        sampled_chains = dream.sample_from_history(nseedchains=2, DEpairs=1, ndimensions=dream.total_var_dimension)
        sampled_chains = np.array(sampled_chains)
        chains_added_to_history = np.array(chains_added_to_history)
        self.assertIs(np.array_equal(chains_added_to_history[chains_added_to_history[:,0].argsort()], sampled_chains[sampled_chains[:,0].argsort()]), True) 
Developer: LoLab-VU, Project: PyDREAM, Lines: 22, Source: test_dream.py

Example 6: test_history_recording_simple_model

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def test_history_recording_simple_model(self):
        """Test that history in memory matches with that recorded for test one-dimensional model."""
        self.param, self.like = onedmodel()
        model = Model(self.like, self.param)
        step = Dream(model=model, model_name='test_history_recording')
        history_arr = mp.Array('d', [0]*4*step.total_var_dimension)
        n = mp.Value('i', 0)
        nchains = mp.Value('i', 3)
        pydream.Dream_shared_vars.history = history_arr
        pydream.Dream_shared_vars.count = n
        pydream.Dream_shared_vars.nchains = nchains
        test_history = np.array([[1], [3], [5], [7]])
        for chainpoint in test_history:
            for point in chainpoint:
                step.record_history(nseedchains=0, ndimensions=step.total_var_dimension, q_new=point, len_history=len(history_arr))
        history_arr_np = np.frombuffer(pydream.Dream_shared_vars.history.get_obj())
        history_arr_np_reshaped = history_arr_np.reshape(np.shape(test_history))
        self.assertIs(np.array_equal(history_arr_np_reshaped, test_history), True)
        remove('test_history_recording_DREAM_chain_history.npy')
        remove('test_history_recording_DREAM_chain_adapted_crossoverprob.npy')
        remove('test_history_recording_DREAM_chain_adapted_gammalevelprob.npy') 
Developer: LoLab-VU, Project: PyDREAM, Lines: 23, Source: test_dream.py
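Examples 6 and 7 (and Example 10 below) all rely on the same idiom: wrapping the raw buffer of a multiprocessing.Array with np.frombuffer(arr.get_obj()) so that shared memory can be read and written through a NumPy view. A minimal sketch of that idiom, combined with a Value completion flag, follows; the names are illustrative and not taken from PyDREAM.

import numpy as np
from multiprocessing import Array, Process, Value

def fill(shared_arr, done_flag):
    # Re-wrap the shared ctypes buffer as a NumPy view inside the child process.
    view = np.frombuffer(shared_arr.get_obj(), dtype=np.float64)
    view[:] = np.arange(len(view))
    done_flag.value = 1

if __name__ == '__main__':
    shared_arr = Array('d', 4)          # 'd' = C double, zero-initialized
    done_flag = Value('i', 0)
    p = Process(target=fill, args=(shared_arr, done_flag))
    p.start()
    p.join()
    parent_view = np.frombuffer(shared_arr.get_obj(), dtype=np.float64)
    print(done_flag.value, parent_view)  # 1 [0. 1. 2. 3.]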

Example 7: test_history_recording_multidim_model

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def test_history_recording_multidim_model(self):
        """Test that history in memory matches with that recorded for test multi-dimensional model."""
        self.param, self.like = multidmodel()
        model = Model(self.like, self.param)
        dream = Dream(model=model, model_name='test_history_recording')
        history_arr = mp.Array('d', [0]*4*dream.total_var_dimension*3)
        n = mp.Value('i', 0)
        nchains = mp.Value('i', 3)
        pydream.Dream_shared_vars.history = history_arr
        pydream.Dream_shared_vars.count = n
        pydream.Dream_shared_vars.nchains = nchains
        test_history = np.array([[[1, 2, 3, 4], [3, 4, 5, 6], [5, 6, 7, 8]], [[7, 8, 9, 10], [9, 12, 18, 20], [11, 14, 18, 8]], [[13, 14, 18, 4], [15, 17, 11, 8], [17, 28, 50, 4]], [[19, 21, 1, 18], [21, 19, 19, 11], [23, 4, 3, 2]]])
        for chainpoint in test_history:
            for point in chainpoint:
                dream.record_history(nseedchains=0, ndimensions=dream.total_var_dimension, q_new=point, len_history=len(history_arr))
        history_arr_np = np.frombuffer(pydream.Dream_shared_vars.history.get_obj())
        history_arr_np_reshaped = history_arr_np.reshape(np.shape(test_history))
        self.assertIs(np.array_equal(history_arr_np_reshaped, test_history), True)
        remove('test_history_recording_DREAM_chain_history.npy')
        remove('test_history_recording_DREAM_chain_adapted_crossoverprob.npy')
        remove('test_history_recording_DREAM_chain_adapted_gammalevelprob.npy') 
Developer: LoLab-VU, Project: PyDREAM, Lines: 23, Source: test_dream.py

Example 8: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def __init__(self, cfg):
        self.__is_use_can_port = cfg['use_can']

        # Data for store
        self.rx_counter_servo_unit = multiprocessing.Value(ctypes.c_int,0)
        self.rx_time_us_diff = multiprocessing.Value(ctypes.c_int,0)
        self.rx_button_y = multiprocessing.Value(ctypes.c_bool,False)
        self.rx_button_g = multiprocessing.Value(ctypes.c_bool,False)
        self.rx_button_r = multiprocessing.Value(ctypes.c_bool,False)
        self.rx_actual_angle = multiprocessing.Value(ctypes.c_float,0.0)
        self.can_error_count_rx = multiprocessing.Value(ctypes.c_int,0)

        # Initialize process
        self.__m = multiprocessing.Process(target=self.__process_can, \
                                           args=(cfg['can_name'], \
                                                 cfg['can_bustype'], \
                                                 cfg['can_bitrate'], \
                                                 cfg['can_dbc_path'], \
                                                 cfg['can_rx_interval']))
        # Start process
        self.__m.start()

        return 
Developer: YanbaruRobotics, Project: PythonPilot, Lines: 25, Source: io_can.py

Example 9: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def __init__(self, cfg):
        # Bridge Tx data from subscribe process to publish process
        self.__can_error_count_tx = multiprocessing.Value(ctypes.c_int,0)

        # Initialize process
        self.__m_pub = multiprocessing.Process(target=self.__process_pub, \
                                               args=(cfg,
                                                     cfg['zmq_localhost'], \
                                                     cfg['zmq_port_pubsub_devicerx'], \
                                                     cfg['zmq_topic_devicerx'], \
                                                     cfg['zmq_interval_devicerx']))
        self.__m_sub = multiprocessing.Process(target=self.__process_sub, \
                                               args=(cfg,
                                                     cfg['zmq_localhost'], \
                                                     cfg['zmq_port_pubsub_devicetx'], \
                                                     cfg['zmq_topic_devicetx'], \
                                                     cfg['zmq_interval_devicetx']))
        # Start process
        self.__m_pub.start()
        self.__m_sub.start()

        return 
Developer: YanbaruRobotics, Project: PythonPilot, Lines: 24, Source: zmq_device_pubsub.py

Example 10: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def __init__(self, action_dim, observation_dim, **kwargs):
        # shared variable that all processes will see
        self.crash_flag = Value('i', 0)
        self.reset_call_flag = Value('i', 0)

        # Communicator Parameters
        communicator_setups = {'generic1': {'Communicator': MockCommunicator,
                                            'kwargs': {}},
                               'generic2': {'Communicator': MockCommunicator,
                                            'kwargs': {}}
                              }

        self._uniform_array_ = np.frombuffer(Array('d', 3).get_obj(), dtype=np.float64)

        super().__init__(communicator_setups=communicator_setups,
                         action_dim=action_dim,
                         observation_dim=observation_dim,
                         **kwargs) 
Developer: kindredresearch, Project: SenseAct, Lines: 20, Source: test_rtrl_base_env.py

Example 11: _sync

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def _sync(pack):
    """Simulate a package travelling through the cluster."""
    from django_q.cluster import worker, monitor

    task_queue = Queue()
    result_queue = Queue()
    task = SignedPackage.loads(pack)
    task_queue.put(task)
    task_queue.put("STOP")
    worker(task_queue, result_queue, Value("f", -1))
    result_queue.put("STOP")
    monitor(result_queue)
    task_queue.close()
    task_queue.join_thread()
    result_queue.close()
    result_queue.join_thread()
    return task["id"] 
Developer: Koed00, Project: django-q, Lines: 19, Source: tasks.py

Example 12: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def __init__(self, num_processes, max_queue_size, fn):
        """

        Parameters
        ----------
        num_processes: int
            Number of processes to spawn
        max_queue_size: int
            Maximum samples in the queue before processes wait
        fn: function
            function that generates samples, executed on separate processes.
        """
        self.queue = mp.Queue(maxsize=int(max_queue_size))
        self.alive = mp.Value(c_bool, False, lock=False)
        self.num_proc = num_processes
        self.proc = list()
        self.fn = fn 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 19, Source: multiproc_data.py

Example 13: _proc_loop

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def _proc_loop(proc_id, alive, queue, fn):
        """
        Process loop for generating data

        Parameters
        ----------
        proc_id: int
            Process id
        alive: multiprocessing.Value
            variable signaling whether the process should continue or not
        queue: multiprocessing.Queue
            queue for passing data back
        fn: function
            function object that returns a sample to be pushed into the queue
        """
        print("proc {} started".format(proc_id))
        try:
            while alive.value:
                data = fn()
                put_success = False
                while alive.value and not put_success:
                    try:
                        queue.put(data, timeout=0.5)
                        put_success = True
                    except QFullExcept:
                        # print("Queue Full")
                        pass
        except KeyboardInterrupt:
            print("W: interrupt received, stopping process {} ...".format(proc_id))
        print("Closing process {}".format(proc_id))
        queue.close() 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 33, Source: multiproc_data.py

Example 14: start

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def start(self):
        if self.process is None:
            self.scanning = Value(c_bool, True)
            self.process = Process(target=self._scan, args=(self.scanning,))
            self.process.start() 
Developer: ParadropLabs, Project: Paradrop, Lines: 7, Source: scanner.py

Example 15: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Value [as alias]
def __init__(self, config, executor_id, internal_storage):
        self.log_level = os.getenv('PYWREN_LOGLEVEL')
        self.config = config
        self.executor_id = executor_id
        self.storage_config = extract_storage_config(self.config)
        self.internal_storage = internal_storage
        self.compute_config = extract_compute_config(self.config)
        self.is_pywren_function = is_pywren_function()
        self.invokers = []

        self.remote_invoker = self.config['pywren'].get('remote_invoker', False)
        self.workers = self.config['pywren'].get('workers')
        logger.debug('ExecutorID {} - Total available workers: {}'
                     .format(self.executor_id, self.workers))

        self.compute_handlers = []
        cb = self.compute_config['backend']
        regions = self.compute_config[cb].get('region')
        if regions and type(regions) == list:
            for region in regions:
                compute_config = self.compute_config.copy()
                compute_config[cb]['region'] = region
                compute_handler = Compute(compute_config)
                self.compute_handlers.append(compute_handler)
        else:
            compute_handler = Compute(self.compute_config)
            self.compute_handlers.append(compute_handler)

        logger.debug('ExecutorID {} - Creating function invoker'.format(self.executor_id))

        self.token_bucket_q = Queue()
        self.pending_calls_q = Queue()
        self.running_flag = Value('i', 0)
        self.ongoing_activations = 0

        self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q) 
Developer: pywren, Project: pywren-ibm-cloud, Lines: 38, Source: invoker.py


Note: The multiprocessing.Value examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code; do not republish without permission.