

Python multiprocessing.Queue Code Examples

This article collects typical usage examples of Python's multiprocessing.Queue. If you are unsure exactly how to use multiprocessing.Queue, or are looking for concrete, working examples, the curated snippets below should help. You can also explore further usage examples from the multiprocessing module.


Below are 15 code examples of multiprocessing.Queue, sorted by popularity by default.
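
Before diving into the examples, here is a minimal standalone sketch (not taken from any project below) of the core pattern they all build on: one process puts items on a multiprocessing.Queue and another process takes them off.

from multiprocessing import Process, Queue

def producer(q):
    # Put a few items on the queue, then a sentinel to signal completion
    for i in range(3):
        q.put(i)
    q.put(None)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=producer, args=(q,))
    p.start()
    while True:
        item = q.get()        # blocks until an item is available
        if item is None:      # sentinel: producer is finished
            break
        print(item)
    p.join()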

Example 1: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def __init__(self, address, port, authkey, input_file, debug=False, timing=False):
        self.logger = util.log.getLogger('log_client')
        self.logger.info('server: start')

        self.timing = timing

        self.address = address
        self.port = int(port)
        self.authkey = authkey

        self.input_file = input_file

        self.real_start_time = time()
        self.simulation_start_time = 0

        self.fp_thread = None
        self.fs_thread = None

        self.flow_mod_queue = Queue() 
Author: sdn-ixp, Project: iSDX, Lines: 21, Source: log_client.py

Example 2: add_step

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def add_step(self, module_name_and_params, extra_args):
        config=module_name_and_params.split()
        module_name=config[0]
        params=config[1:]

        # collect extra arguments from command line meant for this particular module
        if extra_args is not None: 
            for _name, _value in extra_args.__dict__.items():
                if _name.startswith(module_name):
                    _modname,_argname=_name.split(".",1) # for example lemmatizer_mod.gpu
                    params.append("--"+_argname)
                    params.append(str(_value))

        mod=importlib.import_module(module_name)
        step_in=self.q_out
        self.q_out=Queue(self.max_q_size) #new pipeline end
        args=mod.argparser.parse_args(params)
        process=Process(target=mod.launch,args=(args,step_in,self.q_out))
        process.daemon=True
        process.start()
        self.processes.append(process) 
Author: TurkuNLP, Project: Turku-neural-parser-pipeline, Lines: 23, Source: pipeline.py

Example 3: reset

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def reset(self):
        """
        Resets the generator by stopping all processes
        """
        self.alive.value = False
        qsize = 0
        try:
            while True:
                self.queue.get(timeout=0.1)
                qsize += 1
        except QEmptyExcept:  # alias for queue.Empty, imported in the source module
            pass
        print("Queue size on reset: {}".format(qsize))
        for i, p in enumerate(self.proc):
            p.join()
        self.proc.clear() 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 18, Source: multiproc_data.py
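
A note on why Example 3 drains the queue before joining: a process that has put items on a multiprocessing.Queue will not terminate until its background feeder thread has flushed the buffered items into the underlying pipe, so calling join() on a producer while the queue is still full can deadlock. A minimal standalone sketch of the drain-then-join pattern (assumed names, not from the project above):

from multiprocessing import Process, Queue
from queue import Empty

def worker(q):
    # Producer that may leave items buffered when the consumer stops reading
    for i in range(10000):
        q.put(i)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    # Drain whatever is still queued; without this, p.join() can hang
    # while the child's feeder thread waits to flush its buffer.
    try:
        while True:
            q.get(timeout=0.1)
    except Empty:
        pass
    p.join()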

Example 4: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def __init__(self, docker_config):
        self.log_level = os.getenv('PYWREN_LOGLEVEL')
        self.config = docker_config
        self.name = 'docker'
        self.host = docker_config['host']
        self.queue = multiprocessing.Queue()
        self.docker_client = None
        self._is_localhost = self.host in ['127.0.0.1', 'localhost']

        if self._is_localhost:
            try:
                self.docker_client = docker.from_env()
            except Exception:
                pass

        log_msg = 'PyWren v{} init for Docker - Host: {}'.format(__version__, self.host)
        logger.info(log_msg)
        if not self.log_level:
            print(log_msg) 
Author: pywren, Project: pywren-ibm-cloud, Lines: 21, Source: docker.py

Example 5: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def __init__(self,
                 model: 'ParallelDQNModel',
                 n_envs: int,
                 memory_queue: Queue,
                 model_update_queue: Queue,
                 done_queue: Queue,
                 discount_factor: float = 0.9999,
                 batch_size: int = 128,
                 learning_rate: float = 0.0001,
                 memory_capacity: int = 10000):
        super().__init__()

        self.model = model
        self.n_envs = n_envs
        self.memory_queue = memory_queue
        self.model_update_queue = model_update_queue
        self.done_queue = done_queue
        self.discount_factor = discount_factor
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.memory_capacity = memory_capacity 
Author: tensortrade-org, Project: tensortrade, Lines: 23, Source: parallel_dqn_optimizer.py

Example 6: reset_state

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def reset_state(self):
        super(MultiThreadMapData, self).reset_state()
        if self._threads:
            self._threads[0].stop()
            for t in self._threads:
                t.join()

        self._in_queue = queue.Queue()
        self._out_queue = queue.Queue()
        self._evt = threading.Event()
        self._threads = [MultiThreadMapData._Worker(
            self._in_queue, self._out_queue, self._evt, self.map_func)
            for _ in range(self.num_thread)]
        for t in self._threads:
            t.start()

        self._guard = DataFlowReentrantGuard()

        # Call once at the beginning, to ensure inq+outq has a total of buffer_size elements
        self._fill_buffer() 
Author: tensorpack, Project: dataflow, Lines: 22, Source: parallel_map.py

Example 7: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def __init__(self, env_producer):
        self.env_name = env_producer.get_env_name()
        self.config = environments.get_config(self.env_name)
        self.worker_size = self.config["worker_num"]
        self.env_producer = env_producer
        self.queues = []
        self.w_in_queue = Queue()
        self.init_workers()
        self.session = None
        self.trainable_vars = None
        self.accum_vars = None
        self.p_opt_vars = None
        self.v_opt_vars = None
        self.assign_op = None
        self.agent = None
        self.saver = None
        self.summary_writer = None
        self.beta = 1
        self.lr_multiplier = 1.0
        self.iter_count = 1
        self.variables_file_path = "models/%s/variables.txt" % self.env_name
        self.model_path = "models/%s/model" % self.env_name
        self.initialized = False
        self.cur_step = -1
        self.start() 
Author: jet-black, Project: ppo-lstm-parallel, Lines: 27, Source: master.py

Example 8: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def __init__(self, data_queue, data_paths, repeat=True):
        '''
        data_queue : multiprocessing queue used to transfer the loaded mini-batches
        data_paths : list of (data, label) pairs used to load data
        repeat     : if True, keep returning data until the exit event is set
        '''
        super(DataProcess, self).__init__()
        # Queue to transfer the loaded mini batches
        self.data_queue = data_queue
        self.data_paths = data_paths
        self.num_data = len(data_paths)
        self.repeat = repeat

        # Tuple of data shape
        self.batch_size = cfg.CONST.BATCH_SIZE
        self.exit = Event()
        self.shuffle_db_inds() 
Author: chrischoy, Project: 3D-R2N2, Lines: 19, Source: data_process.py
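
Example 8 pairs the queue with a multiprocessing.Event (self.exit) so the parent can ask the worker to stop producing. A minimal standalone sketch of that cooperative-shutdown pattern (illustrative names, not from 3D-R2N2):

from multiprocessing import Process, Queue, Event
from queue import Empty

def worker(q, exit_event):
    i = 0
    while not exit_event.is_set():   # keep producing until asked to stop
        q.put(i)
        i += 1

if __name__ == '__main__':
    q = Queue()
    exit_event = Event()
    p = Process(target=worker, args=(q, exit_event))
    p.start()
    print(q.get())       # consume at least one item
    exit_event.set()     # signal the worker to exit its loop
    # Drain leftovers so the worker's feeder thread can flush and join cleanly
    try:
        while True:
            q.get(timeout=0.1)
    except Empty:
        pass
    p.join()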

Example 9: test_process

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def test_process():
    from multiprocessing import Queue
    from lib.config import cfg
    from lib.data_io import category_model_id_pair

    cfg.TRAIN.PAD_X = 10
    cfg.TRAIN.PAD_Y = 10

    data_queue = Queue(2)
    category_model_pair = category_model_id_pair(dataset_portion=[0, 0.1])

    data_process = ReconstructionDataProcess(data_queue, category_model_pair)
    data_process.start()
    batch_img, batch_voxel = data_queue.get()

    kill_processes(data_queue, [data_process]) 
Author: chrischoy, Project: 3D-R2N2, Lines: 18, Source: data_process.py

Example 10: test_integration_rabit_synchronize

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def test_integration_rabit_synchronize():
    q = Queue()

    port, _ = find_two_open_ports()

    host_count = 5
    host_list = range(host_count)
    expected_results = [{'idx': idx} for idx in host_list]

    for idx in host_list:
        p = Process(target=synchronize_fn, args=(host_count, port, idx == 0, idx, q))
        p.start()

    num_responses = 0
    while num_responses < host_count:
        host_aggregated_result = q.get(timeout=10)
        for host_individual_result in host_aggregated_result:
            assert host_individual_result in expected_results
        num_responses += 1 
Author: aws, Project: sagemaker-xgboost-container, Lines: 21, Source: test_distributed.py

Example 11: test_rabit_run_all_hosts_run

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def test_rabit_run_all_hosts_run():
    q = Queue()

    first_port, second_port = find_two_open_ports()

    host_count = 5
    host_list = range(host_count)
    expected_results = [idx for idx in host_list]

    for idx in host_list:
        p = Process(target=rabit_run_fn, args=(host_count, True, first_port, second_port, idx == 0, idx, q))
        p.start()

    num_responses = 0
    while num_responses < host_count:
        response = q.get(timeout=15)
        expected_results.remove(response)
        num_responses += 1

    assert len(expected_results) == 0 
Author: aws, Project: sagemaker-xgboost-container, Lines: 22, Source: test_distributed.py

Example 12: test_rabit_run_exclude_one_host

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def test_rabit_run_exclude_one_host():
    q = Queue()

    first_port, second_port = find_two_open_ports()

    idx_to_exclude = 3

    host_count = 5
    host_list = range(host_count)
    expected_results = [idx for idx in host_list if idx != idx_to_exclude]

    for idx in host_list:
        p = Process(target=rabit_run_fn, args=(
            host_count, idx != idx_to_exclude, first_port, second_port, idx == 0, idx, q))
        p.start()

    num_responses = 0
    while num_responses < host_count - 1:
        response = q.get(timeout=15)
        expected_results.remove(response)
        num_responses += 1

    assert len(expected_results) == 0 
Author: aws, Project: sagemaker-xgboost-container, Lines: 25, Source: test_distributed.py

Example 13: test_rabit_delay_master

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def test_rabit_delay_master():
    q = Queue()

    first_port, second_port = find_two_open_ports()

    host_count = 5
    host_list = range(host_count)
    expected_results = [idx for idx in host_list]

    for idx in host_list:
        p = Process(
            target=rabit_run_delay_master, args=(host_count, True, first_port, second_port, idx == 0, idx, q, None))
        p.start()

    num_responses = 0
    while num_responses < host_count:
        response = q.get(timeout=20)
        expected_results.remove(response)
        num_responses += 1

    assert len(expected_results) == 0 
Author: aws, Project: sagemaker-xgboost-container, Lines: 23, Source: test_distributed.py

Example 14: test_rabit_run_fail_bad_max_retry_attempts

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def test_rabit_run_fail_bad_max_retry_attempts(bad_max_retry_attempts):
    q = Queue()

    first_port, second_port = find_two_open_ports()

    host_count = 5
    host_list = range(host_count)

    for idx in host_list:
        p = Process(target=rabit_run_fail, args=(
            rabit_run_fn, host_count, True, first_port, second_port, idx == 0, idx, q, bad_max_retry_attempts))
        p.start()

    num_responses = 0
    while num_responses < host_count:
        host_result = q.get(timeout=10)
        assert "max_connect_attempts must be None or an integer greater than 0." in host_result
        num_responses += 1 
Author: aws, Project: sagemaker-xgboost-container, Lines: 20, Source: test_distributed.py

Example 15: multiCoreBench

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Queue [as alias]
def multiCoreBench(engine, threads):

    # Give time for any previous run to finish
    time.sleep(2)

    # Dump results into a Queue
    outqueue = multiprocessing.Queue()

    # Spawn each singleCoreBench()
    processes = [
        multiprocessing.Process(
            target=singleCoreBench,
            args=(engine, outqueue)
        ) for ii in range(threads)
    ]

    # Launch each singleCoreBench() process
    for process in processes:
        process.start()

    # Collect one result per worker; get() blocks until a result is available
    return [outqueue.get() for ii in range(threads)] 
Author: AndyGrant, Project: OpenBench, Lines: 24, Source: bench.py


Note: The multiprocessing.Queue examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors, and the copyright of the source code belongs to the original authors; refer to the corresponding project's License before distributing or reusing the code. Please do not reproduce this article without permission.