

Python multiprocessing.Pool Code Examples

This article collects typical usage examples of multiprocessing.Pool in Python. If you are wondering what multiprocessing.Pool does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore other usage examples from the multiprocessing module.


The following presents 15 code examples of multiprocessing.Pool, drawn from open-source projects and sorted by popularity by default.
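Before the project examples, here is a minimal self-contained sketch of the basic multiprocessing.Pool pattern (a hypothetical example written for this overview, not taken from any of the projects below): the worker function is defined at module scope so it can be pickled, and the pool is created under a __main__ guard.

import multiprocessing as mp


def square(x):
    # Worker function; kept at module scope so worker processes can import it.
    return x * x


if __name__ == "__main__":
    # The __main__ guard matters on platforms that spawn fresh interpreters
    # (e.g. Windows); without it, child processes would re-run this block.
    with mp.Pool(processes=4) as pool:
        results = pool.map(square, range(10))
    print(results)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]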

Example 1: model_selection

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def model_selection(self, graphs, targets,
                        n_iter=30, subsample_size=None):
        """model_selection_randomized."""
        param_distr = {"r": list(range(1, 5)), "d": list(range(0, 10))}
        if subsample_size:
            graphs, targets = subsample(
                graphs, targets, subsample_size=subsample_size)

        pool = mp.Pool()
        scores = pool.map(_eval, [(graphs, targets, param_distr)] * n_iter)
        pool.close()
        pool.join()

        best_params = max(scores)[1]
        logger.debug("Best parameters:\n%s" % (best_params))
        self = EdenEstimator(**best_params)
        return self 
Author: fabriziocosta, Project: EDeN, Lines: 19, Source: estimator.py

Example 2: score_samples

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def score_samples(kdes, samples, preds, n_jobs=None):
    """
    TODO
    :param kdes:
    :param samples:
    :param preds:
    :param n_jobs:
    :return:
    """
    if n_jobs is not None:
        p = mp.Pool(n_jobs)
    else:
        p = mp.Pool()
    results = np.asarray(
        p.map(
            score_point,
            [(x, kdes[i]) for x, i in zip(samples, preds)]
        )
    )
    p.close()
    p.join()

    return results 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 25, Source: util.py

Example 3: add_tasks

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def add_tasks(thread_task_list, identifier=None):
		"""
			Execute several functions (threads, processes) in parallel.

			@type thread_task_list: list of TaskThread

			@return: a list of respective return values
		"""
		assert isinstance(thread_task_list, list)

		if identifier is None:
			identifier = len(AsyncParallel.task_handler_list)

		# creates a pool of workers, add all tasks to the pool
		if AsyncParallel.pool is None:
			AsyncParallel.pool = mp.Pool(processes=AsyncParallel.max_processes)

		if identifier not in AsyncParallel.task_handler_list:
			AsyncParallel.task_handler_list[identifier] = []

		for task in thread_task_list:
			assert isinstance(task, TaskThread)
			AsyncParallel.task_handler_list[identifier].append(AsyncParallel.pool.apply_async(task.fun, task.args))
		return identifier 
Author: CAMI-challenge, Project: CAMISIM, Lines: 26, Source: parallel.py

Example 4: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def __init__(self, params_descriptor, MWF=False, stft_backend="auto", multiprocess=True):
        """ Default constructor.

        :param params_descriptor: Descriptor for TF params to be used.
        :param MWF: (Optional) True if MWF should be used, False otherwise.
        """

        self._params = load_configuration(params_descriptor)
        self._sample_rate = self._params['sample_rate']
        self._MWF = MWF
        self._tf_graph = tf.Graph()
        self._predictor = None
        self._input_provider = None
        self._builder = None
        self._features = None
        self._session = None
        self._pool = Pool() if multiprocess else None
        self._tasks = []
        self._params["stft_backend"] = get_backend(stft_backend) 
Author: deezer, Project: spleeter, Lines: 21, Source: separator.py

Example 5: generate_data_for_registered_problem

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def generate_data_for_registered_problem(problem_name):
  """Generate data for a registered problem."""
  tf.logging.info("Generating data for %s.", problem_name)
  if FLAGS.num_shards:
    raise ValueError("--num_shards should not be set for registered Problem.")
  problem = registry.problem(problem_name)
  task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
  data_dir = os.path.expanduser(FLAGS.data_dir)
  tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
  if task_id is None and problem.multiprocess_generate:
    if FLAGS.task_id_start != -1:
      assert FLAGS.task_id_end != -1
      task_id_start = FLAGS.task_id_start
      task_id_end = FLAGS.task_id_end
    else:
      task_id_start = 0
      task_id_end = problem.num_generate_tasks
    pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes)
    problem.prepare_to_generate(data_dir, tmp_dir)
    args = [(problem_name, data_dir, tmp_dir, task_id)
            for task_id in range(task_id_start, task_id_end)]
    pool.map(generate_data_in_process, args)
  else:
    problem.generate_data(data_dir, tmp_dir, task_id) 
Author: akzaidi, Project: fine-lm, Lines: 26, Source: t2t_datagen.py

Example 6: run

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def run(self, chunksize=2000, parallel=4):
        self.validate()

        if not self.replacers:
            return

        chunks = self.get_queryset_chunk_iterator(chunksize)

        if parallel == 0:
            for objs in chunks:
                _run(self, objs)
        else:
            connection.close()
            pool = Pool(processes=parallel)
            futures = [pool.apply_async(_run, (self, objs))
                       for objs in chunks]
            for future in futures:
                future.get()
            pool.close()
            pool.join() 
Author: BetterWorks, Project: django-anonymizer, Lines: 22, Source: base.py

Example 7: check_headers_parallel

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def check_headers_parallel(self, urls, options=None, callback=None):
        if not options:
            options = self.options.result()

        if Pool:
            results = []
            freeze_support()
            pool = Pool(processes=100)
            for url in urls:
                result = pool.apply_async(self.check_headers, args=(url, options.get('redirects'), options), callback=callback)
                results.append(result)
            pool.close()
            pool.join() 
            return results
        else:
            raise Exception('no parallelism supported') 
Author: koenbuyens, Project: securityheaders, Lines: 18, Source: securityheader.py

Example 8: generate_experiment

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def generate_experiment(exp_name, n_train_images, n_test_images, mode, class_diameters=(20, 20)):

    train_dir = os.path.join(cf.root_dir, exp_name, 'train')
    test_dir = os.path.join(cf.root_dir, exp_name, 'test')
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    if not os.path.exists(test_dir):
        os.makedirs(test_dir)

    # enforced distance between object center and image edge.
    foreground_margin = np.max(class_diameters) // 2

    info = []
    info += [[train_dir, six, foreground_margin, class_diameters, mode] for six in range(n_train_images)]
    info += [[test_dir, six, foreground_margin, class_diameters, mode] for six in range(n_test_images)]

    print('starting creating {} images'.format(len(info)))
    pool = Pool(processes=12)
    pool.map(multi_processing_create_image, info, chunksize=1)
    pool.close()
    pool.join()

    aggregate_meta_info(train_dir)
    aggregate_meta_info(test_dir) 
Author: MIC-DKFZ, Project: medicaldetectiontoolkit, Lines: 26, Source: generate_toys.py

Example 9: main

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def main():
    start = time.time()
    users = ['Carlos_F_Enguix', 'mmtung', 'dremio', 'MongoDB', 'JenWike', 'timberners_lee','ataspinar2', 'realDonaldTrump',
            'BarackObama', 'elonmusk', 'BillGates', 'BillClinton','katyperry','KimKardashian']

    pool = Pool(8)    
    for user in pool.map(get_user_info,users):
        twitter_user_info.append(user)

    cols=['id','fullname','date_joined','location','blog', 'num_tweets','following','followers','likes','lists']
    data_frame = pd.DataFrame(twitter_user_info, index=users, columns=cols)
    data_frame.index.name = "Users"
    data_frame.sort_values(by="followers", ascending=False, inplace=True, kind='quicksort', na_position='last')
    elapsed = time.time() - start
    print(f"Elapsed time: {elapsed}")
    display(data_frame) 
Author: taspinar, Project: twitterscraper, Lines: 18, Source: get_twitter_user_data.py

Example 10: main

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def main(args):
    users = []

    for arg in args:
        users.append(arg)

    pool_size = len(users)
    if pool_size < 8:
        pool = Pool(pool_size)
    else:
        pool = Pool(8)

    for user in pool.map(get_user_info, users):
        twitter_user_info.append(user)

    cols = ['id', 'fullname', 'date_joined', 'location', 'blog', 'num_tweets', 'following', 'followers', 'likes',
            'lists']
    data_frame = pd.DataFrame(twitter_user_info, index=users, columns=cols)
    data_frame.index.name = "Users"
    data_frame.sort_values(by="followers", ascending=False, inplace=True, kind='quicksort', na_position='last')
    display(data_frame) 
Author: taspinar, Project: twitterscraper, Lines: 23, Source: get_twitter_user_data_parallel.py

Example 11: scrape_recipe_box

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def scrape_recipe_box(scraper, site_str, page_iter, status_interval=50):

    if args.append:
        recipes = quick_load(site_str)
    else:
        recipes = {}
    start = time.time()
    if args.multi:
        pool = Pool(cpu_count() * 2)
        results = pool.map(scraper, page_iter)
        for r in results:
            recipes.update(r)
    else:
        for i in page_iter:
            recipes.update(scraper(i))
            if i % status_interval == 0:
                print('Scraping page {} of {}'.format(i, max(page_iter)))
                quick_save(site_str, recipes)
            time.sleep(args.sleep)

    print('Scraped {} recipes from {} in {:.0f} minutes'.format(
        len(recipes), site_str, (time.time() - start) / 60))
    quick_save(site_str, recipes) 
Author: rtlee9, Project: recipe-box, Lines: 25, Source: get_recipes.py

Example 12: get_params_for_mp

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def get_params_for_mp(n_triples):
    n_cores = mp.cpu_count()
    pool = mp.Pool(n_cores)
    avg = n_triples // n_cores

    range_list = []
    start = 0
    for i in range(n_cores):
        num = avg + 1 if i < n_triples - avg * n_cores else avg
        range_list.append([start, start + num])
        start += num

    return n_cores, pool, range_list


# input: [(h1, {t1, t2 ...}), (h2, {t3 ...}), ...]
# output: {(h1, t1): paths, (h1, t2): paths, (h2, t3): paths, ...} 
Author: hwwang55, Project: PathCon, Lines: 19, Source: utils.py
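A hedged usage sketch of the helper above: each (start, end) pair in range_list delimits one worker's slice of the data, and the partial results are merged afterwards. The worker process_chunk and the triples list are placeholders invented for illustration, and get_params_for_mp from the example above is assumed to be in scope.

import multiprocessing as mp


def process_chunk(args):
    # Hypothetical worker: handle triples[start:end] and return a partial result.
    triples, start, end = args
    return {t: None for t in triples[start:end]}


if __name__ == "__main__":
    triples = list(range(100))  # stand-in data
    n_cores, pool, range_list = get_params_for_mp(len(triples))
    partial_results = pool.map(process_chunk,
                               [(triples, start, end) for start, end in range_list])
    pool.close()
    pool.join()
    merged = {}
    for partial in partial_results:
        merged.update(partial)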

Example 13: eval_genome

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def eval_genome(genome, config):
    """
    This function will be run in parallel by ParallelEvaluator.  It takes two
    arguments (a single genome and the genome class configuration data) and
    should return one float (that genome's fitness).

    Note that this function needs to be in module scope for multiprocessing.Pool
    (which is what ParallelEvaluator uses) to find it.  Because of this, make
    sure you check for __main__ before executing any code (as we do here in the
    last few lines in the file), otherwise you'll have made a fork bomb
    instead of a neuroevolution demo. :)
    """

    net = neat.nn.FeedForwardNetwork.create(genome, config)
    error = 4.0
    for xi, xo in zip(xor_inputs, xor_outputs):
        output = net.activate(xi)
        error -= (output[0] - xo[0]) ** 2
    return error 
Author: CodeReclaimers, Project: neat-python, Lines: 21, Source: evolve-feedforward-parallel.py
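The docstring's point about module scope and the __main__ guard can be made concrete with a short driver sketch, modeled loosely on the neat-python XOR example this file comes from; the config file name and generation count are assumptions, not taken verbatim from the source.

import multiprocessing
import neat

# 2-input XOR truth table referenced by eval_genome above.
xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
xor_outputs = [(0.0,), (1.0,), (1.0,), (0.0,)]

if __name__ == '__main__':
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         'config-feedforward')  # assumed config file name
    population = neat.Population(config)
    # ParallelEvaluator fans eval_genome out over a multiprocessing.Pool;
    # the __main__ guard keeps worker processes from re-running this block.
    evaluator = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
    winner = population.run(evaluator.evaluate, 300)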

Example 14: multimap

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def multimap(namesToReferences, seqs):
    if not hasattr(multimap, "pool"):
        multimap.pool = multiprocessing.Pool(processes=misc.cpu_count_physical())

    pool = multimap.pool

    results = {}
    results = dict(pool.map_async(remaps, [(namesToReferences, seq) for seq in seqs]).get(999999))
    # results = dict(map(remaps, [(namesToReferences, seq) for seq in seqs]))

    return results 
Author: svviz, Project: svviz, Lines: 13, Source: alignproc.py

Example 15: _initialize_members

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Pool [as alias]
def _initialize_members(self, hdfs_app_path, kafkaproducer, conf_type):

        # getting parameters.
        self._logger = logging.getLogger('SPOT.INGEST.FLOW')
        self._hdfs_app_path = hdfs_app_path
        self._producer = kafkaproducer

        # get script path
        self._script_path = os.path.dirname(os.path.abspath(__file__))

        # read flow configuration.
        conf_file = "{0}/ingest_conf.json".format(os.path.dirname(os.path.dirname(self._script_path)))
        conf = json.loads(open(conf_file).read())
        self._conf = conf["pipelines"][conf_type]

        # set configuration.
        self._collector_path = self._conf['collector_path']        
        self._dsource = 'flow'
        self._hdfs_root_path = "{0}/{1}".format(hdfs_app_path, self._dsource)

        self._supported_files = self._conf['supported_files']

        # create collector watcher
        self._watcher = FileWatcher(self._collector_path,self._supported_files)
        
        # Multiprocessing. 
        self._processes = conf["collector_processes"]
        self._ingestion_interval = conf["ingestion_interval"]
        self._pool = Pool(processes=self._processes)
        # TODO: review re-use of hdfs.client
        self._hdfs_client = hdfs.get_client() 
Author: apache, Project: incubator-spot, Lines: 33, Source: collector.py


Note: The multiprocessing.Pool examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are taken from community-contributed open-source projects; copyright remains with the original authors, and any reuse or redistribution should follow the license of the corresponding project. Please do not reproduce without permission.