

Python pool.Pool Method Code Examples

This article collects typical usage examples of the Python multiprocessing.pool.Pool method. If you are wondering what pool.Pool does or how to use it in practice, the curated examples below may help. You can also explore further usage examples from the multiprocessing.pool module.


The following presents 15 code examples of the pool.Pool method, sorted by popularity by default.
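
Before diving in, here is a minimal, self-contained sketch of the two most common Pool patterns (map for blocking batch work, imap for streaming results); the worker function and inputs are hypothetical:

from multiprocessing.pool import Pool

def square(x):
    # hypothetical worker
    return x * x

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        # map blocks until every result is ready
        print(pool.map(square, range(10)))
        # imap yields results lazily, in input order
        for result in pool.imap(square, range(10), chunksize=2):
            print(result)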

Example 1: save_tfrecord

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def save_tfrecord(filename, dataset, verbose=False):
    observations = len(dataset['length'])

    serialized = []
    with Pool(processes=4) as pool:
        for serialized_string in tqdm(pool.imap(
            tfrecord_serializer,
            zip(dataset['length'], dataset['source'], dataset['target']),
            chunksize=10
        ), total=observations, disable=not verbose):
            serialized.append(serialized_string)

    # Save serialized dataset
    writer = tf.python_io.TFRecordWriter(
        filename,
        options=tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.ZLIB
        )
    )

    for serialized_string in tqdm(serialized, disable=not verbose):
        writer.write(serialized_string)

    writer.close() 
Developer ID: distillpub, Project: post--memorization-in-rnns, Lines: 26, Source: generate.py

Example 2: shuffled_analysis

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def shuffled_analysis(iterations: int, meta: pd.DataFrame, counts: pd.DataFrame, interactions: pd.DataFrame,
                      cluster_interactions: list, base_result: pd.DataFrame, threads: int, separator: str,
                      suffixes: tuple = ('_1', '_2'), counts_data: str = 'ensembl') -> list:
    """
    Shuffles meta and calculates the means for each and saves it in a list.

    Runs it in a multiple threads to run it faster
    """
    core_logger.info('Running Statistical Analysis')
    with Pool(processes=threads) as pool:
        statistical_analysis_thread = partial(_statistical_analysis,
                                              base_result,
                                              cluster_interactions,
                                              counts,
                                              interactions,
                                              meta,
                                              separator,
                                              suffixes,
                                              counts_data=counts_data
                                              )
        results = pool.map(statistical_analysis_thread, range(iterations))

    return results 
Developer ID: Teichlab, Project: cellphonedb, Lines: 25, Source: cpdb_statistical_analysis_helper.py
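
Example 2 relies on functools.partial to freeze the shared, read-only arguments so that pool.map only has to vary the iteration index. A minimal sketch of that pattern, with a hypothetical worker:

from functools import partial
from multiprocessing.pool import Pool

def analyze(base, scale, iteration):
    # hypothetical worker: only 'iteration' varies across calls
    return base + scale * iteration

if __name__ == '__main__':
    worker = partial(analyze, 100, 2)
    with Pool(processes=4) as pool:
        print(pool.map(worker, range(5)))  # [100, 102, 104, 106, 108]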

Example 3: fill_queue

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def fill_queue(self):
        if self.results is None:
            self.results = queue.deque(maxlen=self.max_queue)
        if self.num_workers > 0:
            if self.pool is None:
                self.pool = Pool(processes=self.num_workers)

        while len(self.results) < self.max_queue:
            if self.distinct_levels is not None and self.idx >= self.distinct_levels:
                break
            elif not self.repeat_levels and self.idx >= len(self.file_data):
                break
            else:
                data = self.get_next_parameters()
                if data is None:
                    break
            self.idx += 1
            kwargs = {'seed': self._seed.spawn(1)[0]}
            if self.num_workers > 0:
                result = self.pool.apply_async(_game_from_data, data, kwargs)
            else:
                result = _game_from_data(*data, **kwargs)
            self.results.append((data, result)) 
Developer ID: PartnershipOnAI, Project: safelife, Lines: 25, Source: level_iterator.py

Example 4: __init__

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def __init__(self, configer=None, num_classes=None, boundary_threshold=0.00088, num_proc=15):

        assert configer is not None or num_classes is not None
        self.configer = configer

        if configer is not None:
            self.n_classes = self.configer.get('data', 'num_classes')
        else:
            self.n_classes = num_classes

        self.ignore_index = -1
        self.boundary_threshold = boundary_threshold
        self.pool = Pool(processes=num_proc)
        self.num_proc = num_proc

        self._Fpc = 0
        self._Fc = 0
        self.seg_map_cache = []
        self.gt_map_cache = [] 
Developer ID: openseg-group, Project: openseg.pytorch, Lines: 21, Source: F1_running_score.py

Example 5: create_features_from_path

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def create_features_from_path(self, train_path: str, test_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
        column_pairs = self.get_column_pairs()

        col1s = []
        col2s = []
        latent_vectors = []
        gc.collect()
        with Pool(4) as p:
            for col1, col2, latent_vector in p.map(
                    partial(self.compute_latent_vectors, train_path=train_path, test_path=test_path), column_pairs):
                col1s.append(col1)
                col2s.append(col2)
                latent_vectors.append(latent_vector.astype(np.float32))
        gc.collect()
        return self.get_feature(train_path, col1s, col2s, latent_vectors), \
               self.get_feature(test_path, col1s, col2s, latent_vectors) 
Developer ID: flowlight0, Project: talkingdata-adtracking-fraud-detection, Lines: 18, Source: category_vector.py

Example 6: run

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def run(self):
        import gevent
        from gevent import monkey
        monkey.patch_all()
        from gevent import pool
        # default coroutine pool size is 200: g_pool = pool.Pool(200)
        g_pool = pool.Pool(self.coroutine)
        tasks = [g_pool.spawn(self.gen_traffic, url) for url in self.url_list]
        gevent.joinall(tasks)
        traffic_list = []
        for i in tasks:
            if i.value is not None:
                traffic_list.append(i.value)
        # save traffic for rescan
        Engine.save_traffic(traffic_list, self.id) 
Developer ID: lwzSoviet, Project: NoXss, Lines: 18, Source: engine.py
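
Note that Examples 6 and 7 use gevent's pool.Pool, a greenlet (coroutine) pool that runs inside a single process and suits I/O-bound work, unlike the process pool used elsewhere in this article. A minimal sketch of the spawn/joinall pattern, with a hypothetical task:

import gevent
from gevent import monkey, pool
monkey.patch_all()  # patch the stdlib so blocking I/O yields to other greenlets

def fetch(url):
    # hypothetical I/O-bound task
    gevent.sleep(0.1)
    return url

g_pool = pool.Pool(200)  # cap concurrency at 200 greenlets
tasks = [g_pool.spawn(fetch, u) for u in ['a', 'b', 'c']]
gevent.joinall(tasks)
print([t.value for t in tasks])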

Example 7: verify_async

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def verify_async(case_list,coroutine):
        """
        Verify used gevent lib
        :param case_list:
        :param coroutine:
        :return:
        """
        import gevent
        from gevent import monkey, pool
        monkey.patch_all()
        result = []
        geventPool = pool.Pool(coroutine)
        tasks = [geventPool.spawn(Verify.request_and_verify, case) for case in case_list]
        gevent.joinall(tasks)
        for i in tasks:
            if i.value is not None:
                result.append(i.value)
        print_info('Total Verify-Case is: %s, %s error happened.' % (len(case_list), Verify.ERROR_COUNT))
        return result 
Developer ID: lwzSoviet, Project: NoXss, Lines: 20, Source: engine.py

Example 8: deduplicate

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def deduplicate(self, url_list):
        print('Start to deduplicate all urls.')
        filtered_path = self.file + '.filtered'
        if os.path.exists(filtered_path):
            print('%s has been filtered as %s.' % (self.file, filtered_path))
            with open(filtered_path) as f:
                filtered = f.read().split('\n')
                return filtered
        filtered = []
        # result = map(filter, url_list)
        from multiprocessing import cpu_count
        from multiprocessing.pool import Pool
        p = Pool(cpu_count())
        result = p.map(url_filter, url_list)
        for i in result:
            if isinstance(i, str):
                filtered.append(i)
        with open(filtered_path, 'w') as f:
            f.write('\n'.join(filtered))
        print('Saved filtered urls to %s.' % filtered_path)
        return filtered 
Developer ID: lwzSoviet, Project: NoXss, Lines: 23, Source: engine.py

Example 9: _schedule_runs_lk

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def _schedule_runs_lk(self, et_pool, job):
        """ Schedule runs to execute up to max possible parallelism
        suffix '_lk' means caller must already hold lock.

        :param et_pool: A multiprocessor pool handle
        :type: Pool
        :param job: current job
        :type: WorkerJob
        """
        while (self._has_more_runs_to_schedule(job) and
               job.runs_in_flight < job.max_runs_in_flight):
            run = job.schedule_next_run()
            if run.id is None:
                raise ValueError("Unexpected end of runs")

            self.etl_helper.etl_step_started(job.msg_dict, run.id, run.step)

            log('scheduled: {0}'.format(run.id))
            et_pool.apply_async(
                run.func,
                args=run.args,
                callback=self._create_run_complete_callback(job, run.id, run.step),
            )
            job.runs_in_flight += 1 
Developer ID: Yelp, Project: mycroft, Lines: 26, Source: base_worker.py
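
Example 9's apply_async call returns immediately and fires a callback in the parent process when each run finishes, which is how the scheduler keeps runs_in_flight below the parallelism cap. A minimal sketch of the callback pattern, with hypothetical work:

from multiprocessing.pool import Pool

def work(run_id):
    # hypothetical unit of work
    return run_id * 10

done = []

def on_complete(result):
    # invoked in the parent process when a task finishes
    done.append(result)

if __name__ == '__main__':
    pool = Pool(processes=4)
    for run_id in range(5):
        pool.apply_async(work, args=(run_id,), callback=on_complete)
    pool.close()
    pool.join()
    print(sorted(done))  # [0, 10, 20, 30, 40]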

Example 10: GenerateMode

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def GenerateMode(corpus, context_token_limit):
  for dataset in datasets:
    print('Generating questions for the %s set:' % dataset)

    urls_filename = '%s/wayback_%s_urls.txt' % (corpus, dataset)
    urls = ReadUrls(urls_filename)

    p = Pool()
    # izip and repeat come from itertools (Python 2 code; use the builtin zip in Python 3)
    question_context_lists = p.imap_unordered(
        GenerateMapper, izip(urls, repeat(corpus), repeat(context_token_limit)))

    progress_bar = ProgressBar(len(urls))
    for question_context_list in question_context_lists:
      if question_context_list:
        for question_context in question_context_list:
          WriteQuestionContext(question_context, corpus, dataset)

      progress_bar.Increment() 
Developer ID: deepmind, Project: rc-data, Lines: 20, Source: generate_questions.py
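
Example 10 uses imap_unordered, which yields each result as soon as it completes rather than in input order; that keeps the progress bar moving even when some URLs are slow. A minimal sketch, with a hypothetical worker:

import time
from multiprocessing.pool import Pool

def slow_first(x):
    # hypothetical uneven workload: the first input is the slowest
    time.sleep(0.2 if x == 0 else 0.0)
    return x

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        # results arrive in completion order, not input order
        print(list(pool.imap_unordered(slow_first, range(4))))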

Example 11: fit

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def fit(self, X, y=None):
        """Fit all transformers using X.

        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data, used to fit transformers.

        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.

        Returns
        -------
        self : FeatureUnion
            This estimator
        """
        self.transformer_list = list(self.transformer_list)
        self._validate_transformers()
        with Pool(self.n_jobs) as pool:
            transformers = pool.starmap(_fit_one_transformer,
                                        ((trans, X[trans.steps[0][1].columns], y) for _, trans, _ in self._iter()))
        self._update_transformer_list(transformers)
        return self 
Developer ID: pjankiewicz, Project: mercari-solution, Lines: 25, Source: feature_union.py
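
Examples 11 and 12 use pool.starmap, which unpacks each tuple in the iterable into positional arguments, unlike map, which passes a single argument per call. A minimal sketch, with a hypothetical two-argument worker:

from multiprocessing.pool import Pool

def scale(value, factor):
    # hypothetical two-argument worker
    return value * factor

if __name__ == '__main__':
    with Pool(processes=2) as pool:
        # starmap unpacks each tuple into positional arguments
        print(pool.starmap(scale, [(1, 10), (2, 10), (3, 10)]))  # [10, 20, 30]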

Example 12: transform

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def transform(self, X):
        """Transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        with Pool(self.n_jobs) as pool:
            Xs = pool.starmap(_transform_one, ((trans, weight, X[trans.steps[0][1].columns])
                                               for name, trans, weight in self._iter()))
        if not Xs:
            # All transformers are None
            return np.zeros((X.shape[0], 0))
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs 
Developer ID: pjankiewicz, Project: mercari-solution, Lines: 27, Source: feature_union.py

Example 13: calculate_cell_score_selim

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def calculate_cell_score_selim(y_true, y_pred, num_threads=32, ids=None):
    yps = []
    for m in range(len(y_true)):
        yps.append((y_true[m].copy(), y_pred[m].copy()))
    pool = Pool(num_threads)
    results = pool.map(calculate_jaccard, yps)
    pool.close()
    pool.join()
    if ids:
        import pandas as pd
        s_iou = np.argsort(results)
        d = []
        for i in range(len(s_iou)):
            id = ids[s_iou[i]]
            res = results[s_iou[i]]
            d.append([id, res])
        # write the collected rows once after the loop
        pd.DataFrame(d, columns=["ID", "METRIC_SCORE"]).to_csv("gt_vs_oof.csv", index=False)

    return np.array(results).mean() 
Developer ID: selimsef, Project: dsb2018_topcoders, Lines: 19, Source: metric.py

Example 14: process_specification_directory

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def process_specification_directory(glob_pattern, outfile_name, namespace, write_baseclass=True,):
    with open(os.path.join(options.out_path, outfile_name), 'w+') as out_file:
        paths = [p for p in glob.glob(os.path.join(options.spec_path, glob_pattern))]
        classes = list()

        func = functools.partial(process_file, namespace)
        with Pool() as pool:
            classes.extend(pool.map(func, paths))
        print("Formatting...")
        formatted_code = FormatCode("\n".join(sorted(classes)))[0]
        if write_baseclass:
            header = BASE_CLASS
        else:
            header = "from zenpy.lib.api_objects import BaseObject\nimport dateutil.parser"

        out_file.write("\n\n\n".join((header, formatted_code))) 
Developer ID: facetoe, Project: zenpy, Lines: 18, Source: gen_classes.py

Example 15: main

# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def main():
    # Parameters
    process_num = 24
    image_size = (512, 512)
    url = 'http://v18.proteinatlas.org/images/'
    csv_path = "data/HPAv18RBGY_wodpl.csv"
    save_dir = "data/raw/external"

    os.makedirs(save_dir, exist_ok=True)

    print('Parent process %s.' % os.getpid())
    img_list = list(pd.read_csv(csv_path)['Id'])
    img_splits = np.array_split(img_list, process_num)
    assert sum([len(v) for v in img_splits]) == len(img_list)
    p = Pool(process_num)
    for i, split in enumerate(img_splits):
        p.apply_async(
            download, args=(str(i), list(split), url, save_dir, image_size)
        )
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.') 
Developer ID: pudae, Project: kaggle-hpa, Lines: 25, Source: download.py
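
One caveat about Example 15's fire-and-forget apply_async calls: an exception raised in a worker is stored on the AsyncResult and only re-raised when .get() is called, so failures can pass silently. A minimal sketch that keeps the result handles so errors surface, with a hypothetical task and a simulated failure:

from multiprocessing.pool import Pool

def download_split(i):
    # hypothetical task; i == 2 simulates a failure
    if i == 2:
        raise IOError('simulated failure')
    return i

if __name__ == '__main__':
    pool = Pool(processes=4)
    handles = [pool.apply_async(download_split, args=(i,)) for i in range(4)]
    pool.close()
    pool.join()
    for h in handles:
        try:
            print(h.get())
        except IOError as e:
            print('worker failed: %s' % e)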


Note: The multiprocessing.pool.Pool method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution and use should follow each project's license. Please do not reproduce without permission.