

Python Pool.imap Method Code Examples

This article collects typical usage examples of the Pool.imap method from Python's multiprocessing.pool module. If you are wondering how exactly to call Pool.imap, how it is used in practice, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the class it belongs to, multiprocessing.pool.Pool.


The sections below show 10 code examples of the Pool.imap method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
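
Before diving into the collected examples, here is a minimal, self-contained sketch of the typical Pool.imap pattern: hand an iterable to a worker pool, consume the results lazily and in input order, then let the pool shut down. The worker function square, the pool size, and the chunksize below are made-up placeholders for illustration and do not come from any of the examples that follow.

from multiprocessing.pool import Pool

def square(x):
    # Placeholder CPU-bound task; substitute your own function.
    return x * x

if __name__ == '__main__':
    # Pool can be used as a context manager (Python 3.3+); imap returns a
    # lazy iterator that yields results in input order without building
    # the whole result list in memory.
    with Pool(processes=4) as pool:
        for result in pool.imap(square, range(10), chunksize=2):
            print(result)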

Example 1: MapParallel

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
class MapParallel(PipelineBlock):
    def __init__(self, function, n_processes=None):
        self.function = _MapFunctionClosure(function)
        self.pool = Pool(processes=n_processes)

    def run(self, input_data):
        return self.pool.imap(self.function, input_data, chunksize=1)
Developer ID: lmc2179, Project: chevrons, Lines of code: 9, Source: pipeline_parallel.py

Example 2: _itergroundings

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
 def _itergroundings(self, simplify=False, unsatfailure=False):
     global global_bpll_grounding
     global_bpll_grounding = self
     if self.multicore:
         pool = Pool(maxtasksperchild=1)
         try:
             for gndresult in pool.imap(with_tracing(create_formula_groundings), self.formulas):
                 for fidx, stat in gndresult:
                     for (varidx, validx, val) in stat: 
                         self._varidx2fidx[varidx].add(fidx)
                         self._addstat(fidx, varidx, validx, val)
                     checkmem()
                 yield None
         except CtrlCException as e:
             pool.terminate()
             raise e
         pool.close()
         pool.join()
     else:
         for gndresult in imap(create_formula_groundings, self.formulas):
             for fidx, stat in gndresult:
                 for (varidx, validx, val) in stat: 
                     self._varidx2fidx[varidx].add(fidx)
                     self._addstat(fidx, varidx, validx, val)
             yield None
Developer ID: Bovril, Project: pracmln, Lines of code: 27, Source: bpll.py

Example 3: _itergroundings

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
 def _itergroundings(self, simplify=True, unsatfailure=True):
     # generate all groundings
     if not self.formulas:
         return
     global global_fastConjGrounding
     global_fastConjGrounding = self
     batches = list(rndbatches(self.formulas, 20))
     batchsizes = [len(b) for b in batches]
     if self.verbose:
         bar = ProgressBar(width=100, steps=sum(batchsizes), color='green')
         i = 0
     if self.multicore:
         pool = Pool()
         try:
             for gfs in pool.imap(with_tracing(create_formula_groundings), batches):
                 if self.verbose:
                     bar.inc(batchsizes[i])
                     bar.label(str(cumsum(batchsizes, i + 1)))
                     i += 1
                 for gf in gfs: yield gf
         except Exception as e:
             logger.error('Error in child process. Terminating pool...')
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for gfs in imap(create_formula_groundings, batches):
             if self.verbose:
                 bar.inc(batchsizes[i])
                 bar.label(str(cumsum(batchsizes, i + 1)))
                 i += 1
             for gf in gfs: yield gf
Developer ID: danielnyga, Project: pracmln, Lines of code: 36, Source: fastconj.py

Example 4: FilterParallel

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
class FilterParallel(PipelineBlock):
    def __init__(self, function, n_process=None):
        self.function = self._construct_filter_function(function)
        self.pool = Pool(processes=n_process)

    def _construct_filter_function(self, function):
        return _FilterFunctionClosure(function)

    def run(self, input_data):
        return self.pool.imap(self.function, input_data, chunksize=1)
Developer ID: lmc2179, Project: chevrons, Lines of code: 12, Source: pipeline_parallel.py

Example 5: crawl

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
def crawl():
    pool = Pool(cpu_count() - 2)
    image_list, num_images = load_image_list(args.list_file)
    print 'Loaded {} images'.format(num_images)
    cleaned_image_list, cleaned_num_images = clean_image_list(image_list)
    print '{} images to crawl'.format(cleaned_num_images)
    pbar = get_progress_bar(cleaned_num_images)

    for i, _ in enumerate(pool.imap(crawl_job, cleaned_image_list), 1):
        pbar.update(i)
    pbar.finish()
    Image.save_image_list(image_list, args.image_cache)
    Landmark.save_all(args.landmark_cache)
    logging.info('All done')
Developer ID: Dectinc, Project: deep_vlad, Lines of code: 16, Source: crawl_landmark.py

Example 6: main

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
def main():
    print('Starting.')
    args = parse_args()
    pool = Pool()
    runs = find_runs(args.source_folder, args.target_folder)
    runs = report_source_versions(runs)
    samples = read_samples(runs)
    # noinspection PyTypeChecker
    results = pool.imap(partial(compare_sample,
                                scenarios_reported=Scenarios.OTHER_CONSENSUS_CHANGED),
                        samples,
                        chunksize=50)
    scenario_summaries = defaultdict(list)
    i = None
    all_consensus_distances = []
    report_count = 0
    for i, (report, scenarios, consensus_distances) in enumerate(results):
        if report:
            report_count += 1
            if report_count > 100:
                break
        print(report, end='')
        all_consensus_distances.extend(consensus_distances)
        for key, messages in scenarios.items():
            scenario_summaries[key] += scenarios[key]
    for key, messages in sorted(scenario_summaries.items()):
        if messages:
            sample_names = {message.split()[0] for message in messages}
            summary = [key, len(messages), 'changes']
            body = ''.join(messages).rstrip('.')
            if body:
                summary.extend(['in', len(sample_names), 'samples'])
            print(*summary, end='.\n')
            print(body, end='')

    distance_data = pd.DataFrame(all_consensus_distances)
    non_zero_distances = distance_data[distance_data['distance'] != 0]
    region_names = sorted(non_zero_distances['region'].unique())
    names_iter = iter(region_names)
    for page_num, region_group in enumerate(zip_longest(names_iter, names_iter, names_iter), 1):
        group_distances = distance_data[distance_data['region'].isin(region_group)]
        plot_distances(group_distances,
                       'consensus_distances_{}.svg'.format(page_num),
                       'Consensus Distances Between Previous and v' + MICALL_VERSION)
        plot_distances(group_distances,
                       'consensus_diffs_{}.svg'.format(page_num),
                       'Consensus Differences Between Previous and v' + MICALL_VERSION,
                       'pct_diff')
    print('Finished {} samples.'.format(i))
Developer ID: cfe-lab, Project: MiCall, Lines of code: 51, Source: release_test_compare.py

Example 7: FoldParallel

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
class FoldParallel(PipelineBlock):
    def __init__(self, function, n_process=None):
        self.function = function
        self.pool = Pool(processes=n_process)

    def _construct_fold_function(self, function):
        return _FoldFunctionClosure(function)

    def run(self, input_data):
        batch_function = self._construct_fold_function(self.function)
        return self._fold_stream(self.pool.imap(batch_function, input_data, chunksize=1))

    def _fold_stream(self, input_data):
        input_iter = iter(input_data)
        x = next(input_iter)
        for element in input_iter:
            x = self.function(x, element)
        return x
Developer ID: lmc2179, Project: chevrons, Lines of code: 20, Source: pipeline_parallel.py

Example 8: _train_base

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
    def _train_base(self, compute_vector, entity_word_seqs):
        pool = Pool()

        entities = {}
        vectors = []

        def idx_seqs():
            for idx, (entity, seq) in enumerate(entity_word_seqs):
                entities[entity] = idx
                yield seq

        for vec in pool.imap(compute_vector, idx_seqs()):
            vectors.append(vec)

            if len(vectors) % 1000 == 0:
                logging.info("Computed %d vectors", len(vectors))

        self.entities = entities
        self.vectors = np.asarray(vectors)
Developer ID: nooralahzadeh, Project: entity2vec, Lines of code: 21, Source: entity_models.py

Example 9: raw_line_map

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
def raw_line_map(
    filename, line_length, func, start=0, stop=-1, threads=1, pass_teletext=True, pass_rejects=False, show_speed=True
):

    if show_speed:
        s = SpeedMonitor()

    if threads > 0:
        p = Pool(threads)
        map_func = lambda x, y: p.imap(x, y, chunksize=1000)
    else:
        map_func = itertools.imap

    for l in map_func(func, raw_line_reader(filename, line_length, start, stop)):
        if show_speed:
            s.tally(l.is_teletext)
        if l.is_teletext:
            if pass_teletext:
                yield l
        else:
            if pass_rejects:
                yield l
Developer ID: ali1234, Project: vhs-teletext, Lines of code: 24, Source: map.py

Example 10: int

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import imap [as alias]
    try:
        first = int(sys.argv[2], 10)
        count = int(sys.argv[3], 10)
        skip = int(sys.argv[4], 10)
    except:
        first = 0
        count = 10000000
        skip = 1


    if not os.path.isdir(path+'/t42/'):
        os.makedirs(path+'/t42/')


    if 1:
        p = Pool(multiprocessing.cpu_count())
        it = p.imap(process_file, list_files(path+'/vbi/', path+'/t42/', first, count, skip), chunksize=1)
        for i in it:
            pass

    else: # single thread mode for debugging
        def doit():
            map(process_file, list_files(path+'/vbi/', path+'/t42/', first, count, skip))
        cProfile.run('doit()', 'myprofile')
        p = pstats.Stats('myprofile')
        p.sort_stats('cumulative').print_stats(50)



Developer ID: neoKushan, Project: vhs-teletext, Lines of code: 28, Source: vbi.py


Note: The multiprocessing.pool.Pool.imap examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.