

Python Pool.get Method Code Examples

This article collects typical usage examples of the Python multiprocessing.Pool.get method. Strictly speaking, get belongs to the AsyncResult objects returned by Pool.apply_async and Pool.map_async: it blocks until a result is ready and returns it, re-raising any exception the worker raised. If you have been wondering how Pool.get is used in practice, the curated examples below may help. You can also explore further usage examples of the enclosing multiprocessing.Pool class.


The following presents 8 code examples of the Pool.get method, sorted by popularity by default.
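Before the collected examples, here is a minimal self-contained sketch of the pattern they all share: submit work with apply_async or map_async, then call .get() on the returned AsyncResult to collect the result. The worker function square and its arguments are made up for illustration.

from multiprocessing import Pool

def square(x):
    # the worker must be a module-level function so it can be pickled
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=4)
    # apply_async/map_async return AsyncResult objects immediately;
    # .get() blocks until the result is ready and re-raises worker exceptions
    single = pool.apply_async(square, (3,))
    many = pool.map_async(square, range(10))
    print(single.get())   # 9
    print(many.get())     # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
    pool.close()
    pool.join()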

Example 1: main

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import get [as alias]
def main():
    ''' Main program starts here '''
    global opub, odown, orep, info
# somefile starts False and turns True if at least one file is found
    somefile=False
# read inputs and assign constraints
    assign_constraint()
    fdown = outfile + '_to_download.csv'
    frep = outfile + '_replica.csv'
    fpub = outfile + '_not_published.csv'
# test reading inputs
    print var0
    print exp0
    print mod0
    print fdown
    print frep
    print fpub
# if one of the output files exists, issue a warning and exit
    if opath.isfile(fdown) or opath.isfile(frep) or opath.isfile(fpub):
        print "Warning: one of the output files exists, exiting to avoid overwriting!"
        sys.exit()
    info={}
# loop through experiments, 1st create a wget request for exp, then parse_file 
    for exp in exp0:
        wgetfile = "wget_" + exp + ".out"
        result=parse_file(wgetfile,var0,mod0,exp)
# if found any files matching constraints, process them one by one
# using multiprocessing Pool to parallelise process_file 
        if result:
           async_results = Pool(1).map_async(process_file, result)
           for dinfo in async_results.get():
               info.update(dinfo)
           somefile=True
        print "Finished checksum for existing files" 
# if it couldn't find any file for any experiment then exit
    if not somefile:
        sys.exit("No files found for any of the experiments, exiting!")
# open not published file
    opub=open(fpub, "w")
    opub.write("var_mip-table, model, experiment\n")
# build all requested combinations and compare to files found
    nopub_set = compare_query(var0,mod0,exp0)
# write replica and download output files
# open output files and write header
    odown=open(fdown, "w")
    odown.write("var, mip_table, model, experiment, ensemble, version, file url\n")
    orep=open(frep, "w")
    orep.write("var, mip_table, model, experiment, ensemble, version, filepath\n")
    write_file()
# close all the output files
    odown.close()
    orep.close()
    opub.close()
    print "Finished to write output files" 
# if table option create/open spreadsheet
# if table option write summary table in csv file
    if table: 
       write_table(nopub_set)
Developer: coecms, Project: CMIP5-utils, Lines: 60, Source: fetch_step2.py
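The core pattern in Example 1 is mapping a worker over a list of records and merging the per-record dictionaries the workers return. A stripped-down sketch of just that pattern, with a made-up process_file standing in for the original's:

from multiprocessing import Pool, cpu_count

def process_file(record):
    # hypothetical stand-in: return per-file info as a small dict
    return {record: len(record)}

if __name__ == '__main__':
    records = ['a.nc', 'bb.nc', 'ccc.nc']
    info = {}
    pool = Pool(cpu_count())
    async_results = pool.map_async(process_file, records)
    for dinfo in async_results.get():  # get() blocks until all workers finish
        info.update(dinfo)
    pool.close()
    pool.join()
    print(info)   # {'a.nc': 4, 'bb.nc': 5, 'ccc.nc': 6}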

Example 2: test1

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import get [as alias]
def test1():
    t = time.time()
    p = Pool(4)
    kk = 10
    results = []
    for x in range(1, 7):
        # f(x, kk) is a worker function defined elsewhere in the source file
        results.append(p.apply_async(f, args=(x, kk)))
    # use a distinct name so the pool variable p is not shadowed
    output = [res.get() for res in results]
    print output
    print time.time() - t
Developer: Imoteph, Project: finishingTool, Lines: 12, Source: debugging.py

Example 3: fit

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import get [as alias]
def fit(self, data_indices=None):
    """Uses .fit() method on each model;
    operates on the models in parallel"""
    p = Pool(self.processes)
    # map_async returns an AsyncResult; get() belongs to that object, not to the Pool
    # NB: a lambda cannot be pickled by multiprocessing, so in practice the mapped
    # callable must be a module-level function (see the sketch below)
    async_result = p.map_async(
        lambda pair: pair[0].fit(self.df[self.vars_of_interest],
                                 self.df[[self.y]], **pair[1]),
        zip(self.models, self.fit_kwarg_dicts),
    )
    out = async_result.get()
    self.fitted = True
    p.close()
    return out
Developer: dingocuster, Project: edaHelper, Lines: 14, Source: edaHelper.py
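Because multiprocessing pickles the mapped callable, the lambda in Example 3 would raise a PicklingError under the standard Pool. A minimal sketch of a picklable rework, assuming hypothetical model objects exposing a scikit-learn-style .fit(X, y, **kwargs) method:

from multiprocessing import Pool

def _fit_one(args):
    # module-level function, hence picklable by multiprocessing
    model, X, y, kwargs = args
    return model.fit(X, y, **kwargs)

def fit_all(models, X, y, kwarg_dicts, processes=4):
    pool = Pool(processes)
    try:
        async_result = pool.map_async(
            _fit_one, [(m, X, y, kw) for m, kw in zip(models, kwarg_dicts)])
        # workers fit pickled copies of the models; the fitted copies come back here
        return async_result.get()
    finally:
        pool.close()
        pool.join()

Note that each worker fits a copy of its model, so the caller must keep the returned fitted models rather than expecting the originals to be mutated in place.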

Example 4: main

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import get [as alias]
def main():
    args = get_args()

    start = time()
    with open(args.output, 'w') as f:
        wids = [line.strip() for line in
                open(args.input).readlines()[:args.number]]
        mapped = Pool(processes=8).map_async(identify_worker, wids)
        mapped.wait()
        print >> f, '\n'.join([x.encode('utf-8') for x in mapped.get()])
    end = time()
    total = end - start
    print '%d seconds elapsed' % total
Developer: Wikia, Project: identify_wiki, Lines: 15, Source: __main__.py

Example 5: to_adops_xls

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import get [as alias]
def to_adops_xls(args, wid_to_topics):
    my_workbook = xlwt.Workbook()
    ids_worksheet = my_workbook.add_sheet("Wikis to Topics")
    ids_worksheet.write(0, 0, 'Wiki')
    ids_worksheet.write(0, 1, 'URL')
    ids_worksheet.write(0, 2, 'Topic')
    ids_worksheet.write(0, 3, 'Rank')

    ids = wid_to_topics.keys()
    r = Pool(processes=16).map_async(wiki_data_for_ids, [ids[i:i+20] for i in range(0, len(ids), 20)])
    wiki_data = {}
    map(wiki_data.update, r.get())

    row = 1
    for wid, topics in wid_to_topics.items():
        top_five = sorted(wid_to_topics[wid].keys(), key=lambda x: wid_to_topics[wid][x], reverse=True)[:5]
        for counter, topic in enumerate(top_five):
            ids_worksheet.write(row, 0, wid)
            ids_worksheet.write(row, 1, wiki_data.get(wid, {}).get('url', '?'))
            ids_worksheet.write(row, 2, int(topic)+1)
            ids_worksheet.write(row, 3, counter)
            row += 1

    urls_worksheet = my_workbook.add_sheet("Topic Data")
    urls_worksheet.write(0, 0, 'Topic')
    urls_worksheet.write(0, 1, 'Phrase')
    urls_worksheet.write(0, 2, 'Weight')
    urls_worksheet.write(0, 3, 'Rank')

    row = 1
    for topic, line in enumerate(args.features_file):
        words = line.decode('utf8').split(u' + ')
        for rank, word_data in enumerate(words):
            weight, word = word_data.split('*')
            urls_worksheet.write(row, 0, topic+1)
            urls_worksheet.write(row, 1, word)
            urls_worksheet.write(row, 2, weight)
            urls_worksheet.write(row, 3, rank+1)
            row += 1

    my_workbook.save(args.topics_file.name.replace('.csv', '-adops-report.xls'))
    print args.topics_file.name.replace('.csv', '-adops-report.xls')
Developer: Wikia, Project: wiki-recommender, Lines: 44, Source: transform_topics_csv.py

Example 6: locals

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import get [as alias]
# for more info look at pyesgf module documentation
    esgfargs=constraints
    if 'mip' in constraints.keys():
        esgfargs['cmor_table']=esgfargs.pop('mip')
    if 'exp0' in locals():
        esgfargs['query']=exp0+"%"
    esgfargs['replica']=False
    esgf.search_node(**esgfargs)
    print("Found ",esgf.ds_count(),"simulations for constraints")
# loop returned DatasetResult objects
# using multiprocessing Pool to parallelise process_file
# using 8 here as it is the number of VCPUs on VDI
    if esgf.ds_count()>=1:
        results=esgf.get_ds()
        async_results = Pool(1).map_async(retrieve_ds, results)
        for ds_info in async_results.get():
            esgf_results.append(ds_info)

# append to results a list of version dictionaries containing useful info
# NB the search should return only the latest version, not replicas, if any
        
# compare local to remote info
    print("Finished to retrieve remote data")
    if esgf_results==[]:
        if db_results!=[]:
            print("Found local version but none is currently available on ESGF nodes for constraints:\n",constraints)
        else: 
            print("Nothing currently available on ESGF nodes and no local version exists for constraints:\n",constraints)
    else:
        print(esgf.ds_count(),"instances were found on ESGF and ",outputs.count()," on the local database")
        if sys.version_info < ( 3, 0 ):
Developer: coecms, Project: ARCCSSive, Lines: 33, Source: compare_ESGF.py

Example 7: Pool

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import get [as alias]
    pr.start()
    ## wait for pr to finish:
    pr.join()
    print
    print "all data write and read done"
    print
    print "Process Pool example...\n"
    ## NOTE: a plain Queue object cannot be shared between the parent process and
    ## pool workers; to use a queue with a process pool, go through multiprocessing's
    ## Manager class, as follows:
    manager = multiprocessing.Manager()
    # the parent process creates the Queue and passes it to each child process:
    q = manager.Queue()
    lock = manager.Lock() ## a lock so that only one process operates on the queue at a time
    p = Pool(processes=5) ## at most 5 worker processes run at the same time
    p_list = []
    # submit 20 write tasks (the pool runs at most 5 of them concurrently)
    for i in range(20):
        pw = p.apply_async(write,args=(i,q,lock))
        p_list.append(pw)
    # use a distinct name so the pool variable p is not shadowed
    for res in p_list:
        res.get()

    p = Pool()
    time.sleep(0.5)
    pr = p.apply_async(read,args=(q,))
    p.close() # close() must be called before join(); after close() no new tasks can be submitted
    p.join()


    print
    print 'all data write and read done'
Developer: ohgenlong, Project: python_trainning, Lines: 32, Source: multi_process_queue.py
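The key point of Example 7 is the Manager-backed queue, which, unlike a plain multiprocessing.Queue, can safely be passed to pool workers. A minimal self-contained sketch of the same idea, with made-up write and read functions:

import multiprocessing
from multiprocessing import Pool

def write(i, q):
    # hypothetical writer: put one item on the shared queue
    q.put('item-%d' % i)

def read(q, n):
    # hypothetical reader: drain n items from the queue
    return [q.get() for _ in range(n)]

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    q = manager.Queue()    # a proxy object, safe to send to pool workers
    pool = Pool(processes=5)
    writers = [pool.apply_async(write, (i, q)) for i in range(20)]
    for res in writers:
        res.get()          # re-raises any exception a writer hit
    print(read(q, 20))
    pool.close()
    pool.join()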

Example 8: ListDocIdsService

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import get [as alias]
    url = details.get('url')
    lang = details.get('lang')
    #print url
    #doc_ids = ListDocIdsService().get_value(wid)
    doc_ids = map(lambda x: x.split('_')[1],
                  filter(lambda y: '_' in y,
                         #ListDocIdsService().get_value(wid)))[:100]
                         ListDocIdsService().get_value(wid)))
    #pprint(doc_ids); sys.exit(0)
    #for n in range(0, len(doc_ids), step):
    ##for n in range(0, 20, step):
    #    print 'n = %d' % n
    #    doc_ids_subset = doc_ids[n:n+step]
    r = Pool(processes=8).map_async(get_fields, chunks(doc_ids, step))
    r.wait()
    pprint(r.get())
    print '*'*80
    #for k in r.get():  # DEBUG
    #    print k
    fields = []
    m = map(lambda x: fields.extend(x), r.get())
    #pprint(fields)
    indexed = dict(fields)

pprint(indexed)  # DEBUG

#for doc_id in doc_ids_to_heads:
#    entity_response = doc_ids_to_entities.get(
#        doc_id, {'titles': [], 'redirects': {}})
#    doc_ids_combined[doc_id] = map(preprocess,
#                                   indexed.get(doc_id, []) +
Developer: tristaneuan, Project: vetdata, Lines: 33, Source: test_indexer.py
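Example 8 relies on a chunks helper that is not shown in the excerpt. A helper of this kind is typically a short slicing generator; a plausible sketch (the original implementation may differ):

def chunks(seq, size):
    """Yield successive size-sized slices of seq."""
    for i in range(0, len(seq), size):
        yield seq[i:i + size]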


Note: the multiprocessing.Pool.get examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets are drawn from open-source projects contributed by various developers. Copyright of the source code remains with the original authors; consult each project's License before distributing or using it. Do not reproduce without permission.