

Python Pool.close Method Code Examples

This article collects typical usage examples of the Python multiprocessing.Pool.close method. If you are wondering what Pool.close does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the class it belongs to, multiprocessing.Pool.


Fifteen code examples of the Pool.close method are shown below, sorted by popularity by default.
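Before the project examples, here is a minimal, self-contained sketch of the close()/join() pattern that all of them share. The square worker function and its inputs are made up purely for illustration:

from multiprocessing import Pool

def square(x):
    # hypothetical worker used only for this sketch
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=4)
    results = [pool.apply_async(square, (i,)) for i in range(10)]
    pool.close()   # after close(), no new tasks may be submitted to the pool
    pool.join()    # wait for all submitted tasks and worker processes to finish
    print([r.get() for r in results])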

Example 1: enumerate_all_subgraphs_upto_size_k_parallel

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def enumerate_all_subgraphs_upto_size_k_parallel(document_graph, k, num_of_workers=4):
    """
    returns all subgraphs of a DiscourseDocumentGraph (i.e. a MultiDiGraph)
    with up to k nodes. This is a trivially parallelized version of
    enumerate_all_subgraphs_upto_size_k()
    """
    document_nodes = len(document_graph)
    if k > document_nodes:
        k = document_nodes

    int_graph = nx.convert_node_labels_to_integers(nx.DiGraph(document_graph),
                                                   first_label=1,
                                                   label_attribute='node_id')

    pool = Pool(processes=num_of_workers) # number of CPUs
    results = [pool.apply_async(enumerate_all_size_k_subgraphs, args=(int_graph, i))
                for i in xrange(1, k+1)]
    pool.close()
    pool.join()

    subgraphs = []
    for result in results:
        tmp_result = result.get()
        if isinstance(tmp_result, list):
            subgraphs.extend(tmp_result)
        else:
            subgraphs.append(tmp_result)
    return subgraphs
Author: arne-cl, Project: discoursekernels, Lines of code: 30, Source file: subgraph_enumeration.py

Example 2: __decrypt_file

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
  def __decrypt_file(self, private_d, public_n, keys, path_to_file, CRT, k):
    if CRT:
      pool = Pool(processes = k)
      promises = []
    decrpted_data = ''
    with open(path_to_file, 'r') as f:
      encrypted_data = f.read()
      encrypted_data_chunks = list(map(''.join, zip(*[iter(encrypted_data)]*len(str(public_n)))))
      for i in range(len(encrypted_data_chunks)):
        stripped = encrypted_data_chunks[i].lstrip('0')
        if CRT:
          promise = pool.apply_async(self.compute_part_of_message, args=(stripped, keys, i))
          promises.append(promise)
        else:
          decrpted_data += chr(self.__decrypt_message(stripped, private_d, public_n))
    if CRT:
      results = [promise.get() for promise in promises]
      decrypted_sorted = sorted(results, key = lambda x: x[1])
      for data in decrypted_sorted:
        decrpted_data += chr(data[0])

    if CRT:
      pool.close()
    with open(path_to_file + '.dec', 'w') as f:
      f.write(decrpted_data)
    return decrpted_data
Author: Bouncer00, Project: Cryptography, Lines of code: 28, Source file: decryptor.py

Example 3: crawl_recursive_threaded

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def crawl_recursive_threaded(dirpath, ext):
    from database import indexer
    from database import utils
    from multiprocessing import Pool

    # convert to our infos
    cdir = indexer.DirInfo(dirpath, ext)
    cInfos = indexer.dirs_to_info(cdir.subfolders(), ext)

    # comment this line out if you want silent indexing
    print(cdir.to_string())

    # recursive pooled call
    # NOTE: child calls must not be pooled
    p = Pool(utils.Settings.config['processes'])
    infos = p.map(crawl_recursive, cInfos)
    p.close()

    # remove hierarchy
    dirInfos = [d for sublist in infos for d in sublist]
    dirInfos.append(cdir)

    print('I was crawling with %d processes' %
          utils.Settings.config['processes'])

    return dirInfos
Author: TUWien, Project: Benchmarking, Lines of code: 28, Source file: crawler.py

Example 4: expand_all_commits

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def expand_all_commits(code_dir, target_dir, only_year=None):
  print code_dir
  uname_lookup_by_year_q = load_uname_lookup_by_year_q()
  latest_submissions = get_latest_submissions(code_dir)
  num_students = len(latest_submissions)

  def get_commit_args(args):
    i, student = args
    latest_submit = latest_submissions[student]
    student_dir = os.path.join(code_dir, latest_submit)
    year_q = get_submit_time(student_dir) 
    if (not year_q) or only_year != year_q: return (-1,'','',-1,'',-1)
    year_target_dir = os.path.join(target_dir, year_q)
    if year_q not in uname_lookup_by_year_q or \
          latest_submit not in uname_lookup_by_year_q[year_q]:
        add_uname_to_lookup(latest_submit, year_q, uname_lookup_by_year_q)
    student_id = uname_lookup_by_year_q[year_q][latest_submit]
    #if student_id != '2012010247': return (-1,'','',-1,'',-1)
    return i, student, student_dir, student_id, year_target_dir, num_students

  students = sorted(latest_submissions.keys())
  zipped_args = map(get_commit_args, enumerate(students))
  non_students = [student for i, student in enumerate(students) if zipped_args[i][0] == -1]
  #print "unsuccessful"
  #print '\n'.join([latest_submissions[student] for student in non_students])
  pool = ThreadPool(8)
  results = pool.map(thread_process_commit, zipped_args)
  pool.close()
  pool.join()
  export_uname_lookup_by_year_q(uname_lookup_by_year_q)
Author: chrispiech, Project: SocialTrajectory, Lines of code: 32, Source file: git_tool.py

Example 5: build_from_queries

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def build_from_queries(queries):
    p = Pool(5)
    query_results = p.map(q_exec, queries)
    p.close()
    p.join()
    #process the query_results
    return query_results
Author: mrkaiser, Project: MovieDatasetBuilder, Lines of code: 9, Source file: BaseMovieBuilder.py

Example 6: fetch_imagery

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def fetch_imagery(image_locations, local_dir):
    pool = Pool(cpu_count())
    tupled = [(loc[0], loc[1], local_dir) for loc in image_locations]
    try:
        pool.map(fetch_imagery_uncurried, tupled)
    finally:
        pool.close()
Author: azavea, Project: raster-foundry, Lines of code: 9, Source file: cog.py

Example 7: calcRawScores

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def calcRawScores(fastaFilePath,numThreads,geneNames,gapOpen, gapExtend, matrix, scoresO):
    '''Get a global alignment based raw score for every edge in scoresO.'''

    # load sequences
    protFnL=glob.glob(fastaFilePath)
    seqD=genomes.loadProt(protFnL)
                
    # make list of sets of arguments to be passed to p.map. There
    # should be numThreads sets.
    argumentL = [([],seqD,gapOpen, gapExtend, matrix) for i in range(numThreads)]

    i=0
    for g1,g2 in scoresO.iterateEdgesByEndNodes():
        edgeNum = scoresO.endNodesToEdge(g1,g2)
        edgeT = edgeNum,geneNames.numToName(g1),geneNames.numToName(g2)
        argumentL[i%numThreads][0].append(edgeT)
        i+=1
        
    # run
    p=Pool(numThreads)
    scoresLL = p.map(rawScoreGroup, argumentL)
    p.close()
    p.join()

    
    # store in scoresO
    for scoresL in scoresLL:
        for edgeNum,sc in scoresL:
            scoresO.addScoreByEdge(edgeNum,sc,'rawSc')

    return scoresO
Author: ksl0, Project: xenoGI, Lines of code: 33, Source file: scores.py

Example 8: get

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
   def get(self, tag="貓咪", max_tag_id=None):
      if tag == "":
         tag = "貓咪"
      p = Pool(10)

      if self.prefix == "ajax":
         medias, next_ = util.search_by_tag(tag, 3, max_tag_id)
      else:
         medias, next_ = util.search_by_tag(tag, 5, max_tag_id)

      fs = p.map(util.features, medias)
      p_label, _, _ = libsvm.svm_predict([1] * len(fs), fs, model)
      for (m, f) in zip(medias, fs):
         print(m["caption"]["text"])
         print(f)
      if self.prefix == "ajax":
         medias = map(lambda (m, l): Media(m, l).__dict__, zip(medias, p_label))
         self.write(json.dumps({
            "max_tag_id": next_,
            "medias": medias
         }))
      else:
         medias = map(lambda (m, l): Media(m, l), zip(medias, p_label))
         if self.prefix == "demo1":
            self.render("demo1.html", medias=medias, tag_name=tag, max_tag_id=next_)
         elif self.prefix == "demo2":
            self.render("demo2.html", medias=medias, tag_name=tag, max_tag_id=next_)
         else:
            self.render("main.html", medias=medias, tag_name=tag, max_tag_id=next_)

      p.close()
      p.join()
Author: carolinetychen, Project: AI_Project, Lines of code: 34, Source file: server.py

Example 9: _run

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
    def _run(self, source, destination_format, clear_source=False, workers=-1):
        """
        parallel version of the `convert` method
        :param source: (rdf) files to convert (source path)
        :param destination_format: the destination format
        :param clear_source: if set, delete the source files. Default = False
        :return: None
        """

        files = []
        src = os.path.abspath(source)
        if os.path.isdir(src):
            files = [os.path.join(src, f) for f in os.listdir(src) if to_process(f, destination_format)]
        elif os.path.exists(src):
            files = [src]
        self._log.info('to process: {0}'.format(files))
        if clear_source:
            self._log.warn('will remove original files after conversion')

        def job_finished(res):
            print '.',
            sys.stdout.flush()

        num_cpus = cpu_count()
        num_workers = workers if 0 < workers < num_cpus else num_cpus

        pool = Pool(processes=num_workers)

        for src in files:
            dst = dest_file_name(src, destination_format)
            if dst:
                pool.apply_async(convert_file, (src, dst, clear_source), callback=job_finished)

        pool.close()
        pool.join()
Author: anukat2015, Project: rdftools, Lines of code: 37, Source file: rdf2rdf.py

Example 10: matrix_vector_iteration_by_processes

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def matrix_vector_iteration_by_processes(A,x,k):
	# create a temporary directory to store the matrix and the vectors
	tmpdir = tempfile.mkdtemp()

	nvec = get_nvec(x)
	y = x.copy()

	save_matrix(tmpdir,A)
	for i in xrange(nvec):
		save_x(tmpdir,x,i)

	# start processes
	pool = Pool(processes=min(nvec,6))
	processes = []

	for i in xrange(nvec):
		processes.append( pool.apply_async(matrix_vector_iteration_process, (tmpdir,i,k)) ) 

	# fetch results (vector/matrix shape version)
	if x.ndim  == 1:
		processes[0].get()
		y = load_x(tmpdir,0)
	else:
		for i in xrange(nvec):
			processes[i].get()
			y[:,i] = load_x(tmpdir,i)

	pool.close()

	# remove temporary directory (with all it contains)
	shutil.rmtree(tmpdir)

	return y
Author: sbordt, Project: markovmixing, Lines of code: 35, Source file: iterate_distributions.py

Example 11: run_train_models

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def run_train_models(processes, model_library, **kwargs):
    """Train many supervised learning problems in parallel

    model_library = a list specifying the model library for the dataset in
            format needed for TrainModelCV
            **kwargs: all the rest of the input to TrainModelCV"""
    # sample input for model_library:
    #          [[LogisticRegression, classification_error, 'parameters.json', (), {'lam':0.5}],
    #          [LogisticRegression, auc_wmw_fast, None, (), {'C':50}]]

    # use a process pool to execute all the training jobs
    # collect the results and combine to return
    from multiprocessing import Pool

    p = Pool(processes)

    #ret = {}
    #for model in model_library:
    #    p.apply_async(_pool_helper, (model_library, ), kwargs, callback=ret.update)

    results = []
    for model in model_library:
        results.append(p.apply_async(_pool_helper, (model, ), kwargs))

    # wait on the pool to finish
    p.close()
    p.join()

    # collect the results
    ret = {}
    for result in results:
        ret.update(result.get())

    return ret
Author: trtg, Project: mozsci, Lines of code: 36, Source file: map_train.py

Example 12: main

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def main(path, out, cores):
    """
    Compute contact energies for each pdb in path and write results to 'out'.
    :param path: str
    :param out: str
    :param cores: int
    :return: None
    """
    # Find all pdbs in path
    workload = []
    for file in os.listdir(path):
        if os.path.splitext(file)[1].lower() == ".pdb":
            workload.append(file)
    # Print a few newlines to prevent the progress bar from messing up the shell
    print("\n\n")
    # Compute energies
    pool = Pool(processes=cores)
    results = []
    for (nr, pdb) in enumerate(workload):
        updateprogress(pdb, nr / len(workload))
        e = computecontactenergy(os.path.join(path, pdb), pool)
        results.append((pdb, e))
    pool.close()
    # Make 100% to appear
    updateprogress("Finished", 1)
    # Store output
    with open(out, "w") as handler:
        handler.write("PDB,Energy in kcal/mol\n")
        for pair in results:
            handler.write("{},{}\n".format(*pair))
Author: maxemil, Project: InteractionPotential, Lines of code: 32, Source file: energies.py

Example 13: get

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
    def get(self):
        mode = toAlpha3Code(self.get_argument('lang'))
        text = self.get_argument('q')
        if not text:
            self.send_error(400, explanation='Missing q argument')
            return

        def handleCoverage(coverage):
            if coverage is None:
                self.send_error(408, explanation='Request timed out')
            else:
                self.sendResponse([coverage])

        if mode in self.analyzers:
            pool = Pool(processes=1)
            result = pool.apply_async(getCoverage, [text, self.analyzers[mode][0], self.analyzers[mode][1]])
            pool.close()

            @run_async_thread
            def worker(callback):
                try:
                    callback(result.get(timeout=self.timeout))
                except TimeoutError:
                    pool.terminate()
                    callback(None)

            coverage = yield tornado.gen.Task(worker)
            handleCoverage(coverage)
        else:
            self.send_error(400, explanation='That mode is not installed')
Author: jatinluthra14, Project: apertium-apy, Lines of code: 32, Source file: servlet.py

Example 14: run_make_submission

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def run_make_submission(settings, targets_and_pipelines, split_ratio):
    pool = Pool(settings.N_jobs)
    for i, (target, pipeline, feature_masks, classifier, classifier_name) in enumerate(targets_and_pipelines):
        for j, feature_mask in enumerate(feature_masks):
            progress_str = 'T=%d/%d M=%d/%d' % (i+1, len(targets_and_pipelines), j+1, len(feature_masks))
            pool.apply_async(make_submission_predictions, [settings, target, pipeline, classifier, classifier_name],
                {'feature_mask': feature_mask, 'progress_str': progress_str, 'quiet': True})
    pool.close()
    pool.join()

    guesses = ['clip,preictal']
    num_masks = None
    classifier_names = []
    for target, pipeline, feature_masks, classifier, classifier_name in targets_and_pipelines:
        classifier_names.append(classifier_name)
        if num_masks is None:
            num_masks = len(feature_masks)
        else:
            assert num_masks == len(feature_masks)

        test_predictions = []

        for feature_mask in feature_masks:
            data = make_submission_predictions(settings, target, pipeline, classifier, classifier_name, feature_mask=feature_mask)
            test_predictions.append(data.mean_predictions)

        predictions = np.mean(test_predictions, axis=0)
        guesses += make_csv_for_target_predictions(target, predictions)

    output = '\n'.join(guesses)
    write_submission_file(settings, output, 'ensemble n=%d split_ratio=%s' % (num_masks, split_ratio), None, str(classifier_names), targets_and_pipelines)
Author: csae1152, Project: seizure-prediction, Lines of code: 33, Source file: ensemble.py

Example 15: main

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import close [as alias]
def main():
    if MAIL_TO:
        signal.signal(signal.SIGALRM, send_email_by_alarm)
        signal.alarm(TIME_NOTIFICATION_BY_EMAIL)
        send_email_start()
    start_time = int(time.time())
    manager = Manager()
    queue = manager.Queue()
    pool = Pool(PROCESS_NUMBER + 1)
    jobs = []
    pool.apply_async(listener, args=(queue,))
    for config_file in FINAL_CONFIG_TO_SCRAPE:
        job = pool.apply_async(scraper, (config_file, queue))
        jobs.append(job)
    for i, job in enumerate(jobs):
        job.get()
    # although all jobs have finished, for unknown reasons some providers may still be running
    time.sleep(10)
    # wait longer to make sure no provider is still running
    for i in range(1000):
        if len(get_summary().provider_running) > 0:
            time.sleep(500)
        else:
            break
    print "Run all has finished"
    queue.put(LISTENER_KILL_SIGNAL)
    pool.close()
    if MAIL_TO:
        send_email_end()
Author: bangnguyen, Project: analyse_trames, Lines of code: 31, Source file: run_all.py
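One more note that applies across the snippets above: since Python 3.3, Pool can also be used as a context manager, but leaving the with block calls terminate(), not close(). If you want the workers to finish their queued tasks gracefully, an explicit close()/join() is still needed. A minimal sketch, with a made-up work function for illustration:

from multiprocessing import Pool

def work(x):
    # hypothetical worker used only for this sketch
    return x + 1

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        results = pool.map(work, range(8))  # map blocks until every result is ready
        pool.close()   # stop accepting new tasks
        pool.join()    # wait for the worker processes to exit cleanly
    # without the explicit close()/join(), exiting the with block calls pool.terminate()
    print(results)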


Note: The multiprocessing.Pool.close examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.