

Python Pool.join Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.pool.Pool.join. If you are wondering what Pool.join does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the other usage examples of multiprocessing.pool.Pool.


The following 15 code examples of Pool.join are shown, sorted by popularity by default.
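
Before diving in, here is a minimal sketch of the pattern all of these examples share (the worker function square and the pool size are illustrative, not taken from any example below): join() may only be called after close() or terminate(), and it blocks until every worker process has exited.

from multiprocessing.pool import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = Pool(4)                         # 4 worker processes
    results = pool.map(square, range(10))  # blocks until all tasks finish
    pool.close()                           # no further tasks may be submitted
    pool.join()                            # blocks until every worker has exited
    print(results)                         # [0, 1, 4, ..., 81]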

Example 1: ingest

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
from itertools import repeat
from multiprocessing import cpu_count
from multiprocessing.pool import Pool, ThreadPool


def ingest(
        dataset,
        cls,
        skip_if_exists=True,
        multi_process=False,
        multi_threaded=False,
        cores=None):

    pool = None

    if multi_process:
        pool = Pool(cores or cpu_count())
        map_func = pool.imap_unordered
    elif multi_threaded:
        pool = ThreadPool(cores or cpu_count())
        map_func = pool.imap_unordered
    else:
        map_func = map

    cls_args = repeat(cls)
    skip_args = repeat(skip_if_exists)

    # imap_unordered and Python 3's built-in map both return lazy iterators,
    # so drain the iterator to ensure every item is actually ingested
    for _ in map_func(ingest_one, zip(dataset, cls_args, skip_args)):
        pass

    if pool is not None:
        # if we're ingesting using multiple processes or threads, the processing
        # should be parallel, but this method should be synchronous from the
        # caller's perspective
        pool.close()
        pool.join()
Developer: JohnVinyard, Project: zounds, Lines: 32, Source: ingest.py
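
A side note on the example above: pool.map blocks and returns a fully ordered list, while imap_unordered returns a lazy iterator that yields results as they complete, which is why the body drains it. A minimal sketch of the difference, assuming a toy worker function work:

import time
from multiprocessing.pool import Pool

def work(x):
    time.sleep(0.01 * (x % 3))
    return x

if __name__ == '__main__':
    pool = Pool(4)
    print(pool.map(work, range(6)))        # blocks; results come back in input order
    for r in pool.imap_unordered(work, range(6)):
        print(r)                           # yields results in completion order
    pool.close()
    pool.join()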

Example 2: add_tree

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def add_tree(self, iterations=-1, snapshot=False):
    """
    Uses multiple processes to fully utilize the underlying CPU, creating
    the trees of the forest and storing them in the forest's list of trees
    :param iterations: number of trees to make, -1 means use default setting
    :param snapshot: if True, record the forest's error after the trees are added
    :return: None
    """
    print("Adding trees:", iterations)
    if iterations == -1:
        iterations = self.default_tree_count
    #########################
    # MULTI-PROCESS
    #########################
    pool = Pool()  # creates one worker process per CPU core on the machine
    outputs = pool.map(make_tree, [(self.data_copy(), self.depthlimit, self.weak_learner)
                                   for _ in range(iterations)])
    pool.close()
    pool.join()
    self.trees.extend(outputs)  # store the newly created trees
    #########################
    # SINGLE PROCESS
    #########################
    #for i in range(iterations):
    #    tree = Tree(self.data, self.bagging, self.bag_ratio, self.depthlimit, self.weak_learner)
    #    self.trees.append(tree)  # get the trees created and store them
    if snapshot:
        self.sum_squares(len(self.trees))  # record the error after each snapshot
Developer: alexftian, Project: random_forest, Lines: 29, Source: forest.py
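
Note how the example packs all of make_tree's inputs into one tuple: Pool.map passes exactly one argument per task, so a multi-argument worker must unpack it itself (starmap, available since Python 3.3, unpacks automatically). A minimal sketch with hypothetical stand-ins for the forest's data:

from multiprocessing.pool import Pool

def make_tree(args):
    data, depthlimit, weak_learner = args  # unpack the single mapped argument
    return (len(data), depthlimit, weak_learner)

if __name__ == '__main__':
    pool = Pool()
    jobs = [([1, 2, 3], 5, 'stump') for _ in range(4)]
    print(pool.map(make_tree, jobs))       # pool.starmap(f, jobs) would unpack for you
    pool.close()
    pool.join()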

Example 3: stat_volume

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def stat_volume(stime, etime):
    tgsinfo = read_tgs_info()

    # from multiprocessing.dummy import Pool as ThreadPool
    from multiprocessing.pool import Pool

    cids = list(tgsinfo.keys())  # materialize so the key order stays fixed

    pool = Pool()
    volume = [pool.apply_async(stat_tgs_volume, args=(stime, etime, int(cid))) for cid in cids]
    pool.close()

    print('waiting to join....')
    pool.join()

    print('start writing to file...')

    volume0 = []
    for i, elem in enumerate(volume):
        volume0.append((cids[i], elem.get()))
    volume0.sort(key=lambda x: x[1], reverse=True)

    total = 0
    with open(os.path.join(root_dir, "result", "volume.txt"), "w") as f:
        for i, elem in enumerate(volume0):
            total += elem[1]

            line = "%5s,%s: %d\n" % (elem[0], tgsinfo[elem[0]]['kkmc'], elem[1])
            f.write(line)

    print('%d records in total.' % total)
Developer: xlees, Project: TgsVis, Lines: 33, Source: trajetory.py

Example 4: query_tweets

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def query_tweets(query, limit=None, begindate=dt.date(2006, 3, 21), enddate=dt.date.today(), poolsize=20, lang=''):
    no_days = (enddate - begindate).days
    if poolsize > no_days:
        # Since we are assigning each pool a range of dates to query,
        # the number of pools should not exceed the number of dates.
        poolsize = no_days
    dateranges = [begindate + dt.timedelta(days=elem) for elem in linspace(0, no_days, poolsize+1)]

    if limit:
        limit_per_pool = (limit // poolsize)+1
    else:
        limit_per_pool = None

    queries = ['{} since:{} until:{}'.format(query, since, until)
               for since, until in zip(dateranges[:-1], dateranges[1:])]

    all_tweets = []
    try:
        pool = Pool(poolsize)
        logger.info('queries: {}'.format(queries))
        try:
            for new_tweets in pool.imap_unordered(partial(query_tweets_once, limit=limit_per_pool, lang=lang), queries):
                all_tweets.extend(new_tweets)
                logger.info('Got {} tweets ({} new).'.format(
                    len(all_tweets), len(new_tweets)))
        except KeyboardInterrupt:
            logger.info('Program interrupted by user. Returning all tweets '
                         'gathered so far.')
    finally:
        pool.close()
        pool.join()

    return all_tweets
Developer: taspinar, Project: twitterscraper, Lines: 35, Source: query.py

Example 5: get_correlation_parallel

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def get_correlation_parallel(s1, s2):
    """
    :param s1: series 1
    :param s2: series 2
    NOTE: series are numbered 1 to 25 when given as arguments
    returns the correlation between the series
    """
    start = time.time()
    offsets = []  # these will be the arguments to all the parallel jobs
    instances = MAX_ROWS // BATCH_SIZE  # Pool expects an integer process count
    mean, std = calculate_mean_std_parallel()
    stripped_mean, stripped_std = calculate_stripped_mean_std_parallel(mean, std)
    processes = Pool(processes=instances)
    for i in range(instances):
        offsets.append((s1, s2, mean, std, stripped_mean, stripped_std, i * BATCH_SIZE))
    results = processes.map(get_correlation, offsets)
    processes.close()
    processes.join()
    pearson_corr = 0
    total = 0
    for result in results:
        pearson_corr += result[0] * result[1]
        total += result[1]
    pearson_corr = 1.0 * pearson_corr / total  # weighted average over the batches
    t_value = abs(pearson_corr * math.sqrt(1.0 * (total - 2) / (1 - (pearson_corr * pearson_corr))))
    p_value = t.sf(t_value, total - 2)
    print("\n ######### CORRELATION BETWEEN SERIES", s1, "AND SERIES", s2, "is",
          pearson_corr, "t value is", t_value, "and p value is", p_value, "######### \n")
    end = time.time()
    print("EXECUTION TIME :", end - start, "sec")
    return pearson_corr
Developer: KanwalPrakashSingh, Project: data_cleaning, Lines: 32, Source: seta.py
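
The significance test at the end presumably uses scipy.stats.t (the import is not shown in the snippet). A small self-contained check of that computation, with illustrative values for the correlation and sample size:

import math
from scipy.stats import t

pearson_corr, total = 0.5, 100  # illustrative values, not from the example
t_value = abs(pearson_corr * math.sqrt((total - 2) / (1 - pearson_corr ** 2)))
p_value = t.sf(t_value, total - 2)  # one-sided p-value from Student's t
print(t_value, p_value)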

Example 6: start

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
    def start(self):
        """Starts a server that controls local workers.

        Calling this function starts a pool of `num_workers` workers used to run
        targets sent to the server. The server will run indefinitely unless shut
        down by the user.
        """
        try:
            serv = Listener((self.hostname, self.port))
            workers = Pool(
                processes=self.num_workers,
                initializer=Worker,
                initargs=(self.status, self.queue, self.waiting),
            )

            logging.info(
                "Started %s workers, listening on port %s",
                self.num_workers,
                serv.address[1],
            )
            self.wait_for_clients(serv)
        except OSError as e:
            if e.errno == 48:  # errno 48 is EADDRINUSE on macOS
                raise ServerError(
                    (
                        "Could not start workers listening on port {}. "
                        "The port may already be in use."
                    ).format(self.port)
                )
        except KeyboardInterrupt:
            logging.info("Shutting down...")
            workers.close()
            workers.join()
            self.manager.shutdown()
Developer: mailund, Project: gwf, Lines: 36, Source: local.py

Example 7: _MultiExecutor

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
class _MultiExecutor(_Executor):
    """Execute functions async in a process pool"""

    def __init__(self):
        super(_MultiExecutor, self).__init__()
        self._children = 0
        self.pool = Pool()

    def _collector(self, result):
        super(_MultiExecutor, self)._collector(result)
        self._children -= 1

    def execute(self, func, args):
        self._children += 1
        self.pool.apply_async(func, args, callback=self._collector)

    def wait_for_results(self):
        self.pool.close()
        # One would have hoped joining the pool would take care of this, but
        # apparently you need to first make sure that all your launched tasks
        # have returned their results properly before calling join, or you
        # risk a deadlock.
        while self._children > 0:
            time.sleep(0.001)
        self.pool.join()
Developer: reinout, Project: checkoutmanager, Lines: 27, Source: executors.py
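
The busy-wait above can be avoided by keeping the AsyncResult handles that apply_async returns and blocking on them directly; get() also re-raises any exception from the worker. A minimal sketch of that alternative (not the checkoutmanager code; func is illustrative):

from multiprocessing.pool import Pool

def func(x):
    return x * x

if __name__ == '__main__':
    pool = Pool()
    handles = [pool.apply_async(func, (i,)) for i in range(8)]
    results = [h.get() for h in handles]  # blocks until every task has returned
    pool.close()
    pool.join()                           # safe: no tasks remain outstanding
    print(results)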

Example 8: _itergroundings

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def _itergroundings(self, simplify=False, unsatfailure=False):
    global global_bpll_grounding
    global_bpll_grounding = self
    if self.multicore:
        pool = Pool(maxtasksperchild=1)
        try:
            for gndresult in pool.imap(with_tracing(create_formula_groundings), self.formulas):
                for fidx, stat in gndresult:
                    for (varidx, validx, val) in stat:
                        self._varidx2fidx[varidx].add(fidx)
                        self._addstat(fidx, varidx, validx, val)
                    checkmem()
                yield None
        except CtrlCException as e:
            pool.terminate()
            raise e
        pool.close()
        pool.join()
    else:
        for gndresult in imap(create_formula_groundings, self.formulas):
            for fidx, stat in gndresult:
                for (varidx, validx, val) in stat:
                    self._varidx2fidx[varidx].add(fidx)
                    self._addstat(fidx, varidx, validx, val)
            yield None
Developer: Bovril, Project: pracmln, Lines: 27, Source: bpll.py

Example 9: main

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def main(datadir, convert_dir, crop_size):
    try:
        os.mkdir(convert_dir)
    except OSError:
        pass

    filenames = data_util.get_image_files(datadir)

    print('Resizing images in {} to {}'.format(datadir, convert_dir))

    n = len(filenames)

    batch_size = 500
    batches = n // batch_size + 1
    p = Pool()

    args = []

    for f in filenames:
        args.append((convert_size, (datadir, convert_dir, f, crop_size)))

    for i in range(batches):
        print('batch {:>2} / {}'.format(i + 1, batches))
        p.map(convert, args[i * batch_size : (i + 1) * batch_size])

    p.close()
    p.join()
    print('Done')
Developer: Seth-Park, Project: fundus-diabetes-detection, Lines: 30, Source: convert.py

Example 10: main

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def main():
    global pool
    pool = Pool(POOL_SIZE)

    nseeds = 100

#    print("== generating seeds...")
#    generate_seeds(nseeds)

    #print("running const density experiments...")
    #run_constant_density(0.1, range(100, 1000, 100), nseeds)

    #print("running const size experiments...")
    #run_constant_size(50, range(100, 1000, 100), nseeds)

    print("== running aggregate interval experiments (const density)...")
#    run_aggregate_interval_constant_density(0.1, range(100, 1000, 100), nseeds, [100, 500] + list(range(1000, 4000, 1000)))

    run_aggregate_interval_constant_density(0.1, range(100, 1000, 100), nseeds, [3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.2, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.3, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.4, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.5, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])

    pool.close()
    pool.join()
Developer: Darma, Project: wiselib, Lines: 33, Source: run_shawn_experiments.py

Example 11: work

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def work(host, port, processes, threads, times):
    pool = Pool(processes,
                lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    p = Process(target=progress)
    p.daemon = True

    start = time.time()

    try:
        for chunk in divide(times, processes):
            pool.apply_async(thread, (host, port, threads, chunk))

        p.start()

        pool.close()
        pool.join()
        p.terminate()
        p.join()

    except KeyboardInterrupt:
        pool.terminate()
        p.terminate()
        p.join()
        pool.join()

    return time.time() - start
Developer: lrg87, Project: hackathon-eleme, Lines: 28, Source: stress.py
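
The lambda initializer above makes every worker ignore SIGINT, so Ctrl-C is delivered only to the parent, which then decides between terminate() and join(). One caveat: a lambda cannot be pickled, so under the 'spawn' start method (the default on Windows and, since Python 3.8, macOS) the initializer must be a module-level function. A sketch under that assumption:

import signal
from multiprocessing.pool import Pool

def ignore_sigint():
    # each worker ignores Ctrl-C; the parent decides the pool's fate
    signal.signal(signal.SIGINT, signal.SIG_IGN)

def work(x):
    return x * x

if __name__ == '__main__':
    pool = Pool(4, initializer=ignore_sigint)
    try:
        print(pool.map(work, range(8)))
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()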

Example 12: _itergroundings

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def _itergroundings(self, simplify=True, unsatfailure=True):
    # generate all groundings
    if not self.formulas:
        return
    global global_fastConjGrounding
    global_fastConjGrounding = self
    batches = list(rndbatches(self.formulas, 20))
    batchsizes = [len(b) for b in batches]
    if self.verbose:
        bar = ProgressBar(width=100, steps=sum(batchsizes), color='green')
        i = 0
    if self.multicore:
        pool = Pool()
        try:
            for gfs in pool.imap(with_tracing(create_formula_groundings), batches):
                if self.verbose:
                    bar.inc(batchsizes[i])
                    bar.label(str(cumsum(batchsizes, i + 1)))
                    i += 1
                for gf in gfs: yield gf
        except Exception as e:
            logger.error('Error in child process. Terminating pool...')
            pool.close()
            raise e
        finally:
            pool.terminate()
            pool.join()
    else:
        for gfs in imap(create_formula_groundings, batches):
            if self.verbose:
                bar.inc(batchsizes[i])
                bar.label(str(cumsum(batchsizes, i + 1)))
                i += 1
            for gf in gfs: yield gf
Developer: danielnyga, Project: pracmln, Lines: 36, Source: fastconj.py

Example 13: get_data

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def get_data():
    f2 = open('app_links1.txt', 'r')

    nprocs = 500  # nprocs is the number of worker processes to run
    ParsePool = Pool(nprocs)
    #ParsePool.map(btl_test, url)
    ParsedURLS = ParsePool.map(deatilsExtract, f2)
    ParsePool.close()
    ParsePool.join()
Developer: mounarajan, Project: google-play-crawler-new, Lines: 11, Source: script.py

Example 14: load_images_uint

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def load_images_uint(files):
    p = Pool()
    process = imread  # imread comes from the project's imaging library (e.g. skimage.io)
    results = p.map(process, files)
    p.close()
    p.join()
    images = np.array(results)
    images = images.transpose(0, 3, 1, 2)  # reorder to (N, channels, height, width)
    return images
Developer: Seth-Park, Project: fundus-diabetes-detection, Lines: 11, Source: data_util.py

Example 15: main

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import join [as alias]
def main():
    print('Process (%s) start...' % os.getpid())
    p = Pool()
    for i in range(4):
        p.apply_async(long_time_task, args=(i,))
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')
Developer: Eric-Xie, Project: demos_lxf, Lines: 11, Source: do_multiprocess.py
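
apply_async here discards the workers' return values. When they are needed, keep the AsyncResult objects and call get() after the pool has finished; a minimal sketch, with long_time_task stubbed out since the original helper is not shown:

from multiprocessing.pool import Pool

def long_time_task(name):
    # stand-in for the helper in the example above
    return 'task %s done' % name

if __name__ == '__main__':
    p = Pool()
    async_results = [p.apply_async(long_time_task, args=(i,)) for i in range(4)]
    p.close()
    p.join()
    for r in async_results:
        print(r.get())  # returns the value, or re-raises the worker's exception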


Note: The multiprocessing.pool.Pool.join examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code; do not reproduce without permission.