

Python Pool.apply_async Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.pool.Pool.apply_async. If you are wondering what exactly Pool.apply_async does or how it is used in practice, the curated examples below may help. You can also explore further usage examples of its containing class, multiprocessing.pool.Pool.


The following presents 15 code examples of the Pool.apply_async method, ordered by popularity by default.
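
As a quick orientation before the collected examples: apply_async submits a single call to the pool and immediately returns an AsyncResult; calling get() on that result blocks until the return value (or the worker's exception) is available. A minimal, self-contained sketch of the pattern (the square function is purely illustrative):

from multiprocessing.pool import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=4)
    results = [pool.apply_async(square, (i,)) for i in range(10)]
    pool.close()   # no more tasks will be submitted
    pool.join()    # wait for the workers to finish
    print([r.get() for r in results])   # [0, 1, 4, 9, ...]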

Example 1: work

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def work(host, port, processes, threads, times):
    pool = Pool(processes,
                lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    p = Process(target=progress)
    p.daemon = True

    start = time.time()

    try:
        for chunk in divide(times, processes):
            pool.apply_async(thread, (host, port, threads, chunk))

        p.start()

        pool.close()
        pool.join()
        p.terminate()
        p.join()

    except KeyboardInterrupt:
        pool.terminate()
        p.terminate()
        p.join()
        pool.join()

    return time.time() - start
Developer: lrg87 | Project: hackathon-eleme | Lines: 28 | Source: stress.py
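
Example 1 ignores SIGINT in the pool initializer so that Ctrl+C reaches only the parent process, which then tears the pool down in the except branch. The helpers it calls (divide, thread, progress) are not part of the excerpt; the stand-ins below are assumptions, included only so the snippet can be read on its own:

import time

# Hypothetical stand-ins for the helpers referenced in Example 1; the real
# definitions live in stress.py and are not shown in the excerpt.
def divide(times, processes):
    # Split `times` requests into one roughly equal chunk per worker process.
    base, remainder = divmod(times, processes)
    return [base + (1 if i < remainder else 0) for i in range(processes)]

def thread(host, port, threads, chunk):
    # Each pool worker would fire `chunk` requests at host:port using
    # `threads` concurrent threads; the request logic is omitted here.
    pass

def progress():
    # Periodically report progress until terminated by the parent.
    while True:
        time.sleep(1)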

Example 2: _MultiExecutor

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
class _MultiExecutor(_Executor):
    """Execute functions async in a process pool"""

    def __init__(self):
        super(_MultiExecutor, self).__init__()
        self._children = 0
        self.pool = Pool()

    def _collector(self, result):
        super(_MultiExecutor, self)._collector(result)
        self._children -= 1

    def execute(self, func, args):
        self._children += 1
        self.pool.apply_async(func, args, callback=self._collector)

    def wait_for_results(self):
        self.pool.close()
        # One would have hoped joining the pool would take care of this, but
        # apparently you need to first make sure that all your launched tasks
        # have returned their results properly before calling join, or you
        # risk a deadlock.
        while self._children > 0:
            time.sleep(0.001)
        self.pool.join()
Developer: reinout | Project: checkoutmanager | Lines: 27 | Source: executors.py
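
The wait_for_results method spins until every callback has decremented the child counter before joining the pool, as the comment above explains. A standalone sketch of the same counter-plus-callback pattern (illustrative only; the names below are not from the original module):

import time
from multiprocessing.pool import Pool

# Standalone illustration of the pattern used by _MultiExecutor.
pending = 0
results = []

def work(x):
    time.sleep(0.01)
    return x * 2

def collect(value):
    # Callbacks run in the parent process when a task finishes.
    global pending
    results.append(value)
    pending -= 1

if __name__ == '__main__':
    pool = Pool()
    for i in range(8):
        pending += 1
        pool.apply_async(work, (i,), callback=collect)
    pool.close()
    while pending > 0:          # wait until every callback has fired...
        time.sleep(0.001)
    pool.join()                 # ...then join the pool
    print(sorted(results))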

Example 3: main

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def main():
    print('Process (%s) start...' % os.getpid())
    p = Pool()
    for i in range(4):
        p.apply_async(long_time_task, args=(i,))
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')
Developer: Eric-Xie | Project: demos_lxf | Lines: 11 | Source: do_multiprocess.py
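
long_time_task is not defined in this excerpt. In the tutorial-style script this example comes from, it is typically a worker that sleeps for a random interval and prints its pid; the sketch below is a reconstruction, not the original code:

import os
import random
import time

# Reconstructed worker for Example 3 (assumed; not part of the excerpt).
def long_time_task(name):
    print('Run task %s (%s)...' % (name, os.getpid()))
    start = time.time()
    time.sleep(random.random() * 3)
    print('Task %s runs %0.2f seconds.' % (name, time.time() - start))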

Example 4: TcpController

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
class TcpController(object):
    def __init__(self, handlers):
        self.handlers = handlers
        self.workers = Pool(MAX_PROCESS_POOL_SIZE)

    def process(self, header, body):
        self.workers.apply_async(wrap, (self.handlers[header](), body))

    def destroy(self):
        self.handlers = None
        self.workers.close()
Developer: xsank | Project: pseudo-http | Lines: 13 | Source: tcpcontroller.py
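
The wrap function submitted in process() is not shown. A plausible reason for routing the call through a module-level function is that everything handed to apply_async must be picklable, and a plain top-level callable keeps the submission simple. A hypothetical shape for wrap (the handle() method name is an assumption):

import logging

# Hypothetical helper for Example 4; the real wrap lives elsewhere in the
# project, and the handler interface (a handle() method) is assumed here.
def wrap(handler, body):
    try:
        return handler.handle(body)
    except Exception:
        # Exceptions raised inside a pool worker are otherwise captured in an
        # AsyncResult that Example 4 never retrieves, so log them here.
        logging.exception('Handler %r failed', handler)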

Example 5: _get

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
    def _get(self, args):
        draft_id = args[0]
        id = args[1] if len(args) > 1 else None

        q = self.db.query(Player)
        if id is not None:
            player = q.filter(Player.id == int(id)).first()
            team = self.db.query(Team).filter(and_(Team.is_owner == True,
                                                   Team.draft_id == draft_id)).first()

            available_players = self.db.query(Player).join(Player.core).filter(and_(PlayerCore.rank != None,
                                                                                    PlayerCore.target_price != None,
                                                                                    PlayerCore.points > 0,
                                                                                    Player.draft_id == draft_id,
                                                                                    Player.team_id == None,
                                                                                    Player.id != player.id)).order_by(PlayerCore.rank).all()

            min_price = 1
            max_price = min(player.core.target_price + 21, team.money)
            manager = Manager()
            max_starters_points = manager.dict()
            max_bench_points = manager.dict()
            pool = Pool(processes=8)
            starters, bench = get_starters_and_bench(self.db, team.id)
            max_starters_points[0] = optimizer.optimize_roster(starters, available_players, team.money - (constants.BENCH_SIZE - len(bench)))[1]
            for m in range(min_price, 10):
                pool.apply_async(wrap_optimizer, args=(starters, available_players, team.money - m - (constants.BENCH_SIZE - len(bench)) + 1, max_bench_points, m))

            full_starters = True
            for s in starters:
                if s is None:
                    full_starters = False
            if not full_starters:
                starters_clone = list(starters)
                bench_clone = list(bench)
                place_player(player, starters_clone, bench_clone)
                for m in range(min_price, max_price):
                    pool.apply_async(wrap_optimizer, args=(starters_clone, available_players, team.money - m - (constants.BENCH_SIZE - len(bench_clone)), max_starters_points, m))

            pool.close()
            pool.join()

            ret = player.to_dict(['core'])
            ret['max_starters_points'] = dict(max_starters_points)
            ret['max_bench_points'] = dict(max_bench_points)

            return ret
        else:
            players = q.join(PlayerCore).filter(and_(Player.draft_id == int(draft_id),
                                                     PlayerCore.rank != None,
                                                     PlayerCore.target_price != None)).all()
            return {'players': [p.to_dict(['core']) for p in players]}
Developer: jkgneu12 | Project: draft | Lines: 54 | Source: api.py

Example 6: main

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def main():
	"""
		Build all the models. Spin off a new process for each participant
		because the ANN library is not multithreaded. Process is used instead
		of thread to leverage multiple cores.
	"""
	parser = ArgumentParser()
	parser.add_argument("inputFilename")
	parser.add_argument("outputDirectory")
	
	args = parser.parse_args()
	inputFilename = args.inputFilename
	outputDirectory = args.outputDirectory
	
	data = pickle.load( open(inputFilename, 'rb') )
	
	tasks = [ 'matb', 'rantask' ]
	participantIds = [ '001', '002', '003', '004', '005', '006', '007' ]
	
	# Cut off first row header for each data set
	for task in tasks:
		for participantId in participantIds:
			data[participantId][task] = data[participantId][task][1:] 
			
	splits = performSplit( data )
	
	# Record start time so that the elapsed time can be determined
	start_time = time.time()
	
	# Create a multicore processing pool with 7 processes ( 7 so that one core stays free
	# for system processes )
	pool = Pool( processes = 7 )
	
	# Build models for participants in a task
	for task in tasks:
		for participantId in participantIds:
			outputFilename = path.join( outputDirectory, 'testingOn-' + participantId + '-' + task + '.txt' )
			
			# Spin off a process for the building
			pool.apply_async( tuneANN, ( splits[participantId][task], outputFilename ) )
			
	# Close down the pool so that we can wait on all the processes
	pool.close()
	pool.join()
	
	# Calculate and print the elapsed time
	elapsed_time = time.time() - start_time
	print( "Elapsed time: " + str(elapsed_time) )
Developer: tjc1575 | Project: Thesis | Lines: 50 | Source: annBuilder_SPCT.py

Example 7: stat_volume

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def stat_volume(stime,etime):
    tgsinfo = read_tgs_info()

    # from multiprocessing.dummy import Pool as ThreadPool
    from multiprocessing.pool import Pool

    pool = Pool()
    volume = [pool.apply_async(stat_tgs_volume,args=(stime,etime,int(cid))) for cid in tgsinfo.keys()]
    pool.close()

    print('waiting to join....')
    pool.join()

    print('start writing to file...')

    volume0 = []
    for i, elem in enumerate(volume):
        volume0.append((list(tgsinfo.keys())[i], elem.get()))
    volume0.sort(key=lambda x:x[1], reverse=True)

    total = 0
    with open(os.path.join(root_dir, "result", "volume.txt"),"w") as f:
        for i,elem in enumerate(volume0):
            # cid = tgsinfo.keys()[i]
            # vol = elem.get()
            total += elem[1]

            line = "%5s,%s: %d\n" % (elem[0], tgsinfo[elem[0]]['kkmc'], elem[1])
            f.write(line)

    print('totally %d records.' % total)
Developer: xlees | Project: TgsVis | Lines: 33 | Source: trajetory.py

Example 8: manager_process

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def manager_process(dir_queue, file_queue, out_queue):
    """Dispatches and manages path and scanning workers.

    """
    pool = Pool(options.num_threads)
    atexit.register(at_exit_manager, pool)
    logging.info('Gathering Files...')
    pool.apply(explore_path, (dir_queue, file_queue))
    logging.info('Files gathered. Scanning %s files...', file_queue.qsize())
    logging.info('Starting %s scan processes...', options.num_threads)
    print('~' * 80)
    thread.start_new_thread(print_status, (file_queue,))
    for _ in range(options.num_threads):
        pool.apply_async(parallel_scan, (file_queue, out_queue))
    pool.close()
    pool.join()
    out_queue.put(StopIteration)
Developer: bashcode | Project: Pyscan | Lines: 19 | Source: pyscan.py

Example 9: multiprocess_all_chromosomes

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def multiprocess_all_chromosomes(func, cls, *args, **kwargs):
    '''
    Convenience method for splitting up queries based on tag id.
    '''
    processes = current_settings.ALLOWED_PROCESSES

    set_chromosome_lists(cls, use_table=kwargs.get('use_table', None))
    p = Pool(processes)

    try:
        for chr_list in current_settings.CHR_LISTS:
            p.apply_async(func, args=[cls, chr_list, ] + list(args))
        p.close()
        p.join()
    except Exception as e:
        print('Terminating pool.')
        p.terminate()
        raise e
Developer: karmel | Project: vespucci | Lines: 20 | Source: transcript.py

Example 10: run

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
    def run(self):
        if self.fileName[0] is not None and self.fileName[0] != "":
            # p = Process(target=self.creationPDF, args=(self.fileName[0],))
            # self.creationPDF(self.fileName[0])
            # p.start()
            pool = Pool(processes=4)  # start 4 worker processes
            result = pool.apply_async(self.creationPDF, [self.fileName[0]])
        else:
            print("Sauvegarde annule")
Developer: ProjetPC2 | Project: SIMM2.0-DEV | Lines: 11 | Source: Rapport.py

Example 11: run_jar_new_thread

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def run_jar_new_thread(config_path, **kwargs):
    """
    kwargs - arguments dictionary of run_jar()
    """
    pool = Pool(processes=1)
    result = pool.apply_async(_run_jar_with_config, [config_path], kwargs['kwargs'])
    job_id = result.get()
    return job_id
Developer: nmakhotkin | Project: nmakhotkin-rnd-beans | Lines: 11 | Source: oozie_jar.bak.py

Example 12: main

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def main():
    queue_logger = setup_redirection()
    queue_logger.write("ABCDEF\n")
    try:
        p = Pool(10)
        results = [p.apply_async(some_process_body) for i in range(20)]
        [result.get() for result in results]
        p.close()
    finally:
        queue_logger.stop()
Developer: idan | Project: celery | Lines: 12 | Source: queuelog.py

Example 13: process

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def process(configuration: LogParserConfig, db: DatabaseConnection, process_count: int):
    influxdb_client = get_client(configuration)
    pool = Pool(process_count)
    currently_processing = defaultdict(list)
    dl_dir = os.path.join(db.root_dir, 'data')

    while True:
        logging.info('Checking for new files to process')
        settings = db.get_settings()
        new_files = get_new_files_to_process(configuration.buckets, settings)
        settings.files.extend(new_files)
        timestamp = int(time.time())
        completed_files = defaultdict(list)
        # Empty the queue of finished work and build a list of completed files per bucket
        while True:
            if finished_queue.empty():
                break
            success, bucket, filename = finished_queue.get()
            currently_processing[bucket].remove(filename)
            if success:
                completed_files[bucket].append(filename)
        # Set processed timestamp on processed files
        for file in settings.files:
            if file.name in completed_files[file.bucket]:
                file.processed_timestamp = timestamp
        logging.info('%d files completed processing since last loop', sum(len(l) for l in completed_files.values()))
        completed_files.clear()
        db.save_settings(settings)
        added = 0
        for file in settings.files:
            if file.processed_timestamp is None and file.name not in currently_processing[file.bucket]:
                currently_processing[file.bucket].append(file.name)
                pool.apply_async(process_file, (file.bucket, file.name, dl_dir, influxdb_client), {}, after_processed,
                                 after_error)
                added += 1
        processing_count = sum(len(l) for l in currently_processing.values())
        if added:
            logging.info('Added %s files to pool, %s files currently processing.', added, processing_count)
        else:
            logging.info('Nothing new to process, sleeping %s seconds. %s files currently in queue to be processed.',
                         configuration.interval, processing_count)
            time.sleep(configuration.interval)
            continue
Developer: our-city-app | Project: log-parser | Lines: 45 | Source: __init__.py
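
Example 13 passes two callbacks positionally to apply_async (callback and error_callback) and drains a module-level finished_queue, none of which appear in the excerpt. The sketch below shows one plausible shape for those pieces, purely as an assumption:

import logging
from multiprocessing import Queue

# Assumed module-level pieces for Example 13 (not shown in the excerpt).
finished_queue = Queue()

def after_processed(result):
    # Pool callbacks run in the parent process; `result` is whatever
    # process_file returned, assumed here to be (success, bucket, filename).
    finished_queue.put(result)

def after_error(exc):
    # error_callback receives the exception raised inside the worker.
    logging.error('File processing failed: %s', exc)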

Example 14: create_execution_pool

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
def create_execution_pool():
    global execution_pool
    pool_size = engine.app.config['POOL_SIZE']
    execution_pool = Pool(pool_size, initializer=initialize_worker)

    futures = []
    for i in range(pool_size * 2):
        futures.append(execution_pool.apply_async(connect_worker))

    [f.get() for f in futures]
Developer: r-BenDoan | Project: stream_engine | Lines: 12 | Source: cass.py

Example 15: _transport_backup_parallel

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import apply_async [as alias]
    def _transport_backup_parallel(data, filename, aws_key, aws_secret, bucketname):
        """
        Parallel multipart upload.
        """
        headers = {}
        _logger.info('Backing up via S3 parallel multipart upload agent')
        keyname = filename
        tempInFile = NamedTemporaryFile(suffix='.zip', prefix='db-backup-', delete=False)
        tempInFile.write(data)
        tempInFile.close()
        source_path = tempInFile.name
        source_size = os.stat(source_path).st_size
        parallel_processes = (multiprocessing.cpu_count() * 2) + 1

        conn = boto.connect_s3(aws_key, aws_secret)
        bucket = conn.get_bucket(bucketname)

        mtype = 'application/zip, application/octet-stream'
        headers.update({'Content-Type': mtype})

        mp = bucket.initiate_multipart_upload(keyname, headers=headers)

        bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
                              5242880)
        chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))

        pool = Pool(processes=parallel_processes)
        for i in range(chunk_amount):
            offset = i * bytes_per_chunk
            remaining_bytes = source_size - offset
            bytes = min([bytes_per_chunk, remaining_bytes])
            part_num = i + 1
            pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
                                            part_num, source_path, offset, bytes])
        pool.close()
        pool.join()

        if len(mp.get_all_parts()) == chunk_amount:
            mp.complete_upload()
        else:
            mp.cancel_upload()
        os.unlink(tempInFile.name)
        _logger.info('Data successfully backed up to s3')
Developer: jeffery9 | Project: odoo-saas-tools | Lines: 45 | Source: saas_client.py


Note: The multiprocessing.pool.Pool.apply_async examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code. Do not reproduce this article without permission.