

Python Pool.apply_async method code examples

This article collects typical usage examples of the Python method multiprocessing.Pool.apply_async. If you have been wondering what exactly Pool.apply_async does, how to use it, or what it looks like in real code, the curated examples here should help. You can also explore further usage examples of its containing class, multiprocessing.Pool.


Below are 15 code examples of the Pool.apply_async method, sorted by popularity by default.
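Before the collected examples, here is a minimal, self-contained sketch of the basic pattern (the worker function square and the pool size are illustrative, not taken from any example below). apply_async submits a job without blocking and immediately returns an AsyncResult; calling get() on that handle waits for the job and returns (or re-raises) its outcome.

from multiprocessing import Pool

def square(x):
    return x * x

if __name__ == '__main__':  # guard required when multiprocessing uses the spawn start method
    with Pool(processes=4) as pool:
        # apply_async returns immediately with an AsyncResult handle
        handles = [pool.apply_async(square, (i,)) for i in range(10)]
        # get() blocks until the corresponding job has finished
        print([h.get() for h in handles])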

Example 1: _send_requests

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
    def _send_requests(self, total_requests, path='', other=False):
        url = 'http://{0}:{1}/{2}'.format(self.traffic_server_host, self.traffic_server_port, path)
        url2 = 'http://{0}:{1}/other/{2}'.format(self.traffic_server_host, self.traffic_server_port, path)
        jobs = []
        jobs2 = []
        pool = Pool(processes=4)
        for _ in range(total_requests):
            jobs.append(pool.apply_async(requests.get, (url,)))
            if other:
                jobs2.append(pool.apply_async(requests.get, (url2,)))

        results = []
        results2 = []
        for j in jobs:
            try:
                results.append(j.get())
            except Exception as e:
                results.append(e)

        for j in jobs2:
            try:
                results2.append(j.get())
            except Exception as e:
                results2.append(e)

        return results, results2
Developer: BillTheBest, Project: trafficserver, Lines: 28, Source: test_origin_max_connections.py
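A note on Example 1: AsyncResult.get() re-raises any exception raised inside the worker, which is why each j.get() is wrapped in try/except. On Python 3 the same bookkeeping can also be done with the error_callback parameter; a hypothetical variant of the request loop above (reusing pool, url and total_requests from the example):

errors = []
jobs = [pool.apply_async(requests.get, (url,), error_callback=errors.append)
        for _ in range(total_requests)]
# failed requests land in `errors` as exception instances; successes come via jobs[i].get()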

Example 2: dirImgProcess

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
def dirImgProcess(path):
    global workerPool, workerOutput, theGreatIndex
    workerPool = Pool()
    workerOutput = []
    work = []
    theGreatIndex = {}
    pagenumber = 0
    for (dirpath, dirnames, filenames) in os.walk(path):
        for afile in filenames:
            if getImageFileName(afile) is not None:
                pagenumber += 1
                work.append([afile, dirpath, options])
    if GUI:
        GUI.progressBarTick.emit(str(pagenumber))
    if len(work) > 0:
        for i in work:
            workerPool.apply_async(func=fileImgProcess, args=(i, ), callback=fileImgProcess_tick)
        workerPool.close()
        workerPool.join()
        if GUI and not GUI.conversionAlive:
            rmtree(os.path.join(path, '..', '..'), True)
            raise UserWarning("Conversion interrupted.")
        if len(workerOutput) > 0:
            rmtree(os.path.join(path, '..', '..'), True)
            raise RuntimeError("One of the workers crashed. Cause: " + workerOutput[0])
    else:
        rmtree(os.path.join(path, '..', '..'), True)
        raise UserWarning("Source directory is empty.")
Developer: moshev, Project: kcc, Lines: 30, Source: comic2ebook.py

Example 3: getData

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
def getData():
    if os.path.isfile("chat_urls.p"):
        chat_urls = pickle.load( open( "chat_urls.p", "rb" ) )
    else:
        chat_urls = {}
        for user in users:
            chat_urls[user] = get_urls(user)
        teams_url = "http://espn.go.com/mlb/teams"
        pickle.dump( chat_urls, open( "chat_urls.p", "wb" ) )

    # for user in chat_urls:
    #     urls = chat_urls[user]
    #     for url in urls:
    #         getLog(url)
    logDB = {}
    for user in chat_urls:
        logDB[user] = {}
    p = Pool(20)
    i=0
    manager = Manager()
    db = manager.dict()
    for user in chat_urls:
        for url in chat_urls[user]:
            i+=1
            p.apply_async(addLogData, args=(url,db))
    p.close()
    p.join()
    out = db._getvalue()
    with open("rawChat.txt", "w") as outfile:
        for url in out:
            outfile.write(out[url] + "\n")
Developer: dmakian, Project: TwitchRNNBot, Lines: 33, Source: getTwitchData.py

Example 4: MultiProcessScheduler

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
class MultiProcessScheduler(LocalScheduler):
    def __init__(self, threads):
        LocalScheduler.__init__(self)
        self.threads = threads
        self.tasks = {}
        from multiprocessing import Pool
        self.pool = Pool(self.threads or 2)

    def start(self):
        pass

    def submitTasks(self, tasks):
        def callback(args):
            logger.debug("got answer: %s", args)
            tid, reason, result, update = args
            task = self.tasks.pop(tid)
            self.taskEnded(task, reason, result, update)

        for task in tasks:
            logger.debug("put task async: %s", task)
            self.tasks[task.id] = task
            self.pool.apply_async(run_task_in_process,
                [task, self.nextAttempId(), env.environ],
                callback=callback)

    def stop(self):
        self.pool.terminate()
        self.pool.join()
        logger.debug("process pool stopped")
Developer: Ssmithcr, Project: dpark, Lines: 31, Source: schedule.py
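A detail behind Example 4 (and Example 8 below): the callback handed to apply_async runs in a result-handler thread inside the parent process, not in a worker, which is why it can safely pop entries from self.tasks. The multiprocessing documentation asks that callbacks return quickly, since a slow callback blocks delivery of further results; a hedged sketch of the usual cheap-handoff pattern (names illustrative):

from queue import Queue

finished = Queue()

def callback(args):
    finished.put(args)  # cheap handoff; a separate consumer thread does any slow work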

Example 5: _run

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
    def _run(self, source, destination_format, clear_source=False, workers=-1):
        """
        Parallel version of the `convert` method.
        :param source: (rdf) files to convert (source path)
        :param destination_format: the destination format
        :param clear_source: if set, delete the source files. Default = False
        :param workers: number of worker processes; out-of-range values fall back to cpu_count()
        :return: None
        """

        files = []
        src = os.path.abspath(source)
        if os.path.isdir(src):
            files = [os.path.join(src, f) for f in os.listdir(src) if to_process(f, destination_format)]
        elif os.path.exists(src):
            files = [src]
        self._log.info('to process: {0}'.format(files))
        if clear_source:
            self._log.warn('will remove original files after conversion')

        def job_finished(res):
            print('.', end=' ')
            sys.stdout.flush()

        num_cpus = cpu_count()
        num_workers = workers if 0 < workers < num_cpus else num_cpus

        pool = Pool(processes=num_workers)

        for src in files:
            dst = dest_file_name(src, destination_format)
            if dst:
                pool.apply_async(convert_file, (src, dst, clear_source), callback=job_finished)

        pool.close()
        pool.join()
Developer: anukat2015, Project: rdftools, Lines: 37, Source: rdf2rdf.py

Example 6: handle

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
    def handle(self, *args, **options):

        pool = Pool(settings.NUM_THREADS)
        conf = settings.TRAINER_CURRENCY_CONFIG['supervised_nn']

        print("Starting V2 run")
        for ticker in conf['ticker']:
            for hidden_layers in conf['hidden_layers']:
                for min_back in conf['min_back']:
                    for epochs in conf['epochs']:
                        for granularity in conf['granularity']:
                            for datasetinputs in conf['datasetinputs']:
                                for bias in conf['bias']:
                                    for momentum in conf['momentum']:
                                        for learningrate in conf['learningrate']:
                                            for weightdecay in conf['weightdecay']:
                                                for recurrent in conf['recurrent']:
                                                    for timedelta_back_in_granularity_increments in \
                                                            conf['timedelta_back_in_granularity_increments']:
                                                        pool.apply_async(do_prediction_test, args=(
                                                            ticker, hidden_layers, min_back, epochs, granularity,
                                                            datasetinputs,
                                                            learningrate, bias, momentum, recurrent, weightdecay,
                                                            timedelta_back_in_granularity_increments
                                                        ))
        print("All V2 jobs queued")
        pool.close()
        pool.join()
        print("V2 run complete")
Developer: ChristianWitts, Project: pytrader, Lines: 31, Source: predict_many_v2.py
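The twelve nested loops in Example 6 simply enumerate a hyperparameter grid, and itertools.product can flatten the same enumeration. A sketch, under the assumption that do_prediction_test accepts these configuration names as keyword arguments:

from itertools import product

keys = ['ticker', 'hidden_layers', 'min_back', 'epochs', 'granularity',
        'datasetinputs', 'bias', 'momentum', 'learningrate', 'weightdecay',
        'recurrent', 'timedelta_back_in_granularity_increments']
for combo in product(*(conf[k] for k in keys)):
    pool.apply_async(do_prediction_test, kwds=dict(zip(keys, combo)))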

Example 7: get_classify

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
def get_classify():
    classify = {"type1": "美食", "type2": "休闲娱乐",  "type4-sub_type1": "酒店",  "type5": "购物",  "type6": "办卡送礼",
                "type7": "旅游",  "type9": "汽车",  "type10": "时尚丽人",  "type11": "生活服务",
                "type4-sub_type2": "出行", "type4-sub_type3": "出行"}
    for name in classify:
        total_num_of_page = get_num_of_page('http://www.rong360.com/credit/f-youhui-' + name)
        print(classify[name] + ": " + str(total_num_of_page))
        thread_num = 20  # number of worker processes
        section_size = 50
        section = total_num_of_page // section_size  # integer division
        if total_num_of_page % section_size > 0:
            section += 1

        for k in range(section):
            begin = k * section_size + 1
            end = begin + section_size - 1
            end = min(end, total_num_of_page)
            print "start to get summary pages from " + str(begin) + " to " + str(end) + \
                  ", each summary page contains 20 detail content pages."
            manager = multiprocessing.Manager()
            queue = manager.Queue()  # a queue storing index of url
            queue.put(begin - 1)  # Initialization of url index

            page_queue = manager.Queue()  # a queue storing end of urls

            # start multiprocess to get urls
            pool = Pool(thread_num)
            for i in range(thread_num):
                pool.apply_async(get_page_url, args=(queue, end, page_queue, '-' + name))
            pool.close()
            pool.join()
            print('num of total pages: ' + str(page_queue.qsize()))

            store_data.insert_column("classify", classify[name], page_queue)
Developer: PPSHA2016SummerInternProgram, Project: DealBridge, Lines: 36, Source: main.py
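Examples 3 and 7 both obtain their shared queue (and dict) from a Manager instead of using multiprocessing.Queue directly. That is necessary rather than stylistic: arguments to apply_async are pickled, and a plain multiprocessing.Queue raises a RuntimeError when passed to pool workers that way, while a manager proxy pickles cleanly. A minimal sketch:

from multiprocessing import Pool, Manager

def worker(q):
    q.put('done')

if __name__ == '__main__':
    with Manager() as manager:
        q = manager.Queue()  # proxy object: safe to pass through apply_async
        with Pool(2) as pool:
            pool.apply_async(worker, (q,)).get()  # .get() waits for the worker
        print(q.get())  # -> 'done'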

Example 8: MultiProcessScheduler

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
class MultiProcessScheduler(LocalScheduler):
    def __init__(self, threads):
        LocalScheduler.__init__(self)
        self.threads = threads
        self.tasks = {}
        from multiprocessing import Pool
        self.pool = Pool(self.threads or 2)

    def start(self):
        pass

    def submitTasks(self, tasks):
        total, self.finished = len(tasks), 0
        def callback(args):
            logger.debug("got answer: %s", args)
            tid, reason, result, update = args
            task = self.tasks.pop(tid)
            self.finished += 1
            logger.info("Task %s finished (%d/%d)        \x1b[1A",
                tid, self.finished, total)
            if self.finished == total:
                logger.info("\r" + " "*80 + "\x1b[1A") # erase the progress bar
            self.taskEnded(task, reason, result, update)

        for task in tasks:
            logger.debug("put task async: %s", task)
            self.tasks[task.id] = task
            self.pool.apply_async(run_task_in_process,
                [task, self.nextAttempId(), env.environ],
                callback=callback)

    def stop(self):
        self.pool.terminate()
        self.pool.join()
        logger.debug("process pool stopped")
Developer: Dshadowzh, Project: dpark, Lines: 37, Source: schedule.py

Example 9: run

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
def run(wait=0):
    """Starts the scrapping proccess.
    creates a process per week per year given in pages
    """

    logger = makeLogger('main', r'./logs_RotoFDStats/')
    
    startTime = datetime.now()
    
    logger.debug('start time: ' + str(startTime))
    logger.debug('waiting %d seconds', wait)
    time.sleep(wait)

    logger.debug('starting')
    pool = Pool(processes=int(get_proxy_count()/2))

    pages = [(2011, 17), (2012, 17), (2013, 17), (2014, 17), (2015, 17)]
        
    for year, maxWeek in pages:
        for week in range(1, maxWeek+1):
            #parseWeek(year, week)
            pool.apply_async(parseWeek,(year, week,))

    pool.close() #Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
    pool.join() #Wait for the worker processes to exit. One must call close() or terminate() before using join().

    logger.debug('run time: ' + str(datetime.now()-startTime ))

    closeLogger('main')
Developer: hcastor, Project: football_data, Lines: 31, Source: getRotoFDStats.py

Example 10: evaluate_fitness

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
def evaluate_fitness(individuals, grammar, fitness, ave, mats, generation,
                     MATERIALS_FILE, LOAD, FITNESSES, DEBUG = False,
                     MULTI_CORE = True, PRINT = False):
    """Perform the mapping and evaluate each individual
    across multiple available cores"""
    if MULTI_CORE:
        cores = cpu_count() #   use all available cores
        pool = Pool(processes=cores)
        for name, ind in enumerate(individuals):
            bind = (name, fitness, ind, grammar, ave, mats, generation, LOAD,
                    MATERIALS_FILE, FITNESSES, DEBUG, PRINT)
            # Perform the mapping for each individual
            pool.apply_async(parallelize_indivs, args = (bind, ),
                             callback = ind.save_result)
        pool.close()    
        pool.join()
    else:
        for name, ind in enumerate(individuals):
            bind = (name, fitness, ind, grammar, ave, mats, generation, LOAD,
                    MATERIALS_FILE, FITNESSES, DEBUG, PRINT)
            parallelize_indivs(bind)
    counter = 0
    pounder = 0
    for ind in individuals:
        if ind.phenotype is None:
            # retry the mapping once, serially, for failed individuals
            bind = (1, fitness, ind, grammar, ave, mats, generation, LOAD,
                    MATERIALS_FILE, FITNESSES, DEBUG, PRINT)
            parallelize_indivs(bind)
            if ind.phenotype is None:
                counter += 1
        if ind.good:
            pounder += 1
    if counter:
        print("Number of individuals with no phenotype:", counter)
Developer: mikefenton, Project: SEOIGE, Lines: 36, Source: evolver.py

Example 11: main

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
def main():
    search_dir = sys.argv[1]
    assert(os.path.isdir(search_dir))

    files_to_convert = []
    for directory_root, directory, filenames in os.walk(search_dir):
        for filename in filenames:
            full_filename = os.path.join(directory_root,filename)
            if full_filename.endswith('.entities.gz'):
                files_to_convert.append(full_filename)
            if full_filename.endswith('.generations.gz'):
                files_to_convert.append(full_filename)

    print('Found %d files to convert' % len(files_to_convert))

    r = library.Reporter('converting files')
    r.set_total_count(len(files_to_convert))
    pool = Pool()
    for f in files_to_convert:
        pool.apply_async(convert_file, args=(f,), callback=r.increment_report_callback)

    pool.close()
    pool.join()
    r.done()
Developer: Kortemme-Lab, Project: sequence-tolerance, Lines: 28, Source: convert_new_seqtol_to_old.py

Example 12: test_run

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
def test_run(force=False):
    if not args.test_run and not force:
        return

    if not os.path.exists(dir_test):
        os.mkdir(dir_test)

    number_device = len(devices)
    if number_device < 1:
        error('Please ensure test device is connected')

    # Build test
    if args.test_drybuild:
        results = {}
        for command in test_suite:
            results[command] = []
            for suite in test_suite[command]:
                results[command].append('PASS')
    else:
        results = test_build(force=True)

    pool = Pool(processes=number_device)
    for index, device in enumerate(devices):
        pool.apply_async(_test_run_device, (index, results))
    pool.close()
    pool.join()
Developer: zhongzhuyang, Project: share, Lines: 28, Source: x64-upstream.py

Example 13: test

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
def test():
  pool = Pool(len(ip_list))
  for ip in ip_list:
    pool.apply_async(start_collect, (ip, '/letv/crawler_delta', './test_in'))
  print('waiting for all file collectors to finish...')
  pool.close()
  pool.join()
Developer: cfhb, Project: crawl_youtube, Lines: 9, Source: file_collector.py

Example 14: runPortScan

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
    def runPortScan(self, ips, threads, allports=''):
        results = []
        rep = re.match(r'(\w+)-(\w+)', allports)
        if rep:
            # a range such as "1-1024" scans every port in between
            left = int(rep.group(1))
            right = int(rep.group(2)) + 1
            ports = range(left, right)
        else:
            # otherwise expect a comma-separated list, falling back to the defaults
            if allports == '':
                allports = self.default_port
            ports = allports.replace(' ', '').split(',')
        for ip in ips:
            pool = Pool(threads)
            for port in ports:
                pool.apply_async(func=self.simpleScan, args=(ip, str(port)), callback=self.callback)
            pool.close()
            pool.join()
            ipport = {}
            ipport['ip'] = ip
            ipport['port'] = self.openport
            results.append(ipport)
            self.openport = ''
            self.first = 0
        return results
Developer: jokeywhy, Project: domaildeal, Lines: 29, Source: portscan.py

Example 15: recursive_download_dir

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import apply_async [as alias]
def recursive_download_dir(seed, depth, dir, root):
    if not os.path.exists(root):
        os.mkdir(root)
    # Python 3: urllib.request.urlopen; the response body must be decoded
    f = urllib.request.urlopen(seed + dir)
    text = f.read().decode('utf-8')

    parser = CaidaParser()
    parser.feed(text)

    p = Pool(5)

    for e in parser.file:
        print('--' * depth, e)  # depth marker, then the file name
        p.apply_async(download, args=(dir, e, seed + dir + e, root))
        # sequential alternative: download(dir, e, seed + dir + e, root)

    p.close()
    p.join()

    for e in parser.dir:
        print('--' * depth, e)
        if not os.path.exists(root + dir + e):
            os.mkdir(root + dir + e)

        recursive_download_dir(seed, depth + 1, dir + e, root)
Developer: johnsonyue, Project: downloader, Lines: 31, Source: downloader.py


Note: the multiprocessing.Pool.apply_async examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.