

Python ThreadPool.map Method Code Examples

This article collects typical usage examples of the multiprocessing.pool.ThreadPool.map method in Python. If you are wondering what ThreadPool.map does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, multiprocessing.pool.ThreadPool.


Below are 15 code examples of the ThreadPool.map method, sorted by popularity by default.
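
Before diving into the examples, here is a minimal, self-contained sketch of the basic pattern they all share (the worker function and input list are illustrative placeholders, not taken from any of the projects below):

# Minimal ThreadPool.map pattern (illustrative sketch; 'square' and the
# input list are placeholders, not from any of the projects below).
from multiprocessing.pool import ThreadPool

def square(n):
    return n * n

pool = ThreadPool(processes=4)               # pool of 4 worker threads
results = pool.map(square, [1, 2, 3, 4, 5])  # blocks until every call returns
pool.close()                                 # no more tasks will be submitted
pool.join()                                  # wait for the workers to exit
print(results)                               # [1, 4, 9, 16, 25]

map dispatches each element of the iterable to a worker thread and returns the results in input order; because it blocks until all calls finish, most of the examples below can call pool.close() immediately afterwards.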

Example 1: get_next_n

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
def get_next_n(conn, limit=0):
    """Gets duplicate ids for n bugs and stores it in PostgreSQL database.

    Args:
        conn: Psycopg2 connection to PostgreSQL database.
        limit (int): Number of bugs to get duplicates for.

    Returns:
        None.
    """
    cur = conn.cursor()
    cur.execute("SET TIME ZONE 'UTC';")

    query = """
            SELECT f.id
            FROM final f
                LEFT OUTER JOIN duplicates d
                    ON f.id = d.id
            WHERE f.resolution_final = 'duplicate'
                AND d.id IS NULL
            LIMIT %s
            """
    cur_report = conn.cursor()
    # pass 'limit' as a bound parameter instead of formatting it into the SQL string
    cur_report.execute(query, (limit,))

    # use threads to parallelize API requests
    pool = ThreadPool(10)
    bugs = cur_report.fetchall()
    get_bug_desc_conn = partial(get_duplicate_info, conn)  # partial comes from functools
    pool.map(get_bug_desc_conn, bugs)
    pool.close()
    return
Author: alex-miller-0, Project: bugs, Lines: 34, Source: get_duplicates.py

Example 2: analyze_commits

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
def analyze_commits(project_name, target_repo, existing_target_branches, fork_list):
    print 'Analyzing commits'

    pool = ThreadPool(processes=10)

    existing_target_commits = []

    for fork_repo in fork_list:
        for target_branch in existing_target_branches:

            print '    Analyzing %s (branch: %s) ' % (fork_repo.full_name, target_branch),
            fork_repo_commits = fork_repo.get_commits(sha=target_branch)

            max_commits_to_analyze = 30
            analyzed_commits = 0

            fork_commits_to_analyze = []

            for fork_comm in fork_repo_commits:
                if analyzed_commits == max_commits_to_analyze:
                    break

                fork_commits_to_analyze.append(fork_comm)

                analyzed_commits += 1

            partial_c_in_root = functools.partial(commit_is_in_root,
                                                  existing_target_commits,
                                                  target_repo, fork_repo)

            pool.map(partial_c_in_root, fork_commits_to_analyze)
            print
Author: andresriancho, Project: w3af-misc, Lines: 34, Source: fork_info.py

Example 3: main

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
def main(dir_path, outfile_path, is_journal=True):
    pn = 20
    flst = os.listdir(dir_path)
    arglst = []
    ret = dict()
    for i in range(pn):
        beg = int(math.ceil(float(len(flst)) / pn * i))
        end = int(math.ceil(float(len(flst)) / pn * (i + 1)))
        if i == 0:
            beg = 0
        if i == pn - 1:  # the last chunk takes any remainder
            end = len(flst)
        arglst.append([dir_path, is_journal, beg, end, i, ret])
    pool = ThreadPool(pn)
    pool.map(job_map, arglst)
    pool.close()
    pool.join()
    print(80 * '=')
    print('[acmdl]: map finished')
    print(80 * '=')
    job_reduce(ret, outfile_path)
    print(80 * '=')
    print('[acmdl]: reduce finished')
    print(80 * '=')
    return
Author: dragonxlwang, Project: s3e, Lines: 27, Source: acm_proc.py

Example 4: run_test_case_list

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
  def run_test_case_list(
      self, test_case_list, max_concurrent, timeout_ok=False,
      max_retries=0, retry_interval_secs=5, full_trace=False):
    """Run a list of test cases.

    Args:
      test_case_list: [list of OperationContract] Specifies the tests to run.
      max_concurrent: [int] The number of cases that can be run concurrently.
      timeout_ok: [bool] If True then individual tests can timeout and still
         be considered having a successful AgentOperationStatus.
      max_retries: [int] Number of independent retries permitted on
         individual operations if the operation status fails. A value of 0
         indicates that a test should only be given a single attempt.
      retry_interval_secs: [int] Time between retries of individual operations.
      full_trace: [bool] If True then provide detailed execution tracing.
    """
    num_threads = min(max_concurrent, len(test_case_list))
    pool = ThreadPool(processes=num_threads)
    def run_one(test_case):
      """Helper function to run individual tests."""
      self.run_test_case(
          test_case=test_case, timeout_ok=timeout_ok,
          max_retries=max_retries, retry_interval_secs=retry_interval_secs,
          full_trace=full_trace)

    self.logger.info(
        'Running %d tests across %d threads.',
        len(test_case_list), num_threads)
    pool.map(run_one, test_case_list)
    self.logger.info('Finished %d tests.', len(test_case_list))
Author: jtk54, Project: citest, Lines: 32, Source: agent_test_case.py

Example 5: StartInfrastructure

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
	def StartInfrastructure(inf_id, auth):
		"""
		Start all virtual machines in an infrastructure previously stopped.

		Args:

		- inf_id(str): infrastructure id.
		- auth(Authentication): parsed authentication tokens.

		Return(str): error messages; empty string means all was ok.
		"""

		InfrastructureManager.logger.info("Starting the infrastructure id: " + str(inf_id))

		sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
		exceptions = []
		if Config.MAX_SIMULTANEOUS_LAUNCHES > 1:
			pool = ThreadPool(processes=Config.MAX_SIMULTANEOUS_LAUNCHES)
			pool.map(
				lambda vm: InfrastructureManager._start_vm(vm, auth, exceptions), 
				reversed(sel_inf.get_vm_list())
				)
		else:
			for vm in sel_inf.get_vm_list():
				InfrastructureManager._start_vm(vm, auth, exceptions)

		if exceptions:
			msg = ""
			for e in exceptions:
				msg += str(e) + "\n"
			raise Exception("Error starting the infrastructure: %s" % msg)

		InfrastructureManager.logger.info("Infrastructure successfully restarted")
		return ""
Author: lxhiguera, Project: im, Lines: 36, Source: InfrastructureManager.py

Example 6: test

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
def test(my_test_cases):
    try:
        logfile = edex_tools.find_latest_log()
    except OSError as e:
        log.error('Error fetching latest log file - %s', e)
        return {}

    total_timeout = 0
    count = 0
    sc = {}
    pool = ThreadPool(MAX_THREADS)

    pool.map(execute_test, my_test_cases)

    for tc in my_test_cases:
        total_timeout += tc.timeout
        count += tc.count

    # wait for all ingestion to complete
    if not edex_tools.watch_log_for('EDEX - Ingest complete for file', logfile=logfile,
                                    expected_count=count, timeout=total_timeout):
        log.error('Timed out waiting for ingest complete message')

    log.info('All files ingested, testing results')

    for tc in pool.map(evaluate_test_case, my_test_cases):
        sc.update(tc)

    return sc
Author: emilyhahn, Project: ooi-tools, Lines: 31, Source: validate_dataset.py

Example 7: _load_lyrics

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
    def _load_lyrics(self, songdict):
        total = []
        for songlist in songdict.values():
            total += songlist

        pool = ThreadPool()
        pool.map(Song.load, total)
Author: ajm188, Project: fugl, Lines: 9, Source: tswizzle.py

Example 8: test_multiple_concurrent_request_on_same_client

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
    def test_multiple_concurrent_request_on_same_client(self):
        import time
        from multiprocessing.pool import ThreadPool

        send_msg = u'test ฟนำีฟนำีฟนำีฟนำี'
        server_start = Event()

        def on_message(msg):
            self.assertEqual(msg, send_msg)

        def server():
            PostofficeServer(ip='0.0.0.0', port=4000, on_message=on_message, after_start_cb=lambda: server_start.set())

        from multiprocessing import Process
        p = Process(target=server)
        try:
            p.start()

            server_start.wait()

            c = PostofficeClient(ip='localhost', port=4000)

            def client(ith):
                c.send(send_msg)

            pool = ThreadPool(100)
            pool.map(client, [i for i in range(1000)])

            p.terminate()
            p.join()
        except Exception as e:
            # gracefully stop
            p.terminate()
            p.join()
            raise
Author: phizaz, Project: gaw, Lines: 37, Source: test_postoffice.py

Example 9: main

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
def main():
    # Run the Tales 
    pool = ThreadPool(processes=int(tcfg['Workers'].get('pool_size', 10)))
    pool.map(worker, tales)
    pool.close()
    pool.join()
Author: nickmaccarthy, Project: Tattle, Lines: 9, Source: tattled.py

Example 10: sync_with_s3

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
    def sync_with_s3(self):
        """
        Walk through our self.local_files list, and match them with the list
        of keys in the S3 bucket.
        """
        # Create a list to put all the files we're going to update
        self.update_list = []

        # Figure out which files need to be updated and upload all these files
        logger.debug("Comparing {} local files with {} bucket files".format(
            len(self.local_file_list),
            len(self.s3_obj_dict.keys())
        ))
        if self.no_pooling:
            [self.compare_local_file(f) for f in self.local_file_list]
        else:
            cpu_count = multiprocessing.cpu_count()
            logger.debug("Pooling local file comparison on {} CPUs".format(cpu_count))
            pool = ThreadPool(processes=cpu_count)
            pool.map(self.compare_local_file, self.local_file_list)

        logger.debug("Uploading {} new or updated files to bucket".format(len(self.update_list)))
        if self.no_pooling:
            [self.upload_to_s3(*u) for u in self.update_list]
        else:
            logger.debug("Pooling s3 uploads on {} CPUs".format(cpu_count))
            pool = ThreadPool(processes=cpu_count)
            pool.map(self.pooled_upload_to_s3, self.update_list)
Author: datadesk, Project: django-bakery, Lines: 30, Source: publish.py

Example 11: _fetch_all_packages

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
    def _fetch_all_packages():
        proxy = xmlrpclib.ServerProxy(options.url)
        results = proxy.search({'keywords': BASE_KEYWORD})

        last_update_pool = ThreadPool(processes=10)
        if results:
            last_update_pool.map(_fetch_last_update, results)
Author: patrickdepinguin, Project: tgwebsite, Lines: 9, Source: cogbin.py

Example 12: dowload_person

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
def dowload_person(person_url):
    print 'start to download person %s\n' % (person_url)
    person_pic_url = get_person_pic_url_Set(person_url)
    pool = ThreadPool(8)
    pool.map(download_pic,person_pic_url)
    pool.close()
    pool.join()
Author: wusphinx, Project: mmCrawler, Lines: 9, Source: mm_crawler.py

Example 13: run_trials

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
def run_trials():
    numTrials = 3000
    gens = 1000
    from multiprocessing.pool import ThreadPool as Pool
    pool = Pool(50)

    jids = pool.map(f,[gens]*numTrials)
    print "Done spawning trials. Retrieving results..."

    results = pool.map(cloud_result, jids)
    firstLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
    lastLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
    print "Done retrieving results. Press Enter to serialize..."

    raw_input()

    for i, result in enumerate(results):
        firstLocusFreqsHists[i, :], lastLocusFreqsHists[i, :] = result

    with closing(FileStorage("soda_results.durus")) as durus:
        conn = Connection(durus)
        conn.get_root()[str(int(floor(time.time())))] = (firstLocusFreqsHists, lastLocusFreqsHists)
        conn.commit()

    pool.close()
    pool.join()
Author: burjorjee, Project: speedyGApy, Lines: 28, Source: soda14.py

Example 14: _getGraphiteData

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
    def _getGraphiteData(self):

        def getData(metric):
            url = self._createGraphiteRequest(metric)

            s_time = time.time()
            data = []  # keep 'data' defined even when the request fails
            try:
                res = urllib2.urlopen(url)
                data = yaml.load(res)
            except Exception:
                pass
            finally:
                e_time = time.time() - s_time
                e_time = int(e_time * 1000)
                print "{time}ms\t{url}".format(time=e_time, url=url)

            if len(data) and 'datapoints' in data[0]:
                data = filter(lambda d: d[0] is not None, data[0]['datapoints'])

                if data:
                    len_data = len(data)
                    data = [ v / len_data for v in reduce(lambda x, y: [ x[0] + y[0], x[1] + y[1] ], data) ]
                    metric.update({ 'value': data[0], 'time': data[1] })

        pool = Pool(processes=50)
        pool.map(getData, self.metrics)
        pool.close()
        pool.join()
Author: sdgdsffdsfff, Project: graphite--zabbix, Lines: 30, Source: graphite-zabbix.py

Example 15: download_dumps

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map [as alias]
def download_dumps(dump_links, config):
    """
    Given a list of direct links to character dumps, this will grab each dump
    and save it to disk in DUMPDIR as <some random guid>.dmp.
    """
    destination_dir = config.dumpdir

    def write_dump(dump):
        filename = str(uuid.uuid4()) + "." + DUMP_EXT
        with open(os.path.join(destination_dir, filename), "w") as f:
            f.write(dump)

    def download_dump(dump_link):
        dump_page = requests.get(dump_link).text
        soup = BeautifulSoup(dump_page)
        dump = soup.find("pre").text
        write_dump(dump)

    print "Dumping {0} files to {1} using {2} threads.".format(
        len(dump_links), destination_dir, config.concurrency
    )

    if config.concurrency > 1:
        from multiprocessing.pool import ThreadPool
        pool = ThreadPool(processes=config.concurrency)
        pool.map(download_dump, dump_links)
    else:
        map(download_dump, dump_links)
Author: MichaelDiBernardo, Project: silscraper, Lines: 31, Source: sil_scraper.py


Note: The multiprocessing.pool.ThreadPool.map examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Please do not reproduce without permission.