

Python ThreadPool.map_async Method Code Examples

This article collects and summarizes typical code examples of the multiprocessing.pool.ThreadPool.map_async method in Python. If you are wrestling with questions such as: What exactly does ThreadPool.map_async do? How is ThreadPool.map_async used? Where can I find real examples of ThreadPool.map_async? then the curated method examples below may help. You can also explore further usage examples of the containing class, multiprocessing.pool.ThreadPool.


The following section presents 15 code examples of the ThreadPool.map_async method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
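Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic map_async call sequence: submit the work, close the pool, then wait on the returned AsyncResult. The square helper is a hypothetical stand-in for whatever blocking task you actually want to run in worker threads.

from multiprocessing.pool import ThreadPool

def square(x):
    # Stand-in for any blocking or I/O-bound task.
    return x * x

pool = ThreadPool(processes=4)
result = pool.map_async(square, range(10))  # returns an AsyncResult immediately
pool.close()                                # no further tasks will be submitted
print(result.get(timeout=10))               # block until all results are ready
pool.join()

As in most of the examples below, close() is called before get() so the pool stops accepting new work while the pending tasks finish.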

Example 1: _parallel_execute

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
def _parallel_execute(datasources, options, outs_dir, pabot_args, suite_names):
    original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
    pool = ThreadPool(pabot_args['processes'])
    if pabot_args.get("vectors"):
        result = pool.map_async(execute_and_wait_with,
                    [(datasources,
                     outs_dir,
                     options,
                     suite,
                     pabot_args['command'],
                     pabot_args['verbose'],
                     vector)
                    for suite in suite_names
                    for vector in pabot_args['vectors']])
    else:
        result = pool.map_async(execute_and_wait_with,
                    [(datasources,
                     outs_dir,
                     options,
                     suite,
                     pabot_args['command'],
                     pabot_args['verbose'],
                     None)
                    for suite in suite_names])
    pool.close()
    while not result.ready():
        # The keyboard interrupt handler runs in the main thread; this polling loop gives it a chance to execute.
        try:
            time.sleep(0.1)
        except IOError:
            keyboard_interrupt()
    signal.signal(signal.SIGINT, original_signal_handler)
Developer: roamingunner, Project: rf-libs, Lines: 34, Source file: pabot.py

Example 2: got_password_entries

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
 def got_password_entries(self):
     if GlobalState.options.no_password_policies:
         self.controller.show_panel(views.ChoosePasswordsPanel)
         return
     def check_password_update_endpoint(login):
         print time.time(), threading.current_thread()
         if not login.get('domain'):
             return None
         print "checking", login['domain']
         scheme = login['scheme'] if GlobalState.options.ssl_not_required else 'https'
         announce_url = "%s://%s/.well-known/password-policy" % (scheme, login['domain'])
         try:
             result = requests.get(announce_url, verify=True, allow_redirects=False, timeout=5)
         except Exception as e:
             print e
             return
         if result.status_code != 200:
             return
         try:
             data = yaml.load(result.content)
         except Exception as e:
             print e
             return
         if not type(data)==dict or not data.get('endpoint') or not data['endpoint'].startswith('/'):
             return
         login['rule'] = PasswordEndpointRule(login['domain'], announce_url, data)
         print "got", login['domain'], data
     def check_complete(results):
         print "DONE"
         wx.CallAfter(self.controller.show_panel, views.ChoosePasswordsPanel)
     pool = ThreadPool(processes=50)
     pool.map_async(check_password_update_endpoint, GlobalState.logins, callback=check_complete)
     pub.sendMessage('wait')
Developer: jcushman, Project: password_change_tool, Lines: 35, Source file: main.py

Example 3: addBounties

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
def addBounties(bounties):
    """Add a list of bounties in parallel using multiprocessing.Pool for verification"""
    from multiprocessing.pool import ThreadPool
    pool = ThreadPool()
    safeprint("Mapping verifications", verbosity=3)
    async = pool.map_async(verify, bounties)  # defer this for possible efficiency boost
    internal = pool.map(internalVerify, bounties)
    safeprint("Waiting for verifications", verbosity=3)
    external = async.get()
    safeprint("Received verifications", verbosity=3)
    rvals = []
    safeprint(internal)
    safeprint(external)
    for i in range(len(bounties)):
        safeprint("Finishing the processing of bounty " + str(i+1) + "/" + str(len(bounties)), verbosity=2)
        if not internal[i]:
            rvals.append(-3)
        elif not external[i]:
            rvals.append(-2)
        elif bounties[i] in bountyList:
            rvals.append(-1)
        elif internal[i] == -1:
            rvals.append(0)
        else:
            rvals.append(1)
            addValidBounty(bounties[i])
        safeprint("Passed first if", verbosity=3)
    safeprint("Verifications parsed", verbosity=3)
    return rvals
Developer: gappleto97, Project: Senior-Project, Lines: 31, Source file: bounty.py

Example 4: main

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
def main():
    ACCESS_TOKEN = os.getenv('MIXIA_ACCESS_TOKEN')
    if not ACCESS_TOKEN:
        raise FetchFailed("`MIXIA_ACCESS_TOKEN` not found.")

    user = account.MiXiaUser.from_access_token(ACCESS_TOKEN)
    client = user.mixia_client

    try:
        album_ids = sys.argv[1:]
    except Exception:
        raise FetchFailed("Album id not found.")

    for aid in album_ids:
        thread_pool = ThreadPool(processes=10)
        album = song.MiXiaAlbum.from_id(aid, client)
        thread_pool.map_async(lambda s: s.fetch_detail(client, consts.TRACK_HIGH_QUALITY),
                              album.songs)
        thread_pool.close()
        thread_pool.join()

        ensure_dir(str(album.album_id))
        album_logo_resp = requests.get(
            album.big_logo,
            headers={
                'User-Agent':
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
            })
        album_logo_resp.raise_for_status()
        album_logo = album_logo_resp.content
        for s in album.songs:
            detail = s.track_detail
            filename = './{}/{}.mp3'.format(album.album_id, s.song_id)
            print filename
            subprocess.call(['wget', '-O', filename, detail.track_url])

            song_name = '{}_{}_{}'.format(detail.cd_serial, detail.track,
                                          detail.song_name.replace('/', '_'))

            if not eyed3:
                print "no eyed3, skip update ID3."
                os.rename(filename, os.path.join(album.album_id, song_name))
                continue

            song_id3 = eyed3.load(filename)
            song_id3.initTag()
            song_id3.rename(song_name)
            song_id3.tag.images.set(type_=3, img_data=album_logo, mime_type='image/jpeg')
            song_id3.tag.title = detail.song_name
            song_id3.tag.album = detail.album_name
            song_id3.tag.album_artist = detail.artist_name
            song_id3.tag.artist = detail.artist_name
            song_id3.tag.disc_num = (detail.cd_serial, album.cd_count)
            song_id3.tag.track_num = (detail.track, album.song_count)

            song_id3.tag.save()
Developer: realityone, Project: MiXiaBatch, Lines: 58, Source file: album_fetcher.py

Example 5: main

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
def main(*xunitfile_and_result_dirs):
    tests = []
    for xunit_filename, result_dir in xunitfile_and_result_dirs:
        test_dir = os.path.dirname(os.path.abspath(xunit_filename))

        tree = ElementTree.parse(xunit_filename)
        root = tree.getroot()
        assemblies = root.findall('.//assembly')

        for filename in (node.attrib['filename'] for node in assemblies):
            tests.append((filename, test_dir, result_dir))

    threads = ThreadPool()
    threads.map_async(star_test, tests).get()
Developer: pombredanne, Project: metamorphosys-desktop, Lines: 16, Source file: run_tests_console_output_xml_parallel.py

Example 6: _parallel_execute

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
def _parallel_execute(datasources, options, outs_dir, pabot_args, suite_names):
    original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
    pool = ThreadPool(pabot_args['processes'])
    if (pabot_args.has_key("hostsfile")):
        hosts = [host.rstrip('\r\n') for host in open(pabot_args["hostsfile"])]
    else:
        hosts = None
    if pabot_args["verbose"]:
        print [(suite,host) for (suite,host) in TestsuitesHosts(suite_names, hosts)]
    result = pool.map_async(execute_and_wait_with,
               [(datasources,
                 outs_dir,
                 options,
                 suite,
                 pabot_args['command'],
                 pabot_args['verbose'],
                 host)
                for (suite,host) in TestsuitesHosts(suite_names, hosts)])
    pool.close()
    while not result.ready():
        # The keyboard interrupt handler runs in the main thread; this polling loop gives it a chance to execute.
        try:
            time.sleep(0.1)
        except IOError:
            keyboard_interrupt()
    signal.signal(signal.SIGINT, original_signal_handler)
Developer: tangkun75, Project: pabot, Lines: 28, Source file: pabot.py

Example 7: check_online_streams

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
    def check_online_streams(self):
        self.all_streams_offline = True
        self.set_status(' Checking online streams...')

        done_queue   = queue.Queue()

        def check_stream_managed(args):
            url, queue = args
            status = self._check_stream(url)
            done_queue.put(url)
            return status

        pool = Pool(self.config.CHECK_ONLINE_THREADS)
        args = [(s['url'], done_queue) for s in self.streams]
        statuses = pool.map_async(check_stream_managed, args)
        n_streams = len(self.streams)

        while not statuses.ready():
            sleep(0.1)
            self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
            self.s.refresh()

        statuses = statuses.get()
        for i, s in enumerate(self.streams):
            s['online'] = statuses[i]
            if s['online']:
                self.all_streams_offline = False

        self.refilter_streams()
        self.last_autocheck = int(time())

        pool.close()
Developer: drunkrx, Project: livestreamer-curses, Lines: 34, Source file: streamlist.py

Example 8: launch_parallel_tests

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
    def launch_parallel_tests(self):
        image_name = "django_parallel_tests/%s" % self.project_name
        if len(self.docker.images(name=image_name)) == 0:
            self.build_image()

        req_hash = hashlib.sha224(str(sorted(self.requirements))).hexdigest()
        try:
            last_req_hash = open(".last_requirements").read().strip()
        except:
            last_req_hash = None

        if req_hash != last_req_hash:
            self.build_image()
            with open(".last_requirements", "w") as f:
                f.write(req_hash)

        pool = ThreadPool()
        tests = [[test] for test in self.tests]
        run_tests = partial(run_tests_for_project, self.project_name)

        result = pool.map_async(run_tests, tests)
        try:
            while True:
                time.sleep(0.1)
                if result.ready():
                    print "got result", result.get()
                    return
        except KeyboardInterrupt:
            pool.terminate()
            pool.join()
        else:
            pool.close()
            pool.join()
Developer: Gautier, Project: django_parallel_tests, Lines: 35, Source file: runner.py

Example 9: ThreadMailer

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
class ThreadMailer(object):

    def __init__(self, message, mailer):
        self.message = message
        self.mailer = mailer
        self.threadPool = ThreadPool()

    def initMessage(self, *args, **kwargs):
        message = Message(*args, **kwargs)
        return message

    def initMailer(self, host, user, pwd):
        mailer = Mailer(host)
        mailer.login(user, pwd)
        return mailer

    def sendEmail(self, receiver=None):
        if receiver:
            self.message.To = receiver
        result = self.mailer.send(self.message)
        return result

    def send(self):
        result = self.threadPool.map_async(self.sendEmail, self.message.To)
        _g = None
        try:
            _g = result.get()
        except Exception, e:
            logger.error("send mail error.")
        return _g
Developer: wangxunxun, Project: tornadoapp, Lines: 32, Source file: mailer.py

Example 10: get

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
    def get(self):
        key = self.request.get('key')
        if key:
            next_key, result = channel.read(key)
            response = json.dumps(dict(
                next_key=next_key,
                result=result,
            ))
            self.response.write(response)
            return

        handles = self.request.get_all('handles')
        key = channel.create()
        pool = ThreadPool(len(handles))
        pool.map_async(lambda handle: get_last_tweet(key, handle), handles)
        response = dict(next_key=key)
        self.response.write(json.dumps(response))
Developer: christopherhesse, Project: python-longpoll-example, Lines: 19, Source file: channel-server.py

Example 11: index

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
def index(request):
    """
    Pretty much runs a Map-Reduce job on the Title-Value Sets
    
    return_objs looks like:
    [
        {T1: [V1, V2, V3]},
        {T2: [V1, V2, V3]},
        {T3: [V1, V2, V3]},
        {T4: [V1, V2, V3]},
    ]
    """
    # Limit represents the number of times the title occurs on amazon.com's query sets. Used for filtering by weight. 
    return_objs = []
    touched_titles = []
    limit = 1000
    
    def append_title(title):
        """
        Appends the title to the array along with the value set belonging to it. 
        """
        touched_titles.append(title)
        touched_values = []
        
        matched_values = Value.objects.filter(query_title__name=title).exclude(name=None).values("name")
        
        if len(matched_values) <= limit:
            print "Returned"
            return
        
        def append_value(value):
            touched_values.append(value["name"])

        # Appends the value to the value set if it's not None. 
        map(append_value, filter(lambda x: x != "None", matched_values.values()))
        
        # Sets the value set of the title if the value set is not empty.  
        if len(touched_values) > 0:
            touched_values = list(set(touched_values))
            touched_values.append(unicode("None", "utf-8"))
            print "Added Values"
            return_objs.append({
                title: reversed(touched_values)
                })

    # Loads all values for the queried titles into RAM
    titles = Title.objects.all().exclude(name=None).prefetch_related('values').values('name').distinct()
    # Makes a unique set of title strings 
    titles = list(set([title["name"] for title in titles]))
    
    # Multithreads the queried titles to grab each value set for the title.
    pool = ThreadPool()
    res = pool.map_async(append_title, titles)
    m = res.get()
    pool.close()
    
    # Returns the object to the Django template as a dictionary.
    return render_to_response("list.html", dict(list_titles=return_objs), context_instance=RequestContext(request))
Developer: RobertErvin, Project: amazon-project, Lines: 60, Source file: views.py

Example 12: get_list

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
    def get_list(self):
        def validate_distro(distro):
            if check_url_path(distro['path']):
                return distro['name']

        n_processes = len(self.distros.keys())
        pool = ThreadPool(processes=n_processes)
        map_res = pool.map_async(validate_distro, self.distros.values())
        pool.close()
        pool.join()
        res = list(set(map_res.get()) - set([None]))
        return sorted(res)
Developer: SkyWei, Project: kimchi, Lines: 14, Source file: config.py

Example 13: _parallel_execute

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
def _parallel_execute(items, processes):
    original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
    pool = ThreadPool(processes)
    result = pool.map_async(execute_and_wait_with, items, 1)
    pool.close()
    while not result.ready():
        # The keyboard interrupt handler runs in the main thread;
        # this polling loop gives it a chance to execute.
        try:
            time.sleep(0.1)
        except IOError:
            keyboard_interrupt()
    signal.signal(signal.SIGINT, original_signal_handler)
Developer: mkorpela, Project: pabot, Lines: 15, Source file: pabot.py

Example 14: run

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
    def run(self):
        if not self.platforms:
            raise RuntimeError("No enabled platform to build on")
        self.set_build_image()

        thread_pool = ThreadPool(len(self.platforms))
        result = thread_pool.map_async(self.select_and_start_cluster, self.platforms)

        try:
            result.get()
        # Always clean up worker builds on any error to avoid
        # runaway worker builds (includes orchestrator build cancellation)
        except Exception:
            thread_pool.terminate()
            self.log.info('build cancelled, cancelling worker builds')
            if self.worker_builds:
                ThreadPool(len(self.worker_builds)).map(
                    lambda bi: bi.cancel_build(), self.worker_builds)
            while not result.ready():
                result.wait(1)
            raise
        else:
            thread_pool.close()
            thread_pool.join()

        annotations = {'worker-builds': {
            build_info.platform: build_info.get_annotations()
            for build_info in self.worker_builds if build_info.build
        }}

        self._apply_repositories(annotations)

        labels = self._make_labels()

        fail_reasons = {
            build_info.platform: build_info.get_fail_reason()
            for build_info in self.worker_builds
            if not build_info.build or not build_info.build.is_succeeded()
        }

        workspace = self.workflow.plugin_workspace.setdefault(self.key, {})
        workspace[WORKSPACE_KEY_UPLOAD_DIR] = self.koji_upload_dir
        workspace[WORKSPACE_KEY_BUILD_INFO] = {build_info.platform: build_info
                                               for build_info in self.worker_builds}

        if fail_reasons:
            return BuildResult(fail_reason=json.dumps(fail_reasons),
                               annotations=annotations, labels=labels)

        return BuildResult.make_remote_image_result(annotations, labels=labels)
Developer: projectatomic, Project: atomic-reactor, Lines: 52, Source file: build_orchestrate_build.py

Example 15: execute

# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import map_async [as alias]
def execute(filename):
    def run(size):
        # for size in size_block:
        cmd = ['cjpeg', '-q', '-n {}'.format(size),
               '--no-save', '{}'.format(filename)]

        try:
            process = subprocess.Popen(cmd)
            process.wait()
        except Exception:
            pass

    runner = ThreadPool(processes=_MAX_THREADS)
    result = runner.map_async(run, size_block)
    result.wait()
Developer: Ziul, Project: custom-jpeg, Lines: 17, Source file: tests.py


Note: The multiprocessing.pool.ThreadPool.map_async method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not republish without permission.