

Python ThreadPool.wait Method Code Examples

This article collects typical usage examples of the threadpool.ThreadPool.wait method in Python. If you have been wondering what exactly ThreadPool.wait does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples of threadpool.ThreadPool, the class this method belongs to.


Below are 15 code examples of the ThreadPool.wait method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
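Before diving into the examples, here is a minimal, self-contained sketch of the pattern almost all of them follow (assuming the third-party threadpool package is installed; the worker function and callback names are illustrative): build requests with makeRequests, enqueue them with putRequest, then block on wait() until every request has finished.

import threadpool

def square(n):
    return n * n

def on_result(request, result):
    # result callbacks run in the thread that calls wait()/poll()
    print('request #%s -> %s' % (request.requestID, result))

pool = threadpool.ThreadPool(4)  # 4 worker threads
requests = threadpool.makeRequests(square, range(10), on_result)
for req in requests:
    pool.putRequest(req)
pool.wait()  # blocks until all queued requests have been processed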

Example 1: startWork

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
 def startWork(self, work, argsList, resultCallback=None):
   try:
     requests = makeRequests(work, argsList, resultCallback, None)
     job = ThreadPool(self.threadNum)
     for req in requests:
       job.putRequest(req)
     job.wait()
   except:
     print sys.exc_info()  # assumes `import sys`; a bare except hides the error type
Developer: JunfeiYang, Project: Python_project, Lines: 11, Source: login.py

Example 2: bfTest

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
def bfTest():
    pool = ThreadPool(100)
    for j in range(100):
        alltime = []  # presumably filled by the `threads` worker in the source project
        for i in range(bingfa):  # `bingfa` (concurrency level) and `portnum` are globals in the source file
            work = WorkRequest(threads, args=(int(random.random() * portnum) % portnum,))
            pool.putRequest(work)
            sleep((1.0 / bingfa) * random.random())
            # threading.Thread(target=threads, args=(i % portnum,)).start()
        pool.wait()
        printdata(alltime)
Developer: dabeike, Project: waf, Lines: 13, Source: test.py

Example 3: ConcurrentTestPool

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
class ConcurrentTestPool(Singleton):
    @synchronized_self
    def init(self):
        self.pool = ThreadPool(multiprocessing.cpu_count())

    @synchronized_self
    def put(self, callable_, args=None, kwds=None):
        self.pool.putRequest(WorkRequest(callable_, args=args, kwds=kwds))

    def join(self):
        self.pool.wait()
Developer: chrisforbes, Project: piglit, Lines: 13, Source: threads.py

Example 4: same_ms

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
def same_ms(product_id):
    data = {'product_id': product_id, 'address_id': '72858'}
    url = 'http://payment.ohsame.com/order_create'

    time_s = time.time()
    pool = ThreadPool(20)
    # 200 identical order-creation requests spread over 20 worker threads
    reqs = makeRequests(same_ms_req, [((url, data), {}) for i in range(200)], same_ms_callback)
    [pool.putRequest(req) for req in reqs]
    pool.wait()
    time_e = time.time()

    print('Flash-sale item: %s\n' % str(product_id))
    print('Flash-sale results: %s\n' % rs_ms)  # rs_ms is a module-level results list in the source
    print('Flash-sale time: %s\n' % (time_e - time_s))
Developer: litterzhang, Project: same, Lines: 16, Source: same.py

Example 5: refresh_tunnels

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
def refresh_tunnels(args):
    tunnels = db.store.find(Tunnel)
    if tunnels:
        pool = ThreadPool(tunnels.count())
        for tunnel in tunnels:
            request = WorkRequest(tunnel.check_available)
            pool.putRequest(request)

        pool.wait()
        
    for tunnel in tunnels:
        host = db.store.get(Host, tunnel.hostid)
        record = AvailabilityRecord.register(host, tunnel, check=False)
        print record
Developer: ZachGoldberg, Project: Personal-Cluster-Manager, Lines: 16, Source: refresh_tunnels.py

Example 6: start_thread

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
 def start_thread(self):
     args_list = []
     ips = self.parse_ip()
     for ip in ips:
         args = self.args.copy()
         args['ip'] = ip
         args_list.append(args)
     self.cui.w('Proxy Scanner started')
     self.cui.i('Nums: %s' % len(args_list))
     self.cui.i('Port: %s' % self.args['port'])
     self.cui.i('Thread: %s' % self.args['thread'])
     pool = ThreadPool(self.args['thread'])
     reqs = makeRequests(self.run, args_list)
     [pool.putRequest(req) for req in reqs]
     pool.wait()
Developer: green23, Project: ProxyValidator, Lines: 17, Source: ProxyValidator.py

Example 7: start_work

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
 def start_work(self, work, args_list, result_callback=None):
     outputs = dict()
     try:
         requests = makeRequests(work, args_list, result_callback,
                                 None)
         job = ThreadPool(self.thread_num)
         for req in requests:
             req.outputs = outputs
             job.putRequest(req)
         job.wait()
     except:
         traceback.print_exc()
     return outputs
Developer: younglibin, Project: mytestcode, Lines: 20, Source: login.py

Example 8: run_example

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
def run_example():
    num_workers = 3
    pool = ThreadPool(num_workers)

    # This variable tells us whether all threads worked or not. It is stored
    # in a mutable object (a list) because otherwise the inner function could
    # not modify it.
    success = [True]

    # The exception handler is not required, but if it's not used the error
    # will be silent.
    def exc_handler(work_request, exc_info):
        mldb.log(traceback.format_tb(exc_info[2]))
        exception_type = exc_info[0]
        exception_message = exc_info[1]
        mldb.log(str(exception_type) + ': ' + str(exception_message))
        success[0] = False

        # If there is an error, stop all threads as soon as possible
        pool.dismissWorkers(num_workers, do_join=True)

    # makeRequests takes, as a second argument, a list of tuples where the
    # first element is *args and the second one is **kwargs and where each
    # tuple represents a job to run.
    #
    # Here we schedule two jobs.
    requests = makeRequests(some_func, [(['patate'], {}), (['orange'], {})],
                            exc_callback=exc_handler)
    [pool.putRequest(req) for req in requests]

    # pool.wait will raise an exception if an error occurs in an early job
    # while more jobs still need to run. That is fine: if there is an error we
    # want to stop anyway.
    pool.wait()

    # If the error occurred in one of the last jobs, pool.wait will have worked,
    # so we need to check the flag anyway.
    if not success[0]:
        mldb.log("An error occurred")
        return

    # It is important (MLDBFB-470) to properly dismiss the workers
    pool.dismissWorkers(num_workers, do_join=True)
    mldb.log("Out of main thread")
Developer: BenjaminYu, Project: mldb, Lines: 45, Source: threading_example.py

Example 9: run

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
    def run(self):

        page = self.getPage(baseURL)
        linksTitleDict = self.getTitleList(page)

        # for i, link in enumerate(articleLinks):
        #     print('parsing links in title: ' + articleTitle[i])
        #     links = self.getHomePageList(link)

        pool = ThreadPool(10)

        requests = makeRequests(self.getHomePageList, linksTitleDict.keys())
        #[pool.putRequest(req) for req in requests]
        for req in requests:
            pool.putRequest(req)
            print(req)
        pool.wait()

        for link in self.links:
            print(link)
Developer: liukai545, Project: Algorithms-in-Python, Lines: 22, Source: qunfenxiang.py

Example 10: run_prod

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
def run_prod():
    cycle_count=1
    main = ThreadPool(num_workers=PARSER_THREAD_COUNT)
    while True:
        ADMIN_LOGGER.info("Starting cycle : "+str(cycle_count))
        reload(P_ROOT)
        process_list = [[e, __import__(P_ROOT.__name__ + '.' + e + '.main', fromlist=e)]  for e in P_ROOT.__all__]
        process_dict=dict(process_list)
        ADMIN_LOGGER.info("Executing process list : "+str(process_dict.items()))
        for proc_name in process_dict.keys():
            proc = getattr(process_dict.get(proc_name), 'Parser', 'None')
            main.putRequest(WorkRequest(proc_runner, args=(1, proc), callback=None))
            ADMIN_LOGGER.info("Started thread : " + proc_name)
            try:
                main.poll()
            except NoResultsPending:
                pass
            except:
                ADMIN_LOGGER.error(traceback.format_exc())
        main.wait()
        ADMIN_LOGGER.info("Sleeping for default LISTING_SLEEP_TIME : "+str(GLOBAL_SLEEP_TIME))
        time.sleep(GLOBAL_SLEEP_TIME)
        cycle_count= 1 if cycle_count > 9999 else cycle_count+1
Developer: anaved, Project: jobparser, Lines: 25, Source: proc_run.py
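A side note on the poll() call in the example above: in the threadpool module, poll() is the non-blocking counterpart of wait(). It dispatches callbacks for whatever results have arrived and raises NoResultsPending once no requests remain, which is why the example catches that exception. A minimal polling-loop sketch (the worker function is illustrative):

import time
import threadpool
from threadpool import NoResultsPending

pool = threadpool.ThreadPool(2)
for req in threadpool.makeRequests(lambda x: x * 2, range(4)):
    pool.putRequest(req)

while True:
    try:
        pool.poll()  # non-blocking: handle any finished requests
    except NoResultsPending:
        break        # every request has been processed
    time.sleep(0.1)  # avoid spinning; real code would do other work here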

Example 11: Ether

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
# (The source snippet starts mid-function; the enclosing `def` was truncated.)
    pkt = Ether(src="12:34:56:78:90:BE",dst=dl_dst)
    pkt /= inet.IP(dst='10.0.0.255', src='10.0.0.3')
    pkt /= inet.ICMP()
    sendpfast(pkt,file_cache=True)


def send_udp6():
    pkt = Ether(src="12:34:56:78:90:AE", dst=dl_dst)
    pkt /= inet6.IPv6(src='2001::2',dst='2001::4')
    pkt /= inet.UDP(sport=8888,dport=8888)
    sendp(pkt)


if __name__ == '__main__':
    pool = ThreadPool(24)
    pkts = []
    N = 100000
    if len(sys.argv) == 2:
        N = int(sys.argv[-1])

    #for _ in range(N):
    #    pkt = generate_pkt(generate_dl_src(10000),generate_ipv6_src(1))
    #    pkts.append(pkt)
    for _ in range(100):
        reqs = makeRequests(session_start, range(N/100))  # N/100 relies on Python 2 integer division
        [pool.putRequest(req) for req in reqs]
        time.sleep(2)

    pool.wait()

Developer: zy-sdn, Project: savi-test, Lines: 31, Source: host_spoof.py

Example 12: __init__

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
class MMCrawler:

    def __init__(self,threads,output,limits):
        self.threads = threads # number of worker threads
        self.output = output # directory where images are saved
        self.limits = limits  # cap on the number of images to fetch
        self.tasks = 0 # number of completed tasks
        self.stop = False # stop flag
        self.threadpool = ThreadPool(self.threads) # initialize the thread pool

    def recursion_category(self,category_index_page):
        """Recursively crawl every page of a category
        :param category_index_page: URL of the category index page
        """
        while(True):
            if self.stop: break
            self.fetch_category_page(category_index_page) # crawl the current page
            nextpage = self.get_next_category_page(category_index_page) # URL of the next page
            if nextpage != "":
                self.recursion_category(nextpage)
            else:
                break

    def fetch_category_page(self,category_page_url):
        """Crawl every image on a category page
        :param category_page_url: category page URL
        """
        suites  = self.fetch_suites(category_page_url)
        print "Photo sets on this category page:",len(suites)
        for suite_url in suites:
            suite_images = self.fetch_suite_images(suite_url)
            print "Images in this set:",len(suite_images)
            for image_url in suite_images:
                if not self.stop:
                    self.fetch_image(image_url)
                else:
                    return

    def get_next_category_page(self,category_page_url):
        """Get the URL of the category's next page
        :param category_page_url: category page URL
        """
        html            = helper.http_get(category_page_url)
        next_page_re    = re.compile('<a href=\'(index_\\d+.html)\'>></a>')
        next_page       = next_page_re.findall(html)
        if len(next_page) > 0:
            return urlparse.urljoin(category_page_url,next_page[0])
        return ""

    def fetch_suites(self,category_page_url):
        """Crawl every photo set on a category page
        :param category_page_url: category page URL
        """
        html = helper.http_get(category_page_url)
        real_urls = []
        link_re   = re.compile('<a.?href="(.*?)"')
        link_list = link_re.findall(html)
        for link in link_list:
            realurl = urlparse.urljoin(category_page_url,link)
            # filter out off-site links
            if  realurl.endswith(".html") and (realurl.find('/top/') == -1) :
                real_urls.append(realurl)
        return real_urls

    def fetch_suite_images(self,pageurl):
        """Get the list of all images in a photo set
        :param pageurl: URL of the set's first page
        """
        html = helper.http_get(pageurl)
        if html == "":  return []
        # get the image count
        image_count_re  = re.compile('</span>/(\\d+)</strong><a href=')
        image_count     = image_count_re.findall(html)
        image_count     = len(image_count) == 1 and image_count[0] or 0 # first match, or 0 if not found
        # the last page of a set lists every image URL, so fetching it is enough
        pageurl = pageurl[0:-5] + '-' + str(image_count) + '.html' # str() guards the integer fallback
        html    = helper.http_get(pageurl)
        if html == "":  return []
        # parse out all image URLs
        link_re   = re.compile('\]="(.*?)"')
        link_list = link_re.findall(html)
        link_list = map(lambda url: url.replace("big","pic"),link_list)
        return link_list

    def fetch_image(self,image_url):
        """Fetch an image from the network
        :param image_url: image URL
        """
        if not self.__completed():
            # create the directory the image will be saved into
            output_dir,filename = helper.make_subdir(self.output,image_url)
            realpath = os.path.join(output_dir,filename)
            # hand the download task to the thread pool
            self.threadpool.putRequest(WorkRequest(self.__retrieve_net_image,[realpath,image_url]))
        else:
            self.threadpool.wait()

    def __retrieve_net_image(self,realpath,image_url):
        """Download a network image and save it locally (this runs concurrently on multiple threads)
        :param realpath: absolute local path to save to
#.........some code omitted here.........
Developer: misoag, Project: mm_crawler, Lines: 103, Source: mm_crawler.py

Example 13: len

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
# (The source snippet begins mid-loop; earlier lines were truncated.)
                    photo_found = True
                    break

            if photo_found:
                continue
            else:
                print 'Queuing photo %i/%i, %s of album %s for moving.' % (photo_id + 1, len(all_photos), photo.get('title'), set_title)

            p_album = None
            for album in picasa_albums:
                if int(album.numphotosremaining.text) > 0:
                    album.numphotosremaining.text = str(int(album.numphotosremaining.text) - 1)
                    p_album = album
                    break
        
            req = WorkRequest(move_photo, [photo, p_album], {})
            threadpool.putRequest(req)
       
    
    threadpool.wait()
    
    
if __name__ == "__main__":
    
    print """
    This script will move all the photos and sets from flickr over to picasa. 
    That will require getting authentication information from both services...
    """
    random.seed(time.time())
    do_migration()
Developer: cthompso, Project: flickasa, Lines: 32, Source: flickasa.py

Example 14: getIpHostName

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# check whether an IP belongs to a search-engine spider  /  [email protected]  /  2012-03-22 / v0.2
#
import time
import socket
from threadpool import ThreadPool


def getIpHostName(arg):
    try:
        botkey = ['baidu', 'google', 'yahoo', 'msn']
        handle = socket.gethostbyaddr(arg)[0]
        if any(x in handle for x in botkey):
            print "%s-%s\n" % (arg, handle.split('.')[-2])
    except socket.herror:
        pass

if __name__ == "__main__":
    starttime = time.time()
    ipfiles = open('ip_list.txt', 'r')

    # start the threaded lookups
    # (note: push() and busy() are not part of the standard threadpool module's
    # API; this project appears to use its own ThreadPool wrapper)
    tp = ThreadPool(10)
    for cip in ipfiles.xreadlines():
        tp.push(getIpHostName, cip.strip())
    tp.wait()
    ipfiles.close()
    print tp.busy()
    print 'done, used: %s' % str(time.time() - starttime)
Developer: amxku, Project: checkIpSpider, Lines: 32, Source: checkIpSpider.py

Example 15: do_migration

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import wait [as alias]

#.........some code omitted here.........
            os.remove(filename)
            return

        print "Uploading photo %s of album %s to Picasa." % (flickr_photo.get("title"), picasa_album.title.text)

        if flickr_photo.get("media") == "photo":
            picasa_photo = gdata.photos.PhotoEntry()
        else:
            picasa_photo = VideoEntry()

        im = Image.open(filename)
        if im.size[0] > 2048 or im.size[1] > 2048:
            im.thumbnail((2048, 2048), Image.ANTIALIAS)
            im.save(filename)

        picasa_photo.title = atom.Title(text=flickr_photo.get("title"))
        picasa_photo.summary = atom.Summary(text=flickr_photo.get("description"), summary_type="text")
        photo_info = FLICKR.photos_getInfo(photo_id=flickr_photo.get("id")).find("photo")
        picasa_photo.media.keywords = gdata.media.Keywords()
        picasa_photo.media.keywords.text = ", ".join([t.get("raw") for t in photo_info.find("tags").getchildren()])
        picasa_photo.summary.text = photo_info.find("description").text

        if flickr_photo.get("media") == "photo":
            gd_client.InsertPhoto(
                picasa_album, picasa_photo, filename, content_type=headers.get("content-type", "image/jpeg")
            )
        else:
            gd_client.InsertVideo(
                picasa_album, picasa_photo, filename, content_type=headers.get("content-type", "video/avi")
            )

        print "Upload Finished of %s for album %s." % (flickr_photo.get("title"), picasa_album.title.text)

        os.close(fd)
        os.remove(filename)

    threadpool = ThreadPool(threadpoolsize)

    for aset_id in range(len(sets)):  # go through each flickr set
        aset = sets[aset_id]
        set_title = aset.find("title").text
        print 'Moving "%s" set over to a picasa album. %i/%i' % (set_title, aset_id + 1, len(sets))

        print 'Gathering set "%s" information.' % set_title

        num_photos = int(aset.get("photos")) + int(aset.get("videos"))
        all_photos = []

        page = 1
        while len(all_photos) < num_photos:
            all_photos.extend(
                FLICKR.photosets_getPhotos(
                    photoset_id=aset.get("id"),
                    per_page=500,
                    extras="url_o,media,original_format",
                    page=page,
                    media="all",
                )
                .find("photoset")
                .getchildren()
            )
            page += 1

        print "Found %i photos and videos in the %s flickr set." % (num_photos, set_title)

        picasa_albums = get_picasa_albums(set_title, aset, len(all_photos))
        picasa_photos = get_picasa_photos(picasa_albums)

        for photo_id in range(len(all_photos)):

            photo = all_photos[photo_id]
            photo_found = False

            for p_photo in picasa_photos:
                if p_photo.title.text == photo.get("title"):
                    print 'Already have photo "%s", skipping' % photo.get("title")
                    photo_found = True
                    break

            if photo_found:
                continue
            else:
                print "Queuing photo %i/%i, %s of album %s for moving." % (
                    photo_id + 1,
                    len(all_photos),
                    photo.get("title"),
                    set_title,
                )

            p_album = None
            for album in picasa_albums:
                if int(album.numphotosremaining.text) > 0:
                    album.numphotosremaining.text = str(int(album.numphotosremaining.text) - 1)
                    p_album = album
                    break

            req = WorkRequest(move_photo, [photo, p_album], {})
            threadpool.putRequest(req)

    threadpool.wait()
Developer: duffyd, Project: kokorice.importer, Lines: 104, Source: migrateflickrtopicasanokeyresize.py


Note: The threadpool.ThreadPool.wait method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.