

Python ThreadPool.destroy Method Code Examples

This article collects typical usage examples of the threadpool.ThreadPool.destroy method in Python. If you are wondering what ThreadPool.destroy does, how to call it, or what real-world usage looks like, the selected code examples below may help. You can also explore further usage examples of the containing class, threadpool.ThreadPool.


The sections below show 4 code examples of ThreadPool.destroy, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
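All four examples follow the same lifecycle: construct a ThreadPool, queue work with add_task(), and call destroy() once to wait for the queued tasks and release the worker threads. The threadpool module they import appears to be a project-local helper rather than the classic PyPI threadpool package (whose ThreadPool exposes putRequest/wait instead of add_task/destroy). As a rough sketch of the interface the examples rely on, a minimal Python 3 implementation might look like the following; the class body is an assumption inferred from how the examples call it, not code from any of the cited projects.

# Minimal sketch of the add_task/destroy interface used by the examples below.
import queue
import threading


class ThreadPool(object):
    def __init__(self, num_threads):
        self._tasks = queue.Queue()
        self._workers = []
        for _ in range(num_threads):
            t = threading.Thread(target=self._worker, daemon=True)
            t.start()
            self._workers.append(t)

    def _worker(self):
        while True:
            item = self._tasks.get()
            if item is None:                      # sentinel queued by destroy()
                self._tasks.task_done()
                break
            func, args, kwargs = item
            try:
                func(*args, **kwargs)
            except Exception as exc:              # keep the worker alive on task errors
                print("task failed: %s" % exc)
            finally:
                self._tasks.task_done()

    def add_task(self, callback, *args, **kwargs):
        self._tasks.put((callback, args, kwargs))

    def destroy(self):
        self._tasks.join()                        # wait for every queued task
        for _ in self._workers:
            self._tasks.put(None)                 # one shutdown sentinel per worker
        for t in self._workers:
            t.join()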

Example 1: range

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import destroy [as alias]
        imageUrls = None
        for i in range(1, 10):    # retry the request up to 9 times
            try:
                imageUrls = get_image_url(restUrl)
                break
            except Exception, e:
                print 'get restUrl error times' + str(i) + ': %s' % (e,)
                logging.error('get restUrl error times' + str(i) + ': %s' % (e,))
                time.sleep(10)

        if imageUrls is None or len(imageUrls) == 0:
            print 'get imageUrls error %s' % restUrl
            logging.error('get imageUrls error %s' % restUrl)
            continue

        # logging.debug("progress: %d of %d, %s , %d images", progress, total, reviewName, len(urls))  # 进度

        count = 0
        for imageUrl in imageUrls:
            # download_image(imageUrl, LOCAL_DIR + reviewName,
            # imageUrl[imageUrl.rfind("/") + 1:] + ".jpg")
            pool.add_task(download_image, imageUrl, LOCAL_DIR + reviewName,
                          imageUrl[imageUrl.rfind("/") + 1:], SLEEP_SECONDS)  # download the images on the thread pool
            count += 1
            # logging.debug("task added: %d", count)
            # logging.debug("finished : %s", reviewName)
            # print "finished : %s" % ( reviewName)
            #logging.info("finished : %s" % ( reviewName))
    pool.destroy()

Developer: sqjs, Project: my_scripts, Lines: 30, Source file: crawler_style_resort-2015.py
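Example 1 queues one download_image call per image URL and calls pool.destroy() once, after the outer loop, so the script blocks until every download finishes before exiting. If the custom pool is not required, the standard library's concurrent.futures.ThreadPoolExecutor gives the same submit-then-wait shape; the sketch below uses a placeholder download_image and URL list rather than the helpers from the original project.

# Roughly equivalent shape using the standard library (Python 3).
from concurrent.futures import ThreadPoolExecutor


def download_image(url, directory, filename):
    # Placeholder for the download helper used in Example 1.
    print("downloading %s -> %s/%s" % (url, directory, filename))


image_urls = ["http://example.com/a.jpg", "http://example.com/b.jpg"]

with ThreadPoolExecutor(max_workers=5) as pool:
    for url in image_urls:
        pool.submit(download_image, url, "./images", url.rsplit("/", 1)[-1])
# Leaving the with-block waits for every submitted task, much like pool.destroy().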

Example 2: Tasks

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import destroy [as alias]
class Tasks(Codes):

    def __init__(self):
        self.operate = Operate()
        self._api = OpenApi()
        self._http = HttpClient.getInstance()
        self._pool = ThreadPool(5)    # create a pool of 5 worker threads
        print("Task class initialized")

    def getAllAdmin(self):
        print("所有管理员: %s", variable.Admins)
        return variable.Admins

    def getAllGroup(self):
        print("所有关注群: %s", variable.Groups)
        return variable.Groups

    def addAdmin(self, qq):
        return self.operate.addAdmin(qq)

    def delAdmin(self, qq):
        return self.operate.delAdmin(qq)

    def isAdmin(self, qq):
        return self.operate.isAdmin(qq)

    def addGroup(self, qq):
        return self.operate.addGroup(qq)

    def delGroup(self, qq):
        return self.operate.delGroup(qq)

    def inGroup(self, qq):
        # print("inGroup: %s", qq)
        return self.operate.inGroup(qq)

    def addAsk(self, question, answer):
        return self.operate.addAsk(question, answer)

    def delAsk(self, Id):
        return self.operate.delAsk(Id)

    def getAsk(self, content):
        return self.operate.getAsk(content)

    def end(self):
        self._pool.destroy()

    def uin_to_qq(self, uin):
        if uin in variable.UsersQQ:
            return variable.UsersQQ.get(uin)
        print("获取qq %s %s %s", uin, variable.Vfwebqq, variable.Referer)
        html = self._http.get(variable.Get_friend_uin2.format(uin, self.bytesToStr(variable.Vfwebqq)), referer = variable.Referer)
        print("uin_to_qq: %s", html)
        try:
            result = json.loads(self.bytesToStr(html))
            if result.get("retcode") != 0:
                return ""
            qq = result.get("result").get("account")
            if qq:
                variable.UsersQQ[uin] = str(qq)
                return str(qq)
        except Exception as e:
            print(e)
            return ""

    def sendMsg(self, *args, **kwargs):
        print("回复消息")
        url = kwargs.get("url")
        data = kwargs.get("data")
        # print(data)
        referer = kwargs.get("referer")
        result = self._http.post(url = url, data = data, referer = referer)
        print("回复结果: %s", result)

    def otherMsg(self, content, to, url, uin):
        if content:
            html = self._http.get(url = variable.RobotUrl.format(quote(content), uin))
            html = html.replace("\\n", "").replace("\n", "")
            html = self._api.parse(html)
            html = self._api.getResult()
            if html:
                print("智能回复: ", html)
                data = {'r' : variable.Msg_Data.format(to, uin, html, variable.Clientid, variable.Msgid, variable.Psessionid)}
                print(data)
                self._pool.add_task(callback = self.sendMsg, url = url, data = data, referer = variable.Referer)

    def analyze(self, qq, uin, content, iseq = None):
        print("Start parsing the message")
        if iseq:
            print("Message came from a group")
            to = "group_uin"
            url = variable.Send_qun_msg2
        else:
            print("Message came from a friend")
            to = "to"
            url = variable.Send_buddy_msg2
        # The sender is an admin
        if self.isAdmin(qq) and content in ("开启机器人", "关闭机器人", "退出"):  # "enable robot", "disable robot", "quit"
            # Parse the admin command
#......... part of the code is omitted here .........
Developer: 892768447, Project: QQRobot, Lines: 103, Source file: tasks.py
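In Example 2 the pool is an instance attribute of Tasks and is only released when the caller remembers to invoke end(). One way to make that harder to forget is to tie the pool's lifetime to a with-block; the sketch below assumes the same add_task/destroy interface and is not part of the original QQRobot project.

# Sketch: tie the pool's lifetime to a with-block so destroy() always runs.
from threadpool import ThreadPool  # the custom pool used throughout this article


class Tasks(object):
    def __init__(self, workers=5):
        self._pool = ThreadPool(workers)

    def end(self):
        self._pool.destroy()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.end()          # release the worker threads even if handling raised
        return False        # do not swallow exceptions


# Usage:
# with Tasks() as tasks:
#     tasks._pool.add_task(print, "hello from a worker thread")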

Example 3: main_fresh

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import destroy [as alias]
def main_fresh(dbOrNot):
    """
    Monitor URLs using fresh data.
    """
    # set value for oldUrlObjDic dict.
    f = open("./urgentCriterion_new")
    while 1:
        string = f.readline().strip()
        if not string:
            break
        arr = string.split(",")
        #URL Object Format: URL(length, md5)
        oldUrlObjDic[arr[0]] = URL(int(arr[1]), arr[2])
    f.close()

    f = open("./urgentAccessErrorURLs")
    while 1:
        string = f.readline().strip()
        if not string:
            break
        aeURLs.append(string)
    f.close()

    #lxw_tp
    #threadingNum = threading.Semaphore(THREADS_NUM)
    tp = ThreadPool(THREADS_NUM)

    threads = []
    urlCount = 0
    # monitor each url in .urls file
    f = open("./.urgentURLS")
    while 1:
        url = f.readline().strip()
        if not url:
            break

        #lxw_tp
        #Multiple Thread: Deal with "one url by one single thread".
        #mt = MyThread(monitor, (url,), threadingNum)
        tp.add_task(monitor, url)
        #mt.start()
        #threads.append(mt)

        urlCount += 1
    f.close()

    #lxw_tp
    tp.destroy()
    #for thread in threads:
    #    thread.start()

    """
    while 1:
        over = True
        for thread in threads:
            if thread.isAlive():
                if not thread.isTimedOut():     # not "Timed Out".
                    over = False
                else:
                    urgentMyUtils.writeLog("lxw_Timed Out", thread.getURL(), "")
        if over:
            break
    """

    if aeCount > 0:
        allContent = "Monitored {0} sites in this run; {1} of them had access errors. Details:\n\n{2}".format(urlCount, aeCount, aeContent)
        urgentMyUtils.sendEmail(aeSubject, allContent)
    if uwCount > 0:
        allContent = "Monitored {0} sites in this run; {1} of them showed updates. Details:\n\n{2}".format(urlCount, uwCount, uwContent)
        urgentMyUtils.sendEmail(uwSubject, allContent)

    #Update Criterion file.
    f = open("./urgentCriterion_new", "w")
    for url in newUrlObjDic.keys():
        f.write("{0},{1},{2}\n".format(url, newUrlObjDic[url].length, newUrlObjDic[url].getMD5Str()))
    f.close()

    dbOrNot = False    # overrides the dbOrNot argument, so the database update below is disabled
    if dbOrNot:
        #update criterion in database
        urgentMyUtils.updateCriterion(newUrlObjDic)

    #Update accessErrorURLs file.
    f = open("./urgentAccessErrorURLs", "w")
    for url in aeURLs:
        f.write(url + "\n")
    f.close()
Developer: lxw0109, Project: monitorURLs, Lines: 89, Source file: monitorUrgentURLs.py
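In Example 3 each monitor(url) task runs on a worker thread, while module-level state such as aeCount, uwCount, and newUrlObjDic is read by the main thread only after tp.destroy() returns. If those updates are anything more than a single assignment, they should be guarded by a lock; the sketch below mirrors the aeCount counter, but the monitor body is only a placeholder.

# Sketch: protecting a counter that worker tasks update concurrently.
import threading

ae_count = 0                              # mirrors aeCount in Example 3
ae_lock = threading.Lock()


def monitor(url):
    # Stand-in for Example 3's monitor(); only the locking pattern matters here.
    global ae_count
    reachable = url.startswith("http")    # placeholder for the real access check
    if not reachable:
        with ae_lock:                     # serialize the read-modify-write
            ae_count += 1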

Example 4: start

# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import destroy [as alias]
def start(baseUrl,seedUrl):
    # clean reffer in reffer.txt
    f = open("reffer.txt","w")
    f.close()

    #seed = Request(base='http://192.168.42.131/dvwa/index.php',url='http://192.168.42.131/dvwa/index.php',method='get')
    seed = request.Request(base=baseUrl,url=seedUrl,timeout=config.conf['connTimeout'],query={},method='get')
    #seed = request.Request(base='http://192.168.42.132/dvwa/',url='http://192.168.42.132/dvwa/',query={},method='get')
    colors.blue('Seed URL: %s\n' % seed._url)
    logfileName = create_logfile(seed._url)
    cookie = getCookie(seed._url)
    
    # begin crawler
    tup = urlparse.urlparse(seed._url)
    netloc = tup.netloc # seed url 
    count = 0
    q = Queue.Queue()
    bf = bloomFilter.BloomFilter(0.001,100000)
    # readreffer from reffer.txt
    '''
    reffer = readReffer()
    reqSet = []
    reqSet.append(seed)
    reqSet.extend(reffer)
    for i in reqSet:
        q.put(i)
        bf.insert(i._url)
    '''
    q.put(seed)
    bf.insert(seed._url)

    nums = config.conf['MaxThread']
    pool = ThreadPool(nums)
    begin = time.time()
    while(not q.empty()):
        req = q.get()
        req._cookies = cookie
        reqs = crawler.crawl(req,tree)

        if req._query != {} and is_tree_full(req._url,tree):
        #if req._query != {}:
            count += 1 
            print 'URL: ',req._BFUrl,'  ', req._source
            pool.add_task(startCheck,req,logfileName)
        

        for x in reqs:
            if not bf.exist(x._BFUrl):
                bf.insert(x._BFUrl)
                q.put(x)


    pool.destroy()
    end = time.time()
    
    f = open(logfileName,'r')
    colors.blue('\nScan results:\n\n')
    x = f.read()
    colors.green(x)
    colors.blue('\nScan results have been saved to "%s"\n\n' % (os.getcwd() + '/' + logfileName))
    cost = end - begin
    print "Elapsed time: %f seconds" % cost
    print "Number of URLs tested:", count
    f.close()
    f = open(logfileName,'a')
    f.write(advice())
    f.close()
    os.system('ps -ef | grep -v grep | grep proxy.py | awk \'{print $2}\'|xargs kill -9')
    '''
Developer: Tom-Kail, Project: SqliScan, Lines: 71, Source file: master.py
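Example 4 calls pool.destroy() only after the crawl loop drains the queue, and the subsequent log handling and proxy cleanup assume the crawl completed normally. Wrapping the pool in try/finally guarantees the workers are torn down even if crawling raises; the sketch below assumes the same threadpool interface and uses a placeholder in place of the crawl loop.

# Sketch: guarantee the pool is destroyed even if the crawl raises.
from threadpool import ThreadPool  # the custom pool used throughout this article


def do_crawl(pool):
    # Stand-in for the while-loop body of Example 4.
    pool.add_task(print, "checking one URL")   # placeholder task


pool = ThreadPool(10)
try:
    do_crawl(pool)
finally:
    pool.destroy()    # runs even on error, so no worker threads are leaked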


Note: The threadpool.ThreadPool.destroy examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Please do not reproduce without permission.