当前位置: 首页>>代码示例>>Python>>正文


Python Queue.qsize方法代码示例

本文整理汇总了Python中queue.Queue.qsize方法的典型用法代码示例。如果您正苦于以下问题:Python Queue.qsize方法的具体用法?Python Queue.qsize怎么用?Python Queue.qsize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在queue.Queue的用法示例。


在下文中一共展示了Queue.qsize方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: ProxyManager

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
class ProxyManager(object):
    """Maintains a queue of usable proxies, refilling it from a Scraper
    while filtering out proxies previously flagged as bad."""

    def __init__(self):
        # Flag checked by the collect loop; cleared by stop().
        self.is_alive = True
        self.proxies = Queue()
        self.scraper = Scraper()
        self.bad_proxies = BadProxies()

    def collect(self):
        """Poll every half second; whenever the queue drains, top it up
        with scraped proxies that are not on the bad list."""
        while self.is_alive:
            if self.proxies.qsize() == 0:
                fresh = (p for p in self.scraper.proxies if p not in self.bad_proxies)
                for candidate in fresh:
                    self.proxies.put(candidate)
            sleep(0.5)

    def bad_proxy(self, proxy):
        """Record *proxy* as bad (idempotent)."""
        if proxy not in self.bad_proxies:
            self.bad_proxies.append(proxy)

    def get_proxy(self):
        """Return the next proxy, or None when the queue is empty."""
        return self.proxies.get() if self.proxies.qsize() else None

    def start(self):
        # Runs the collector in the calling thread (blocks until stop()).
        self.collect()

    def stop(self):
        """Stop both the collect loop and the underlying scraper."""
        self.is_alive = False
        self.scraper.is_alive = False
开发者ID:cato33,项目名称:ozaro333,代码行数:34,代码来源:proxy_manager.py

示例2: _schedule_processes

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
    def _schedule_processes(self, tasklist, _worker):
        """Run *_worker* over every task in *tasklist*, optionally in threads.

        Results are drained from a shared queue as they arrive and fed to
        the summary object and progress bar.  Returns the total elapsed
        processor time for the run.
        """
        # Reset the global stop flag so a previous interrupt does not
        # cancel this run.
        _subprocess_container.stop_all = False
        # Work on a shallow copy so we never mutate the caller's list.
        tasklist = copy.copy(tasklist)
        number_tasks = len(tasklist)
        if number_tasks == 0:
            return 0
        use_threading = number_tasks > 1 and self.num_processes > 1
        # NOTE(review): process_time() measures CPU time, not wall-clock
        # time; preserved as-is since callers may rely on that.
        starttime = time.process_time()
        task_queue = Queue()
        pbar = _ProgressBar(number_tasks, self.silent)
        pbar.animate(0)
        processed_tasks = []
        n_errors = 0
        threads = []
        try:
            # Loop while there are live threads, pending tasks, or
            # unconsumed results in the queue.
            while threads or tasklist or task_queue.qsize():
                # Spawn another worker if there is a free slot and work left.
                if (len(threads) < self.num_processes) and tasklist:
                    if use_threading:
                        t = Thread(
                            target=_worker, args=(tasklist.pop(0), task_queue)
                        )
                        t.daemon = True
                        t.start()
                        threads.append(t)
                    else:
                        _worker(tasklist.pop(0), task_queue)
                else:
                    # All slots busy (or tasks exhausted): reap finished
                    # threads.  Rebuild the list instead of removing while
                    # iterating (which skips elements), and use is_alive()
                    # -- isAlive() was removed in Python 3.9.
                    threads = [t for t in threads if t.is_alive()]
                # Drain any results produced so far.
                while task_queue.qsize():
                    task = task_queue.get()
                    if task.has_error():
                        n_errors += 1
                    self.summery.task_summery(task)
                    processed_tasks.append(task)
                    pbar.animate(len(processed_tasks), n_errors)

                time.sleep(0.01)
        except KeyboardInterrupt:
            _display("Processing interrupted")
            _subprocess_container.stop_all = True
            # Small delay so a second ctrl-c can escape this try-catch.
            # This is useful when the code runs inside an outer loop the
            # user wants to escape as well.
            time.sleep(1)
        return time.process_time() - starttime
开发者ID:AnyBody-Research-Group,项目名称:AnyPyTools,代码行数:62,代码来源:abcutils.py

示例3: _cost_limited_dfs

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
    def _cost_limited_dfs(self):
        """Expand nodes from the start state under the current cost cutoff.

        Returns:
            (board, next_min): ``board`` is a solved board (one peg left)
            or ``None`` when the frontier is exhausted; ``next_min`` is
            the smallest cost seen above the cutoff.

        NOTE(review): despite the name, the frontier is a FIFO ``Queue``,
        so expansion order is breadth-first, not stack-based -- confirm
        this is intended.
        """
        frontier = Queue()
        frontier.put(self.start_state)
        next_min = inf
        while not frontier.empty():
            board = frontier.get()
            cost = self._get_cost(board)

            if cost > self.cut_off:
                # Over the cutoff: remember the cheapest such cost for
                # the next iteration of the deepening loop.
                next_min = min(next_min, cost)
                continue

            if board.num_pegs == 1:  # solved
                return (board, next_min)

            for move_index in range(len(board.moves)):
                frontier.put(board.execute_move(move_index))
                self.num_visited += 1
                self.max_space = max(self.max_space, frontier.qsize())

        return (None, next_min)
开发者ID:alexlafroscia,项目名称:class-projects,代码行数:34,代码来源:search.py

示例4: Master

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
class Master(threading.Thread):
    """Daemon thread that drains a queue of (nid, infohash, name) triples,
    spawning metadata fetcher threads and logging results to Postgres."""

    def __init__(self):
        super().__init__()
        self.daemon = True  # setDaemon() is deprecated since Python 3.10
        self.que = Queue()
        self.conn = create_engine("postgresql://[email protected]/dht_demo").connect()
        self.ins = hash_tab.insert()

    def log_in_database_demo(self, infohash, name):
        """Best-effort insert; errors (e.g. duplicate keys, connection
        failures) are deliberately ignored so the crawler keeps running."""
        try:
            self.conn.execute(self.ins, infohash=infohash, name=name)
        except Exception:
            pass

    def logger(self):
        """Forever: pop queued records and persist them (1 s idle poll)."""
        while True:
            if self.que.empty():
                sleep(1)
                continue
            else:
                r = self.que.get()
                self.log_in_database_demo(r[1], r[2])

    def run(self):
        # Persist results on a separate daemon thread while this thread
        # spawns fetchers, throttled to ~1500 live threads.
        dt = threading.Thread(target=self.logger)
        dt.daemon = True
        dt.start()
        while True:
            # active_count() replaces the deprecated activeCount().
            if threading.active_count() < 1500:
                if self.que.qsize() == 0:
                    sleep(1)
                    continue
                r = self.que.get()
                t = threading.Thread(target=fetch_metadata, args=(r[0], r[1], r[2]))
                t.daemon = True
                t.start()
            else:
                sleep(1)

    def fetch(self):
        """Spawn up to 100 fetcher threads in one burst (currently unused)."""
        for i in range(100):
            if self.que.qsize() == 0:
                sleep(1)
                continue
            r = self.que.get()
            t = threading.Thread(target=fetch_metadata, args=(r[0], r[1], r[2]))
            t.daemon = True
            t.start()

    def log(self, nid, infohash, name, address):
        """Enqueue a discovered torrent record, applying back-pressure
        above 5000 pending entries.  The infohash is stored hex-encoded
        and the name UTF-8 decoded."""
        if self.que.qsize() > 5000:
            sleep(1)
        self.que.put([nid, codecs.encode(infohash, "hex_codec").decode(), name.decode()])
开发者ID:gaoyb7,项目名称:simDHTcrawler,代码行数:60,代码来源:main.py

示例5: __init__

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
class Fetcher:
    """Thread-pool style downloader: requests pushed onto ``q_req`` are
    fetched by worker threads and the answers placed onto ``q_ans``."""

    def __init__(self, threads, subject):
        self.opener = urllib.request.build_opener(urllib.request.HTTPHandler)
        self.lock = Lock()
        self.q_req = Queue()
        self.q_ans = Queue()
        self.threads = threads
        self.subject = subject
        for i in range(threads):
            # BUG FIX: Thread's `args` must be a tuple wrapping the single
            # argument; passing `subject` bare would unpack it element by
            # element into threadget().
            t = Thread(target=self.threadget, args=(subject,))
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()
        self.running = 0

    def __del__(self):
        # On teardown, wait for both queues to be fully processed.
        time.sleep(0.5)
        self.q_req.join()
        self.q_ans.join()

    def taskleft(self):
        """Requests still queued, answered-but-unread, or in flight."""
        return self.q_req.qsize() + self.q_ans.qsize() + self.running

    def push(self, req):
        """Queue a request for the worker threads."""
        self.q_req.put(req)

    def pop(self, ans):
        """Return the next answer.  (*ans* is unused; the parameter is
        kept for backward compatibility with existing callers.)"""
        return self.q_ans.get()

    def download_imag(self, subject):
        """Download a movie cover image to disk; returns 'ok'."""
        global count
        s = requests.session()
        imag = s.get(subject['cover'])
        name = subject['title']
        path = '/users/peibibing/PycharmProjects/douban/douban_movie/%s.jpg' % name
        with open(path, 'wb') as f:
            f.write(imag.content)
        count += 1
        print(count)
        return 'ok'

    def threadget(self, sub):
        """Worker loop: fetch each queued request, push (req, answer)."""
        while True:
            req = self.q_req.get()
            with self.lock:  # keep the running-counter update atomic
                self.running += 1
            try:
                ans = self.opener.open(req).read()
            except Exception:
                ans = 'error'
                print(ans)
            self.q_ans.put((req, ans))
            with self.lock:
                self.running -= 1
            self.q_req.task_done()
            time.sleep(0.1)
开发者ID:peibibing,项目名称:mac_test,代码行数:59,代码来源:douban_movies.py

示例6: PingThem

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
class PingThem():
    """Ping a list of targets concurrently and collect the results."""

    def __init__(self, targets, maxthreads=100):
        self.q1 = Queue(maxsize=0)  # work queue
        self.q2 = Queue(maxsize=0)  # result queue
        # Never plan more worker threads than there are targets.
        self.maxthreads = maxthreads if len(targets) >= maxthreads else len(targets)

        for target in targets:
            self.q1.put(target)
        logging.info("Done adding all targets")

        print(self.q1.qsize())

    def worker(self):
        """Consume targets until a ``None`` shutdown sentinel arrives."""
        while 1:
            target = self.q1.get()
            if target is None:  # sentinel: exit the loop
                break

            pinger = PingIt()
            outcome = pinger.doping(target)

            self.q2.put(outcome)

            self.q1.task_done()

    def run(self):
        """Start the workers, wait for all targets, and return the results."""
        print("Will start {0} threads for checking ...".format(self.maxthreads))
        workers = []
        for _ in range(self.maxthreads):
            t = Thread(target=self.worker)
            t.start()
            workers.append(t)

        # Block until every queued target has been processed.
        self.q1.join()

        # One sentinel per worker so they all exit their loops.
        for _ in range(self.maxthreads):
            self.q1.put(None)

        for t in workers:
            t.join()

        # Drain the result queue into a plain list.
        logging.info(self.q2.qsize())

        collected = []
        for _ in range(self.q2.qsize()):
            item = self.q2.get()
            if item is None:
                break
            collected.append(item)

        return collected
开发者ID:binhvq,项目名称:nms,代码行数:58,代码来源:pingit.py

示例7: downloads

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
def downloads(urls, outputs=None, concurrency=cpu_count()):
    """Download *urls* concurrently with worker threads.

    Args:
        urls: iterable of URLs to fetch.
        outputs: optional list of output paths parallel to *urls*;
            defaults to one ``None`` per URL.  (Default changed from a
            mutable ``[]`` to ``None`` -- behavior for callers is
            identical since both are falsy.)
        concurrency: number of worker threads, capped at the job count.

    Returns:
        A list of Result objects, one per job, including jobs that never
        ran because of an interrupt.
    """
    # Queues used for thread coordination: a one-slot exit flag, the job
    # queue and the result queue.
    exit_queue = Queue(1)
    job_queue = Queue()
    result_queue = Queue()

    # Build the download jobs and enqueue them.
    outputs = [None for _ in urls] if not outputs else outputs
    for url, output in zip(urls, outputs):
        job_queue.put(Param(url, output))

    job_size = job_queue.qsize()
    works = []

    # Create and start the worker threads.
    concurrency = job_size if concurrency > job_size else concurrency
    for _ in range(concurrency):
        t = Worker(job_queue, result_queue, exit_queue)
        works.append(t)
        t.start()

    # Wait for completion.  Two exit paths:
    # 1. every job has produced a result;
    # 2. the user hits ctrl+c, in which case already-running tasks are
    #    allowed to finish.
    alive = True
    try:
        while alive:
            for work in works:
                # is_alive() replaces isAlive(), removed in Python 3.9.
                if work.is_alive():
                    alive = True
                    break
            else:
                alive = False
            if result_queue.qsize() == job_size and exit_queue.qsize() == 0:
                exit_queue.put(1)
    except KeyboardInterrupt:
        logger.warning("ctrl + c pressed! waiting for running tasks to complete..")
        exit_queue.put(1)
        for work in works:
            if work.is_alive():
                work.join()

    # Collect results; jobs never started are reported as failures.
    results = []
    while job_queue.qsize() > 0:
        param = job_queue.get_nowait()
        results.append(Result(False, "task not excute", param.url))
    while result_queue.qsize() > 0:
        result = result_queue.get_nowait()
        results.append(result)
    return results
开发者ID:510908220,项目名称:filedownloader,代码行数:53,代码来源:http_file.py

示例8: __init__

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
class Fetcher:
    """Multithreaded fetcher: tasks pushed onto the request queue are
    downloaded by daemon worker threads; answers are queued for reading."""

    def __init__(self, threads_num):
        self.opener = urllib.request.build_opener(urllib.request.HTTPHandler)
        self.lock = Lock()  # guards the running-task counter
        self.q_req = Queue()  # task queue
        self.q_ans = Queue()  # result queue
        self.threads_num = threads_num
        for i in range(threads_num):
            t = Thread(target=self.deal_task)
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()
        self.running = 0

    def __del__(self):
        # On teardown, wait for both queues to be fully processed.
        time.sleep(0.5)
        self.q_req.join()
        self.q_ans.join()

    def task_left(self):
        """Tasks still queued, answered-but-unread, or in flight."""
        return self.q_req.qsize() + self.q_ans.qsize() + self.running

    def push(self, task):
        """Queue a request for the worker threads."""
        self.q_req.put(task)

    def pop(self):
        """Return the next completed answer (blocks until one exists)."""
        return self.q_ans.get()

    def deal_task(self):
        """Worker loop: fetch each queued request and queue its answer."""
        while True:
            req = self.q_req.get()
            with self.lock:  # keep the counter update atomic
                self.running += 1
            ans = self.get_data(req)
            self.q_ans.put(ans)
            with self.lock:
                self.running -= 1
            self.q_req.task_done()
            time.sleep(0.1)

    def get_data(self, req, retries=3):
        """GET *req* with a 10-second timeout, retrying on URLError.

        Returns the response body, or '' once all retries are exhausted.
        """
        data = ''
        try:
            data = self.opener.open(req, timeout=10).read()
        except urllib.request.URLError as e:
            if retries > 0:
                return self.get_data(req, retries - 1)
            print('GET Failed.', req)
            print(e.reason)
        return data
开发者ID:Near-River,项目名称:robot_spider,代码行数:51,代码来源:multiply_thread_crawl.py

示例9: loop

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
def loop(output, input, translator):
    """Main event loop: feed actions from *input* through *translator*
    and drive *output* until a "halt" action arrives."""
    queue = Queue()
    input.input_queue = queue
    input.start()

    last_time = datetime.now()
    running = True
    while running:
        # Seconds elapsed since the previous iteration.
        now = datetime.now()
        dt = (now - last_time).total_seconds()
        last_time = now

        # Drain every action queued since the last pass.
        while queue.qsize() > 0:
            fn, args = translator.do(Action(*queue.get()))
            if fn == "halt":
                # Stop the loop and switch every pin off before exiting.
                running = False
                for pin in translator.pins:
                    output.off(pin)
            else:
                getattr(output, fn)(*args)

        output.update(dt)

    output.close()
开发者ID:NelsonCrosby,项目名称:rpi-controller,代码行数:28,代码来源:loop.py

示例10: TrapUnitContainer

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
class TrapUnitContainer(FUnitContainer):
    """Functional-unit container that executes TRAP instructions strictly
    in issue order via an internal FIFO queue."""

    def __init__(self, configuration, machine):
        super().__init__(configuration, machine)
        self.funits = [TrapUnit(machine) for _ in range(self.numUnits)]
        self.trapQueue = Queue()

    def issue(self, instr):
        """Try to issue *instr*; return False when it is not a trap
        instruction or no reservation station is free."""
        if instr.strOpcode not in self.instructions:
            return False
        if instr.funCode == 0:
            # funCode 0 halts the machine.
            self.machine.haltIssued = True
            log("Halt issued.")
        if not self.hasOpenRStation():
            return False
        station = self.getOpenRStation()
        station.issue(instr)
        self.trapQueue.put(station)
        return True

    def execute(self):
        """Advance the oldest queued trap by one cycle."""
        if self.trapQueue.qsize() == 0:
            return
        # Peek at the head of the queue without dequeuing it.
        head = self.trapQueue.queue[0]
        if head.readyToExecute():
            log('{0} beginning execution.'.format(head.name))
            head.beginExecution()
        elif head.executing and head.execTime > 0:
            head.decreaseExecTime()
            log('{0} continuing execution. Time left: {1}'.format(head.name, head.execTime))
        elif head.execTime == 0 and not head.resultReady:
            log('{0} completing execution.'.format(head.name))
            head.computeResult()
            # Finished: drop it from the queue.
            _ = self.trapQueue.get()
开发者ID:kaledj,项目名称:TomasuloSim,代码行数:34,代码来源:trapunitcontainer.py

示例11: monitor

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
   def monitor(self, core, mem):
      """Watch the GPU for self.monitorTime seconds at the given core/mem
      clocks.

      Returns True when the clocks proved bad (over-temperature or new
      hardware errors), False on success.  On success an averaged result
      record is appended to self.results and written to the logger.
      """
      startTime = time.time()
      # Sliding window of the most recent hashrate samples (kept small by
      # the qsize() check below).
      avgMH = Queue()
      while ((time.time() - startTime) < self.monitorTime):
         time.sleep(5)
         devInfo = self.api.getGPUInfo(self.device)
         if devInfo['Temperature'] >= self.maxTemp:        
            self.handleBadClocks('Temperature threshold reached.', devInfo)
            return True
         if devInfo['HWE'] > self.HWE:
            # New hardware errors appeared since the last check.
            self.HWE = devInfo['HWE']
            self.handleBadClocks('Hardware errors found.', devInfo)
            # Make sure we give the GPU time to set the new clocks so we
            # get the final HW error count.
            time.sleep(2)
            devInfo = self.api.getGPUInfo(self.device)
            self.HWE = devInfo['HWE']
            return True

         avgMH.put(devInfo['MH'])
         if (avgMH.qsize() >= 3):
            # Drop the oldest sample to keep only the latest few.
            avgMH.get()


      # The reported hashrate is the average of the remaining samples.
      # NOTE(review): if the while loop never completes an iteration
      # (monitorTime <= 5) this divides by zero and devInfo is unbound --
      # presumably monitorTime is always well above 5 s; confirm.
      totalMH = 0
      numMH = 0
      while (not avgMH.empty()):
         totalMH += avgMH.get()
         numMH += 1
      avg = totalMH/numMH
      newrec = {'device': self.device, 'core': core, 'mem': mem, 'success': True, 'MH': avg, 'temp': devInfo['Temperature']}
      self.results.append(newrec)
      self.logger.addRecord(newrec)
      return False
开发者ID:wfriedl,项目名称:CGMinerTuner,代码行数:36,代码来源:CGMinerTuner.py

示例12: train

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
    def train(self, texts, chunksize=100, workers = 2):
        """
        Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
        Each sentence must be a list of utf8 strings.

        Parameters:
            texts: iterable of (sentence, object_index, softmax_target,
                sigmoid_target) tuples.
            chunksize: number of items grouped into one job for a worker.
            workers: number of training threads to spawn.

        Returns:
            (objects_done, total_error) tuple.
        """
        if not training_methods_imported:
            raise NotImplementedError(err_msg)
        logger.info("training model with %i workers" % (workers))

        start, next_report = time.time(), [1.0]
        jobs = Queue(maxsize=2 * workers)  # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
        lock = threading.Lock()  # for shared state (=number of words trained so far, log reports...)

        # One-element lists so the worker closures can rebind the shared
        # totals under `lock`.
        total_error = [0.0]
        objects_done = [0]

        def worker_train():
            """Train the model, lifting lists of sentences from the jobs queue."""
            # Scratch buffers reused across sentences to avoid reallocating.
            observation_work = np.zeros(self.window * self.size + self.object_size, dtype = REAL)
            prediction_work = np.zeros(self.output_size, dtype = REAL)
            composition_work = np.zeros([max(self.output_size, self.window * self.size + self.object_size), self.window * self.size + self.object_size], dtype = REAL) if self.bilinear_form else None

            while True:
                job = jobs.get()
                if job is None:  # data finished, exit
                    break
                # how many words did we train on? out-of-vocabulary (unknown) words do not count
                error = sum(train_sentence_concatenation(self, sentence, object_index, softmax_target, sigmoid_target, self._alpha, prediction_work, observation_work, composition_work) for sentence, object_index, softmax_target, sigmoid_target in job)
                with lock:
                    total_error[0] += error
                    objects_done[0] += len(job)
                    elapsed = time.time() - start
                    if elapsed >= next_report[0]:
                        logger.info("PROGRESS: %s objects, %.0f objects/s" % (objects_done[0], float(objects_done[0]) / elapsed if elapsed else 0.0))
                        next_report[0] = elapsed + 1.0  # don't flood the log, wait at least a second between progress reports

        dynos = [threading.Thread(target=worker_train) for _ in range(0,workers)]
        for thread in dynos:
            thread.daemon = True  # make interrupting the process with ctrl+c easier
            thread.start()

        # convert input strings to Vocab objects (or None for OOV words), and start filling the jobs queue
        no_oov = ((np.array([self.vocab.get_index(word) for word in sentence], dtype = INT), object_index, softmax_target, sigmoid_target) for sentence, object_index, softmax_target, sigmoid_target in texts)
        for job_no, job in enumerate(grouper(no_oov, chunksize)):
            logger.debug("putting job #%i in the queue, qsize=%i" % (job_no, jobs.qsize()))
            jobs.put(job)
        logger.info("reached the end of input; waiting to finish %i outstanding jobs" % jobs.qsize())

        # One None sentinel per worker signals end of input.
        for _ in range(0,workers):
            jobs.put(None)  # give the workers heads up that they can finish -- no more work!

        for thread in dynos:
            thread.join()

        elapsed = time.time() - start
        logger.info("training on %i objects took %.1fs, %.0f words/s" %
            (objects_done[0], elapsed, objects_done[0] / elapsed if elapsed else 0.0))

        return (objects_done[0], total_error[0])
开发者ID:JonathanRaiman,项目名称:PythonObjectLM,代码行数:62,代码来源:__init__.py

示例13: __init__

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
class EntityQueue:
    """Thin wrapper around ``queue.Queue`` that also tracks, per producer
    id, whether that producer is still enqueuing items."""

    def __init__(self, maxsize=1000):
        self.queue = Queue(maxsize)
        self.enqueuing_flags = {}

    def put(self, item, block=True, timeout=None):
        """Enqueue *item*, forwarding blocking semantics to the queue."""
        self.queue.put(item, block, timeout=timeout)

    def get(self, block=True, timeout=None):
        """Dequeue and return the next item."""
        return self.queue.get(block, timeout)

    def qsize(self):
        """Approximate number of items currently queued."""
        return self.queue.qsize()

    def empty(self):
        # Only empty when the queue is drained AND no producer is still
        # flagged as enqueuing.
        return self.queue.empty() and not self.is_enqueuing()

    def full(self):
        return self.queue.full()

    def add_enqueuing_flag(self, id):
        """Register producer *id* as actively enqueuing."""
        self.enqueuing_flags[id] = True

    def update_enqueuing_flag(self, id, state):
        """Set producer *id*'s enqueuing flag to *state*."""
        self.enqueuing_flags[id] = state

    def is_enqueuing(self):
        # AND-fold across every producer flag; vacuously True when no
        # producer has registered yet.
        result = True
        for state in self.enqueuing_flags.values():
            result = result and state

        return result
开发者ID:OParl,项目名称:validator,代码行数:35,代码来源:entity_queue.py

示例14: WFOpenedClosedLists

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
class WFOpenedClosedLists(SwapPolitic):
    """Breadth-first search over table states using opened/closed lists,
    expanding by single adjacent swaps, limited to a fixed depth."""

    def __init__(self, table):
        self.max_depth = 3  # nodes at or beyond this depth are not expanded
        self.table = table
        self.tries = 0  # number of nodes actually expanded

        self.openedList = Queue()  # frontier of (table, depth) pairs
        self.closedList = []  # already-expanded (table, depth) pairs

        self.openedList.put((table, 0))

    def run(self):
        """Search until a valid table is found (stored in self.table) or
        the frontier is exhausted."""
        while self.openedList.qsize() != 0:
            (table, depth) = self.openedList.get()
            table.printState()
            if depth >= self.max_depth:
                continue

            self.tries += 1

            if table.isValid():
                self.table = table
                return

            # Expand: enqueue every single-adjacent-swap successor.
            # (The unused `ret = None` local from the original was removed.)
            for i in range(table.count - 1):
                successor = table.copy()
                successor.swapTwoAdjacent(i)
                self.openedList.put((successor, depth + 1))

            self.closedList.append((table, depth))
开发者ID:Infernux,项目名称:Projects,代码行数:33,代码来源:WFOpenedClosedLists.py

示例15: start_testing

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import qsize [as 别名]
def start_testing(unique_info, test_continues):
    """Multithreaded testing of files fetched from database.

    Spawns up to TESTING_THREADS_ALLOWED tester worker threads until every
    file uploaded to the DB has been tested, or the DB has seen no update
    for DB_WATCHDOG_TIME seconds.  Reports first buttons found and
    composes the list of unique files with the specified extensions.
    """
    worker_threads = Queue(TESTING_THREADS_ALLOWED)
    print("starting ", TESTING_THREADS_ALLOWED, " threads\n")
    test_thread_id = 0
    all_files_tested = False
    # Spawn threads to fetch, test files and update the database until all
    # files uploaded to the DB are tested, or no changes happened to the
    # DB for DB_WATCHDOG_TIME seconds.
    while time.time() - db.time_of_update[0] < DB_WATCHDOG_TIME \
           and (not all_files_tested):
        print(time.time() - db.time_of_update[0])
        if worker_threads.qsize() < TESTING_THREADS_ALLOWED:
            # The queue doubles as a semaphore limiting live workers.
            worker_threads.put(test_thread_id)
            worker = threading.Thread(target=tester.tester, \
                                      args=(worker_threads, \
                                      conn_data, \
                                      unique_info,
                                      EXTENTION_TO_FIND,
                                      ALLOWED_APP_RUNTIME
                                      ))
            worker.daemon = True  # setDaemon() is deprecated since Python 3.10
            worker.start()
            test_thread_id += 1
            time.sleep(0.01)
            if test_continues.qsize() < 2:  # tree composed and uploaded
                all_files_tested = db.check_test_completion(conn_data,
                                                        EXTENTION_TO_FIND)
    print ("Testing thread waiting for all worker-threads to complete\n")
    worker_threads.join()
    print ("Testing Thread Checked all unique ",EXTENTION_TO_FIND, " files\n")
    test_continues.get()
    test_continues.task_done()
开发者ID:kkostenkov,项目名称:parse_test,代码行数:37,代码来源:main.py


注:本文中的queue.Queue.qsize方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。