当前位置: 首页>>代码示例>>Python>>正文


Python Queue.qsize方法代码示例

本文整理汇总了Python中six.moves.queue.Queue.qsize方法的典型用法代码示例。如果您正苦于以下问题:Python Queue.qsize方法的具体用法?Python Queue.qsize怎么用?Python Queue.qsize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在six.moves.queue.Queue的用法示例。


在下文中一共展示了Queue.qsize方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_acquire_contextmanager

# 需要导入模块: from six.moves.queue import Queue [as 别名]
# 或者: from six.moves.queue.Queue import qsize [as 别名]
    def test_acquire_contextmanager(self):
        """Monitor.acquire() used as a context manager must block
        @Monitor.synchronized calls for as long as the lock is held.

        A background thread calls the synchronized execute(); while the
        main thread holds the monitor, execute() cannot have run, so the
        queue still contains exactly the one element put in below.
        """
        class TestedClass(Monitor):
            def __init__(self, cqueue):
                self.cqueue = cqueue
                Monitor.__init__(self)

            @Monitor.synchronized
            def execute(self):
                # Would briefly raise qsize to 2, then drop it back to 1.
                self.cqueue.put(1)
                sleep(1)
                self.cqueue.get()

        class TesterThread(Thread):
            def __init__(self, tc):
                self.tc = tc
                Thread.__init__(self)

            def run(self):
                self.tc.execute()

        cq = Queue()
        cq.put(1)
        tc = TestedClass(cq)
        tt = TesterThread(tc)

        with Monitor.acquire(tc):
            tt.start()
            sleep(0.4)
            # execute() is still blocked on the monitor, so only the
            # element added above is in the queue.
            self.assertEqual(cq.qsize(), 1)

        # Bug fix: the worker thread was never joined, so the test could
        # finish while execute() was still sleeping, leaking a running
        # thread into subsequent tests.
        tt.join()
开发者ID:piotrmaslanka,项目名称:satella,代码行数:31,代码来源:test_monitor.py

示例2: test_monitoring

# 需要导入模块: from six.moves.queue import Queue [as 别名]
# 或者: from six.moves.queue.Queue import qsize [as 别名]
    def test_monitoring(self):
        """Two threads calling a @Monitor.synchronized method must never
        overlap: the shared queue can hold 0 or 1 items, never 2."""
        class Synced(Monitor):
            def __init__(self, shared_queue):
                self.shared_queue = shared_queue
                Monitor.__init__(self)

            @Monitor.synchronized
            def execute(self):
                self.shared_queue.put(1)
                sleep(1)
                self.shared_queue.get()

        class Caller(Thread):
            def __init__(self, target):
                self.target = target
                Thread.__init__(self)

            def run(self):
                self.target.execute()

        shared = Queue()
        synced = Synced(shared)
        first = Caller(synced)
        second = Caller(synced)
        first.start()
        second.start()

        # While either caller is alive, a qsize of 2 would prove that both
        # threads entered execute() at the same time.
        while first.is_alive() or second.is_alive():
            sleep(0.1)
            self.assertNotEqual(shared.qsize(), 2)
开发者ID:piotrmaslanka,项目名称:satella,代码行数:30,代码来源:test_monitor.py

示例3: print

# 需要导入模块: from six.moves.queue import Queue [as 别名]
# 或者: from six.moves.queue.Queue import qsize [as 别名]
        if choice != '':
            job_queue.put({
                'q': entity_id,
                'action': 'set_description',
                'lang': 'nb',
                'value': choice,
                'summary': '#no_to_nb cleanup drive'
            })

            if labels['no'].get('description') is not None:
                job_queue.put({
                    'q': entity_id,
                    'action': 'remove_description',
                    'lang': 'no',
                    'summary': '#no_to_nb cleanup drive'
                })

    completed += 1
    t1 = time.time() - t0
    if not bg_thread.isAlive():
        print('Thread exited. Starting a new')
        bg_thread = start_thread()

    while job_queue.qsize() > 100:
        print('Job queue length: %d, sleeping a while' % job_queue.qsize())
        time.sleep(10)
    print('Status: Fixed %s items. %s items left to go, time: %.2f sec/item, job queue length: %d' % (completed, len(rows), t1 / completed, job_queue.qsize()))

print('*** Main thread waiting')
job_queue.join()
print('*** Done')
开发者ID:danmichaelo,项目名称:wikidata_labels_nb_no,代码行数:33,代码来源:interactive.py

示例4: CachePipeline

# 需要导入模块: from six.moves.queue import Queue [as 别名]
# 或者: from six.moves.queue.Queue import qsize [as 别名]
class CachePipeline(object):
    """Asynchronous cache layer for a spider.

    A single daemon worker thread consumes ``(action, data)`` tuples from
    ``input_queue`` (action is 'load' or 'save') and either serves a
    response from the cache -- pushing the result onto ``result_queue`` --
    or writes a fetched response back into the cache.
    """

    def __init__(self, spider, cache):
        # NOTE(review): spider and cache are project objects; the
        # interfaces used below (timer, stat, submit_task_to_transport,
        # get_item/save_response, ...) are declared elsewhere.
        self.spider = spider
        self.cache = cache
        self.idle_event = Event()
        self.queue_size = 100  # soft cap for both queues, see has_free_resources()
        self.input_queue = Queue()
        self.result_queue = Queue()

        # Single daemon worker; it dies together with the main thread.
        self.thread = Thread(target=self.thread_worker)
        self.thread.daemon = True
        self.thread.start()

    def has_free_resources(self):
        """Return True while both queues are below the configured soft cap."""
        return (self.input_queue.qsize() < self.queue_size
                and self.result_queue.qsize() < self.queue_size)

    def is_idle(self):
        """Return True only while the worker sleeps on an empty input queue."""
        return self.idle_event.is_set()

    def thread_worker(self):
        """Worker loop: process 'load'/'save' requests forever.

        The idle flag is only set for the 0.1s sleep taken after the input
        queue is found empty, so is_idle() is a momentary signal, not a
        latch.
        """
        while True:
            self.idle_event.clear()
            try:
                action, data = self.input_queue.get(block=False)
            except Empty:
                self.idle_event.set()
                time.sleep(0.1)
                self.idle_event.clear()
            else:
                assert action in ('load', 'save')
                if action == 'load':
                    task, grab = data
                    result = None
                    if self.is_cache_loading_allowed(task, grab):
                        result = self.load_from_cache(task, grab)
                    if result:
                        self.result_queue.put(result)
                    else:
                        # Cache miss (or caching disallowed): fall back to
                        # a real network fetch.
                        self.spider.submit_task_to_transport(task, grab)
                elif action == 'save':
                    task, grab = data
                    if self.is_cache_saving_allowed(task, grab):
                        with self.spider.timer.log_time('cache'):
                            with self.spider.timer.log_time('cache.write'):
                                self.cache.save_response(task.url, grab)

    def is_cache_loading_allowed(self, task, grab):
        # Loading is allowed unless any of the following holds:
        # 1) cache data should be refreshed
        # 2) cache is disabled for that task
        # 3) request type is not cacheable (only GET is)
        return (not task.get('refresh_cache', False)
                and not task.get('disable_cache', False)
                and grab.detect_request_method() == 'GET')

    def is_cache_saving_allowed(self, task, grab):
        """
        Check if network transport result could
        be saved to cache layer.

        res: {ok, grab, grab_config_backup, task, emsg}
        """
        # Only successful GET responses for tasks that did not opt out of
        # caching get persisted.
        if grab.request_method == 'GET':
            if not task.get('disable_cache'):
                if self.spider.is_valid_network_response_code(
                        grab.response.code, task):
                    return True
        return False


    def load_from_cache(self, task, grab):
        """Look the task URL up in the cache.

        Returns None on a cache miss; on a hit, loads the cached response
        into ``grab`` and returns a result dict shaped like a successful
        network response.
        """
        with self.spider.timer.log_time('cache'):
            with self.spider.timer.log_time('cache.read'):
                cache_item = self.cache.get_item(
                    grab.config['url'], timeout=task.cache_timeout)
                if cache_item is None:
                    return None
                else:
                    with self.spider.timer.log_time('cache.read.prepare_request'):
                        grab.prepare_request()
                    with self.spider.timer.log_time('cache.read.load_response'):
                        self.cache.load_response(grab, cache_item)

                    grab.log_request('CACHED')
                    self.spider.stat.inc('spider:request-cache')

                    return {'ok': True, 'task': task, 'grab': grab,
                            'grab_config_backup': grab.dump_config(),
                            'emsg': None}
开发者ID:lunyang,项目名称:grab,代码行数:92,代码来源:cache_pipeline.py

示例5: Publisher

# 需要导入模块: from six.moves.queue import Queue [as 别名]
# 或者: from six.moves.queue.Queue import qsize [as 别名]
class Publisher(PublisherServerBase):
    """Receives messages and dispatches them to a dynamically sized pool
    of Worker threads through an internal bounded queue."""

    def __init__(self, use_nanoconfig_service, publisher_endpoint,
                 nanoconfig_service_endpoint, nanoconfig_update_endpoint,
                 nanoconfig_profile, metrics_store, max_queue_size,
                 max_worker, min_worker=5):
        """
        :param use_nanoconfig_service: Indicates whether or not it should use a
            nanoconfig service
        :type use_nanoconfig_service: bool
        :param publisher_endpoint: Publisher server URI
        :type publisher_endpoint: str
        :param nanoconfig_service_endpoint: Nanoconfig service URI
        :type nanoconfig_service_endpoint: str
        :param nanoconfig_update_endpoint: Nanoconfig update service URI
        :type nanoconfig_update_endpoint: str
        :param nanoconfig_profile: Nanoconfig profile URI
        :type nanoconfig_profile: str
        :param metrics_store: Storage backend handed to each Worker
        :param max_queue_size: Max size for the message queue
        :type max_queue_size: int
        :param max_worker: Max number of worker to be spawned at a given time
        :type max_worker: int
        :param min_worker: Min number of worker to be spawned at a given time
        :type min_worker: int
        """
        super(Publisher, self).__init__(
            use_nanoconfig_service, publisher_endpoint,
            nanoconfig_service_endpoint, nanoconfig_update_endpoint,
            nanoconfig_profile
        )
        self.max_queue_size = max_queue_size
        self.metrics_store = metrics_store
        self.min_worker = min_worker
        self.max_worker = max_worker

        self.msg_queue = Queue(self.max_queue_size)
        self.workers = []  # list of live Worker threads

    @property
    def num_workers(self):
        """Number of workers currently tracked by the pool."""
        return len(self.workers)

    def on_receive(self, msg):
        """Queue one incoming message, repairing/resizing the pool first."""
        LOG.debug('[Publisher] Queue msg size = %s | workers = %s',
                  self.msg_queue.qsize(), self.num_workers)
        try:
            self.check_workers_alive()
            self.adjust_pool_size()
        except OSError as exc:
            LOG.exception(exc)
            LOG.error("[Publisher] Error upon receiving a message")

        self.msg_queue.put(msg)

    def check_workers_alive(self):
        """Replace every dead worker with a freshly started one."""
        # Because we can create new workers in this loop, we create a copy
        # --> We could otherwise loop onto a new workers...
        worker_items = self.workers[:]
        for worker_thread in worker_items:
            if not worker_thread.is_alive():
                # remove() does in one step what the original
                # pop(index(worker_thread)) did in two.
                self.workers.remove(worker_thread)
                self.start_worker()

    def adjust_pool_size(self):
        """Grow or shrink the pool toward queue backlog + min_worker."""
        needed_size = self.msg_queue.qsize() + self.min_worker
        if abs(needed_size - self.num_workers) > self.min_worker * 2:
            LOG.debug(("[Publisher] Auto adjust pool size needed size is `%s` "
                       "and the current size is `%s`"),
                      needed_size, self.num_workers)
            # NOTE(review): the shrink bound min(min_worker, needed_size)
            # keeps at most min_worker workers here; confirm this floor is
            # intended rather than max(...).
            while self.num_workers > min(self.min_worker, needed_size):
                self.stop_worker()
            # Create enough, but not too many
            while self.num_workers < min(self.max_worker, needed_size):
                self.start_worker()

    def start_worker(self):
        """Spawn a new Worker thread and track it."""
        LOG.debug("[Publisher] starting worker")
        worker = Worker(self.msg_queue, self.metrics_store)
        worker.start()
        self.workers.append(worker)

    def stop_worker(self):
        """Stop and untrack the most recently started worker, if any."""
        if self.num_workers:
            LOG.debug("[Publisher] stopping worker")
            worker = self.workers.pop(-1)  # Pops the last worker
            worker.stop()

    def stop(self):
        """Stop the server, then stop every worker concurrently.

        Bug fix: ``self.workers`` is a list, but the original code iterated
        it as if it were a dict (``self.workers.get(key)``), raising
        AttributeError as soon as there was a worker to stop.
        """
        super(Publisher, self).stop()
        join_threads = []
        for worker in self.workers:
            t = Thread(target=worker.stop)
            t.start()
            join_threads.append(t)
        for join_thread in join_threads:
            join_thread.join()
开发者ID:b-com,项目名称:watcher-metering,代码行数:98,代码来源:publisher.py

示例6: GraphiteReporter

# 需要导入模块: from six.moves.queue import Queue [as 别名]
# 或者: from six.moves.queue.Queue import qsize [as 别名]

#.........这里部分代码省略.........
    """Run the thread."""
    while True:
      try:
        try:
          name, value, valueType, stamp = self.queue.get()
        except TypeError:
          break
        self.log(name, value, valueType, stamp)
      finally:
        self.queue.task_done()


  def connect(self):
    """Open a TCP connection to the Graphite host, retrying forever.

    No-op when a socket already exists. Failed attempts are retried with
    randomized exponential backoff, capped at 5 seconds.
    """
    if self.sock is not None:
      return
    delay = 0.01
    while True:
      try:
        candidate = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        candidate.settimeout(5)
        candidate.connect((self.host, self.port))
      except socket.error:
        time.sleep(random.uniform(0, 2.0*delay))
        delay = min(delay*2.0, 5.0)
      else:
        self.sock = candidate
        return


  def disconnect(self):
    """Close the Graphite socket, if any; errors on close are ignored."""
    if self.sock is None:
      return
    try:
      self.sock.close()
    except socket.error:
      pass
    finally:
      # Always forget the socket so connect() builds a fresh one.
      self.sock = None


  def _sendMsg(self, msg):
    """Send one line to graphite, reconnecting with exponential backoff
    on socket errors until the send succeeds."""
    if not self.sock:
      self.connect()
    if not isinstance(msg, binary_type):
      msg = msg.encode("UTF-8")

    delay = 0.001
    while True:
      try:
        self.sock.sendall(msg)
        return
      except socket.error:
        log.warning('Graphite connection error', exc_info=True)
        self.disconnect()
        time.sleep(random.uniform(0, 2.0*delay))
        delay = min(delay*2.0, 5.0)
        self.connect()


  def _sanitizeName(self, name):
    """Sanitize a metric name."""
    return name.replace(' ', '-')


  def log(self, name, value, valueType=None, stamp=None):
    """Log a named numeric value. The value type may be 'value',
    'count', or None.

    Floats are formatted with two decimals, everything else via %s; a
    falsy stamp (None or 0) is replaced by the current time.
    """
    # Bug fix: use isinstance() instead of `type(value) == float`, so
    # float subclasses are formatted as floats too (idiomatic type check).
    if isinstance(value, float):
      form = "%s%s %2.2f %d\n"
    else:
      form = "%s%s %s %d\n"

    # Normalize the value type into a leading-dot suffix.
    if valueType is not None and len(valueType) > 0 and valueType[0] != '.':
      valueType = '.' + valueType

    if not stamp:
      stamp = time.time()

    self._sendMsg(form % (self._sanitizeName(name), valueType or '', value, stamp))


  def enqueue(self, name, value, valueType=None, stamp=None):
    """Queue a metric for the background sender thread.

    The sample is silently dropped when a max queue size is configured
    and the queue is already over it.
    """
    over_capacity = bool(self.maxQueueSize) and self.queue.qsize() > self.maxQueueSize
    if over_capacity:
      return
    self.queue.put((name, value, valueType, stamp))


  def flush(self):
    """Block until all stats have been sent to Graphite.

    Relies on the worker thread calling queue.task_done() for every
    dequeued item.
    """
    self.queue.join()


  def shutdown(self):
    """Shut down the background thread."""
    # None is the poison pill: the worker loop breaks when unpacking it
    # raises TypeError.
    self.queue.put(None)
    # Wait for the worker to drain everything, including the sentinel.
    self.flush()
开发者ID:Cue,项目名称:scales,代码行数:104,代码来源:util.py

示例7: SelectiveRepeater

# 需要导入模块: from six.moves.queue import Queue [as 别名]
# 或者: from six.moves.queue.Queue import qsize [as 别名]

#.........这里部分代码省略.........
            # Only put the request back if it was successfully removed
            self._request_pool.put(pending_req)

    def _record_pending_req(self, msg, time_now, expiration_time):
        """
        Acquire a pending request object and record its future
        expiration time in a map.

        Blocks until a request slot is free in the pool; indexes the slot
        by the message's sequence number and by its expiration time.
        """
        self._verify_link_thread()
        # Queue.get will block if no requests are available
        pending_req = self._request_pool.get(True)
        # Sanity check: the pooled slot and its pending-map entry agree.
        assert self._pending_map[pending_req.index].index == pending_req.index
        # sequence number -> slot index, so replies can find this request.
        self._seqmap[msg.sequence] = pending_req.index
        self._pending_map[pending_req.index].track(msg, time_now, expiration_time)
        # expiration_time -> slot, scanned later by the expiry checker.
        self._expire_map[expiration_time][pending_req] = pending_req

    def _config_cb(self, msg, **metadata):
        """Handle a FileIO config message: remember it and re-derive
        window/batch sizing from the advertised limits."""
        self._config_msg = msg
        self._init_fileio_config(msg.window_size, msg.batch_size, PROGRESS_CB_REDUCTION_FACTOR * 2)

    def _request_cb(self, msg, **metadata):
        """
        Process request completions.
        """
        index = self._seqmap.get(msg.sequence)
        if index is None:
            return
        pending_req = self._pending_map[index]
        if self._callback:
            self._callback(pending_req.message, msg)
        self._return_pending_req(pending_req)

    def _has_pending(self):
        return self._request_pool.qsize() != len(self._pending_map)

    def _retry_send(self, check_time, pending_req, delete_keys):
        """
        Retry a request: bump the retry/send counters, stamp a fresh
        expiration on the request (and in the expiration map), re-send it
        over the link, and mark the old expiry entry for deletion.
        """
        self._total_retries += 1
        self._total_sends += 1
        retry_at = Time.now()
        expires_at = retry_at + Time(SBP_FILEIO_TIMEOUT)
        pending_req.record_retry(retry_at, expires_at)
        self._expire_map[expires_at][pending_req] = pending_req
        self._link(pending_req.message)
        delete_keys.append(pending_req)

    def _try_remove_keys(self, d, *keys):
        success = True
        for key in keys:
            try:
                del d[key]
            except KeyError:
                success = False
        return success

    def _check_pending(self):
        """
        Scans from the last check time to the current time looking
        for requests that are due to expire and retries them if
        necessary.
        """
        time_now = Time.now()
开发者ID:swift-nav,项目名称:piksi_tools,代码行数:70,代码来源:fileio.py

示例8: Delegate

# 需要导入模块: from six.moves.queue import Queue [as 别名]
# 或者: from six.moves.queue.Queue import qsize [as 别名]
    class Delegate(object):
        """
        Implements the methods of the Provisioner and JobBatcher class needed for the
        ClusterScaler class.

        Simulates a cluster: each "node" is backed by a Worker thread that
        pulls jobs off an internal queue and sleeps for a configured time
        per job.
        """

        def __init__(self, preemptable):
            # NOTE(review): self.outer (read in _addNodes) is not assigned
            # here; presumably the enclosing object sets it -- confirm.
            super(MockBatchSystemAndProvisioner.Delegate, self).__init__()
            self.jobQueue = Queue()
            self.totalJobs = 0  # Count of total jobs processed
            self.totalWorkerTime = 0.0  # Total time spent in worker threads
            self.nodesToWorker = {}  # Map from Node to instances of the Worker class
            self.maxWorkers = 0  # Maximum number of workers
            self.preemptable = preemptable

        def addJob(self):
            """
            Add a job to the job queue
            """
            self.totalJobs += 1
            # The job payload is irrelevant to the simulation; None suffices.
            self.jobQueue.put(None)

        # JobBatcher functionality

        def getNumberOfJobsIssued(self):
            # Jobs still waiting for a worker to pick them up.
            return self.jobQueue.qsize()

        # AbstractScalableBatchSystem functionality

        def getNodes(self):
            # Snapshot: one NodeInfo per worker, keyed by (index,
            # preemptable); `workers` is 1 while that worker is busy.
            nodes = dict()
            for i, worker in enumerate(self.nodesToWorker.values()):
                nodes[(i, self.preemptable)] = NodeInfo(coresTotal=0, coresUsed=0, requestedCores=1,
                                                        memoryTotal=0, memoryUsed=0, requestedMemory=1,
                                                        workers=1 if worker.busyEvent.is_set() else 0)
            return nodes

        def _addNodes(self, numNodes):
            class Worker(object):
                def __init__(self, jobQueue, secondsPerJob):
                    self.busyEvent = Event()
                    self.stopEvent = Event()

                    def workerFn():
                        # Poll for jobs until stop() is requested; the 1s
                        # get() timeout lets the loop re-check stopEvent.
                        while True:
                            if self.stopEvent.is_set():
                                return
                            try:
                                jobQueue.get(timeout=1.0)
                            except Empty:
                                continue
                            # busyEvent is set for the duration of one job.
                            self.busyEvent.set()
                            time.sleep(secondsPerJob)
                            self.busyEvent.clear()

                    self.startTime = time.time()
                    self.worker = Thread(target=workerFn)
                    self.worker.start()

                def stop(self):
                    # Signal the loop to exit, wait for it, and report the
                    # worker's total lifetime in seconds.
                    self.stopEvent.set()
                    self.worker.join()
                    return time.time() - self.startTime

            for i in range(numNodes):
                node = Node('127.0.0.1', '127.0.0.1', 'testNode', time.time())
                self.nodesToWorker[node] = Worker(self.jobQueue, self.outer.secondsPerJob)
            self.maxWorkers = max(self.maxWorkers, len(self.nodesToWorker))

        def _removeNodes(self, nodes):
            logger.info("removing nodes. %s workers and %s to terminate", len(self.nodesToWorker), len(nodes))
            for node in nodes:
                logger.info("removed node")
                try:
                    worker = self.nodesToWorker.pop(node)
                    # Accumulate how long the worker lived for the stats.
                    self.totalWorkerTime += worker.stop()
                except KeyError:
                    # Node isn't our responsibility
                    pass

        def getNumberOfNodes(self):
            return len(self.nodesToWorker)
开发者ID:chapmanb,项目名称:toil,代码行数:84,代码来源:clusterScalerTest.py

示例9: ThreadedTaskDispatcher

# 需要导入模块: from six.moves.queue import Queue [as 别名]
# 或者: from six.moves.queue.Queue import qsize [as 别名]
class ThreadedTaskDispatcher(object):
    """A Task Dispatcher that creates a thread for each task."""

    stop_count = 0  # Number of threads that will stop soon.

    def __init__(self):
        self.threads = {}  # { thread number -> 1 }
        self.queue = Queue()
        self.thread_mgmt_lock = threading.Lock()

    def handlerThread(self, thread_no):
        """Worker loop: service tasks from the queue until told to stop.

        A ``None`` task is the sentinel that kills this thread (queued by
        setThreadCount); exceptions from a task are logged, not fatal.
        """
        threads = self.threads
        try:
            while threads.get(thread_no):
                task = self.queue.get()
                if task is None:
                    # Special value: kill this thread.
                    break
                try:
                    task.service()
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are not silently swallowed.
                except Exception:
                    log.exception('Exception during task')
        except Exception:
            log.exception('Exception in thread main loop')
        finally:
            # Deregister this thread whether it exited cleanly or crashed.
            mlock = self.thread_mgmt_lock
            with mlock:
                self.stop_count -= 1
                try:
                    del threads[thread_no]
                except KeyError:
                    pass

    def setThreadCount(self, count):
        """See zope.server.interfaces.ITaskDispatcher"""
        mlock = self.thread_mgmt_lock
        with mlock:
            threads = self.threads
            thread_no = 0
            running = len(threads) - self.stop_count
            while running < count:
                # Start threads.
                while thread_no in threads:
                    thread_no = thread_no + 1
                threads[thread_no] = 1
                running += 1
                t = threading.Thread(target=self.handlerThread,
                                     args=(thread_no,),
                                     name='zope.server-%d' % thread_no)
                # setDaemon() is a deprecated alias; assign the attribute.
                t.daemon = True
                t.start()
                thread_no = thread_no + 1
            if running > count:
                # Stop threads: queue one None sentinel per surplus thread.
                to_stop = running - count
                self.stop_count += to_stop
                for _n in range(to_stop):
                    self.queue.put(None)
                    running -= 1

    def addTask(self, task):
        """See zope.server.interfaces.ITaskDispatcher"""
        if task is None:
            raise ValueError("No task passed to addTask().")
        # assert ITask.providedBy(task)
        try:
            task.defer()
            self.queue.put(task)
        except:
            # Deliberate catch-all: undo defer() and re-raise unchanged.
            task.cancel()
            raise

    def shutdown(self, cancel_pending=True, timeout=5):
        """See zope.server.interfaces.ITaskDispatcher"""
        self.setThreadCount(0)
        # Ensure the threads shut down.
        threads = self.threads
        expiration = time() + timeout
        while threads:
            if time() >= expiration:
                log.error("%d thread(s) still running", len(threads))
                break
            sleep(0.1)
        if cancel_pending:
            # Cancel remaining tasks.
            # Bug fix: the original used blocking queue.get(), which can
            # never raise Empty (making the handler dead code) and hangs
            # forever if another consumer wins the race after queue.empty()
            # returned False. get_nowait() makes the drain safe.
            try:
                queue = self.queue
                while not queue.empty():
                    task = queue.get_nowait()
                    if task is not None:
                        task.cancel()
            except Empty:
                pass

    def getPendingTasksEstimate(self):
        """See zope.server.interfaces.ITaskDispatcher"""
        return self.queue.qsize()
开发者ID:zopefoundation,项目名称:zope.server,代码行数:99,代码来源:taskthreads.py


注:本文中的six.moves.queue.Queue.qsize方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。