

Python Queue.qsize method code examples

This article collects typical usage examples of the Queue.qsize method from the Python module celery.five. If you have been wondering what Queue.qsize does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, celery.five.Queue.


The following sections show 5 code examples of the Queue.qsize method, sorted by popularity by default.
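Before the examples, here is a minimal, self-contained sketch of the method itself. It assumes that celery.five (Celery's Python 2/3 compatibility module) re-exports the standard library Queue, and that Empty may or may not be re-exported alongside it; the queue contents are purely illustrative.

from celery.five import Queue          # stdlib Queue re-exported by Celery's compat module
try:
    from celery.five import Empty      # assumption: Empty is re-exported alongside Queue
except ImportError:
    from queue import Empty            # otherwise fall back to the stdlib exception

q = Queue()
q.put('task-1')
q.put('task-2')

print(q.qsize())        # -> 2 (approximate size; other threads may add or remove items)

q.get_nowait()
print(q.qsize())        # -> 1

try:
    q.get_nowait()
    q.get_nowait()      # the queue is now empty
except Empty:
    print('queue drained')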

Example 1: test_poll_result

# Required import: from celery.five import Queue [as alias]
# Method used: Queue.qsize [as alias]
    def test_poll_result(self):

        results = Queue()

        class Message(object):

            def __init__(self, **merge):
                self.payload = dict({'status': states.STARTED,
                                     'result': None}, **merge)

        class MockBinding(object):

            def __init__(self, *args, **kwargs):
                pass

            def __call__(self, *args, **kwargs):
                return self

            def declare(self):
                pass

            def get(self, no_ack=False):
                try:
                    return results.get(block=False)
                except Empty:
                    pass

        class MockBackend(AMQPBackend):
            Queue = MockBinding

        backend = MockBackend()

        # FFWD's to the latest state.
        results.put(Message(status=states.RECEIVED, seq=1))
        results.put(Message(status=states.STARTED, seq=2))
        results.put(Message(status=states.FAILURE, seq=3))
        r1 = backend.get_task_meta(uuid())
        self.assertDictContainsSubset({'status': states.FAILURE,
                                       'seq': 3}, r1,
                                       'FFWDs to the last state')

        # Caches last known state.
        results.put(Message())
        tid = uuid()
        backend.get_task_meta(tid)
        self.assertIn(tid, backend._cache, 'Caches last known state')

        # Returns cache if no new states.
        results.queue.clear()
        assert not results.qsize()
        backend._cache[tid] = 'hello'
        self.assertEqual(backend.get_task_meta(tid), 'hello',
                         'Returns cache if no new states')
Author: KirillShaman | Project: celery | Lines: 55 | Source: test_amqp.py
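Example 1 exercises the backend's fast-forward behaviour: when several state messages are buffered, get_task_meta() drains them all and keeps only the newest one. Below is a minimal sketch of that drain loop over a plain celery.five.Queue; it is illustrative only and not the backend's actual implementation.

from celery.five import Queue
from queue import Empty                # stdlib exception; celery.five may also re-export it

states_queue = Queue()
for status in ('RECEIVED', 'STARTED', 'FAILURE'):
    states_queue.put({'status': status})

latest = None
while states_queue.qsize():            # drain everything that is currently buffered
    try:
        latest = states_queue.get(block=False)
    except Empty:                      # qsize() is only approximate, so guard the get
        break

print(latest)                          # -> {'status': 'FAILURE'}: the last state wins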

Example 2: TokenBucketQueue

# Required import: from celery.five import Queue [as alias]
# Method used: Queue.qsize [as alias]
class TokenBucketQueue(object):
    """Queue with rate limited get operations.

    This uses the token bucket algorithm to rate limit the queue on get
    operations.

    :param fill_rate: The rate in tokens/second that the bucket will
                      be refilled.
    :keyword capacity: Maximum number of tokens in the bucket.
                       Default is 1.

    """
    RateLimitExceeded = RateLimitExceeded

    def __init__(self, fill_rate, queue=None, capacity=1):
        self._bucket = TokenBucket(fill_rate, capacity)
        self.queue = queue
        if not self.queue:
            self.queue = Queue()

    def put(self, item, block=True):
        """Put an item onto the queue."""
        self.queue.put(item, block=block)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        :raises Queue.Full: If a free slot is not immediately available.

        """
        return self.put(item, block=False)

    def get(self, block=True):
        """Remove and return an item from the queue.

        :raises RateLimitExceeded: If a token could not be consumed from the
                                   token bucket (consuming from the queue
                                   too fast).
        :raises Queue.Empty: If an item is not immediately available.

        """
        get = block and self.queue.get or self.queue.get_nowait

        if not block and not self.items:
            raise Empty()

        if not self._bucket.can_consume(1):
            raise RateLimitExceeded()

        return get()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        :raises RateLimitExceeded: If a token could not be consumed from the
                                   token bucket (consuming from the queue
                                   too fast).
        :raises Queue.Empty: If an item is not immediately available.

        """
        return self.get(block=False)

    def qsize(self):
        """Returns the size of the queue."""
        return self.queue.qsize()

    def empty(self):
        """Returns :const:`True` if the queue is empty."""
        return self.queue.empty()

    def clear(self):
        """Delete all data in the queue."""
        return self.items.clear()

    def wait(self, block=False):
        """Wait until a token can be retrieved from the bucket and return
        the next item."""
        get = self.get
        expected_time = self.expected_time
        while 1:
            remaining = expected_time()
            if not remaining:
                return get(block=block)
            sleep(remaining)

    def expected_time(self, tokens=1):
        """Returns the expected time in seconds of when a new token should be
        available."""
        if not self.items:
            return 0
        return self._bucket.expected_time(tokens)

    @property
    def items(self):
        """Underlying data.  Do not modify."""
        return self.queue.queue
Author: EnTeQuAk | Project: celery | Lines: 98 | Source: buckets.py
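A short usage sketch for the class above, assuming TokenBucketQueue as defined in this example is in scope; the fill rate and items are made up for illustration. wait() sleeps until the token bucket allows another get, so the loop drains the queue at roughly two items per second.

from celery.five import Queue

# Allow at most ~2 rate-limited gets per second.
bucket_queue = TokenBucketQueue(fill_rate=2, queue=Queue(), capacity=1)

for i in range(5):
    bucket_queue.put_nowait('task-%d' % i)

print(bucket_queue.qsize())            # -> 5 items waiting

while not bucket_queue.empty():
    item = bucket_queue.wait()         # sleeps until a token is available, then gets the next item
    print('processing', item)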

Example 3: Batches

# Required import: from celery.five import Queue [as alias]
# Method used: Queue.qsize [as alias]
class Batches(Task):
    abstract = True

    #: Maximum number of messages in the buffer.
    flush_every = 10

    #: Timeout in seconds before buffer is flushed anyway.
    flush_interval = 30

    def __init__(self):
        self._buffer = Queue()
        self._count = count(1)
        self._tref = None
        self._pool = None

    def run(self, requests):
        raise NotImplementedError('must implement run(requests)')

    def Strategy(self, task, app, consumer):
        self._pool = consumer.pool
        hostname = consumer.hostname
        eventer = consumer.event_dispatcher
        Req = Request
        connection_errors = consumer.connection_errors
        timer = consumer.timer
        put_buffer = self._buffer.put
        flush_buffer = self._do_flush
        body_can_be_buffer = consumer.pool.body_can_be_buffer

        def task_message_handler(message, body, ack, reject, callbacks, **kw):
            if body is None:
                body, headers, decoded, utc = (
                    message.body, message.headers, False, True,
                )
                if not body_can_be_buffer:
                    body = bytes(body) if isinstance(body, buffer_t) else body
            else:
                body, headers, decoded, utc = proto1_to_proto2(message, body)

            request = Req(
                message,
                on_ack=ack, on_reject=reject, app=app, hostname=hostname,
                eventer=eventer, task=task,
                body=body, headers=headers, decoded=decoded, utc=utc,
                connection_errors=connection_errors,
            )
            put_buffer(request)

            if self._tref is None:     # first request starts flush timer.
                self._tref = timer.call_repeatedly(
                    self.flush_interval, flush_buffer,
                )

            if not next(self._count) % self.flush_every:
                flush_buffer()

        return task_message_handler

    def flush(self, requests):
        return self.apply_buffer(requests, ([SimpleRequest.from_request(r)
                                             for r in requests],))

    def _do_flush(self):
        logger.debug('Batches: Wake-up to flush buffer...')
        requests = None
        if self._buffer.qsize():
            requests = list(consume_queue(self._buffer))
            if requests:
                logger.debug('Batches: Buffer complete: %s', len(requests))
                self.flush(requests)
        if not requests:
            logger.debug('Batches: Canceling timer: Nothing in buffer.')
            if self._tref:
                self._tref.cancel()  # cancel timer.
            self._tref = None

    def apply_buffer(self, requests, args=(), kwargs={}):
        acks_late = [], []
        [acks_late[r.task.acks_late].append(r) for r in requests]
        assert requests and (acks_late[True] or acks_late[False])

        def on_accepted(pid, time_accepted):
            [req.acknowledge() for req in acks_late[False]]

        def on_return(result):
            [req.acknowledge() for req in acks_late[True]]

        return self._pool.apply_async(
            apply_batches_task,
            (self, args, 0, None),
            accept_callback=on_accepted,
            callback=acks_late[True] and on_return or noop,
        )
Author: 277800076 | Project: celery | Lines: 95 | Source: batches.py
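For context, here is a hedged sketch of how a task based on the Batches class above is typically declared, following the celery.contrib.batches pattern; the app instance, broker URL, and task body are illustrative assumptions rather than part of the example's project.

from celery import Celery

app = Celery('clicks', broker='amqp://')     # hypothetical app and broker

@app.task(base=Batches, flush_every=100, flush_interval=10)
def count_clicks(requests):
    # `requests` is a list of SimpleRequest objects; the buffer is flushed
    # once flush_every messages accumulate or flush_interval seconds pass.
    from collections import Counter
    counts = Counter(request.kwargs['url'] for request in requests)
    for url, n in counts.items():
        print('clicks: %s -> %s' % (url, n))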

Example 4: test_poll_result

# Required import: from celery.five import Queue [as alias]
# Method used: Queue.qsize [as alias]
    def test_poll_result(self):

        results = Queue()

        class Message(object):

            def __init__(self, **merge):
                self.payload = dict({'status': states.STARTED,
                                     'result': None}, **merge)
                self.body = pickle.dumps(self.payload)
                self.content_type = 'application/x-python-serialize'
                self.content_encoding = 'binary'

        class MockBinding(object):

            def __init__(self, *args, **kwargs):
                self.channel = Mock()

            def __call__(self, *args, **kwargs):
                return self

            def declare(self):
                pass

            def get(self, no_ack=False):
                try:
                    return results.get(block=False)
                except Empty:
                    pass

            def is_bound(self):
                return True

        class MockBackend(AMQPBackend):
            Queue = MockBinding

        backend = MockBackend()
        backend._republish = Mock()

        # FFWD's to the latest state.
        results.put(Message(status=states.RECEIVED, seq=1))
        results.put(Message(status=states.STARTED, seq=2))
        results.put(Message(status=states.FAILURE, seq=3))
        r1 = backend.get_task_meta(uuid())
        self.assertDictContainsSubset({'status': states.FAILURE,
                                       'seq': 3}, r1,
                                      'FFWDs to the last state')

        # Caches last known state.
        results.put(Message())
        tid = uuid()
        backend.get_task_meta(tid)
        self.assertIn(tid, backend._cache, 'Caches last known state')

        self.assertTrue(backend._republish.called)

        # Returns cache if no new states.
        results.queue.clear()
        assert not results.qsize()
        backend._cache[tid] = 'hello'
        self.assertEqual(backend.get_task_meta(tid), 'hello',
                         'Returns cache if no new states')
Author: EnTeQuAk | Project: celery | Lines: 64 | Source: test_amqp.py

Example 5: Batches

# Required import: from celery.five import Queue [as alias]
# Method used: Queue.qsize [as alias]
class Batches(Task):
    abstract = True

    #: Maximum number of messages in the buffer.
    flush_every = 10

    #: Timeout in seconds before buffer is flushed anyway.
    flush_interval = 30

    def __init__(self):
        self._buffer = Queue()
        self._count = count(1)
        self._tref = None
        self._pool = None

    def run(self, requests):
        raise NotImplementedError("must implement run(requests)")

    def Strategy(self, task, app, consumer):
        self._pool = consumer.pool
        hostname = consumer.hostname
        eventer = consumer.event_dispatcher
        Req = Request
        connection_errors = consumer.connection_errors
        timer = consumer.timer
        put_buffer = self._buffer.put
        flush_buffer = self._do_flush

        def task_message_handler(message, body, ack):
            request = Req(
                body,
                on_ack=ack,
                app=app,
                hostname=hostname,
                events=eventer,
                task=task,
                connection_errors=connection_errors,
                delivery_info=message.delivery_info,
            )
            put_buffer(request)

            if self._tref is None:  # first request starts flush timer.
                self._tref = timer.apply_interval(self.flush_interval * 1000.0, flush_buffer)

            if not next(self._count) % self.flush_every:
                flush_buffer()

        return task_message_handler

    def flush(self, requests):
        return self.apply_buffer(requests, ([SimpleRequest.from_request(r) for r in requests],))

    def _do_flush(self):
        logger.debug("Batches: Wake-up to flush buffer...")
        requests = None
        if self._buffer.qsize():
            requests = list(consume_queue(self._buffer))
            if requests:
                logger.debug("Batches: Buffer complete: %s", len(requests))
                self.flush(requests)
        if not requests:
            logger.debug("Batches: Cancelling timer: Nothing in buffer.")
            self._tref.cancel()  # cancel timer.
            self._tref = None

    def apply_buffer(self, requests, args=(), kwargs={}):
        acks_late = [], []
        [acks_late[r.task.acks_late].append(r) for r in requests]
        assert requests and (acks_late[True] or acks_late[False])

        def on_accepted(pid, time_accepted):
            [req.acknowledge() for req in acks_late[False]]

        def on_return(result):
            [req.acknowledge() for req in acks_late[True]]

        return self._pool.apply_async(
            apply_batches_task,
            (self, args, 0, None),
            accept_callback=on_accepted,
            callback=acks_late[True] and on_return or None,
        )
Author: KirillShaman | Project: celery | Lines: 84 | Source: batches.py


Note: The celery.five.Queue.qsize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please refer to each project's license before redistributing or reusing the code, and do not reproduce this compilation without permission.