

Python Queue.qsize Method Code Examples

This article collects typical usage examples of the tornado.queues.Queue.qsize method in Python. If you are wondering what exactly Queue.qsize does, how to call it, or how it is used in practice, the curated examples below may help. You can also explore further usage examples of the containing class, tornado.queues.Queue.


Six code examples of Queue.qsize are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help surface better Python code examples.
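Before the examples, here is a minimal, self-contained sketch of how Queue.qsize behaves, assuming Tornado 4.2 or later (where tornado.queues was introduced):

from tornado import gen, ioloop
from tornado.queues import Queue

@gen.coroutine
def main():
    q = Queue(maxsize=10)
    yield q.put('a')
    yield q.put('b')
    print(q.qsize())   # 2: two items are waiting
    item = yield q.get()
    q.task_done()
    print(q.qsize())   # 1: one item left after the get

ioloop.IOLoop.current().run_sync(main)

qsize() returns the number of items currently in the queue; unlike get() or put() it never blocks, which is why the examples below use it for backpressure checks and flush decisions.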

Example 1: KafkaTopicConsumer

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import qsize [as alias]
import json
import random
import time
from threading import Thread

from pykafka import KafkaClient
from tornado import ioloop
from tornado.queues import Queue


class KafkaTopicConsumer(object):
  """
  Tornado-compatible class for consuming messages from a Kafka topic. It runs the Kafka
  consumer in a thread of its own and communicates with code on the Tornado I/O loop
  through thread-safe callbacks and queues. Depends on pykafka.
  """
  def __init__(self, **kwargs):
    self.kafka_hosts = kwargs['kafka_hosts']
    self.topic_name = kwargs['topic_name']
    self.io_loop = ioloop.IOLoop.instance()
    self.message_q = Queue(maxsize=128)
    self.exit = False

    self.kafka_process = Thread(target=self._consumer_loop)
    self.kafka_process.start()

  # Bear in mind that this method is run on a separate thread !!!
  def _consumer_loop(self, **kwargs):
    print "Connecting to %s" % self.kafka_hosts
    kafka_client = KafkaClient(hosts=self.kafka_hosts)
    topic_name = self.topic_name
    topic = kafka_client.topics[topic_name]

    # Generate consumer id if necessary
    if 'consumer_id' in kwargs:
      consumer_id = kwargs['consumer_id']
    else:
      rand_id = "%x" % random.getrandbits(32)  # random 32-bit hex id, e.g. "3f9c0d2a"
      consumer_id = "ush_consumer_%s" % rand_id

    count = 0
    consumer = topic.get_simple_consumer(consumer_id, consumer_timeout_ms=1000)
    while True:
      # exit if required
      if self.exit:
        del kafka_client
        return
      # be careful with saturating the queue (queue maxsize / 2)
      if self.message_q.qsize() > 64:
        time.sleep(1)
        continue
      try:
        m = consumer.consume()
        if m is not None and m.value is not None:
          value = json.loads(m.value)
          # Pass the value to the main thread through a callback in its io loop, the call is thread-safe
          self.io_loop.add_callback(self._put, value)
          #
          count += 1
          if (count % 100) == 0:
            print "INFO: processed %d messages on topic %s" % (count, self.topic_name)
      except Exception:
        # TODO: better logging
        import sys, traceback
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print("Error occurred while consuming kafka item")
        traceback.print_exception(exc_type, exc_value, exc_traceback, limit=16, file=sys.stdout)
Developer: ushahidi, Project: project-pheme-data-interface, Lines: 59, Source: kafka_topic_consumer.py
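The loop above hands each decoded message to self._put via a thread-safe add_callback, but _put itself (and the matching read side) falls outside the quoted lines. A hypothetical sketch of what those two methods might look like, assuming _put simply forwards into message_q:

  # Hypothetical completion of the class above; the real project code may differ.
  # (assumes: from tornado.gen import coroutine, Return)
  @coroutine
  def _put(self, value):
    # runs on the IOLoop thread via add_callback, so Queue operations are safe here
    yield self.message_q.put(value)

  @coroutine
  def get(self):
    # await the next decoded Kafka message from the internal queue
    value = yield self.message_q.get()
    raise Return(value)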

Example 2: TornadoQuerierBase

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import qsize [as alias]
import logging

from tornado.gen import coroutine
from tornado.queues import Queue as TornadoQueue

LOG = logging.getLogger(__name__)  # assumed logger setup; the project's may differ


class TornadoQuerierBase(object):

    def __init__(self):
        self.tasks = TornadoQueue()

    def gen_task(self):
        raise NotImplementedError()

    def run_task(self, task):
        raise NotImplementedError()

    def prepare(self):
        self.running = True

    def cleanup(self):
        self.running = False

    @coroutine
    def run_worker(self, worker_id, f):
        while self.tasks.qsize() > 0:
            task = yield self.tasks.get()
            LOG.debug('worker[%d]: current task is %s' % (worker_id, task))
            try:
                yield f(task)
            except Exception as e:
                LOG.warning(str(e))
            finally:
                self.tasks.task_done()
                task = None
        LOG.debug('worker[%d]: all tasks done %s' % (worker_id, self.tasks))

    @coroutine
    def start(self, num_workers=1):

        self.prepare()

        # add tasks
        tasks = yield self.gen_task()
        for task in tasks:
            yield self.tasks.put(task)

        # start worker coroutines
        for worker_id in range(num_workers):
            LOG.debug('starting worker %d' % worker_id)
            self.run_worker(worker_id, self.run_task)

        yield self.tasks.join()
        self.cleanup()
Developer: jianingy, Project: watchgang, Lines: 51, Source: libwatcher.py
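TornadoQuerierBase is abstract: gen_task must produce the task list and run_task must handle a single task. A hypothetical subclass (names invented for illustration) showing the intended contract:

from tornado import gen, ioloop

class UrlChecker(TornadoQuerierBase):

    @gen.coroutine
    def gen_task(self):
        # start() yields on gen_task(), so a coroutine returning a list works
        raise gen.Return(['http://example.com/page/%d' % i for i in range(10)])

    @gen.coroutine
    def run_task(self, task):
        print('processing %s' % task)  # real work goes here

ioloop.IOLoop.current().run_sync(lambda: UrlChecker().start(num_workers=4))

Note the check-then-get pattern in run_worker: if another worker takes the last task between the qsize() check and the get(), the loser stays parked on get() indefinitely. tasks.join() still unblocks start(), so this only leaks an idle coroutine, but it is worth knowing about.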

Example 3: StreamClient

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import qsize [as alias]
from tornado.gen import coroutine
from tornado.queues import Queue

# generate_id comes from the project's own helpers and is not shown here


class StreamClient(object):
    MAX_SIZE = 60

    def __init__(self, stream_id):
        self.id = generate_id()
        self.stream_id = stream_id
        self.queue = Queue(StreamClient.MAX_SIZE)

    @coroutine
    def send(self, item):
        yield self.queue.put(item)

    @coroutine
    def fetch(self):
        item = yield self.queue.get()
        self.queue.task_done()
        return item

    def empty(self):
        return self.queue.qsize() == 0
Developer: AlexPereverzyev, Project: html5stream, Lines: 22, Source: stream_client.py
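A short, hypothetical round trip through StreamClient; the stream id is invented for illustration:

from tornado import gen, ioloop

@gen.coroutine
def demo():
    client = StreamClient('stream-42')   # hypothetical stream id
    yield client.send({'frame': 1})
    print(client.empty())                # False: one item queued
    item = yield client.fetch()
    print(item, client.empty())          # {'frame': 1} True

ioloop.IOLoop.current().run_sync(demo)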

Example 4: SQSDrain

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import qsize [as alias]
from tornado import gen
from tornado.locks import Event
from tornado.queues import Queue, QueueFull

# RUNNING, CLOSING, MAX_TIMEOUT and SQSError are project-level names
# defined elsewhere in cs.eyrie and are not shown here


class SQSDrain(object):
    """Implementation of IDrain that writes to an AWS SQS queue.
    """

    def __init__(self, logger, loop, sqs_client,
                 metric_prefix='emitter'):
        self.emitter = sqs_client
        self.logger = logger
        self.loop = loop
        self.metric_prefix = metric_prefix
        self.output_error = Event()
        self.state = RUNNING
        self.sender_tag = 'sender:%s.%s' % (self.__class__.__module__,
                                            self.__class__.__name__)
        self._send_queue = Queue()
        self._should_flush_queue = Event()
        self._flush_handle = None
        self.loop.spawn_callback(self._onSend)

    @gen.coroutine
    def _flush_send_batch(self, batch_size):
        send_batch = [
            self._send_queue.get_nowait()
            for pos in range(min(batch_size, self.emitter.max_messages))
        ]
        try:
            response = yield self.emitter.send_message_batch(*send_batch)
        except SQSError as err:
            self.logger.exception('Error encountered flushing data to SQS: %s',
                                  err)
            self.output_error.set()
            for msg in send_batch:
                self._send_queue.put_nowait(msg)
        else:
            if response.Failed:
                self.output_error.set()
                for req in response.Failed:
                    self.logger.error('Message failed to send: %s', req.Id)
                    self._send_queue.put_nowait(req)

    @gen.coroutine
    def _onSend(self):
        respawn = True
        while respawn:
            qsize = self._send_queue.qsize()
            # This will keep flushing until clear,
            # including items that show up in between flushes
            while qsize > 0:
                try:
                    yield self._flush_send_batch(qsize)
                except Exception as err:
                    self.logger.exception(err)
                    self.output_error.set()
                qsize = self._send_queue.qsize()
            # We've cleared the backlog, remove any possible future flush
            if self._flush_handle:
                self.loop.remove_timeout(self._flush_handle)
                self._flush_handle = None
            self._should_flush_queue.clear()
            yield self._should_flush_queue.wait()

    @gen.coroutine
    def close(self, timeout=None):
        self.state = CLOSING
        yield self._send_queue.join(timeout)

    def emit_nowait(self, msg):
        if self._send_queue.qsize() >= self.emitter.max_messages:
            # Signal flush
            self._should_flush_queue.set()
            raise QueueFull()
        elif self._flush_handle is None:
            # Ensure we flush messages at least by MAX_TIMEOUT
            self._flush_handle = self.loop.add_timeout(
                MAX_TIMEOUT,
                lambda: self._should_flush_queue.set(),
            )
        self.logger.debug("Drain emitting")
        self._send_queue.put_nowait(msg)

    @gen.coroutine
    def emit(self, msg, timeout=None):
        if self._send_queue.qsize() >= self.emitter.max_messages:
            # Signal flush
            self._should_flush_queue.set()
        elif self._flush_handle is None:
            # Ensure we flush messages at least by MAX_TIMEOUT
            self._flush_handle = self.loop.add_timeout(
                MAX_TIMEOUT,
                lambda: self._should_flush_queue.set(),
            )
        yield self._send_queue.put(msg, timeout)
Developer: CrowdStrike, Project: cs.eyrie, Lines: 94, Source: drain.py
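The drain's control flow hinges on a single Event: emit and emit_nowait set it when the queue looks full, and _onSend drains until qsize() reaches zero, then re-arms it and sleeps. A stripped-down sketch of that signal-and-drain pattern, independent of SQS:

from tornado import gen, ioloop
from tornado.locks import Event
from tornado.queues import Queue

queue = Queue()
flush = Event()

@gen.coroutine
def drainer():
    while True:
        while queue.qsize() > 0:       # drain the backlog completely
            item = queue.get_nowait()
            print('flushed %r' % item)
            queue.task_done()
        flush.clear()                  # re-arm, then sleep until signalled
        yield flush.wait()

@gen.coroutine
def main():
    ioloop.IOLoop.current().spawn_callback(drainer)
    for i in range(3):
        queue.put_nowait(i)
    flush.set()                        # wake the drainer
    yield queue.join()                 # resolves once every item is task_done

ioloop.IOLoop.current().run_sync(main)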

Example 5: TornadoTransmission

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import qsize [as alias]
class TornadoTransmission():
    def __init__(self, max_concurrent_batches=10, block_on_send=False,
                 block_on_response=False, max_batch_size=100,
                 send_frequency=0.25, user_agent_addition=''):
        if not has_tornado:
            raise ImportError('TornadoTransmission requires tornado, but it was not found.')

        self.block_on_send = block_on_send
        self.block_on_response = block_on_response
        self.max_batch_size = max_batch_size
        self.send_frequency = send_frequency

        user_agent = "libhoney-py/" + VERSION
        if user_agent_addition:
            user_agent += " " + user_agent_addition

        self.http_client = AsyncHTTPClient(
            force_instance=True,
            defaults=dict(user_agent=user_agent))

        # libhoney adds events to the pending queue for us to send
        self.pending = Queue(maxsize=1000)
        # we hand back responses from the API on the responses queue
        self.responses = Queue(maxsize=2000)

        self.batch_data = {}
        self.sd = statsd.StatsClient(prefix="libhoney")
        self.batch_sem = Semaphore(max_concurrent_batches)

    def start(self):
        ioloop.IOLoop.current().spawn_callback(self._sender)

    def send(self, ev):
        '''send accepts an event and queues it to be sent'''
        self.sd.gauge("queue_length", self.pending.qsize())
        try:
            if self.block_on_send:
                self.pending.put(ev)
            else:
                self.pending.put_nowait(ev)
            self.sd.incr("messages_queued")
        except QueueFull:
            response = {
                "status_code": 0,
                "duration": 0,
                "metadata": ev.metadata,
                "body": "",
                "error": "event dropped; queue overflow",
            }
            if self.block_on_response:
                self.responses.put(response)
            else:
                try:
                    self.responses.put_nowait(response)
                except QueueFull:
                    # if the responses queue is also full, just drop this
                    # overflow notification
                    pass
            self.sd.incr("queue_overflow")

    # We're using the older decorator/yield model for compatibility with
    # Python versions before 3.5.
    # See: http://www.tornadoweb.org/en/stable/guide/coroutines.html#python-3-5-async-and-await
    @gen.coroutine
    def _sender(self):
        '''_sender is the control loop that pulls events off the `self.pending`
        queue and submits batches for actual sending.'''
        events = []
        last_flush = time.time()
        while True:
            try:
                # the timeout must be a datetime.timedelta: tornado's
                # Queue.get treats a bare number as an absolute deadline,
                # not a duration
                ev = yield self.pending.get(
                    timeout=timedelta(seconds=self.send_frequency))
                if ev is None:
                    # signals shutdown
                    yield self._flush(events)
                    return
                events.append(ev)
                if (len(events) > self.max_batch_size or
                        time.time() - last_flush > self.send_frequency):
                    yield self._flush(events)
                    events = []
            except TimeoutError:
                yield self._flush(events)
                events = []
                last_flush = time.time()

    @gen.coroutine
    def _flush(self, events):
        if not events:
            return
        for dest, group in group_events_by_destination(events).items():
            yield self._send_batch(dest, group)

    @gen.coroutine
    def _send_batch(self, destination, events):
        '''Makes a single batch API request with the given list of events. The
        `destination` argument contains the write key, API host and dataset
        name used to build the request.'''
        start = time.time()
        status_code = 0
#......... remainder of the code omitted .........
Developer: honeycombio, Project: libhoney-py, Lines: 103, Source: transmission.py
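_sender treats a None item on self.pending as a shutdown sentinel. A hypothetical close method matching that convention; the real libhoney implementation may well differ:

    def close(self):
        '''Hypothetical shutdown: enqueue the None sentinel that _sender
        interprets as "flush the remaining events and return".'''
        try:
            self.pending.put_nowait(None)
        except QueueFull:
            # the queue is saturated; a real implementation would retry
            # or block so the sentinel is not lost
            pass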

Example 6: get_data

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import qsize [as alias]
    def get_data(cls, account, source_filter, limit=100, skip=0):
        """
        Gathers commit information from GH
        GET https://api.github.com/repos/:owner/:repo/commits
        Header: Accept: application/vnd.github.v3+json
        """
        if not account or not account.enabled:
            raise ValueError('cannot gather information without a valid account')
        client = AsyncHTTPClient()

        source_filter = GitHubRepositoryDateFilter(source_filter)

        if source_filter.repository is None:
            raise ValueError('required parameter projects missing')

        default_headers = {"Content-Type": "application/json", "Accept": "application/vnd.github.v3+json"}

        # first we grab our list of commits
        uri = "https://api.github.com/repos/{}/commits".format(source_filter.repository)
        qs = source_filter.get_qs()
        if qs != '':
            uri = uri + '?' + qs
        app_log.info("Starting retrieval of commit list for account {}".format(account._id))
        if limit is not None and limit <= 100:
            # we can handle our limit right here; use '&' if the filter
            # already added a query string above
            uri += ("&" if "?" in uri else "?") + "per_page={}".format(limit)
        elif limit is None:
            uri += ("&" if "?" in uri else "?") + "per_page=100"  # maximum number per page for GitHub API
        taken = 0

        queue = Queue()
        sem = BoundedSemaphore(FETCH_CONCURRENCY)
        done, working = set(), set()

        while uri is not None:
            app_log.info(
                "({}) Retrieving next page, received {} commits thus far".format(account._id, taken))
            req = account.get_request(uri, headers=default_headers)
            response = yield client.fetch(req)

            page_data = json.loads(response.body.decode('utf-8'))
            taken += len(page_data)
            for item in page_data:
                queue.put(item.get('url', None))

            if limit is None or taken < limit:
                # parse the Link header from GitHub (https://developer.github.com/v3/#pagination)
                links = parse_link_header(response.headers.get('Link', ''))
                uri = links.get('next', None)
            else:
                break

            if queue.qsize() > 500:
                raise HTTPError(413, 'too many commits')
        app_log.info("({}) Commit list retrieved, fetching info for {} commits".format(account._id, taken))

        # open our list
        cls.write('[')

        # our worker to actually fetch the info
        @gen.coroutine
        def fetch_url():
            current_url = yield queue.get()
            try:
                if current_url in working:
                    return
                page_no = len(working)
                app_log.info("Fetching page {}".format(page_no))
                working.add(current_url)
                req = account.get_request(current_url)
                client = AsyncHTTPClient()
                response = yield client.fetch(req)
                response_data = json.loads(response.body.decode('utf-8'))
                obj = {
                    'date': response_data['commit']['author']['date'],
                    'author': response_data['commit']['author']['name'],
                    'added_files': len([f for f in response_data['files'] if f['status'] == 'added']),
                    'deleted_files': len([f for f in response_data['files'] if f['status'] == 'deleted']),
                    'modified_files': len([f for f in response_data['files'] if f['status'] == 'modified']),
                    'additions': response_data['stats']['additions'],
                    'deletions': response_data['stats']['deletions']
                }
                if done:
                    cls.write(',')
                cls.write(json.dumps(obj))
                done.add(current_url)
                app_log.info("Page {} downloaded".format(page_no))

            finally:
                queue.task_done()
                sem.release()

        @gen.coroutine
        def worker():
            while True:
                yield sem.acquire()
                fetch_url()

        # start our concurrency worker
        worker()
#......... remainder of the code omitted .........
Developer: vizydrop, Project: apps, Lines: 103, Source: commits.py
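The last example couples an unbounded Queue with a BoundedSemaphore so that at most FETCH_CONCURRENCY commit fetches run at once, with a single worker coroutine firing one fetch per acquired permit. The core of that pattern, reduced to a self-contained sketch (FETCH_CONCURRENCY stands in for the example's module constant, and gen.sleep stands in for the HTTP fetch):

from tornado import gen, ioloop
from tornado.locks import BoundedSemaphore
from tornado.queues import Queue

FETCH_CONCURRENCY = 3  # assumption: mirrors the example's elided constant

@gen.coroutine
def main():
    queue = Queue()
    sem = BoundedSemaphore(FETCH_CONCURRENCY)
    for i in range(10):
        queue.put_nowait('job-%d' % i)

    @gen.coroutine
    def handle_one():
        job = yield queue.get()
        try:
            yield gen.sleep(0.1)   # stand-in for the per-commit HTTP fetch
            print('done %s' % job)
        finally:
            queue.task_done()
            sem.release()

    @gen.coroutine
    def worker():
        while True:
            yield sem.acquire()
            handle_one()           # fire-and-forget, mirroring the example

    worker()
    yield queue.join()             # wait until every job is task_done

ioloop.IOLoop.current().run_sync(main)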


Note: the tornado.queues.Queue.qsize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their developers, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not reproduce this article without permission.