Python Queue.get_nowait Method Code Examples

This article collects typical usage examples of the Python method tornado.queues.Queue.get_nowait, drawn from open-source code. If you are unsure what Queue.get_nowait does or how to use it, the curated examples below should help. You can also explore further usage examples of the containing class, tornado.queues.Queue.


The following presents 4 code examples of the Queue.get_nowait method, ordered by popularity by default.
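
Before turning to the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core contract of Queue.get_nowait: it returns an item immediately if one is available and raises tornado.queues.QueueEmpty otherwise, which makes it the natural tool for draining a queue without yielding control.

from tornado.ioloop import IOLoop
from tornado import gen
from tornado.queues import Queue, QueueEmpty


@gen.coroutine
def main():
    q = Queue()
    q.put_nowait('a')
    q.put_nowait('b')

    print(q.get_nowait())          # -> 'a', returned immediately, no yield

    try:
        while True:
            print(q.get_nowait())  # sweep up whatever is left...
    except QueueEmpty:
        pass                       # ...until the queue signals it is empty


IOLoop.current().run_sync(main)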

Example 1: BatchedStream

# Required import: from tornado.queues import Queue [as alias]
# Alternatively: from tornado.queues.Queue import get_nowait [as alias]
class BatchedStream(object):
    """ Mostly obsolete, see BatchedSend """

    def __init__(self, stream, interval):
        self.stream = stream
        self.interval = interval / 1000.0
        self.last_transmission = default_timer()
        self.send_q = Queue()
        self.recv_q = Queue()
        self._background_send_coroutine = self._background_send()
        self._background_recv_coroutine = self._background_recv()
        self._broken = None

        self.pc = PeriodicCallback(lambda: None, 100)
        self.pc.start()

    @gen.coroutine
    def _background_send(self):
        with log_errors():
            while True:
                msg = yield self.send_q.get()
                if msg == "close":
                    break
                msgs = [msg]
                now = default_timer()
                wait_time = self.last_transmission + self.interval - now
                if wait_time > 0:
                    yield gen.sleep(wait_time)
                while not self.send_q.empty():
                    msgs.append(self.send_q.get_nowait())

                try:
                    yield write(self.stream, msgs)
                except StreamClosedError:
                    self.recv_q.put_nowait("close")
                    self._broken = True
                    break

                if len(msgs) > 1:
                    logger.debug("Batched messages: %d", len(msgs))
                for _ in msgs:
                    self.send_q.task_done()

    @gen.coroutine
    def _background_recv(self):
        with log_errors():
            while True:
                try:
                    msgs = yield read(self.stream)
                except StreamClosedError:
                    self.recv_q.put_nowait("close")
                    self.send_q.put_nowait("close")
                    self._broken = True
                    break
                assert isinstance(msgs, list)
                if len(msgs) > 1:
                    logger.debug("Batched messages: %d", len(msgs))
                for msg in msgs:
                    self.recv_q.put_nowait(msg)

    @gen.coroutine
    def flush(self):
        yield self.send_q.join()

    @gen.coroutine
    def send(self, msg):
        if self._broken:
            raise StreamClosedError("Batch Stream is Closed")
        else:
            self.send_q.put_nowait(msg)

    @gen.coroutine
    def recv(self):
        result = yield self.recv_q.get()
        if result == "close":
            raise StreamClosedError("Batched Stream is Closed")
        else:
            raise gen.Return(result)

    @gen.coroutine
    def close(self):
        yield self.flush()
        raise gen.Return(self.stream.close())

    def closed(self):
        return self.stream.closed()
Developer ID: broxtronix, Project: distributed, Lines of code: 88, Source file: batched.py
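
The pattern to note in Example 1: _background_send blocks on get() for the first message, sleeps out the remaining send interval, then sweeps up everything that accumulated in the meantime with empty()/get_nowait(), so many small messages become a single write. A stripped-down sketch of that micro-batching idiom, independent of the distributed codebase (handle_batch is assumed to be a coroutine supplied by the caller):

from tornado import gen
from tornado.queues import Queue


@gen.coroutine
def batched_consumer(q, handle_batch, interval=0.01):
    """Block for the first message, let more accumulate, then drain."""
    while True:
        msgs = [(yield q.get())]   # blocks until at least one message arrives
        yield gen.sleep(interval)  # give producers time to pile up more
        while not q.empty():       # no yield between empty() and get_nowait(),
            msgs.append(q.get_nowait())  # so QueueEmpty cannot be raised here
        yield handle_batch(msgs)   # assumed coroutine that processes the batch

Because nothing yields between the empty() check and get_nowait(), no other coroutine can run in between and the call cannot fail; the try/except QueueEmpty idiom in Example 2 is the more defensive alternative.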

Example 2: __init__

# Required import: from tornado.queues import Queue [as alias]
# Alternatively: from tornado.queues.Queue import get_nowait [as alias]
class Model:
  def __init__(self, config_file):
    self.lock = locks.Lock()
    self.classification_queue = Queue()

    print('loading config %s' % config_file, file=log.v5)
    # Load and setup config
    try:
      self.config = Config.Config()
      self.config.load_file(config_file)
      self.pause_after_first_seq = self.config.float('pause_after_first_seq', 0.2)
      self.batch_size = self.config.int('batch_size', 5000)
      self.max_seqs = self.config.int('max_seqs', -1)
    except Exception:
      print('Error: loading config %s failed' % config_file, file=log.v1)
      raise

    try:
      self.devices = self._init_devices()
    except Exception:
      print('Error: Loading devices for config %s failed' % config_file, file=log.v1)
      raise

    print('Starting engine for config %s' % config_file, file=log.v5)
    self.engine = Engine.Engine(self.devices)
    try:
      self.engine.init_network_from_config(config=self.config)
    except Exception:
      print('Error: Loading network for config %s failed' % config_file, file=log.v1)
      raise

    IOLoop.current().spawn_callback(self.classify_in_background)

    self.last_used = datetime.datetime.now()

  def _init_devices(self):
    """
    Initiates the required devices for a config. Same as the funtion initDevices in
    rnn.py.
    :param config:
    :return: A list with the devices used.
    """
    oldDeviceConfig = ",".join(self.config.list('device', ['default']))
    if "device" in TheanoFlags:
      # This is important because Theano likely already has initialized that device.
      config.set("device", TheanoFlags["device"])
      print("Devices: Use %s via THEANO_FLAGS instead of %s." % (TheanoFlags["device"], oldDeviceConfig), file=log.v4)
    devArgs = get_devices_init_args(self.config)
    assert len(devArgs) > 0
    devices = [Device(**kwargs) for kwargs in devArgs]
    for device in devices:
      while not device.initialized:
        time.sleep(0.25)
    if devices[0].blocking:
      print("Devices: Used in blocking / single proc mode.", file=log.v4)
    else:
      print("Devices: Used in multiprocessing mode.", file=log.v4)
    return devices

  @tornado.gen.coroutine
  def classify_in_background(self):
    while True:
      requests = []
      # fetch first request
      r = yield self.classification_queue.get()
      requests.append(r)
      # grab all other waiting requests
      try:
        while True:
          requests.append(self.classification_queue.get_nowait())
      except QueueEmpty:
        pass

      output_dim = {}
      # Do dataset creation and classification.
      dataset = StaticDataset(data=[r.data for r in requests], output_dim=output_dim)
      dataset.init_seq_order()
      batches = dataset.generate_batches(recurrent_net=self.engine.network.recurrent,
                                         batch_size=self.batch_size, max_seqs=self.max_seqs)

      with (yield self.lock.acquire()):
        ctt = ForwardTaskThread(self.engine.network, self.devices, dataset, batches)
        yield ctt.join()

      try:
        for i in range(dataset.num_seqs):
          requests[i].future.set_result(ctt.result[i])
          self.classification_queue.task_done()
      except Exception as e:
        print('exception', e)
        raise

  @tornado.gen.coroutine
  def classify(self, data):
    self.last_used = datetime.datetime.now()
    request = ClassificationRequest(data)

    yield self.classification_queue.put(request)
    yield request.future

#......... part of the code omitted here .........
Developer ID: rwth-i6, Project: returnn, Lines of code: 103, Source file: Server.py
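
Example 2 uses the other common drain idiom: one blocking get() followed by get_nowait() in a loop until QueueEmpty. It can be factored into a small reusable helper; a hedged sketch (the name get_batch is ours, not part of tornado or the project above):

from tornado import gen
from tornado.queues import Queue, QueueEmpty


@gen.coroutine
def get_batch(q):
    """Wait for one item, then grab everything else already queued."""
    batch = [(yield q.get())]             # blocks until the first item
    try:
        while True:
            batch.append(q.get_nowait())  # non-blocking sweep of the rest
    except QueueEmpty:
        pass                              # queue drained
    raise gen.Return(batch)

With such a helper, the start of classify_in_background would reduce to `requests = yield get_batch(self.classification_queue)`.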

Example 3: SQSDrain

# Required import: from tornado.queues import Queue [as alias]
# Alternatively: from tornado.queues.Queue import get_nowait [as alias]
class SQSDrain(object):
    """Implementation of IDrain that writes to an AWS SQS queue.
    """

    def __init__(self, logger, loop, sqs_client,
                 metric_prefix='emitter'):
        self.emitter = sqs_client
        self.logger = logger
        self.loop = loop
        self.metric_prefix = metric_prefix
        self.output_error = Event()
        self.state = RUNNING
        self.sender_tag = 'sender:%s.%s' % (self.__class__.__module__,
                                            self.__class__.__name__)
        self._send_queue = Queue()
        self._should_flush_queue = Event()
        self._flush_handle = None
        self.loop.spawn_callback(self._onSend)

    @gen.coroutine
    def _flush_send_batch(self, batch_size):
        send_batch = [
            self._send_queue.get_nowait()
            for pos in range(min(batch_size, self.emitter.max_messages))
        ]
        try:
            response = yield self.emitter.send_message_batch(*send_batch)
        except SQSError as err:
            self.logger.exception('Error encountered flushing data to SQS: %s',
                                  err)
            self.output_error.set()
            for msg in send_batch:
                self._send_queue.put_nowait(msg)
        else:
            if response.Failed:
                self.output_error.set()
                for req in response.Failed:
                    self.logger.error('Message failed to send: %s', req.Id)
                    self._send_queue.put_nowait(req)

    @gen.coroutine
    def _onSend(self):
        respawn = True
        while respawn:
            qsize = self._send_queue.qsize()
            # This will keep flushing until clear,
            # including items that show up in between flushes
            while qsize > 0:
                try:
                    yield self._flush_send_batch(qsize)
                except Exception as err:
                    self.logger.exception(err)
                    self.output_error.set()
                qsize = self._send_queue.qsize()
            # We've cleared the backlog, remove any possible future flush
            if self._flush_handle:
                self.loop.remove_timeout(self._flush_handle)
                self._flush_handle = None
            self._should_flush_queue.clear()
            yield self._should_flush_queue.wait()

    @gen.coroutine
    def close(self, timeout=None):
        self.state = CLOSING
        yield self._send_queue.join(timeout)

    def emit_nowait(self, msg):
        if self._send_queue.qsize() >= self.emitter.max_messages:
            # Signal flush
            self._should_flush_queue.set()
            raise QueueFull()
        elif self._flush_handle is None:
            # Ensure we flush messages at least by MAX_TIMEOUT
            self._flush_handle = self.loop.add_timeout(
                MAX_TIMEOUT,
                lambda: self._should_flush_queue.set(),
            )
        self.logger.debug("Drain emitting")
        self._send_queue.put_nowait(msg)

    @gen.coroutine
    def emit(self, msg, timeout=None):
        if self._send_queue.qsize() >= self.emitter.max_messages:
            # Signal flush
            self._should_flush_queue.set()
        elif self._flush_handle is None:
            # Ensure we flush messages at least by MAX_TIMEOUT
            self._flush_handle = self.loop.add_timeout(
                MAX_TIMEOUT,
                lambda: self._should_flush_queue.set(),
            )
        yield self._send_queue.put(msg, timeout)
Developer ID: CrowdStrike, Project: cs.eyrie, Lines of code: 94, Source file: drain.py
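
Two details distinguish Example 3's use of get_nowait: each flush is bounded by the emitter's batch limit, and messages that fail to send are put back on the queue for the next pass. A self-contained sketch of that bounded-flush-with-requeue idea (MAX_BATCH and the send_batch coroutine are stand-ins for a real client such as the SQS emitter):

from tornado import gen
from tornado.queues import Queue

MAX_BATCH = 10  # stand-in for emitter.max_messages


@gen.coroutine
def flush(q, send_batch):
    """Send up to MAX_BATCH queued messages; requeue any failures."""
    # Safe: qsize() is read and the queue drained with no yield in between.
    batch = [q.get_nowait() for _ in range(min(q.qsize(), MAX_BATCH))]
    failed = yield send_batch(batch)  # assumed coroutine returning failed msgs
    for msg in failed:
        q.put_nowait(msg)             # leave failures for the next flush
    raise gen.Return(len(batch) - len(failed))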

Example 4: Application

# Required import: from tornado.queues import Queue [as alias]
# Alternatively: from tornado.queues.Queue import get_nowait [as alias]

#......... part of the code omitted here .........
                                                                             node.node_id,
                                                                             ping.MESSAGE_TYPE))

            # Check that the ACK sequence number matches the PING sequence number
            if ack.seqno == ping.seqno:
                LOGGER.debug('Sequence number matches. Node {} looks good to {}!'.format(node.node_id,
                                                                                       self.local_node.node_id))
                # Process the gossip messages tacked onto the ACK message's payload
                for message in ack.payload:
                    try:
                        self.gossip_inbox.put_nowait(message)
                    except QueueFull:
                        LOGGER.error('Unable to add {} message from {} to gossip inbox'.format(message.MESSAGE_TYPE,
                                                                                               node.node_id))
                # mark the node as ALIVE in self.nodes
                self.mark_alive(node)

                # Send gossip that this node is alive
                self.queue_gossip_send(
                    Alive(node=node, sender=self.local_node)
                )

                raise Return(True)
            else:
                raise Return(False)
        finally:
            stream.close()

    @coroutine
    def ack(self, stream, seqno):
        payload = []
        for _ in xrange(ACK_PAYLOAD_SIZE):
            try:
                gossip = self.gossip_outbox.get_nowait()
                payload.append(gossip)
            except QueueEmpty:
                break

        ack = Ack(seqno=seqno, payload=payload)
        LOGGER.debug('Trying to send ack: {}'.format(ack))
        try:
            yield stream.write(ack.to_msgpack)
        except StreamClosedError:
            LOGGER.error('Unable to connect from {} to stream (acking PING)'.format(self.local_node.node_id))
        LOGGER.debug('Sent ack to {}'.format(stream))

    @coroutine
    def _change_node_state(self, node, state):
        """
        Because Tornado has explicit context switching, we don't need to worry much about synchronization here
        """
        LOGGER.debug('{} knows about {}: {}'.format(self.local_node.node_id, node.node_id, state))
        self.add_node(node)
        self.nodes[node.node_id].state = state

    @coroutine
    def mark_alive(self, node):
        if node.node_id != self.local_node.node_id:
            LOGGER.debug('Marking {} ALIVE'.format(node.node_id))
            self._change_node_state(node, State.ALIVE)

    @coroutine
    def mark_dead(self, node):
        self._change_node_state(node, State.DEAD)

    @coroutine
Developer ID: jefffm, Project: swimpy, Lines of code: 70, Source file: app.py


Note: The tornado.queues.Queue.get_nowait examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or use must follow the corresponding project's license. Do not reproduce without permission.