This article collects typical usage examples of the Python method tornado.queues.Queue.get. If you have been wondering what Queue.get does and how to use it, the curated code examples below may help. You can also explore further usage examples of the containing class, tornado.queues.Queue.
The following presents 15 code examples of Queue.get, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
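Before the examples, here is a minimal, self-contained sketch of the basic producer/consumer pattern that Queue.get supports. It is not taken from any of the projects below; the queue size, timeout, and item values are illustrative assumptions.

from datetime import timedelta

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue


@gen.coroutine
def main():
    queue = Queue(maxsize=2)

    @gen.coroutine
    def producer():
        for i in range(3):
            yield queue.put(i)   # waits if the queue is full
            yield gen.sleep(0.01)

    IOLoop.current().spawn_callback(producer)

    while True:
        try:
            # get() returns a Future; with a timeout it raises
            # gen.TimeoutError if no item arrives in time.
            item = yield queue.get(timeout=timedelta(seconds=1))
        except gen.TimeoutError:
            break
        print("got", item)
        queue.task_done()


if __name__ == '__main__':
    IOLoop.current().run_sync(main)

With a timeout, get() raises gen.TimeoutError when no item arrives in time, which is the behavior Example 4 below relies on to detect an unanswered request.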
Example 1: Stream
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class Stream(object):
def __init__(self, raw_headers, header_table):
self._queue = Queue()
self._header_table = header_table
self._current_headers = self._header_table.merge(raw_headers)
@gen.coroutine
def get(self, timeout=0):
if timeout == 0:
res, headers = yield self._queue.get()
else:
deadline = datetime.timedelta(seconds=timeout)
res, headers = yield self._queue.get(deadline)
self._current_headers = headers
if isinstance(res, Exception):
raise res
else:
raise gen.Return(res)
def push(self, item, raw_headers):
headers = self._header_table.merge(raw_headers)
self._queue.put_nowait((item, headers))
def done(self, raw_headers):
headers = self._header_table.merge(raw_headers)
return self._queue.put_nowait((ChokeEvent(), headers))
def error(self, errnumber, reason, raw_headers):
headers = self._header_table.merge(raw_headers)
return self._queue.put_nowait((RequestError(errnumber, reason), headers))
@property
def headers(self):
return self._current_headers
Example 2: Publisher
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class Publisher(MQAsyncSub):
"""Handles new data to be passed on to subscribers."""
def __init__(self):
self.WSmessages = Queue()
self.MQmessages = Queue()
self.sub = MQAsyncSub.__init__(self, zmq.Context(), 'admin', [])
self.subscribers = set()
def register(self, subscriber):
"""Register a new subscriber."""
self.subscribers.add(subscriber)
def deregister(self, subscriber):
"""Stop publishing to a subscriber."""
self.subscribers.remove(subscriber)
@gen.coroutine
def on_message(self, did, msg):
"""Receive message from MQ sub and send to WS."""
yield self.WSmessages.put({"msgid": did, "content": msg})
@gen.coroutine
def submit(self, message):
"""Submit a new message to publish to subscribers."""
yield self.WSmessages.put(message)
@gen.coroutine
def publishToWS(self):
while True:
message = yield self.WSmessages.get()
if len(self.subscribers) > 0:
print("Pushing MQ message {} to {} WS subscribers...".format(
message, len(self.subscribers)))
yield [subscriber.submit(message) for subscriber in self.subscribers]
@gen.coroutine
def publishToMQ(self):
ctx = zmq.Context()
cli = MQSyncReq(ctx)
pub = MQPub(ctx, 'admin')
while True:
message = yield self.MQmessages.get()
jsons = json.loads(message)
# req/rep
if 'mq_request' in jsons and 'data' in jsons:
msg = MQMessage()
msg.set_action(str(jsons['mq_request']))
msg.set_data(jsons['data'])
print("REQ : {0}".format(msg.get()))
if 'dst' in jsons:
print(cli.request(str(jsons['dst']), msg.get(), timeout=10).get())
else:
print(cli.request('manager', msg.get(), timeout=10).get())
# pub
elif 'mq_publish' in jsons and 'data' in jsons:
print("Publish : {0}".format(jsons['data']))
pub.send_event(jsons['mq_publish'],
jsons['data'])
Example 3: Room
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class Room(object):
def __init__(self, server, name):
self.server = server
self.name = name
self.clients = {}
self.lock = threading.RLock()
self.inqueue = Queue(maxsize=QUEUE_SIZE)
@coroutine
def dispatch(self):
logging.debug('Chatroom: %s opened' % self.name)
while True:
msg = yield self.inqueue.get()
logging.debug("Room got message: room[%s], command[%s], content[%s]",
msg.receiver, msg.command, msg.content)
if msg.command == COMMAND_JOIN:
logging.debug("%s joined", msg.sender.name)
self.clients[msg.sender.name] = msg.sender
elif msg.command == COMMAND_QUIT:
del self.clients[msg.sender.name]
yield self.broadcast(msg)
@coroutine
def broadcast(self, msg):
for _, client in self.clients.items():
yield client.inqueue.put(msg)
Example 4: request
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
def request(subscriber, request_type, tega_id, path, **kwargs):
'''
tega request/response service -- this method returns a generator
(tornado coroutine) to send a request to a remote tega db.
'''
global seq_no
seq_no += 1
if seq_no > 65535: # seq_no region: 0 - 65535.
seq_no = 0
subscriber.write_message('REQUEST {} {} {} {}\n{}'.format(
seq_no, request_type.name, tega_id, path, json.dumps(kwargs)))
queue = Queue(maxsize=1) # used like a synchronous queue
callback[seq_no] = queue # synchronous queue per request/response
try:
result = yield queue.get(timeout=timedelta(seconds=REQUEST_TIMEOUT))
return result
except gen.TimeoutError:
raise
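The request/response pattern above parks each pending request on a Queue(maxsize=1) stored in callback[seq_no]. As a hypothetical illustration (the on_response name and its arguments are assumed, not part of tega), the response side would complete the exchange roughly like this:

from tornado import gen


@gen.coroutine
def on_response(seq_no, result):
    # Hypothetical handler for an incoming reply frame (name and signature assumed).
    # Putting the result on the per-request queue unblocks the
    # `yield queue.get(timeout=...)` call inside request() above.
    queue = callback.pop(seq_no, None)  # `callback` is the dict from Example 4
    if queue is not None:
        yield queue.put(result)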
Example 5: _first_completed
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
def _first_completed(futures):
""" Return a single completed future
See Also:
_as_completed
"""
q = Queue()
yield _as_completed(futures, q)
result = yield q.get()
raise gen.Return(result)
Example 6: TornadoQuerierBase
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class TornadoQuerierBase(object):
def __init__(self):
self.tasks = TornadoQueue()
def gen_task(self):
raise NotImplementedError()
def run_task(self, task):
raise NotImplementedError()
def prepare(self):
self.running = True
def cleanup(self):
self.running = False
@coroutine
def run_worker(self, worker_id, f):
while self.tasks.qsize() > 0:
task = yield self.tasks.get()
LOG.debug('worker[%d]: current task is %s' % (worker_id, task))
try:
yield f(task)
pass
except Exception as e:
LOG.warning(str(e))
finally:
self.tasks.task_done()
task = None
LOG.debug('worker[%d]: all tasks done %s' % (worker_id, self.tasks))
@coroutine
def start(self, num_workers=1):
self.prepare()
# add tasks
tasks = yield self.gen_task()
for task in tasks:
yield self.tasks.put(task)
# start shoot workers
for worker_id in range(num_workers):
LOG.debug('starting worker %d' % worker_id)
self.run_worker(worker_id, self.run_task)
yield self.tasks.join()
self.cleanup()
Example 7: as_completed
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
def as_completed(fs):
if len(set(f.executor for f in fs)) == 1:
loop = first(fs).executor.loop
else:
# TODO: Groupby executor, spawn many _as_completed coroutines
raise NotImplementedError(
"as_completed on many event loops not yet supported")
from .compatibility import Queue
queue = Queue()
coroutine = lambda: _as_completed(fs, queue)
loop.add_callback(coroutine)
for i in range(len(fs)):
yield queue.get()
Example 8: TopicAppllication
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class TopicAppllication(tornado.web.Application):
def __init__(self):
handlers = [
url(r'/', MainHandler)
]
self.queue = Queue(maxsize=10)
super(TopicAppllication, self).__init__(handlers=handlers, debug=True)
@gen.coroutine
def consumer(self):
item = yield self.queue.get()
try:
print(item)
finally:
self.queue.task_done()
Example 9: CommandQueue
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class CommandQueue():
def __init__(self):
self.queue = Queue()
@gen.coroutine
def process_command(self):
while True:
item = yield self.queue.get()
try:
yield gen.sleep(0.1)
command, view = item
view.write_message({command[0]: command[1]})
finally:
self.queue.task_done()
def put(self, item):
self.queue.put(item)
Example 10: StreamClient
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class StreamClient(object):
MAX_SIZE = 60
def __init__(self, stream_id):
self.id = generate_id()
self.stream_id = stream_id
self.queue = Queue(StreamClient.MAX_SIZE)
@coroutine
def send(self, item):
yield self.queue.put(item)
@coroutine
def fetch(self):
item = yield self.queue.get()
self.queue.task_done()
return item
def empty(self):
return self.queue.qsize() == 0
Example 11: Subscription
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class Subscription(WebSocketHandler):
"""Websocket for subscribers."""
def initialize(self, publisher):
self.publisher = publisher
self.messages = Queue()
self.finished = False
def open(self):
print("New subscriber.")
self.publisher.register(self)
self.run()
def on_close(self):
self._close()
def _close(self):
print("Subscriber left.")
self.publisher.deregister(self)
self.finished = True
@gen.coroutine
def submit(self, message):
yield self.messages.put(message)
@gen.coroutine
def run(self):
""" Empty the queue of messages to send to the WS """
while not self.finished:
message = yield self.messages.get()
self.send(message)
def send(self, message):
try:
self.write_message(message)
except WebSocketClosedError:
self._close()
def on_message(self, content):
""" reciev message from websocket and send to MQ """
#print(u"WS to MQ: {0}".format(content))
self.publisher.MQmessages.put(content)
Example 12: Subscription
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class Subscription(WebSocketHandler):
"""Websocket for subscribers."""
def initialize(self, publisher):
self.publisher = publisher
self.messages = Queue()
self.finished = False
def open(self):
print("New subscriber.")
self.publisher.register(self)
self.run()
def on_close(self):
self._close()
def _close(self):
print("Subscriber left.")
self.publisher.deregister(self)
self.finished = True
@gen.coroutine
def submit(self, message):
yield self.messages.put(message)
@gen.coroutine
def run(self):
while not self.finished:
message = yield self.messages.get()
#print("New MQ message: " + str(message))
self.send(message)
def send(self, message):
try:
self.write_message(message)
except WebSocketClosedError:
self._close()
def on_message(self, content):
self.publisher.MQmessages.put(content)
Example 13: QueueStore
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class QueueStore(BaseStore):
"""Publish data via queues.
This class is meant to be used in cases where subscribers should
not miss any data. Compared to the :class:`DataStore` class, new
messages to be broadcast to clients are put in a queue to be
processed in order.
"""
def initialize(self):
self.messages = Queue()
self.publish()
@gen.coroutine
def submit(self, message):
yield self.messages.put(message)
@gen.coroutine
def publish(self):
while True:
message = yield self.messages.get()
if len(self.subscribers) > 0:
yield [subscriber.submit(message) for subscriber in self.subscribers]
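QueueStore.publish() above only requires each subscriber to expose a submit coroutine it can yield on. A minimal compatible subscriber could look like the following sketch; the class is an assumption for illustration, not the actual subscriber type from the library QueueStore belongs to.

from tornado import gen
from tornado.queues import Queue


class BufferingSubscriber(object):
    """Hypothetical subscriber: buffers published messages on its own queue."""
    def __init__(self):
        self.messages = Queue()

    @gen.coroutine
    def submit(self, message):
        # Called by QueueStore.publish() for every broadcast message.
        yield self.messages.put(message)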
Example 14: as_completed
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
def as_completed(fs):
""" Return futures in the order in which they complete
This returns an iterator that yields the input future objects in the order
in which they complete. Calling ``next`` on the iterator will block until
the next future completes, irrespective of order.
This function does not return futures in the order in which they are input.
"""
if len(set(f.executor for f in fs)) == 1:
loop = first(fs).executor.loop
else:
# TODO: Groupby executor, spawn many _as_completed coroutines
raise NotImplementedError(
"as_completed on many event loops not yet supported")
from .compatibility import Queue
queue = Queue()
coroutine = lambda: _as_completed(fs, queue)
loop.add_callback(coroutine)
for i in range(len(fs)):
yield queue.get()
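A typical way to consume as_completed above is to map work onto futures and iterate over them as they finish. The sketch below assumes the early dask.distributed Executor API; the scheduler address and the mapped function are illustrative assumptions.

from distributed import Executor

executor = Executor('127.0.0.1:8786')                # scheduler address is an assumption
futures = executor.map(lambda x: x ** 2, range(10))  # returns a list of futures
for future in as_completed(futures):                 # as_completed() defined above
    print(future.result())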
Example 15: ProjectGroomer
# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import get [as alias]
class ProjectGroomer(object):
""" Cleans up expired transactions for a project. """
def __init__(self, project_id, coordinator, zk_client, db_access,
thread_pool):
""" Creates a new ProjectGroomer.
Args:
project_id: A string specifying a project ID.
coordinator: A GroomingCoordinator.
zk_client: A KazooClient.
db_access: A DatastoreProxy.
thread_pool: A ThreadPoolExecutor.
"""
self.project_id = project_id
self._coordinator = coordinator
self._zk_client = zk_client
self._tornado_zk = TornadoKazoo(self._zk_client)
self._db_access = db_access
self._thread_pool = thread_pool
self._project_node = '/appscale/apps/{}'.format(self.project_id)
self._containers = []
self._inactive_containers = set()
self._batch_resolver = BatchResolver(self.project_id, self._db_access)
self._zk_client.ensure_path(self._project_node)
self._zk_client.ChildrenWatch(self._project_node, self._update_containers)
self._txid_manual_offset = 0
self._offset_node = '/'.join([self._project_node, OFFSET_NODE])
self._zk_client.DataWatch(self._offset_node, self._update_offset)
self._stop_event = AsyncEvent()
self._stopped_event = AsyncEvent()
# Keeps track of cleanup results for each round of grooming.
self._txids_cleaned = 0
self._oldest_valid_tx_time = None
self._worker_queue = AsyncQueue(maxsize=MAX_CONCURRENCY)
for _ in range(MAX_CONCURRENCY):
IOLoop.current().spawn_callback(self._worker)
IOLoop.current().spawn_callback(self.start)
@gen.coroutine
def start(self):
""" Starts the grooming process until the stop event is set. """
logger.info('Grooming {}'.format(self.project_id))
while True:
if self._stop_event.is_set():
break
try:
yield self._groom_project()
except Exception:
# Prevent the grooming loop from stopping if an error is encountered.
logger.exception(
'Unexpected error while grooming {}'.format(self.project_id))
yield gen.sleep(MAX_TX_DURATION)
self._stopped_event.set()
@gen.coroutine
def stop(self):
""" Stops the grooming process. """
logger.info('Stopping grooming process for {}'.format(self.project_id))
self._stop_event.set()
yield self._stopped_event.wait()
@gen.coroutine
def _worker(self):
""" Processes items in the worker queue. """
while True:
tx_path, composite_indexes = yield self._worker_queue.get()
try:
tx_time = yield self._resolve_txid(tx_path, composite_indexes)
if tx_time is None:
self._txids_cleaned += 1
if tx_time is not None and tx_time < self._oldest_valid_tx_time:
self._oldest_valid_tx_time = tx_time
finally:
self._worker_queue.task_done()
def _update_offset(self, new_offset, _):
""" Watches for updates to the manual offset node.
Args:
new_offset: A string specifying the new manual offset.
"""
self._txid_manual_offset = int(new_offset or 0)
def _update_containers(self, nodes):
""" Updates the list of active txid containers.
Args:
nodes: A list of strings specifying ZooKeeper nodes.
"""
counters = [int(node[len(CONTAINER_PREFIX):] or 1)
#......... (the rest of this code is omitted here) .........