This article collects typical usage examples of the tornado.queues.Queue class in Python. If you are wondering what the Queue class is good for and how to use it, the curated examples below may help.
The following shows 15 code examples of the Queue class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
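Before diving into the examples, here is a minimal producer/consumer sketch modeled on the pattern in Tornado's own documentation (the item values and sleep duration are illustrative). It shows the core Queue API that the examples below rely on: put, get, task_done, and join.

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

q = Queue(maxsize=2)

@gen.coroutine
def consumer():
    while True:
        item = yield q.get()
        try:
            print('Doing work on %s' % item)
            yield gen.sleep(0.01)
        finally:
            q.task_done()

@gen.coroutine
def producer():
    for item in range(5):
        yield q.put(item)
        print('Put %s' % item)

@gen.coroutine
def main():
    IOLoop.current().spawn_callback(consumer)  # consumer loops forever
    yield producer()  # wait until every item has been put
    yield q.join()    # wait until every item has been processed
    print('Done')

IOLoop.current().run_sync(main)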
Example 1: __init__
def __init__(self, max_concurrent_batches=10, block_on_send=False,
             block_on_response=False, max_batch_size=100, send_frequency=0.25,
             user_agent_addition=''):
    if not has_tornado:
        raise ImportError('TornadoTransmission requires tornado, but it was not found.')

    self.block_on_send = block_on_send
    self.block_on_response = block_on_response
    self.max_batch_size = max_batch_size
    self.send_frequency = send_frequency

    user_agent = "libhoney-py/" + VERSION
    if user_agent_addition:
        user_agent += " " + user_agent_addition

    self.http_client = AsyncHTTPClient(
        force_instance=True,
        defaults=dict(user_agent=user_agent))

    # libhoney adds events to the pending queue for us to send
    self.pending = Queue(maxsize=1000)
    # we hand back responses from the API on the responses queue
    self.responses = Queue(maxsize=2000)

    self.batch_data = {}
    self.sd = statsd.StatsClient(prefix="libhoney")
    self.batch_sem = Semaphore(max_concurrent_batches)
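Example 1 bounds both queues (maxsize=1000 and maxsize=2000) so that a slow sender applies backpressure instead of growing memory without limit. A small sketch of what the bound means in practice (the maxsize here is illustrative, not from libhoney):

from tornado.queues import Queue, QueueFull

q = Queue(maxsize=2)
q.put_nowait('a')
q.put_nowait('b')
try:
    q.put_nowait('c')  # third item exceeds maxsize=2
except QueueFull:
    # a coroutine would instead `yield q.put('c')` to wait for space
    print('queue full')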
Example 2: __init__
def __init__(self, routes, node, pipe):
    """
    Application instantiates and registers handlers for each message type,
    and routes messages to the pre-instantiated instances of each message handler

    :param routes: list of tuples in the form of (<message type str>, <MessageHandler class>)
    :param node: Node instance of the local node
    :param pipe: Instance of multiprocessing.Pipe for communicating with the parent process
    """
    # We don't really have to worry about synchronization
    # so long as we're careful about explicit context switching
    self.nodes = {node.node_id: node}
    self.local_node = node
    self.handlers = {}
    self.tcpclient = TCPClient()
    self.gossip_inbox = Queue()
    self.gossip_outbox = Queue()
    self.sequence_number = 0
    if routes:
        self.add_handlers(routes)
    self.pipe = pipe
    self.ioloop = IOLoop.current()
    self.add_node_event = Event()
Example 3: f
def f(c, a, b):
    s = Scheduler((c.ip, c.port), loop=loop)
    yield s._sync_center()
    done = s.start()
    sched, report = Queue(), Queue()
    s.handle_queues(sched, report)
    msg = yield report.get()
    assert msg['op'] == 'stream-start'

    s.update_graph(dsk={'x': (inc, 1),
                        'y': (inc, 'x'),
                        'z': (inc, 'y')},
                   keys=['z'])

    progress = TextProgressBar(['z'], scheduler=s)
    progress.start()

    assert progress.all_keys == {'x', 'y', 'z'}
    assert progress.keys == {'x', 'y', 'z'}

    while True:
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    assert progress.keys == set()
    check_bar_completed(capsys)
    assert progress not in s.plugins

    sched.put_nowait({'op': 'close'})
    yield done
Example 4: __init__
def __init__(self):
    self.ctx = zmq.Context()
    self.WSmessages = Queue()
    self.MQmessages = Queue()
    self.sub = MQAsyncSub.__init__(self, self.ctx, 'admin', [])
    self.pub = MQPub(self.ctx, 'admin-ws')
    self.subscribers = set()
Example 5: f
def f(c, a, b):
    s = Scheduler((c.ip, c.port), loop=loop)
    yield s._sync_center()
    done = s.start()
    sched, report = Queue(), Queue()
    s.handle_queues(sched, report)
    msg = yield report.get()
    assert msg['op'] == 'stream-start'

    s.update_graph(dsk={'x-1': (inc, 1),
                        'x-2': (inc, 'x-1'),
                        'x-3': (inc, 'x-2'),
                        'y-1': (dec, 'x-3'),
                        'y-2': (dec, 'y-1'),
                        'e': (throws, 'y-2'),
                        'other': (inc, 123)},
                   keys=['e'])

    while True:
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y-2':
            break

    p = MultiProgressWidget(['x-1', 'x-2', 'x-3'], scheduler=s)
    assert set(concat(p.all_keys.values())).issuperset({'x-1', 'x-2', 'x-3'})
    assert 'x' in p.bars

    sched.put_nowait({'op': 'close'})
    yield done
Example 6: test_diagnostic
def test_diagnostic(s, a, b):
    sched, report = Queue(), Queue()
    s.handle_queues(sched, report)
    msg = yield report.get()
    assert msg['op'] == 'stream-start'

    class Counter(SchedulerPlugin):
        def start(self, scheduler):
            scheduler.add_plugin(self)
            self.count = 0

        def task_finished(self, scheduler, key, worker, nbytes):
            self.count += 1

    counter = Counter()
    counter.start(s)
    assert counter.count == 0

    sched.put_nowait({'op': 'update-graph',
                      'tasks': {'x': dumps_task((inc, 1)),
                                'y': dumps_task((inc, 'x')),
                                'z': dumps_task((inc, 'y'))},
                      'dependencies': {'y': ['x'], 'z': ['y']},
                      'keys': ['z']})

    while True:
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    assert counter.count == 3
Example 7: __init__
def __init__(self):
    self.connected = False
    self.connected_event = Event()
    self.disconnected_event = Event()
    self.presence_queue = Queue()
    self.message_queue = Queue()
    self.error_queue = Queue()
Example 8: __init__
def __init__(self, center, delete_batch_time=1):
    self.scheduler_queue = Queue()
    self.report_queue = Queue()
    self.delete_queue = Queue()
    self.status = None
    self.center = coerce_to_rpc(center)

    self.dask = dict()
    self.dependencies = dict()
    self.dependents = dict()
    self.generation = 0
    self.has_what = defaultdict(set)
    self.held_data = set()
    self.in_play = set()
    self.keyorder = dict()
    self.nbytes = dict()
    self.ncores = dict()
    self.processing = dict()
    self.restrictions = dict()
    self.stacks = dict()
    self.waiting = dict()
    self.waiting_data = dict()
    self.who_has = defaultdict(set)

    self.exceptions = dict()
    self.tracebacks = dict()
    self.exceptions_blame = dict()

    self.delete_batch_time = delete_batch_time
Example 9: __init__
def __init__(self, server, name, stream):
    self.server = server
    self.name = name
    self.rooms = {}
    self.stream = stream
    self.inqueue = Queue(maxsize=QUEUE_SIZE)
    self.outqueue = Queue(maxsize=QUEUE_SIZE)
Example 10: Publisher
class Publisher(MQAsyncSub):
    """Handles new data to be passed on to subscribers."""

    def __init__(self):
        self.WSmessages = Queue()
        self.MQmessages = Queue()
        self.sub = MQAsyncSub.__init__(self, zmq.Context(), 'admin', [])
        self.subscribers = set()

    def register(self, subscriber):
        """Register a new subscriber."""
        self.subscribers.add(subscriber)

    def deregister(self, subscriber):
        """Stop publishing to a subscriber."""
        self.subscribers.remove(subscriber)

    @gen.coroutine
    def on_message(self, did, msg):
        """Receive message from MQ sub and send to WS."""
        yield self.WSmessages.put({"msgid": did, "content": msg})

    @gen.coroutine
    def submit(self, message):
        """Submit a new message to publish to subscribers."""
        yield self.WSmessages.put(message)

    @gen.coroutine
    def publishToWS(self):
        while True:
            message = yield self.WSmessages.get()
            if len(self.subscribers) > 0:
                print("Pushing MQ message {} to {} WS subscribers...".format(
                    message, len(self.subscribers)))
                yield [subscriber.submit(message) for subscriber in self.subscribers]

    @gen.coroutine
    def publishToMQ(self):
        ctx = zmq.Context()
        cli = MQSyncReq(ctx)
        pub = MQPub(ctx, 'admin')
        while True:
            message = yield self.MQmessages.get()
            jsons = json.loads(message)
            # req/rep
            if 'mq_request' in jsons and 'data' in jsons:
                msg = MQMessage()
                msg.set_action(str(jsons['mq_request']))
                msg.set_data(jsons['data'])
                print("REQ : {0}".format(msg.get()))
                if 'dst' in jsons:
                    print(cli.request(str(jsons['dst']), msg.get(), timeout=10).get())
                else:
                    print(cli.request('manager', msg.get(), timeout=10).get())
            # pub
            elif 'mq_publish' in jsons and 'data' in jsons:
                print("Publish : {0}".format(jsons['data']))
                pub.send_event(jsons['mq_publish'],
                               jsons['data'])
Example 11: run
def run(args):
    if not args.test:
        ip_iter = _create_ip_iterator()
    else:
        ip_iter = _get_test_ips()

    good_ips = []
    job_queue = Queue(maxsize=200)
    start = time.time()
    counter = Counter()

    @gen.coroutine
    def job_producer():
        for ip in ip_iter:
            yield job_queue.put(ip)
            #print("Put {}".format(ip))

    @gen.coroutine
    def worker(id):
        while True:
            ip = yield job_queue.get()
            try:
                good = yield test_ip(ip)
                counter['all'] += 1
                if args.progress:
                    if counter['all'] % 10000 == 0:
                        print("Tested {} ips.".format(counter['all']))
                if good:
                    print("Found good ip: {}".format(ip))
                    counter['good'] += 1
                    if not args.test:
                        yield record_good_ip(ip)
                    else:
                        good_ips.append(ip)
            finally:
                job_queue.task_done()

    for i in range(CONCURRENCY):
        worker(i)

    _disable_logging()

    try:
        yield job_producer()
        yield job_queue.join()
    finally:
        print("\n\nTested: {} ips\nFound {} good ips\nQps: {}".format(
            counter['all'],
            counter['good'],
            counter['all'] / (time.time() - start)
        ))

    if args.test and args.remove:
        with open(GOOD_IP_FILE + '_removed', 'w') as f:
            f.write('|'.join(good_ips))
Example 12: _first_completed
def _first_completed(futures):
    """ Return a single completed future

    See Also:
        _as_completed
    """
    q = Queue()
    yield _as_completed(futures, q)
    result = yield q.get()
    raise gen.Return(result)
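Example 12 (from distributed) waits for whichever future finishes first by having a helper push completions onto a queue. A self-contained sketch of the same idea, where first_result and run_and_report are hypothetical names introduced here for illustration:

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

@gen.coroutine
def first_result(coroutine_funcs):
    q = Queue()

    @gen.coroutine
    def run_and_report(func):
        result = yield func()
        yield q.put(result)  # whichever finishes first lands here first

    for func in coroutine_funcs:
        IOLoop.current().spawn_callback(run_and_report, func)
    result = yield q.get()  # resolves on the first completion
    raise gen.Return(result)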
Example 13: __init__
def __init__(self, stream, interval):
    self.stream = stream
    self.interval = interval / 1000.0
    self.last_transmission = default_timer()
    self.send_q = Queue()
    self.recv_q = Queue()
    self._background_send_coroutine = self._background_send()
    self._background_recv_coroutine = self._background_recv()
    self._broken = None

    self.pc = PeriodicCallback(lambda: None, 100)
    self.pc.start()
Example 14: __init__
def __init__(self, center, start=True, delete_batch_time=1):
    self.center = coerce_to_rpc(center)
    self.futures = dict()
    self.refcount = defaultdict(lambda: 0)
    self.dask = dict()
    self.restrictions = dict()
    self.loop = IOLoop()
    self.report_queue = Queue()
    self.scheduler_queue = Queue()
    self._shutdown_event = Event()
    self._delete_batch_time = delete_batch_time

    if start:
        self.start()
Example 15: get_file_list
def get_file_list(account, **kwargs):
    queue = Queue()
    sem = BoundedSemaphore(FETCH_CONCURRENCY)
    done, working = set(), set()
    data = set()

    @gen.coroutine
    def fetch_url():
        current_url = yield queue.get()
        try:
            if current_url in working:
                return
            page_no = len(working)
            app_log.info("Fetching page {}".format(page_no))
            working.add(current_url)
            req = account.get_request(current_url)
            client = AsyncHTTPClient()
            response = yield client.fetch(req)
            done.add(current_url)
            app_log.info("Page {} downloaded".format(page_no))
            response_data = json.loads(response.body.decode('utf-8'))
            for file in response_data:
                # be sure we're a valid file type and less than our maximum response size limit
                extension = file['path'].lower().split('.')[-1]
                if extension in VALID_FILETYPES and int(file['bytes']) < RESPONSE_SIZE_LIMIT * 1000000:
                    data.add((file['path'].lstrip('/'), file['path'], ))
            app_log.info("Page {} completed".format(page_no))
        finally:
            queue.task_done()
            sem.release()

    @gen.coroutine
    def worker():
        while True:
            yield sem.acquire()
            fetch_url()

    app_log.info("Gathering filelist for account {}".format(account._id))
    for file_type in VALID_FILETYPES:
        file_type = '.'.join([file_type])
        url = "https://api.dropbox.com/1/search/auto/?query={}&include_membership=true".format(file_type)
        queue.put(url)

    # start our concurrency worker
    worker()

    # wait until we're done
    yield queue.join(timeout=timedelta(seconds=MAXIMUM_REQ_TIME))
    app_log.info("Finished list retrieval. Found {} items.".format(len(data)))
    return sorted([{"title": title, "value": path} for title, path in data], key=lambda f: f['title'])
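Example 15 passes a timeout to queue.join(): join() resolves once task_done() has been called for every item, and raises a timeout error if the deadline passes first. A minimal sketch of that behavior (the five-second deadline is illustrative; gen.TimeoutError is Tornado's alias for the timeout exception its queue operations raise):

from datetime import timedelta
from tornado import gen
from tornado.queues import Queue

@gen.coroutine
def wait_for_workers(q):
    try:
        # resolves once task_done() has matched every item put on the queue
        yield q.join(timeout=timedelta(seconds=5))
    except gen.TimeoutError:
        print('workers did not drain the queue within 5 seconds')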