This page collects typical usage examples of the Queue.empty method from Python's six.moves.queue.Queue class. If you are unsure how to use Python's Queue.empty method, or are looking for real-world examples of it in practice, the curated code samples below may help. You can also explore further usage examples of the containing class, six.moves.queue.Queue.
The following presents 15 code examples of the Queue.empty method, sorted by popularity by default.
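Before the examples, here is a minimal orientation sketch (not taken from any of the examples below) showing the pattern most of them follow: one thread produces items into a six.moves.queue.Queue while another drains it, treating Queue.empty() only as a polling hint, since its result can become stale immediately after it returns.

import threading
import time

from six.moves.queue import Queue

q = Queue()

def producer():
    for i in range(5):
        q.put(i)
        time.sleep(0.01)

t = threading.Thread(target=producer)
t.start()

# Queue.empty() is only a snapshot of the queue's state at the moment of the
# call, so it is used here as a hint for polling, not as a guarantee.
while t.is_alive() or not q.empty():
    if not q.empty():
        print(q.get())
    else:
        time.sleep(0.01)

t.join()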
Example 1: make_buffer_for_iterator_with_thread
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
def make_buffer_for_iterator_with_thread(gen, n_workers, buffer_size):
    wait_time = 0.02
    generator_queue = Queue()
    _stop = threading.Event()

    def generator_task():
        while not _stop.is_set():
            try:
                if generator_queue.qsize() < buffer_size:
                    generator_output = next(gen)
                    generator_queue.put(generator_output)
                else:
                    time.sleep(wait_time)
            except (StopIteration, KeyboardInterrupt):
                _stop.set()
                return

    generator_threads = [threading.Thread(target=generator_task) for _ in range(n_workers)]
    for thread in generator_threads:
        thread.start()

    while not _stop.is_set() or not generator_queue.empty():
        if not generator_queue.empty():
            yield generator_queue.get()
        else:
            time.sleep(wait_time)
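A hypothetical way to drive this helper; slow_numbers and the parameter values are invented for illustration, and time is assumed to be imported as in the snippet above:

# Hypothetical usage of the helper above (illustrative only).
def slow_numbers():
    for i in range(10):
        time.sleep(0.05)  # pretend each item is expensive to produce
        yield i

buffered = make_buffer_for_iterator_with_thread(slow_numbers(), n_workers=1, buffer_size=4)
for item in buffered:
    print(item)

Note that with n_workers greater than 1, several worker threads call next() on the same generator; CPython raises "generator already executing" if those calls overlap, so a single worker is the safer choice unless gen is thread-safe.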
Example 2: generator_to_async_generator
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
def generator_to_async_generator(get_iterable):
    """
    Turn a generator or iterable into an async generator.

    This works by running the generator in a background thread.

    The new async generator will yield both `Future` objects as well
    as the original items.

    :param get_iterable: Function that returns a generator or iterable when
        called.
    """
    q = Queue()
    f = Future()
    l = RLock()
    quitting = False

    def runner():
        """
        Consume the generator in background thread.
        When items are received, they'll be pushed to the queue and the
        Future is set.
        """
        for item in get_iterable():
            with l:
                q.put(item)
                if not f.done():
                    f.set_result(None)

            # When this async generator was cancelled (closed), stop this
            # thread.
            if quitting:
                break

        with l:
            if not f.done():
                f.set_result(None)

    # Start background thread.
    done_f = run_in_executor(runner, _daemon=True)

    try:
        while not done_f.done():
            # Wait for next item(s): yield Future.
            yield From(f)

            # Items received. Yield all items so far.
            with l:
                while not q.empty():
                    yield AsyncGeneratorItem(q.get())

                f = Future()

        # Yield final items.
        while not q.empty():
            yield q.get()
    finally:
        # When this async generator is closed (GeneratorExit exception),
        # stop the background thread as well; we don't need it anymore.
        quitting = True
Example 3: _SBPQueueIterator
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
class _SBPQueueIterator(six.Iterator):
    """
    Class for upstream iterators. Implements callable interface for adding
    messages into the queue, and iterable interface for getting them out.
    """

    def __init__(self, maxsize):
        self._queue = Queue(maxsize)
        self._broken = False

    def __iter__(self):
        return self

    def __call__(self, msg, **metadata):
        self._queue.put((msg, metadata), False)

    def breakiter(self):
        self._broken = True
        self._queue.put(None, True, 1.0)

    def __next__(self):
        if self._broken and self._queue.empty():
            raise StopIteration
        m = self._queue.get(True)
        if self._broken and m is None:
            raise StopIteration
        return m
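A minimal, single-threaded sketch of how this iterator behaves; in libsbp it would normally be fed from a driver callback on another thread, and the message strings below are made up for illustration:

it = _SBPQueueIterator(maxsize=10)

# The callable interface enqueues (msg, metadata) tuples...
it("heartbeat", sender=1234)
it("position", sender=1234)
# ...and breakiter() marks the stream as finished.
it.breakiter()

for msg, metadata in it:
    print(msg, metadata)  # prints: heartbeat {'sender': 1234}, then position {'sender': 1234}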
Example 4: Search
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
class Search(TracePosterior):
    """
    Trace and Poutine-based implementation of systematic search.

    :param callable model: Probabilistic model defined as a function.
    :param int max_tries: The maximum number of times to try completing a trace from the queue.
    """

    def __init__(self, model, max_tries=1e6):
        """
        Constructor. Default `max_tries` to something sensible - 1e6.

        :param callable model: Probabilistic model defined as a function.
        :param int max_tries: The maximum number of times to try completing a trace from the queue.
        """
        self.model = model
        self.max_tries = int(max_tries)

    def _traces(self, *args, **kwargs):
        """
        Algorithm entry point: running until the queue is empty and collecting
        the marginal histogram amounts to performing exact inference.

        :returns: Iterator of traces from the posterior.
        :rtype: Generator[:class:`pyro.Trace`]
        """
        # Currently only using the standard library queue.
        self.queue = Queue()
        self.queue.put(poutine.Trace())
        p = poutine.trace(
            poutine.queue(self.model, queue=self.queue, max_tries=self.max_tries))
        while not self.queue.empty():
            tr = p.get_trace(*args, **kwargs)
            yield (tr, tr.log_pdf())
Example 5: USBFtdiInterface
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
class USBFtdiInterface(USBInterface):
    name = 'FtdiInterface'

    def __init__(self, app, phy, interface_number):
        super(USBFtdiInterface, self).__init__(
            app=app,
            phy=phy,
            interface_number=interface_number,
            interface_alternate=0,
            interface_class=USBClass.VendorSpecific,
            interface_subclass=0xff,
            interface_protocol=0xff,
            interface_string_index=0,
            endpoints=[
                USBEndpoint(
                    app=app,
                    phy=phy,
                    number=1,
                    direction=USBEndpoint.direction_out,
                    transfer_type=USBEndpoint.transfer_type_bulk,
                    sync_type=USBEndpoint.sync_type_none,
                    usage_type=USBEndpoint.usage_type_data,
                    max_packet_size=0x40,
                    interval=0,
                    handler=self.handle_data_available
                ),
                USBEndpoint(
                    app=app,
                    phy=phy,
                    number=3,
                    direction=USBEndpoint.direction_in,
                    transfer_type=USBEndpoint.transfer_type_bulk,
                    sync_type=USBEndpoint.sync_type_none,
                    usage_type=USBEndpoint.usage_type_data,
                    max_packet_size=0x40,
                    interval=0,
                    handler=self.handle_ep3_buffer_available  # at this point, we don't send data to the host
                )
            ],
        )
        self.txq = Queue()

    def handle_data_available(self, data):
        self.debug('received string (%d): %s' % (len(data), data))
        reply = b'\x01\x00' + data
        self.txq.put(reply)

    def handle_ep3_buffer_available(self):
        if not self.txq.empty():
            self.send_on_endpoint(3, self.txq.get())
Example 6: _handle_messages_threaded
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
def _handle_messages_threaded(self):
    # Handles messages in a threaded fashion.
    queue = Queue()

    def producer_loop():
        # Read messages from file, and queue them for execution.
        for msg in self._read_next_message():
            queue.put(msg)
            # Check if an error occurred.
            if self._done:
                break
        # Wait until the queue empties out to signal completion from the
        # producer's side.
        if not self._done:
            queue.join()
            self._done = True

    producer = Thread(name="Producer", target=producer_loop)
    # @note Previously, when trying to do `queue.clear()` in the consumer,
    # and `queue.join()` in the producer, there would be intermittent
    # deadlocks. By demoting the producer to a daemon, I (eric.c) have not
    # yet encountered a deadlock.
    producer.daemon = True
    producer.start()

    # Consume.
    # TODO(eric.cousineau): Trying to quit via Ctrl+C is awkward (but kinda
    # works). Is there a way to have `plt.pause` handle Ctrl+C differently?
    try:
        pause = self.scope_globals['pause']
        while not self._done:
            # Process messages.
            while not queue.empty():
                msg = queue.get()
                queue.task_done()
                self._execute_message(msg)
            # Spin busy for a bit, let matplotlib (or whatever) flush its
            # event queue.
            pause(0.01)
    except KeyboardInterrupt:
        # User pressed Ctrl+C.
        self._done = True
        print("Quitting")
    except Exception as e:
        # We encountered an error, and must stop.
        self._done = True
        self._had_error = True
        traceback.print_exc(file=sys.stderr)
        sys.stderr.write(" Stopping (--stop_on_error)\n")
Example 7: test_thread_safe_object_creation
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
def test_thread_safe_object_creation(c):
    """
    If two threads try to fetch the object at the same time,
    only one instance should be created.

    This also tests assigning an existing function as a service.
    """
    cin = Queue()
    cout = Queue()

    def test_factory(username, password):
        cout.put("ready")
        cin.get()
        res = libtest.sample.Foo(username, password)
        cout.put("done")
        return res

    c['test_factory'] = test_factory
    c.load_yaml("""
    a:
        :: <test_factory>
        username: abc
        password: xyz
    """)

    def run(q):
        q.put("starting")
        q.put(c['a'])

    q1 = Queue()
    t1 = Thread(target=run, kwargs={"q": q1})
    t1.start()
    assert cout.get(True, 2) == "ready"
    assert q1.get(True, 2) == "starting"
    # Now t1 is waiting inside factory method

    q2 = Queue()
    t2 = Thread(target=run, kwargs={"q": q2})
    t2.start()
    assert q2.get(True, 2) == "starting"

    cin.put("go")
    assert cout.get(True, 2) == "done"
    t1.join(2)
    t2.join(2)
    assert cout.empty()

    res1 = q1.get(True, 2)
    res2 = q2.get(True, 2)
    # This also implies that test_factory was only called once
    # because otherwise t2 would hang waiting on cin
    assert isinstance(res1, libtest.sample.Foo)
    assert res1 is res2
Example 8: QueueHandlerMixedTest
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
class QueueHandlerMixedTest(TestCase):

    def setUp(self):
        # Simple model with 1 continuous + 1 discrete + 1 continuous variable.
        def model():
            p = torch.tensor([0.5])
            loc = torch.zeros(1)
            scale = torch.ones(1)

            x = pyro.sample("x", Normal(loc, scale))  # Before the discrete variable.
            y = pyro.sample("y", Bernoulli(p))
            z = pyro.sample("z", Normal(loc, scale))  # After the discrete variable.
            return dict(x=x, y=y, z=z)

        self.sites = ["x", "y", "z", "_INPUT", "_RETURN"]
        self.model = model
        self.queue = Queue()
        self.queue.put(poutine.Trace())

    def test_queue_single(self):
        f = poutine.trace(poutine.queue(self.model, queue=self.queue))
        tr = f.get_trace()
        for name in self.sites:
            assert name in tr

    def test_queue_enumerate(self):
        f = poutine.trace(poutine.queue(self.model, queue=self.queue))
        trs = []
        while not self.queue.empty():
            trs.append(f.get_trace())
        assert len(trs) == 2

        values = [
            {name: tr.nodes[name]['value'].view(-1).item() for name in tr.nodes.keys()
             if tr.nodes[name]['type'] == 'sample'}
            for tr in trs
        ]

        expected_ys = set([0, 1])
        actual_ys = set([value["y"] for value in values])
        assert actual_ys == expected_ys

        # Check that x was sampled the same on each path.
        assert values[0]["x"] == values[1]["x"]

        # Check that z was sampled differently on each path.
        assert values[0]["z"] != values[1]["z"]  # Almost surely true.
Example 9: AudioStreaming
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
class AudioStreaming(object):

    def __init__(self, app, phy, tx_ep, rx_ep):
        self.app = app
        self.phy = phy
        self.tx_ep = tx_ep
        self.rx_ep = rx_ep
        self.txq = Queue()

    def buffer_available(self):
        if self.txq.empty():
            self.phy.send_on_endpoint(self.tx_ep, b'\x00\x00\x00\x00\x00\x00\x00\x00')
        else:
            self.phy.send_on_endpoint(self.tx_ep, self.txq.get())

    def data_available(self, data):
        self.app.logger.info('[AudioStreaming] Got %#x bytes on streaming endpoint' % (len(data)))
Example 10: _build_droot_impact
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
def _build_droot_impact(destroy_handler):
    droot = {}   # destroyed view + nonview variables -> foundation
    impact = {}  # destroyed nonview variable -> it + all views of it
    root_destroyer = {}  # root -> destroyer apply

    for app in destroy_handler.destroyers:
        for output_idx, input_idx_list in app.op.destroy_map.items():
            if len(input_idx_list) != 1:
                raise NotImplementedError()
            input_idx = input_idx_list[0]
            input = app.inputs[input_idx]

            # Find non-view variable which is ultimately viewed by input.
            view_i = destroy_handler.view_i
            _r = input
            while _r is not None:
                r = _r
                _r = view_i.get(r)
            input_root = r

            if input_root in droot:
                raise InconsistencyError(
                    "Multiple destroyers of %s" % input_root)
            droot[input_root] = input_root
            root_destroyer[input_root] = app

            # The code here adds all the variables that are views of r into
            # an OrderedSet input_impact.
            input_impact = OrderedSet()

            queue = Queue()
            queue.put(input_root)
            while not queue.empty():
                v = queue.get()
                for n in destroy_handler.view_o.get(v, []):
                    input_impact.add(n)
                    queue.put(n)

            for v in input_impact:
                assert v not in droot
                droot[v] = input_root

            impact[input_root] = input_impact
            impact[input_root].add(input_root)

    return droot, impact, root_destroyer
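The inner loop above is a plain breadth-first traversal driven by queue.empty(). Stripped of the Theano-specific bookkeeping, the pattern looks roughly like the sketch below, where view_o is a made-up adjacency dict standing in for destroy_handler.view_o:

from six.moves.queue import Queue

# Hypothetical view graph: each variable maps to the views derived from it.
view_o = {'root': ['a', 'b'], 'a': ['c'], 'b': [], 'c': []}

reachable = set()
queue = Queue()
queue.put('root')
while not queue.empty():
    v = queue.get()
    for n in view_o.get(v, []):
        reachable.add(n)
        queue.put(n)

print(reachable)  # {'a', 'b', 'c'}

Because this traversal runs in a single thread, Queue.empty() is a reliable termination test here, unlike in the producer/consumer examples where it is only a hint.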
Example 11: InEpThread
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
class InEpThread(EndpointThread):

    def __init__(self, phy, ep):
        super(InEpThread, self).__init__(phy, ep)
        self.queue = Queue()

    def send(self, data):
        self.queue.put(data)

    def handling_write(self):
        return not self.queue.empty()

    def io_op(self):
        '''
        Fetch data from send queue and write to endpoint
        '''
        try:
            data = self.queue.get(True, 0.1)
            os.write(self.ep.fd, data)
        except Empty:
            pass
Example 12: AmqpSubscriber
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
class AmqpSubscriber(Subscriber):

    def __init__(self, amqp_chan, exchanges):
        self.channel = amqp_chan
        self.messages = Queue(maxsize=0)
        qname, _, _ = self.channel.queue_declare()
        for exchange in exchanges:
            self.channel.queue_bind(qname, exchange)
        self.channel.basic_consume(queue=qname, callback=self.callback)

    def callback(self, msg):
        self.channel.basic_ack(msg.delivery_tag)
        self.messages.put_nowait(msg.body)

    def __iter__(self):
        return self

    def next(self):
        while self.messages.empty():
            self.channel.wait()
        return self.messages.get_nowait()

    __next__ = next  # PY3
Example 13: hangwatch
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
def hangwatch(timeout, func, *args, **kwargs):
    def target(queue):
        try:
            func(*args, **kwargs)
        except Exception as e:
            queue.put(sys.exc_info())
            queue.put(e)
            sys.exit()

    q = Queue()
    thread = threading.Thread(target=target, args=(q,))
    thread.start()
    thread.join(timeout)

    if thread.is_alive():
        raise RuntimeError('Operation did not terminate within {} seconds'
                           .format(timeout))

    if not q.empty():
        info = q.get(block=False)
        e = q.get(block=False)
        eprint(''.join(traceback.format_exception(*info)))
        raise e
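A usage sketch for the helper above; time.sleep and the timeout values are chosen purely for illustration, and neither call exercises the exception path (which additionally relies on an eprint helper defined elsewhere in the original module):

import time

# Completes normally: the wrapped call finishes well within the timeout.
hangwatch(5, time.sleep, 0.1)

# Raises RuntimeError: the wrapped call is still running after 1 second.
# (The worker thread itself keeps running in the background, since the
# helper does not kill it.)
hangwatch(1, time.sleep, 10)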
Example 14: Subscriber
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
# ......... some code omitted here .........
                self.event_handler_thread.exit()
        try:
            self._ws = create_connection(self._ws_url, sslopt=sslopt, **kwargs)
            if not self._ws.connected:
                logging.error('Unable to open websocket connection')
            self.event_handler_thread = EventHandler(self)
            self.event_handler_thread.daemon = True
            self.event_handler_thread.start()
        except WebSocketException:
            logging.error('Unable to open websocket connection due to WebSocketException')
        except socket.error:
            logging.error('Unable to open websocket connection due to Socket Error')

    def _resubscribe(self):
        """
        Reissue the subscriptions.
        Used when the APIC login timeout occurs and a new subscription
        must be issued instead of simply a refresh. Not meant to be called
        directly by end user applications.
        """
        self._process_event_q()
        urls = []
        for url in self._subscriptions:
            urls.append(url)
        self._subscriptions = {}
        for url in urls:
            self.subscribe(url, only_new=True)

    def _process_event_q(self):
        """
        Put the event into correct bucket based on URLs that have been
        subscribed.
        """
        if self._event_q.empty():
            return

        while not self._event_q.empty():
            event = self._event_q.get()
            orig_event = event
            try:
                event = json.loads(event)
            except ValueError:
                logging.error('Non-JSON event: %s', orig_event)
                continue
            # Find the URL for this event
            num_subscriptions = len(event['subscriptionId'])
            for i in range(0, num_subscriptions):
                url = None
                for k in self._subscriptions:
                    if self._subscriptions[k] == str(event['subscriptionId'][i]):
                        url = k
                        break
                if url not in self._events:
                    self._events[url] = []
                self._events[url].append(event)
                if num_subscriptions > 1:
                    event = copy.deepcopy(event)

    def subscribe(self, url, only_new=False):
        """
        Subscribe to a particular APIC URL. Used internally by the
        Class and Instance subscriptions.

        :param url: URL string to send as a subscription
        """
        logging.info('Subscribing to url: %s', url)
Example 15: __init__
# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import empty [as alias]
# ......... some code omitted here .........
            return False

        self.__resizeLock.acquire()
        try:
            self.__setThreadCountNolock(newNumThreads)
        finally:
            self.__resizeLock.release()
        return True

    def __setThreadCountNolock(self, newNumThreads):
        """Set the current pool size, spawning or terminating threads
        if necessary. Internal use only; assumes the resizing lock is
        held."""
        # If we need to grow the pool, do so
        while newNumThreads > len(self.__threads):
            name = "%s/%d" % (self._name, next(self._count))
            newThread = WorkerThread(self, name)
            self.__threads.append(newThread)
            newThread.start()
        # If we need to shrink the pool, do so
        while newNumThreads < len(self.__threads):
            self.__threads[0].goAway()
            del self.__threads[0]

    def getThreadCount(self):
        """Return the number of threads in the pool."""
        self.__resizeLock.acquire()
        try:
            return len(self.__threads)
        finally:
            self.__resizeLock.release()

    def queueTask(self, id, task, args=None, taskCallback=None):
        """Insert a task into the queue. task must be callable;
        args and taskCallback can be None."""
        if self.__isJoining:
            return False
        if not callable(task):
            return False
        self.__tasks.put((id, task, args, taskCallback))
        return True

    def getNextTask(self):
        """Retrieve the next task from the task queue. For use
        only by WorkerThread objects contained in the pool."""
        id = None
        cmd = None
        args = None
        callback = None
        try:
            id, cmd, args, callback = self.__tasks.get(True, self.__waitTimeout)
        except Empty:
            pass
        return id, cmd, args, callback

    def stopThread(self):
        return self.__tasks.put((None, None, None, None))

    def joinAll(self, waitForTasks=True, waitForThreads=True):
        """Clear the task queue and terminate all pooled threads,
        optionally allowing the tasks and threads to finish."""
        # Mark the pool as joining to prevent any more task queuing
        self.__isJoining = True

        # Wait for tasks to finish
        if waitForTasks:
            while not self.__tasks.empty():
                sleep(0.1)

        # Tell all the threads to quit
        self.__resizeLock.acquire()
        try:
            # Wait until all threads have exited
            if waitForThreads:
                for t in self.__threads:
                    t.goAway()
                for t in self.__threads:
                    t.join()
                    # print t, "joined"
                    del t
            self.__setThreadCountNolock(0)
            self.__isJoining = True

            # Reset the pool for potential reuse
            self.__isJoining = False
        finally:
            self.__resizeLock.release()