This article collects typical usage examples of Python's queue.Full. If you are wondering what queue.Full does, how to use it, or want to see it in real-world code, the curated examples below may help. Note that queue.Full is the exception class raised by bounded queues and is defined in the queue module; you can also explore further examples from that module.
Below are 15 code examples of queue.Full, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
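Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing when queue.Full is raised: a bounded queue.Queue rejects put_nowait() immediately, and rejects a blocking put() once its timeout expires.

import queue

q = queue.Queue(maxsize=1)   # bounded queue with a single slot
q.put("first")               # fills the only slot

try:
    q.put_nowait("second")   # non-blocking put on a full queue
except queue.Full:
    print("dropped: queue is full")

try:
    q.put("second", timeout=0.1)  # blocking put gives up after 0.1 s
except queue.Full:
    print("still full after waiting 0.1 s")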
Example 1: _queue_connection
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def _queue_connection(self, cnx):
    """Put connection back in the queue

    This method puts a connection back in the queue. It does not acquire
    a lock, because the methods calling _queue_connection() already hold it.

    Raises PoolError on errors.
    """
    if not isinstance(cnx, MySQLConnection):
        raise errors.PoolError(
            "Connection instance not subclass of MySQLConnection.")
    try:
        self._cnx_queue.put(cnx, block=False)
    except queue.Full:
        raise errors.PoolError("Failed adding connection; queue is full")
Example 2: run
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def run(self) -> None:
    data = []
    current_idx = 0
    while True:
        while True:
            try:
                data.append(self.input_queue.get(timeout=0.1))
            except queue.Empty:
                continue
            except KeyboardInterrupt:
                return
            break
        data.sort(key=lambda item: item[0])
        while len(data) > 0 and data[0][0] <= current_idx:
            try:
                self.output_queue.put(data[0], timeout=0.1)
                self.output_queue.task_done()
                del data[0]
                current_idx += 1
            except queue.Full:
                continue
            except KeyboardInterrupt:
                return
Example 3: shutdown_worker
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def shutdown_worker(self):
    """Shutdown the workers."""
    for (q, w) in zip(self._to_worker, self._workers):
        # Loop until either the exit message is accepted or the process has
        # closed. These might cause us to block, but ensure that the
        # workers are closed.
        while True:
            try:
                # Set a timeout in case the child process crashed.
                q.put(('exit', ()), timeout=1)
                break
            except queue.Full:
                # If the child process has crashed, we're done here.
                # Otherwise it should eventually accept our message.
                if not w.is_alive():
                    break
        # If this hangs forever, most likely a queue needs
        # cancel_join_thread called on it, or a subprocess has tripped the
        # "closing dowel with TensorboardOutput blocks forever bug."
        w.join()
    for q in self._to_worker:
        q.close()
    self._to_sampler.close()
Example 4: serve
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def serve(self, socket):
    echo_queue = queue.Queue(2)
    echo_thread = threading.Thread(target=self.echo,
                                   args=(socket, echo_queue))
    echo_thread.start()
    peer = socket.getpeername()
    log.info("serving connection from sap {0}".format(peer))
    while socket.poll("recv"):
        data = socket.recv()
        if data is None:
            break
        log.info("rcvd {0} byte from sap {1}".format(len(data), peer))
        if echo_queue.full():
            socket.setsockopt(nfc.llcp.SO_RCVBSY, True)
        echo_queue.put(data)
    log.info("remote peer {0} closed connection".format(peer))
    try:
        echo_queue.put_nowait(int(0))
    except queue.Full:
        pass
    echo_thread.join()
    socket.close()
    log.info("serve thread terminated")
Example 5: add_event_to_queue
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def add_event_to_queue(self, event_type, event_data, player_state):
    """Adds an event to the queue of events to be processed"""
    videoid = common.VideoId.from_dict(event_data['videoid'])
    # pylint: disable=unused-variable
    previous_data, previous_player_state = self.cache_data_events.get(videoid.value, ({}, None))
    manifest = get_manifest(videoid)
    url = manifest['links']['events']['href']
    if previous_data.get('xid') in self.banned_events_ids:
        common.warn('EVENT [{}] - Not added to the queue. The xid {} is banned due to a previous failed request',
                    event_type, previous_data.get('xid'))
        return
    from resources.lib.services.msl.msl_request_builder import MSLRequestBuilder
    request_data = MSLRequestBuilder.build_request_data(url,
                                                        self._build_event_params(event_type,
                                                                                 event_data,
                                                                                 player_state,
                                                                                 manifest))
    try:
        self.queue_events.put_nowait(Event(request_data, event_data))
    except queue.Full:
        common.warn('EVENT [{}] - Not added to the queue. The event queue is full.', event_type)
Example 6: send
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def send(self, metric, value, timestamp=None, tags={}):
    """Send given metric and (int or float) value to Graphite host.

    Performs send on background thread if "interval" was specified when
    creating this Sender.

    If a "tags" dict is specified, send the tags to the Graphite host
    along with the metric.
    """
    if timestamp is None:
        timestamp = time.time()
    message = self.build_message(metric, value, timestamp, tags)
    if self.interval is None:
        self.send_socket(message)
    else:
        try:
            self._queue.put_nowait(message)
        except queue.Full:
            logger.error('queue full when sending {!r}'.format(message))
Example 7: preload_streamed_response_content_async
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def preload_streamed_response_content_async(requests_response_obj, buffer_queue):
    """
    In stream mode, pre-read the remote response's content.
    :param requests_response_obj:
    :type buffer_queue: queue.Queue
    """
    for particle_content in requests_response_obj.iter_content(stream_transfer_buffer_size):
        try:
            buffer_queue.put(particle_content, timeout=10)
        except queue.Full:  # coverage: exclude
            traceback.print_exc()
            exit()
        if verbose_level >= 3: dbgprint('BufferSize', buffer_queue.qsize())
    buffer_queue.put(None, timeout=10)
    exit()
Example 8: _proc_loop
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def _proc_loop(proc_id, alive, queue, fn):
    """
    Thread loop for generating data

    Parameters
    ----------
    proc_id: int
        Process id
    alive: multiprocessing.Value
        variable for signaling whether the process should continue or not
    queue: multiprocessing.Queue
        queue for passing data back
    fn: function
        function object that returns a sample to be pushed into the queue
    """
    print("proc {} started".format(proc_id))
    try:
        while alive.value:
            data = fn()
            put_success = False
            while alive.value and not put_success:
                try:
                    queue.put(data, timeout=0.5)
                    put_success = True
                except QFullExcept:
                    # print("Queue Full")
                    pass
    except KeyboardInterrupt:
        print("W: interrupt received, stopping process {} ...".format(proc_id))
    print("Closing process {}".format(proc_id))
    queue.close()
Example 9: add_nameserver
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def add_nameserver(self, nameserver):
    keep_trying = True
    while not self.time_to_die and keep_trying:
        try:
            self.resolver_q.put(nameserver, timeout=1)
            trace("Added nameserver:", nameserver)
            keep_trying = False
        except Exception as e:
            if type(e) == Queue.Full or str(type(e)) == "<class 'queue.Full'>":
                keep_trying = True
Example 10: _put_conn
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def _put_conn(self, conn):
    """
    Put a connection back into the pool.

    :param conn:
        Connection object for the current host and port as returned by
        :meth:`._new_conn` or :meth:`._get_conn`.

    If the pool is already full, the connection is closed and discarded
    because we exceeded maxsize. If connections are discarded frequently,
    then maxsize should be increased.

    If the pool is closed, then the connection will be closed and discarded.
    """
    try:
        self.pool.put(conn, block=False)
        return  # Everything is dandy, done.
    except AttributeError:
        # self.pool is None.
        pass
    except Full:
        # This should never happen if self.block == True
        log.warning(
            "Connection pool is full, discarding connection: %s" %
            self.host)
    # Connection never got put back into the pool, close it.
    if conn:
        conn.close()
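The pattern above (attempt a non-blocking put and close the resource if the pool rejects it) can be illustrated outside urllib3 with a plain bounded queue.Queue; the DummyConnection class below is hypothetical and only stands in for a real connection object.

import queue

class DummyConnection:
    """Hypothetical stand-in for a real connection object."""
    def close(self):
        print("connection closed")

pool = queue.Queue(maxsize=1)
pool.put(DummyConnection())      # pool is now at capacity

conn = DummyConnection()
try:
    pool.put(conn, block=False)  # same non-blocking put as _put_conn()
except queue.Full:
    # The pool already holds maxsize connections: discard the extra one.
    conn.close()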
Example 11: on_message
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def on_message(self, ws, message):
    message = json.loads(message)
    self.client.logger.debug(f"Received message: {message}")
    quote = None
    if message['event'] == 'phx_reply' and message['payload']['status'] == 'error':
        error = message['payload']['response']
        self.client.logger.error(f"Websocket ERROR: {error}")
    elif self.client.provider == IEX:
        if message['event'] == "quote":
            quote = message['payload']
    elif self.client.provider == QUODD:
        if message['event'] == 'info' and message['data']['message'] == 'Connected':
            self.client.on_connect()
        if message['event'] == 'quote' or message['event'] == 'trade':
            quote = message['data']
    elif self.client.provider == CRYPTOQUOTE:
        if message['event'] == 'book_update' or message['event'] == 'ticker' or message['event'] == 'trade':
            quote = message['payload']
    elif self.client.provider == FXCM:
        if message['event'] == 'price_update':
            quote = message['payload']
    if quote:
        try:
            self.client.quotes.put_nowait(quote)
        except queue.Full:
            self.client.on_queue_full()
Example 12: put
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def put(self, event):
    success = False
    try:
        self.__events_queue.put(event)
        success = True
    except Full:
        log.error("Memory storage is full!")
    return success
Example 13: _put_conn
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def _put_conn(self, conn):
    """
    Put a connection back into the pool.

    :param conn:
        Connection object for the current host and port as returned by
        :meth:`._new_conn` or :meth:`._get_conn`.

    If the pool is already full, the connection is closed and discarded
    because we exceeded maxsize. If connections are discarded frequently,
    then maxsize should be increased.

    If the pool is closed, then the connection will be closed and discarded.
    """
    try:
        self.pool.put(conn, block=False)
        return  # Everything is dandy, done.
    except AttributeError:
        # self.pool is None.
        pass
    except Full:
        # This should never happen if self.block == True
        log.warning(
            "Connection pool is full, discarding connection: %s",
            self.host)
    # Connection never got put back into the pool, close it.
    if conn:
        conn.close()
Example 14: put_edit
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def put_edit(self, f, *args, **kwds):
    """
    Defer an edit to run on the EditQueue.

    :param callable f: The function to be called
    :param tuple args: Positional arguments to the function
    :param dict kwds: Keyword arguments to the function
    :throws queue.Full: if the queue is full
    """
    self.put_nowait(functools.partial(f, *args, **kwds))
Example 15: blocking_put_response
# Required module: import queue [as alias]
# Or: from queue import Full [as alias]
def blocking_put_response(self, item):
    while self.run_flag.value == 1:
        try:
            self.normal_out_queue.put_nowait(item)
            return
        except queue.Full:
            self.log.warning("Response queue full (%s items). Sleeping", self.normal_out_queue.qsize())
            time.sleep(1)
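For comparison, here is a self-contained sketch of the same retry-until-accepted idea using threading primitives instead of the multiprocessing run_flag assumed above; all names in it are illustrative only.

import queue
import threading
import time

out_queue = queue.Queue(maxsize=1)
stop = threading.Event()

def blocking_put(item):
    # Keep retrying a non-blocking put until it succeeds or we are told to stop.
    while not stop.is_set():
        try:
            out_queue.put_nowait(item)
            return
        except queue.Full:
            time.sleep(0.1)   # queue full: back off briefly and retry

out_queue.put("fills the queue")
threading.Thread(target=blocking_put, args=("waiting item",)).start()
time.sleep(0.3)
print(out_queue.get())        # frees the slot; the retry loop then succeeds
print(out_queue.get())        # prints "waiting item"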