This article collects typical usage examples of the Python method six.moves.queue.Queue.full. If you are unsure what Queue.full does or how to use it, the curated examples below should help; you can also explore other uses of the containing class, six.moves.queue.Queue.
Two code examples of Queue.full are shown below, ordered by popularity by default.
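For orientation before the examples: Queue.full() reports whether a bounded queue has reached its maxsize (for an unbounded queue, i.e. maxsize <= 0, it always returns False). The result is advisory; in multi-threaded code another thread can change the queue between the check and your next operation. A minimal sketch:

from six.moves.queue import Queue

q = Queue(maxsize=2)  # full() can only be True for a queue with maxsize > 0
q.put("a")
print(q.full())       # False: one of two slots used
q.put("b")
print(q.full())       # True: both slots are now used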
Example 1: _BatchWriter
# Required import: from six.moves.queue import Queue
# Method demonstrated: six.moves.queue.Queue.full
import time
from itertools import count

from six.moves.queue import Queue

# jsonencode and ValueTooLarge are helpers defined elsewhere in the
# library this class comes from; they are not reproduced here.


class _BatchWriter(object):
    #: Truncate overly big items to this many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self, url, start, auth, size, interval, qsize,
                 maxitemsize, content_encoding, uploader, callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            raise ValueTooLarge(
                'Value exceeds max encoded size of {} bytes: {!r}'
                .format(self.maxitemsize, truncated_data))
        self.itemsq.put(data)
        # Once the queue fills up, nudge the uploader to start draining it.
        if self.itemsq.full():
            self.uploader.interrupt()
        return next(self._nextid)

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
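The pattern to note is in write(): items are buffered in a bounded queue, and Queue.full() is used as a backpressure signal telling a background uploader to start draining. Below is a minimal, self-contained sketch of that pattern; the Drainer class is hypothetical, standing in for the library's uploader object, and only its interrupt() method mirrors the API actually used above.

import threading
from six.moves.queue import Queue

class Drainer(object):
    # Hypothetical stand-in for the uploader: wakes up when interrupted
    # and drains the queue, marking each item done so q.join() can return.
    def __init__(self, q):
        self.q = q
        self.wakeup = threading.Event()

    def interrupt(self):          # the only method _BatchWriter relies on
        self.wakeup.set()

    def run(self):
        while True:
            self.wakeup.wait()
            self.wakeup.clear()
            while not self.q.empty():
                item = self.q.get()
                # ... upload `item` here ...
                self.q.task_done()  # pairs with q.join() below

q = Queue(4)
drainer = Drainer(q)
t = threading.Thread(target=drainer.run)
t.daemon = True                   # py2-compatible way to daemonize
t.start()

for i in range(10):
    q.put(i)
    if q.full():                  # same check as _BatchWriter.write()
        drainer.interrupt()

drainer.interrupt()               # like _waitforq(): one final nudge...
q.join()                          # ...then wait until everything is drained

The real uploader presumably batches and retries uploads; the sketch only shows where Queue.full() fits into the control flow.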
Example 2: run_multiple_commands_redirect_stdout
# Required import: from six.moves.queue import Queue
# Method demonstrated: six.moves.queue.Queue.full
import logging
import time

from six.moves.queue import Queue

# AsyncProcess is a subprocess wrapper defined elsewhere in the library;
# as used below, it exposes args, poll(), wait() and a
# redirect_stdout_file attribute.


def run_multiple_commands_redirect_stdout(
        multiple_args_dict,
        print_commands=True,
        process_limit=0,
        polling_freq=1,
        **kwargs):
    """
    Run multiple shell commands in parallel and write the stdout of each
    to the file associated with that command.

    Parameters
    ----------
    multiple_args_dict : dict
        A dictionary whose keys are file objects and whose values are
        argument lists. Each argument list is run as a subprocess and its
        stdout is written to the corresponding file.

    print_commands : bool
        Print each shell command before running it.

    process_limit : int
        Limit the number of concurrent processes to this number;
        0 means no limit.

    polling_freq : int
        Number of seconds between checks for finished processes when a
        process limit is in effect.
    """
    assert len(multiple_args_dict) > 0
    assert all(len(args) > 0 for args in multiple_args_dict.values())
    assert all(hasattr(f, 'name') for f in multiple_args_dict.keys())

    start_time = time.time()
    # With process_limit == 0 the queue is unbounded and full() is always
    # False, so no throttling happens.
    processes = Queue(maxsize=process_limit)

    def add_to_queue(process):
        if print_commands:
            print(" ".join(process.args), ">",
                  process.redirect_stdout_file.name)
        processes.put(process)

    for f, args in multiple_args_dict.items():
        p = AsyncProcess(
            args,
            redirect_stdout_file=f,
            **kwargs)
        if not processes.full():
            add_to_queue(p)
        else:
            while processes.full():
                # Are there any finished processes?
                to_remove = []
                for possibly_done in processes.queue:
                    if possibly_done.poll() is not None:
                        possibly_done.wait()
                        to_remove.append(possibly_done)
                # Remove them from the queue and stop checking
                if to_remove:
                    for process_to_remove in to_remove:
                        processes.queue.remove(process_to_remove)
                    break
                # If nothing finished, check again after polling_freq seconds
                time.sleep(polling_freq)
            add_to_queue(p)

    # Wait for the remaining processes to finish
    while not processes.empty():
        processes.get().wait()

    elapsed_time = time.time() - start_time
    logging.info(
        "Ran %d commands in %0.4f seconds",
        len(multiple_args_dict),
        elapsed_time)
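Here the Queue is not used for inter-thread communication at all: it serves as a thread-safe bounded container, with full() acting as the admission gate for new processes and the underlying .queue deque inspected directly to poll for finished ones. A condensed, runnable sketch of the same gating pattern, using plain subprocess.Popen in place of the library's AsyncProcess wrapper (an assumption for illustration; the real class also handles stdout redirection):

import subprocess
import time

from six.moves.queue import Queue

commands = [["echo", str(i)] for i in range(8)]
running = Queue(maxsize=3)              # at most 3 concurrent processes

for args in commands:
    while running.full():               # the same gate as above
        for p in list(running.queue):   # peek at the underlying deque
            if p.poll() is not None:    # process finished?
                p.wait()
                running.queue.remove(p)
        if running.full():
            time.sleep(0.1)             # nothing finished yet; poll again
    running.put(subprocess.Popen(args))

while not running.empty():              # drain: wait for the stragglers
    running.get().wait()

Note that reaching into .queue bypasses the Queue's locking, which is safe here only because a single thread touches the queue; for genuinely concurrent code, a multiprocessing pool or concurrent.futures executor is the more conventional tool for this job.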