This article collects typical usage examples of the Python method multiprocessing.Queue.empty. If you have been wondering what exactly Queue.empty does, how to call it, or what it looks like in real-world code, the hand-picked examples below should help. You can also read further about the class it belongs to, multiprocessing.Queue.
The 15 code examples of Queue.empty below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
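Before diving into the examples, a minimal self-contained sketch of the method may help (the producer function and item values are illustrative only). Note that the standard library documents empty() as unreliable under concurrency, since items put by another process may not be visible yet; several examples below work around exactly that.

from multiprocessing import Process, Queue

def producer(q):
    for i in range(3):
        q.put(i)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=producer, args=(q,))
    p.start()
    p.join()              # wait for the producer so empty() reflects a settled state
    while not q.empty():  # a hint, not a guarantee, if producers may still be running
        print(q.get())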
Example 1: uploadBatch
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def uploadBatch(self):
    numUploads = 0
    time.sleep(1)
    if not self.queue.empty():
        managedQueue = Queue()
        failQueue = Queue()
        while not self.queue.empty():
            managedQueue.put(self.queue.get())
            numUploads = numUploads + 1
        time.sleep(1)
        print "Starting Batch of " + str(numUploads) + " images."
        numWorkers = 0
        for workerCount in range(4):
            UploadWorker(managedQueue, failQueue, self.myClient).start()
            numWorkers = workerCount + 1
        print str(numWorkers) + " workers started."
        for workerCount in range(4):
            managedQueue.put(None)  # one sentinel per worker
        #self.uploadFile(self.dequeue())
        time.sleep(Utility.POLL_TIME)  # The .empty() method is instantaneously unreliable after emptying a queue
        while not managedQueue.empty():
            time.sleep(Utility.POLL_TIME)
            print "Waiting for uploads to finish..."
        print str(numWorkers) + " workers ended."
        while not failQueue.empty():
            print "Failed image upload: " + failQueue.get()
        print "Batch Upload finished..."
    else:
        print "Queue currently empty, canceling upload."
Example 2: CommunicationQueues
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
class CommunicationQueues(object):
    """Queues to handle communication between the threads.
    On the bc side, this is also a logging handler sending
    log messages to the node side."""

    def __init__(self):
        self.bc_to_node = Queue()
        self.node_to_bc = Queue()

    def set(self, bc_to_node=None, node_to_bc=None, queues=None):
        if bc_to_node:
            self.bc_to_node = bc_to_node
            return
        if node_to_bc:
            self.node_to_bc = node_to_bc
            return
        assert queues.bc_to_node
        self.bc_to_node = queues.bc_to_node
        assert queues.node_to_bc
        self.node_to_bc = queues.node_to_bc

    def empty_queues(self):
        print "Emptying queues:"
        while not self.bc_to_node.empty():
            print "BC to node:", self.bc_to_node.get()
        while not self.node_to_bc.empty():
            print "Node to BC:", self.node_to_bc.get()
        print "Emptying queues done."

    def get_handler(self):
        return _CommQueueHandler(self.bc_to_node)
Example 3: run
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def run():
    #global admRunPath
    #print vars()
    #if not 'admRunPath' in vars():
    #    admRunPath = '.'
    #else:
    #    print admRunPath
    myTest = TestClass('setUpClass')
    results = []
    noresults = True
    try:
        myTest._setUp()
        total = getattr(myTest, 'total', 0)
        score = getattr(myTest, 'score', 0)
        yield ["<test>" + json.dumps(["setup", "setup", score, "The test environment has been set up."]), total]
    except Exception:
        try:
            myTest.setUp()
            total = getattr(myTest, 'total', 0)
            score = getattr(myTest, 'score', 0)
            yield ["<test>" + json.dumps(["setup", "setup", score, "The test environment has been set up."]), total]
        except Exception:
            pass
    myScore = Queue()
    for name in dir(TestClass):
        if not (name[:1] == '_' or name in dir(unittest.TestCase)):
            attr = getattr(myTest, name)
            if callable(attr):
                import os
                getBack = os.getcwd()
                os.chdir(admRunPath)
                myQ = Queue()
                myProcess = Process(target=runTest, args=(myTest, name, attr, myQ))
                myProcess.start()
                count = 0
                while myQ.empty() and count < 50:
                    time.sleep(0.05)
                    count += 1
                time.sleep(0.1)
                if myQ.empty():
                    myProcess.terminate()
                    yield ["<test>" + json.dumps([name, "time", 0, "It took more than 2.5 seconds to execute this test \\n" + (attr.__doc__ or 'No suggestion')]), 2500]
                    noresults = False
                else:
                    yield [myQ.get(False), count * 50]
                    noresults = False
                os.chdir(getBack)
    if noresults:
        yield ["<test>" + json.dumps(["no test", "testless", 0, "There are no results. Are there no tests?"]), 0]
    try:
        myTest.tearDown()
        yield ["<test>" + json.dumps(["teardown", "teardown", 0, "The test environment has been torn down."]), total]
    except Exception:
        pass
Example 4: general_multiproc_fitting
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def general_multiproc_fitting(run_wrapper, *args):
    result_queue = Queue()
    update_queue = Queue()
    workers_num = get_max_workers()
    optimizers = [
        Process(target=run_wrapper, args=(result_queue, update_queue) + args)
        for _ in range(workers_num)]
    for optimizer in optimizers:
        optimizer.start()
    optimizers_left = workers_num
    results = []
    while optimizers_left > 0:
        time.sleep(0.1)
        if not update_queue.empty() and callback_progress is not None:
            callback_progress(update_queue.get())
        if not result_queue.empty():
            results.append(result_queue.get())
            optimizers_left -= 1
    for optimizer in optimizers:
        optimizer.join()
    sorted_results = sorted(results, key=lambda x: x[2])
    logger.debug(str(sorted_results[0]))
    return sorted_results[0][1], sorted_results[0][2]
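The function above implies a contract for run_wrapper that is easy to miss: each worker should post progress values on update_queue and exactly one final tuple on result_queue, whose element at index 2 is the score the results are sorted by. A hypothetical worker honoring that contract might look like this (fit_once is a placeholder, not part of the original code):

def example_run_wrapper(result_queue, update_queue, *args):
    # Hypothetical worker: stream progress, then post one final result tuple.
    for step in range(10):
        update_queue.put(step)  # picked up by callback_progress in the loop above
    params, score = fit_once(*args)  # placeholder for the actual fitting routine
    result_queue.put(('worker', params, score))  # score at index 2 drives the sort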
Example 5: test_inceptionresnetv2_notop
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def test_inceptionresnetv2_notop():
    def target(queue):
        model = applications.InceptionResNetV2(weights=None, include_top=False)
        queue.put(model.output_shape)

    global_image_data_format = K.image_data_format()
    queue = Queue()

    K.set_image_data_format('channels_first')
    p = Process(target=target, args=(queue,))
    p.start()
    p.join()
    K.set_image_data_format(global_image_data_format)
    assert not queue.empty(), 'Model creation failed.'
    model_output_shape = queue.get_nowait()
    assert model_output_shape == (None, 1536, None, None)

    K.set_image_data_format('channels_last')
    p = Process(target=target, args=(queue,))
    p.start()
    p.join()
    K.set_image_data_format(global_image_data_format)
    assert not queue.empty(), 'Model creation failed.'
    model_output_shape = queue.get_nowait()
    assert model_output_shape == (None, None, None, 1536)
Example 6: test_worker_processes_shuts_down_after_processing_its_maximum_number_of_messages
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def test_worker_processes_shuts_down_after_processing_its_maximum_number_of_messages():
    """
    Test worker processes shutdown after processing maximum number of messages
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {"task": "tests.tasks.index_incrementer", "args": [], "kwargs": {"message": 23}}
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})

    # When I Process messages
    worker = ProcessWorker(internal_queue)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Example 7: __init__
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
class ParallelBufferedIO:
    """
    Provides asynchronous output to a file. You can call "write" and a subprocess will handle output buffering so that the
    main process does not hang while waiting for a lock.
    NOTE: You MUST close this or else all hell breaks loose with subprocesses
    """
    def __init__(self, path):
        """
        - path - the output path of this file. Must append to.
        """
        self.path = path
        self.Q = Queue()
        self.printing_process = Process(target=self.subprocess_loop, args=[])
        self.printing_process.start()
        # make sure we close on exit
        signal.signal(signal.SIGINT, self.close)
        # and set this to close on exiting, so that we don't hang
        atexit.register(self.close)

    def write(self, *args):
        self.Q.put(args)

    def writen(self, *args):
        args = list(args)
        args.extend("\n")
        self.Q.put(args)

    def close(self, *args):
        # *args absorbs the (signum, frame) arguments passed by signal handlers
        self.Q.put(None)

    def subprocess_loop(self):
        """
        An internal loop my subprocess maintains for outputting
        """
        # convert to a full path and make a lock
        path = os.path.realpath(self.path)
        lock = FileLock(self.path)
        while True:
            time.sleep(DELAY_TIME)
            if not self.Q.empty():
                lock.acquire()  # get the lock (or wait till we do)
                with open(self.path, 'a') as o:
                    while not self.Q.empty():  # dump the entire queue
                        x = self.Q.get()
                        if x is None:  # this is our signal we are done with input
                            lock.release()
                            return
                        else:
                            for xi in x: print >>o, xi,
                            # No newline by default now
                            #print >>o, "\n",
                lock.release()
Example 8: Importer
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
class Importer(object):
    def __init__(self):
        logging.debug("In Importer::__init__()")
        self._directories = []
        self.series = {}
        self.changed = 0
        self.queue = Queue()
        self.stopCancelQueue = Queue()
        self._parentConn = None
        self.finished = 0

    def clearData(self):
        logging.debug("In Importer::clearData()")

    def loadDirectory(self, directory, recursive):
        logging.debug("In Importer::loadDirectory()")
        self.finished = 0
        prov = []
        while not self.stopCancelQueue.empty():
            self.stopCancelQueue.get()
        self.process = Process(target=scanDirectory,
                               args=(directory, recursive,
                                     self.series,
                                     self.queue, self.stopCancelQueue))
        self.process.start()
        if not directory in self._directories:
            prov.append(directory)
        self._directories = self._directories + prov

    def stop(self):
        self.stopCancelQueue.put("stop")

    def cancel(self):
        self.stopCancelQueue.put("cancel")

    def updateSeries(self):
        logging.debug("In Importer::updateSeries()")
        if not self.queue.empty():
            key, value = self.queue.get()
            if key == "finished-1":
                self.finished = 1
            elif key == "finished-2":
                self.finished = 2
            else:
                self.series.update(value)

    def makeImport(self, indexes):
        logging.debug("In Importer::makeImport()")
        self.finished = 0
        while not self.queue.empty():
            self.queue.get()
        while not self.stopCancelQueue.empty():
            self.stopCancelQueue.get()
        self.process = Process(target=processImport,
                               args=(indexes,
                                     self.series,
                                     self.queue, self.stopCancelQueue))
        self.process.start()
Example 9: test_worker_processes_shuts_down_after_processing_its_max_number_of_msgs
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def test_worker_processes_shuts_down_after_processing_its_max_number_of_msgs():
    """
    Test worker processes shutdown after processing maximum number of messages
    """
    # Setup SQS Queue
    conn = boto3.client('sqs', region_name='us-east-1')
    queue_url = conn.create_queue(QueueName="tester")['QueueUrl']

    # Build the SQS Message
    message = {
        'Body': json.dumps({
            'task': 'tests.tasks.index_incrementer',
            'args': [],
            'kwargs': {
                'message': 23,
            },
        }),
        "ReceiptHandle": "receipt-1234",
    }

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put(
        {
            "queue": queue_url,
            "message": message,
            "start_time": time.time(),
            "timeout": 30,
        }
    )
    internal_queue.put(
        {
            "queue": queue_url,
            "message": message,
            "start_time": time.time(),
            "timeout": 30,
        }
    )
    internal_queue.put(
        {
            "queue": queue_url,
            "message": message,
            "start_time": time.time(),
            "timeout": 30,
        }
    )

    # When I Process messages
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Example 10: run_simulations
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def run_simulations(self):
    n_cpu = cpu_count()
    commands = self.get_parameters()
    n_scen = len(commands)
    running = 0
    finished = 0
    q = Queue()
    print '>> You have ' + str(n_scen) + ' simulations to run!'
    print '>> You\'re using ' + self.par_dict['N_CPUs'] + ' of ' + str(n_cpu) + ' CPUs available in this machine!'
    self.start = datetime.now()
    print '>> Starting simulations at ' + str(self.start) + '\n'
    try:
        while finished < n_scen:
            while len(commands):
                if running < int(self.par_dict['N_CPUs']):
                    running += 1
                    if len(commands) == 1:
                        p = Process(target=self.trigger_simulation, args=(commands[-1], q,))
                        commands.pop()
                        self.counter('\trunning: ', running,
                                     '\twaiting: ', len(commands),
                                     '\tfinished: ', finished)
                        p.start()
                        p.join()
                    else:
                        p = Process(target=self.trigger_simulation, args=(commands[-1], q,))
                        p.start()
                        commands.pop()
                else:
                    if not q.empty():
                        q.get()
                        running -= 1
                        finished += 1
                    time.sleep(1)
                    self.counter('\trunning: ', running,
                                 '\twaiting: ', len(commands),
                                 '\tfinished: ', finished)
            if not q.empty():
                q.get()
                running -= 1
                finished += 1
            time.sleep(1)
            self.counter('\trunning: ', running,
                         '\twaiting: ', len(commands),
                         '\tfinished: ', finished)
    except KeyboardInterrupt:
        print '\n\n>> Ctrl+c pressed! Exiting...\n'
        exit()
    self.counter('\trunning: ', running,
                 '\twaiting: ', len(commands),
                 '\tfinished: ', finished)
    print '\n\n>> The simulations have finished!'
Example 11: Connector
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
class Connector(object):
    def __init__(self, reply_generator: ConnectorReplyGenerator, connectors_event: Event):
        self._reply_generator = reply_generator
        self._scheduler = None
        self._thread = Thread(target=self.run)
        self._write_queue = Queue()
        self._read_queue = Queue()
        self._frontends_event = connectors_event
        self._shutdown_event = Event()
        self._muted = True

    def give_nlp(self, nlp):
        self._reply_generator.give_nlp(nlp)

    def start(self):
        self._scheduler.start()
        self._thread.start()

    def run(self):
        while not self._shutdown_event.is_set():
            message = self._scheduler.recv(timeout=0.2)
            if self._muted:
                self._scheduler.send(None)
            elif message is not None:
                # Receive the message and put it in a queue
                self._read_queue.put(message)
                # Notify main program to wakeup and check for messages
                self._frontends_event.set()
                # Send the reply
                reply = self._write_queue.get()
                self._scheduler.send(reply)

    def send(self, message: str):
        self._write_queue.put(message)

    def recv(self) -> Optional[ConnectorRecvMessage]:
        if not self._read_queue.empty():
            return self._read_queue.get()
        return None

    def shutdown(self):
        # Shutdown event signals both our thread and process to shutdown
        self._shutdown_event.set()
        self._scheduler.shutdown()
        self._thread.join()

    def generate(self, message: str, doc: Doc = None) -> str:
        return self._reply_generator.generate(message, doc)

    def mute(self):
        self._muted = True

    def unmute(self):
        self._muted = False

    def empty(self):
        return self._read_queue.empty()
Example 12: FileWatcher
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
class FileWatcher(object):
    def __init__(self, collector_path, supported_files):
        self._initialize_members(collector_path, supported_files)

    def _initialize_members(self, collector_path, supported_files):
        # initializing observer.
        event_handler = NewFileEvent(self)
        self._observer = Observer()
        self._observer.schedule(event_handler, collector_path)
        self._collector_path = collector_path
        self._files_queue = Queue()
        self._supported_files = supported_files
        self._logger = logging.getLogger('SPOT.INGEST.WATCHER')
        self._logger.info("Creating File watcher")
        self._logger.info("Supported Files: {0}".format(self._supported_files))

    def start(self):
        self._logger.info("Watching: {0}".format(self._collector_path))
        self._observer.start()

    def new_file_detected(self, file):
        self._logger.info("-------------------------------------- New File detected --------------------------------------")
        self._logger.info("File: {0}".format(file))
        # Validate the file is supported.
        collected_file_parts = file.split("/")
        collected_file = collected_file_parts[len(collected_file_parts) - 1]
        if (collected_file.endswith(tuple(self._supported_files)) or collected_file.startswith(tuple(self._supported_files))) and not ".current" in collected_file:
            self._files_queue.put(file)
            self._logger.info("File {0} added to the queue".format(file))
        else:
            self._logger.warning("File extension not supported: {0}".format(file))
            self._logger.warning("File won't be ingested")
        self._logger.info("------------------------------------------------------------------------------------------------")

    def stop(self):
        self._logger.info("Stopping File Watcher")
        self._files_queue.close()
        while not self._files_queue.empty():
            self._files_queue.get()
        self._observer.stop()
        self._observer.join()

    def GetNextFile(self):
        return self._files_queue.get()

    @property
    def HasFiles(self):
        return not self._files_queue.empty()
Example 13: run
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def run(self):
    """ Start ProcessTask main function """
    filenames = split_file_by_filenum(self.config.seedfile,
                                      self.config.proc_num)
    output_queue = Queue()
    progress_queue = Queue()
    processes = []
    w = ProcessTask(self.config.scan_func,
                    self.config.pool_size,
                    self.config.pool_timeout)
    if self.config.scan_callback:
        w.callback = self.config.scan_callback
    for i, filename in enumerate(filenames):
        proc_name = 'Worker-{:<2d}'.format(i + 1)
        p = Process(name=proc_name,
                    target=w.run,
                    args=(filename, progress_queue, output_queue))
        if p not in processes:
            processes.append(p)
    for p in processes:
        p.start()
    if self.config.enable_console:
        monitor = ConsoleMonitor(self.config,
                                 processes,
                                 progress_queue,
                                 output_queue)
        monitor.run()
    else:
        progress = {}
        task_total = count_file_linenum(self.config.seedfile)
        task_num = 0
        with_stream_logger = logging.getLogger('output.with.stream')
        while any(p.is_alive() for p in processes):
            time.sleep(0.1)
            while not progress_queue.empty():
                proc_name, count, task_total = progress_queue.get()
                progress[proc_name] = count
                task_num = sum([v for k, v in progress.items()])
            while not output_queue.empty():
                proc_name, output = output_queue.get()
                with_stream_logger.info('{}'.format(output))
            if task_num == task_total:
                for _ in processes:
                    _.terminate()
Example 14: download_all
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def download_all(self, *, resume=True, thread=1, strip_filename=False, retry=0):
    # self.__entries = sorted( self.__entries, itemgetter('id') )
    print('Sorting list...')
    self.__entries.sort(key=lambda entry: entry['id'].lower())
    queued = Queue()
    # resume from break point
    if resume:
        skip_count = 0
        for entry in self.__entries:
            if strip_filename:
                fn = self.__file_prefix + NameUtils.strip_filename(entry['id']) + self.__file_ext
            else:
                fn = self.__file_prefix + entry['id'] + self.__file_ext
            if os.path.exists(fn):
                # print( 'Skipping ' + entry['id'] + ' (' + fn + ') ...' )
                skip_count += 1
            else:
                queued.put(entry)
        print('Determining files to download...')
        print('Skipped ' + str(skip_count) + ', ', end='')
    else:
        for entry in self.__entries:
            queued.put(entry)
    if queued.empty():
        print('Nothing to be downloaded.')
    else:
        print(str(len(self.__entries)) + ' to be downloaded...')
        # Let's go multi-thread!
        pool = list()
        failed_queue = Queue()
        args = [queued, failed_queue, strip_filename]
        while retry >= 0:
            for i in range(0, thread):
                t = Thread(target=self.__download_entry, args=args)
                t.start()  # start the worker thread
                pool.append(t)
            if not failed_queue.empty():
                queued = failed_queue  # set the queue to previously failed queue to retry downloading
                failed_queue = Queue()  # clear failed queue
                if not retry == 0:  # when retry set to 0, retry downloading infinitely
                    retry -= 1
            else:
                break
        # wait the threads
        for t in pool:
            t.join()
Example 15: time_limit
# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import empty [as alias]
def time_limit(seconds, fp, func, *args, **kwargs):
    if fp:
        if not hasattr(fp, 'write'):
            raise TypeError("Expected 'file-like' object, got '%s'" % fp)
        else:
            def record(msg):
                fp.write(msg)
    else:
        def record(msg):
            return

    def capture_results(msg_queue, func, *args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except Exception as e:
            msg_queue.put(
                "Running function '%s' resulted in exception '%s' with "
                "message: '%s'\n" % (func.__name__, e.__class__.__name__, e))
            # no point re-raising an exception from the subprocess, instead
            # return False
            return False
        else:
            msg_queue.put(
                "Running function '%s' finished with result '%s', and "
                "stack:\n%s\n" % (func.__name__, result,
                                  traceback.format_stack()))
            return result

    messages = Queue()
    # although creating a separate process is expensive it's the only way to
    # ensure cross platform that we can cleanly terminate after timeout
    p = Process(target=functools.partial(capture_results, messages, func),
                args=args, kwargs=kwargs)
    p.start()
    p.join(seconds)
    if p.is_alive():
        p.terminate()
        while not messages.empty():
            record(messages.get())
        record("Running function '%s' did not finish\n" % func.__name__)
        raise TestsTimeoutException
    else:
        while not messages.empty():
            record(messages.get())
        record("Running function '%s' finished with exit code '%s'\n"
               % (func.__name__, p.exitcode))
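A minimal usage sketch for the helper above, assuming TestsTimeoutException is defined in the same module:

import sys
import time

def slow_test():
    time.sleep(10)

# The child is terminated after roughly 2 seconds, the captured messages are
# written to stderr, and TestsTimeoutException is raised.
time_limit(2, sys.stderr, slow_test)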