This article collects typical usage examples of the Python method multiprocessing.queues.SimpleQueue.empty. If you have been wondering what exactly SimpleQueue.empty does and how to use it, the curated examples below may help. You can also read further about the enclosing class, multiprocessing.queues.SimpleQueue.
The following presents 11 code examples of SimpleQueue.empty, sorted by popularity by default.
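One orienting note before the examples: empty() reports only a point-in-time snapshot of the queue, so an empty()/get() pair can race while another process is still writing; the examples below therefore tend to drain the queue only after producers have exited, or to tolerate skipped polls. Also, the examples are Python 2 era code, where SimpleQueue could be instantiated directly from multiprocessing.queues. The following minimal sketch (written for this article, not taken from any of the projects below) targets Python 3, where the class is obtained from the top-level multiprocessing namespace:
from multiprocessing import Process, SimpleQueue

def worker(q):
    q.put("done")

if __name__ == "__main__":
    q = SimpleQueue()
    p = Process(target=worker, args=(q,))
    p.start()
    p.join()  # the producer has exited, so draining below is race-free
    while not q.empty():
        print(q.get())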
Example 1: Logger
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
import os
from multiprocessing.queues import SimpleQueue

class Logger(object):
    def __init__(self, logfilepath):
        try:
            os.remove(logfilepath)
        except OSError:
            pass
        self.logfilepath = logfilepath
        self.logq = SimpleQueue()
        self.tags = ''
        self.num_tags = 0

    def add_tag(self, tag):
        #self.log("adding tag {}".format(tag))
        self.num_tags += 1
        if self.tags != '':
            self.tags = self.tags + '.' + tag
        else:
            self.tags = tag

    def remove_tag(self):
        #self.log("removing tag")
        tags = self.tags.split('.')
        self.tags = ".".join(tags[:-1])
        self.num_tags -= 1

    def get_tag_part(self):
        if self.tags != '':
            return self.tags + ": "
        else:
            return ''

    def log(self, message, start_group=None, end_group=None):
        assert type(message) == str
        self.logq.put(" " * self.num_tags * 4 + self.get_tag_part() + message + '\n')

    def getlog(self):
        return self.logq.get()

    def getlogs(self, n=None):
        logs = []
        if n is None:
            while not self.logq.empty():
                logs.append(self.getlog())
        else:
            assert type(n) == int
            while not (self.logq.empty() or len(logs) == n):
                logs.append(self.getlog())
        return logs

    def write_to_file(self):
        # mode 'a' for append
        with open(self.logfilepath, 'a') as f:
            f.writelines(self.getlogs())
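A hypothetical usage sketch of the class above (the log path is invented for illustration): add_tag()/remove_tag() control the indentation prefix, log() enqueues lines, and write_to_file() drains the queue to disk via getlogs().
logger = Logger('run.log')     # hypothetical log path
logger.add_tag('setup')
logger.log('loading config')   # queued as "    setup: loading config"
logger.remove_tag()
logger.log('ready')
logger.write_to_file()         # getlogs() drains logq until empty()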
Example 2: QuiverPlotter
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
def QuiverPlotter(num):
    # Excerpt: quiverPlotter (the process target) and the Process import
    # live elsewhere in the source file.
    data_q = SimpleQueue()

    plot = Process(target=quiverPlotter, args=(data_q, num))
    plot.start()

    try:
        while True:
            data = (yield)
            # Drop this frame if the plot process hasn't consumed the last one
            if not data_q.empty():
                continue
            data_q.put(data)
    except GeneratorExit:
        plot.join()
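QuiverPlotter is a generator used as a coroutine: it must be primed before data can be sent in, and closing it raises GeneratorExit inside, which joins the plot process. A hypothetical driver (the data source name is invented):
sink = QuiverPlotter(32)
next(sink)             # prime the coroutine up to the first (yield)
for frame in frames:   # 'frames' stands in for the real data source
    sink.send(frame)   # silently dropped while the plotter is busy
sink.close()           # GeneratorExit -> plot.join()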
Example 3: Plotter3D
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
def Plotter3D(plots, scale):
    # Excerpt: plotter3D (the process target) and the Process import live
    # elsewhere in the source file.
    data_q = SimpleQueue()

    plot = Process(target=plotter3D, args=(data_q, plots, scale))
    plot.start()

    data = {}
    try:
        while True:
            data.update((yield))
            # Drop this frame if the plot process hasn't consumed the last one
            if not data_q.empty():
                continue
            data_q.put(data)
    except GeneratorExit:
        pass
Example 4: StatusTracker
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
class StatusTracker(object):
    def __init__(self):
        self.logq = SimpleQueue()
        self.history = []

    def put(self, msg):
        assert type(msg) == str
        self.logq.put(msg)

    def flushq(self):
        # Drain everything queued so far into the local history
        while not self.logq.empty():
            self.history.append(self.logq.get())
        self.prune_history()

    def prune_history(self):
        # Keep only the 100 most recent messages
        self.history = self.history[-100:]
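A hypothetical usage sketch: producers call put() with short status strings, and the owning process periodically calls flushq() to move them into the bounded history.
tracker = StatusTracker()
tracker.put("worker 3: started")
tracker.put("worker 3: 50% done")
tracker.flushq()              # drains logq into history, then prunes to 100
print(tracker.history[-1])    # -> worker 3: 50% done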
Example 5: launch_graph_plot
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
def launch_graph_plot():
    # Excerpt: _launch_daemon, GraphPlotPanel and fpsClock, plus the Pyro4
    # and Process imports, are defined elsewhere in the source file.
    q = SimpleQueue()
    Pyro4.config.HOST = "10.1.1.2"
    daemon = Pyro4.Daemon()
    ns = Pyro4.locateNS()
    p = Process(target=_launch_daemon, args=(daemon, q,))
    p.start()

    graph_plot = GraphPlotPanel()
    while True:
        if not q.empty():
            item = q.get()
            if item[0] == 'time':
                print("got queue:", item)
                graph_plot.set_time(item[1])
            elif item[0] == 'vertex_color':
                pass
        graph_plot.run()
        fpsClock.tick(60)
Example 6: RangerControlServer
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
class RangerControlServer(HTTPServer):
    # Excerpt: RangerControlHandler and the HTTPServer/threading/re imports
    # are defined elsewhere in the source file.
    def __init__(self, fm):
        self.fm = fm
        self.queue = SimpleQueue()
        self.goDie = False
        HTTPServer.__init__(self, ("127.0.0.1", 5964), RangerControlHandler)

    def start(self):
        self.thread = threading.Thread(target=self.process)
        self.thread.start()

    def stop(self):
        self.shutdown()

    def process(self):
        self.serve_forever()

    def check_messages(self):
        # Non-blocking poll: return a message if one is queued, else None
        if self.queue.empty():
            return None
        return self.queue.get()

    def act_on_messages(self):
        msg = self.check_messages()
        if msg is None:
            return False
        action, arg = msg
        match = re.match(r"/cdtab-(\S+)", action)
        if match is not None:
            tab = match.group(1)
            if tab not in self.fm.tabs:
                self.fm.tab_open(tab, arg)
            else:
                self.fm.tabs[tab].enter_dir(arg)
        elif action == "/cd":
            self.fm.enter_dir(arg)
        elif action == "/cdfirst":
            first_tab = self.fm._get_tab_list()[0]
            self.fm.tabs[first_tab].enter_dir(arg)
        else:
            self.fm.notify("Unknown server command", bad=True)
        return True
Example 7: DensityPlotter
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
def DensityPlotter(num, size):
    # num = size/scale
    # Excerpt: imagedraw (the process target) and the Process/numpy imports
    # live elsewhere in the source file.
    range = [[-size, size], [-size, size]]  # note: shadows the built-in range

    data_q = SimpleQueue()

    plot = Process(target=imagedraw, args=(data_q, num))
    plot.start()

    while True:
        x = (yield)
        # Drop this frame if the plot process hasn't consumed the last one
        if not data_q.empty():
            continue

        hist, _, _ = np.histogram2d(x[:, 0], x[:, 1], bins=num, range=range)
        avg = np.average(hist)
        hist = (hist - avg) / avg  # relative deviation from the mean density
        data_q.put(hist.astype(np.float32))
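The (hist - avg) / avg step turns raw bin counts into relative density contrast: 0 marks a bin at the mean density, -0.5 one at half of it. A tiny worked check with invented values:
import numpy as np

hist = np.array([[2., 6.], [4., 4.]])
avg = np.average(hist)           # 4.0
contrast = (hist - avg) / avg    # [[-0.5, 0.5], [0.0, 0.0]]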
Example 8: __init__
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
class LinePlotter:
    # Excerpt: LinePlotterProcess is defined elsewhere in the source file.
    def __init__(self, *args, **kwargs):
        self.data_q = SimpleQueue()
        self.data = {}
        self.plot = LinePlotterProcess(self.data_q)
        self.plot.add_plot(*args, **kwargs)

    def show(self):
        self.plot.start()

    def add_plot(self, *args, **kwargs):
        self.plot.add_plot(*args, **kwargs)

    def send(self, data):
        if data == GeneratorExit:
            self.plot.join()
        self.data.update(data)
        # Only queue a new frame once the plot process has consumed the last one
        if self.data_q.empty():
            self.data_q.put(data)
Example 9: spawn_import_clients
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    # Excerpt (Python 2 code, note xrange): helpers such as client_process,
    # table_reader, update_progress, print_progress and abort_import, plus the
    # multiprocessing/os/signal/ctypes/time/sys imports, live elsewhere in the
    # source file.
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(signal.SIGINT,
                  lambda a, b: abort_import(a, b, parent_pid, exit_event,
                                            task_queue, client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(multiprocessing.Process(target=client_process,
                                                        args=(options["host"],
                                                              options["port"],
                                                              options["auth_key"],
                                                              task_queue,
                                                              error_queue,
                                                              rows_written,
                                                              options["force"],
                                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1),  # Current lines/bytes processed
                                  multiprocessing.Value(ctypes.c_longlong, 0)))   # Total lines/bytes to process
            reader_procs.append(multiprocessing.Process(target=table_reader,
                                                        args=(options,
                                                              file_info,
                                                              task_queue,
                                                              error_queue,
                                                              progress_info[-1],
                                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            if not error_queue.empty():
                exit_event.set()
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put("exit")

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [client for client in client_procs if client.is_alive()]

        # If we were successful, make sure 100% progress is reported
        if error_queue.empty() and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(rows_written.value, "row"),
                                     plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not task_queue.empty():
        error_queue.put((RuntimeError, RuntimeError("Error: Items remaining in the task queue"), None))

    if not error_queue.empty():
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        while not error_queue.empty():
            error = error_queue.get()
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
Example 10: run_clients
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
def run_clients(options, db_table_set):
    # Spawn one client for each db.table
    # Excerpt: export_table, update_progress, print_progress and abort_export,
    # plus the multiprocessing/signal/ctypes/time/sys imports, live elsewhere
    # in the source file.
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    stream_semaphore = multiprocessing.BoundedSemaphore(options["clients"])

    signal.signal(signal.SIGINT, lambda a, b: abort_export(a, b, exit_event, interrupt_event))

    try:
        progress_info = []

        for db, table in db_table_set:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1),
                                  multiprocessing.Value(ctypes.c_longlong, 0)))
            processes.append(multiprocessing.Process(target=export_table,
                                                     args=(options["host"],
                                                           options["port"],
                                                           options["auth_key"],
                                                           db, table,
                                                           options["directory_partial"],
                                                           options["fields"],
                                                           options["format"],
                                                           error_queue,
                                                           progress_info[-1],
                                                           stream_semaphore,
                                                           exit_event)))
            processes[-1].start()

        # Wait for all tables to finish
        while len(processes) > 0:
            time.sleep(0.1)
            if not error_queue.empty():
                exit_event.set()  # Stop rather immediately if an error occurs
            processes = [process for process in processes if process.is_alive()]
            update_progress(progress_info)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if error_queue.empty() and not interrupt_event.is_set():
            print_progress(1.0)

        # Continue past the progress output line and print total rows processed
        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        print("")
        print("%s exported from %s" % (plural(sum([info[0].value for info in progress_info]), "row"),
                                       plural(len(db_table_set), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not error_queue.empty():
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        while not error_queue.empty():
            error = error_queue.get()
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
        raise RuntimeError("Errors occurred during export")
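Examples 9 and 10 share a supervisor pattern: poll worker liveness on a timer, treat a non-empty error queue as a signal to stop everyone, and drain the errors only after all workers have exited, at which point empty() can no longer race with a writer. A generic, self-contained sketch of that pattern (names invented for illustration, not from the original tools):
import sys
import time

def supervise(procs, error_queue, exit_event):
    """procs: list of multiprocessing.Process; error_queue: a SimpleQueue the
    workers put stringified errors on; exit_event: an Event they observe."""
    while procs:
        time.sleep(0.1)
        if not error_queue.empty():   # some worker failed: tell the rest to stop
            exit_event.set()
        procs = [p for p in procs if p.is_alive()]
    failed = False
    while not error_queue.empty():    # workers are gone, so draining is race-free
        failed = True
        print(error_queue.get(), file=sys.stderr)
    return not failed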
Example 11: AsyncScanner
# Required import: from multiprocessing.queues import SimpleQueue [as alias]
# Or: from multiprocessing.queues.SimpleQueue import empty [as alias]
class AsyncScanner(object):
    """ Class to derive all the scanner classes from.

    To implement a scanner you have to override:
        update_str_last_scanned()

    Use try/finally to call terminate(); otherwise child processes
    will be left hanging in the background.
    """
    # Excerpt: world, ChildProcessException and the multiprocessing/logging/time
    # imports are defined elsewhere in the source file.

    def __init__(self, data_structure, processes, scan_function, init_args, _mp_init_function):
        """ Init the scanner.

        data_structure is a world.DataSet
        processes is the number of child processes to use
        scan_function is the function to use for scanning
        init_args are the arguments passed to the init function
        _mp_init_function is the function used to init the child processes
        """
        assert isinstance(data_structure, world.DataSet)
        self.data_structure = data_structure
        self.list_files_to_scan = data_structure._get_list()
        self.processes = processes
        self.scan_function = scan_function

        # Queue used by processes to pass results
        self.queue = SimpleQueue()
        init_args.update({'queue': self.queue})
        # NOTE TO SELF: initargs doesn't handle kwargs, only args!
        # Pass a dict with all the args
        self.pool = multiprocessing.Pool(processes=processes,
                                         initializer=_mp_init_function,
                                         initargs=(init_args,))

        # TODO: make this an automatic amount
        # Recommended time to sleep between polls for results
        self.SCAN_START_SLEEP_TIME = 0.001
        self.SCAN_MIN_SLEEP_TIME = 1e-6
        self.SCAN_MAX_SLEEP_TIME = 0.1
        self.scan_sleep_time = self.SCAN_START_SLEEP_TIME
        self.queries_without_results = 0
        self.last_time = time()
        self.MIN_QUERY_NUM = 1
        self.MAX_QUERY_NUM = 5

        # Holds a friendly string with the name of the last file scanned
        self._str_last_scanned = None

    def scan(self):
        """ Launch the child processes and scan all the files. """
        logging.debug("########################################################")
        logging.debug("########################################################")
        logging.debug("Starting scan in: " + str(self))
        logging.debug("########################################################")
        logging.debug("########################################################")
        total_files = len(self.data_structure)

        # Tests indicate that a smaller number of jobs per worker makes all
        # types of scan faster
        jobs_per_worker = 5
        # jobs_per_worker = max(1, total_files // self.processes)
        self._results = self.pool.map_async(self.scan_function,
                                            self.list_files_to_scan,
                                            jobs_per_worker)
        # No more tasks for the pool; the processes exit once the tasks are done
        self.pool.close()

        # See method
        self._str_last_scanned = ""

    def get_last_result(self):
        """ Return results of last file scanned. """
        q = self.queue
        ds = self.data_structure
        if not q.empty():
            d = q.get()
            if isinstance(d, tuple):
                self.raise_child_exception(d)
            # Copy it to the parent process
            ds._replace_in_data_structure(d)
            self.update_str_last_scanned(d)
            # Got a result! Reset the counter.
            self.queries_without_results = 0
            return d
        else:
            # Count the number of queries without results
            self.queries_without_results += 1
            return None

    def terminate(self):
        """ Terminate the pool; this will exit no matter what. """
        self.pool.terminate()

    def raise_child_exception(self, exception_tuple):
        """ Raises a ChildProcessException using the info
        contained in the tuple returned by the child process. """
        e = exception_tuple
        raise ChildProcessException(e[0], e[1][0], e[1][1], e[1][2])

    def update_str_last_scanned(self):
        """ Updates the string that represents the last file scanned. """
        raise NotImplementedError  # fixed: NotImplemented is not an exception
#......... remainder of the code omitted .........
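The constants above (SCAN_MIN_SLEEP_TIME, SCAN_MAX_SLEEP_TIME, queries_without_results) suggest the omitted code adapts its polling interval to how often get_last_result() actually returns something; that source is not shown here. As a closing illustration, here is a generic, self-contained sketch of adaptive polling against a SimpleQueue, invented for this article rather than taken from the scanner:
import time
from multiprocessing import Event, Process, SimpleQueue

MIN_SLEEP, MAX_SLEEP = 1e-6, 0.1

def producer(q, done):
    for i in range(5):
        q.put(i)
        time.sleep(0.01)
    done.set()

def poll(q, done):
    """Drain q, backing off while idle and speeding up when results flow."""
    sleep_time = 0.001
    while not done.is_set() or not q.empty():
        if not q.empty():
            print("got:", q.get())
            sleep_time = max(MIN_SLEEP, sleep_time / 2)   # busy: poll faster
        else:
            sleep_time = min(MAX_SLEEP, sleep_time * 2)   # idle: back off
        time.sleep(sleep_time)

if __name__ == "__main__":
    q, done = SimpleQueue(), Event()
    p = Process(target=producer, args=(q, done))
    p.start()
    poll(q, done)
    p.join()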