This article collects typical usage examples of the ThreadPoolExecutor.shutdown method from Python's concurrent.futures.thread module. If you are wondering what ThreadPoolExecutor.shutdown does, how to call it, or where it is useful in practice, the curated code examples below should help. You can also explore the other methods of the concurrent.futures.thread.ThreadPoolExecutor class.
Eight code examples of ThreadPoolExecutor.shutdown are shown below, ordered roughly by popularity.
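As a quick orientation before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below; the work function is just a hypothetical placeholder) illustrating the difference between shutdown(wait=True) and shutdown(wait=False):

from concurrent.futures.thread import ThreadPoolExecutor
import time

def work(n):
    # placeholder task used only for this sketch
    time.sleep(0.1)
    return n * n

pool = ThreadPoolExecutor(max_workers=4)
futures = [pool.submit(work, i) for i in range(8)]
# wait=True (the default) blocks until every pending task has finished,
# then frees the worker threads.
pool.shutdown(wait=True)
print([f.result() for f in futures])

pool = ThreadPoolExecutor(max_workers=4)
pool.submit(work, 99)
# wait=False returns immediately; already-submitted tasks still run to
# completion in the background, but new submissions raise RuntimeError.
pool.shutdown(wait=False)

Several of the examples below rely on exactly this distinction: shutdown(False) to stop accepting new work without blocking, and shutdown(True) to block until all submitted tasks are done.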
Example 1: prepareServer
# Required import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import shutdown [as alias]
def prepareServer(RequestHandlerClass, pipe, threads, timeout):
    '''
    Prepare in a process the request handling.
    '''
    def process(request, address):
        RequestHandlerClass(request, address, None)
        try: request.shutdown(socket.SHUT_WR)
        except socket.error: pass  # some platforms may raise ENOTCONN here
        request.close()

    pool = ThreadPoolExecutor(threads)
    while True:
        if not pipe.poll(timeout): break
        else:
            data = pipe.recv()
            if data is None: break
            elif data is True: continue

            requestfd, address = data
            request = socket.fromfd(rebuild_handle(requestfd), socket.AF_INET, socket.SOCK_STREAM)
            pool.submit(process, request, address)
    pool.shutdown(False)
Example 2: Bender
# Required import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import shutdown [as alias]
class Bender(object):
    def __init__(self, backbone, brain=None):
        self._backbone = backbone
        self._brain = brain if brain is not None else Brain()
        self._brain_lock = threading.Lock()
        self._regex_to_response = OrderedDict()
        self._scripts = OrderedDict()
        self._pool = ThreadPoolExecutor(max_workers=4)
        self._futures = []  # list of futures submitted to the pool
        self._stop_loop = threading.Event()

    def register_script(self, name, script):
        self._scripts[name] = script

    def register_builtin_scripts(self):
        for name, script in scripts.get_builtin_scripts():
            self.register_script(name, script)

    def register_setuptools_scripts(self):
        for p in pkg_resources.iter_entry_points('bender_script'):
            obj = p.load()
            if inspect.isclass(obj):
                obj = obj()
            self.register_script(p.name, obj)

    def get_script(self, name):
        return self._scripts[name]

    def iter_scripts(self):
        return iter(self._scripts.items())

    def start(self):
        self._brain.load()
        self._backbone.on_message_received = self.on_message_received
        self.register_builtin_scripts()
        self.register_setuptools_scripts()
        for script in self._scripts.values():
            hooks.call_unique_hook(script, 'script_initialize_hook',
                                   brain=self._brain)
        hooks.call_unique_hook(self._backbone, 'backbone_start_hook')

    def shutdown(self):
        self._pool.shutdown(wait=True)
        for name, script in list(self._scripts.items()):
            self._scripts.pop(name)
            hooks.call_unique_hook(script, 'script_shutdown_hook',
                                   brain=self._brain)
        hooks.call_unique_hook(self._backbone, 'backbone_shutdown_hook',
                               brain=self._brain)
        self._brain.dump()
        self._stop_loop.set()

    def request_shutdown(self):
        self._stop_loop.set()

    def loop(self):
        self.start()
        self._stop_loop.wait()
        self.shutdown()

    def on_message_received(self, msg):
        def thread_exec(hook, brain, msg, match):
            try:
                hooks.call(hook, brain=self._brain, msg=msg, match=match,
                           bender=self)
            except Exception as e:
                msg.reply('*BZZT* %s' % e)
            else:
                with self._brain_lock:
                    brain.dump()

        handled = False
        for script in self._scripts.values():
            for hook in hooks.find_hooks(script, 'respond_hook'):
                match = re.match(hook.inputs['regex'], msg.get_body(),
                                 re.IGNORECASE | re.DOTALL)
                if match:
                    f = self._pool.submit(thread_exec, hook, self._brain, msg,
                                          match)
                    self._futures.append(f)
                    handled = True
        if not handled:
            msg.reply('Command not recognized')

    def wait_all_messages(self):
        while self._futures:
            f = self._futures.pop()
            f.result()  # wait until future returns
Example 3: RabbitManager
# Required import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import shutdown [as alias]
# ... (part of the code is omitted here) ...
        try:
            if self._tasks_number >= self._max_tasks:
                raise RuntimeError("Max tasks limit reached")
            self._tasks_number += 1
            ftr = self._executor.submit(self.process_task, body)

            def process_done(future: Future):
                nonlocal self
                self._tasks_number -= 1
                if future.cancelled():
                    # process_task ended by cancel
                    self.requeue_message(basic_deliver.delivery_tag)
                else:
                    if future.exception():
                        exception = future.exception()
                        if not isinstance(exception, RequeueMessage):
                            self.log.exception(exception)
                        self.requeue_message(
                            basic_deliver.delivery_tag
                        )
                    else:
                        self.acknowledge_message(basic_deliver.delivery_tag)

            ftr.add_done_callback(process_done)
            return ftr
        except RuntimeError:
            self.requeue_message(basic_deliver.delivery_tag)
            time.sleep(0.5)
        except Exception as e:
            self.log.exception(e)
            self.requeue_message(basic_deliver.delivery_tag)
            time.sleep(10)
    def stop_consuming(self):
        """Send Basic.Cancel to rabbit

        :return:
        """
        if self._channel:
            self.log.info("Stop consuming")
            self._channel.basic_cancel(self.on_cancel_ok, self._consumer_tag)

    def start_consuming(self):
        """Begins to consume messages

        :return:
        """
        self.log.info("Start consuming")
        self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
        self._consumer_tag = self._channel.basic_consume(self.on_message,
                                                         self.queue)
        self.run()

    def run(self):
        """Run consumer"""
        self.log.info("Running consumer")
        connection = self.connect()
        """:type: pika.SelectConnection"""
        channel = connection.channel()
        self._channel = channel
        self._connection = connection

        for method_frame, properties, body in channel.consume(self.queue):
            while self._tasks_number >= self._max_tasks:
                time.sleep(0.1)
            self.on_message(channel, method_frame, properties, body)

    def stop(self):
        """Stops consuming service

        :return:
        """
        self.log.info("Stopping")
        self._closing = True
        self.stop_consuming()
        self._executor.shutdown(True)
        # if self._connection is not None:
        #     self._connection.ioloop.start()
        self.log.info("Stopped")

    def __exit__(self, *args, **kwargs):
        self.stop()
        super(RabbitManager, self).__exit__(*args, **kwargs)
Example 4: do_test1
# Required import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import shutdown [as alias]
def do_test1(workers):
    param = {"max_workers": workers}
    start = round(time.time() + _start_warm_up)
    input = input_generator(workers, start)
    loop = asyncio.new_event_loop()
    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    def result_checker(list, lock, fut):
        with lock:
            try:
                list.append(fut.result())
            except Exception as e:
                list.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    for x in input:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
        future = cexec.submit(async_wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, cresult, lock))

    texec.shutdown(False)
    pexec.shutdown(False)
    loop.run_until_complete(cexec.shutdown(False))

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - start) / _precision) for x in tresult]
    presult = [round((x - start) / _precision) for x in presult]
    cresult = [round((x - start) / _precision) for x in cresult]

    result = True
    for (t, p, c) in zip(tresult, presult, cresult):
        result = result and (t == p)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (p == c)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (c == t)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
    return result
Example 5: do_test3
# Required import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import shutdown [as alias]
def do_test3(workers):
    param = {"max_workers": workers}
    loop = asyncio.new_event_loop()
    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []
    pre_input1 = input_generator(workers, 0)
    pre_input2 = input_generator(workers, max(pre_input1))
    pre_input3 = input_generator(workers, max(pre_input2))

    def result_checker(list, lock, fut):
        with lock:
            try:
                list.append(fut.result())
            except Exception as e:
                list.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    tstart = round(time.time() + 1)
    input1 = [tstart + i for i in pre_input1]
    input2 = [tstart + i for i in pre_input2]
    input3 = [tstart + i for i in pre_input3]
    for x in input1:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    result_iter = texec.map(wake_at, input2)
    for x in input3:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    for x in result_iter:
        with lock:
            tresult.append(x)
    texec.shutdown(True)

    pstart = round(time.time() + _start_warm_up)
    input1 = [pstart + i for i in pre_input1]
    input2 = [pstart + i for i in pre_input2]
    input3 = [pstart + i for i in pre_input3]
    for x in input1:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    result_iter = pexec.map(wake_at, input2)
    for x in input3:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    for x in result_iter:
        with lock:
            presult.append(x)
    pexec.shutdown(True)

    cstart = round(time.time() + _start_warm_up)
    input1 = [cstart + i for i in pre_input1]
    input2 = [cstart + i for i in pre_input2]
    input3 = [cstart + i for i in pre_input3]

    async def async_main():
        for x in input1:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        result_iter = cexec.map(async_wake_at, input2)
        for x in input3:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        async for x in result_iter:
            with lock:
                cresult.append(x)
        await cexec.shutdown(False)

    loop.run_until_complete(async_main())

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - tstart) / _precision) for x in tresult]
    presult = [round((x - pstart) / _precision) for x in presult]
    cresult = [round((x - cstart) / _precision) for x in cresult]

    result = True
    for (t, p, c) in zip(tresult, presult, cresult):
        result = result and (t == p)
        # ... (remaining code omitted) ...
Example 6: UI
# Required import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import shutdown [as alias]
# ... (part of the code is omitted here) ...
        :param i: int
        :return: None
        """
        if len(self.buffers) == 1:
            # we don't need to display anything
            # listing is already displayed
            return
        else:
            try:
                self.display_buffer(self.buffers[i])
            except IndexError:
                # i > len
                self.display_buffer(self.buffers[0])

    @property
    def current_buffer_index(self):
        return self.buffers.index(self.current_buffer)

    def remove_current_buffer(self):
        # don't allow removing main_list
        if isinstance(self.current_buffer, MainListBuffer):
            logger.warning("you can't remove main list widget")
            return
        self.buffers.remove(self.current_buffer)
        self.current_buffer.destroy()
        # FIXME: we should display last displayed widget here
        self.display_buffer(self.buffers[0], True)

    def unhandled_input(self, key):
        logger.debug("unhandled input: %r", key)
        try:
            if key in ("q", "Q"):
                self.executor.shutdown(wait=False)
                raise urwid.ExitMainLoop()
            elif key == "ctrl o":
                self.pick_and_display_buffer(self.current_buffer_index - 1)
            elif key == "ctrl i":
                self.pick_and_display_buffer(self.current_buffer_index + 1)
            elif key == "x":
                self.remove_current_buffer()
            elif key == "/":
                self.prompt("/", search)
            elif key == "f4":
                self.footer.prompt("filter ", filter)
            elif key == "n":
                self.current_buffer.find_next()
            elif key == "N":
                self.current_buffer.find_previous()
            elif key in ["h", "?"]:
                self.display_help()
            elif key == "f5":
                self.display_tree()
        except NotifyError as ex:
            self.notify_message(str(ex), level="error")
            logger.error(repr(ex))

    def run(self):
        self.main_list_buffer = MainListBuffer(self.d, self)

        @log_traceback
        def chain_fcs():
            self.main_list_buffer.refresh(focus_on_top=True)
            self.add_and_display_buffer(self.main_list_buffer, redraw=True)

        self.run_in_background(chain_fcs)
Example 7: OpticalPathManager
# Required import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import shutdown [as alias]
# ... (part of the code is omitted here) ...
if "chamber-view" in self._modes:
self._focus_in_chamber_view = None
self._focus_out_chamber_view = None
# Check whether the focus affects the chamber view
try:
chamb_det = self._getComponent(self._modes["chamber-view"][0])
focus = self._getComponent("focus")
if self.affects(focus.name, chamb_det.name):
self._chamber_view_own_focus = True
except LookupError:
pass
if not self._chamber_view_own_focus:
logging.debug("No focus component affecting chamber")
# will take care of executing setPath asynchronously
self._executor = ThreadPoolExecutor(max_workers=1)
def __del__(self):
logging.debug("Ending path manager")
# Restore the spectrometer focus, so that on next start, this value will
# be used again as "out of chamber view".
if self._chamber_view_own_focus and self._last_mode == "chamber-view":
focus_comp = self._getComponent("focus")
if self._focus_out_chamber_view is not None:
logging.debug("Restoring focus from before coming to chamber view to %s",
self._focus_out_chamber_view)
try:
focus_comp.moveAbsSync(self._focus_out_chamber_view)
except IOError as e:
logging.info("Actuator move failed giving the error %s", e)
try:
self._executor.shutdown(wait=False)
except AttributeError:
pass # Not created
def _getComponent(self, role):
"""
same as model.getComponent, but optimised by caching the result.
Uses regex to match the name to a list of cached components
return Component
raise LookupError: if matching component not found
"""
# if we have not returned raise an exception
for comp in self._cached_components:
if comp.role is not None and re.match(role + "$", comp.role):
return comp
# if not found...
raise LookupError("No component with the role %s" % (role,))
def setAcqQuality(self, quality):
"""
Update the acquisition quality expected. Depending on the quality,
some hardware settings will be adjusted.
quality (ACQ_QUALITY): the acquisition quality
"""
assert quality in (ACQ_QUALITY_FAST, ACQ_QUALITY_BEST)
if quality == self.quality:
return
self.quality = quality
if self.microscope.role in ("secom", "delphi"):
if quality == ACQ_QUALITY_FAST:
Example 8: Server
# Required import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import shutdown [as alias]
# ... (part of the code is omitted here) ...
"""Starts the Remote Adapter. A connection to the Proxy Adapter is
performed (as soon as one is available). Then, requests issued by
the Proxy Adapter are received and forwarded to the Remote Adapter.
"""
if self.keep_alive > 0:
self._log.info("Keepalive time for %s set to %f milliseconds",
self.name, self.keep_alive)
else:
self._log.info("Keepalive for %s disabled", self.name)
self._server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server_sock.connect(self._config['address'])
# Creates and starts the Request Receiver.
self._request_receiver = _RequestReceiver(sock=self._server_sock,
server=self)
self._request_receiver.start()
# Invokes hook to notify subclass that the Request Receiver
# has been started.
self._on_request_receiver_started()
def close(self):
"""Stops the management of the Remote Adapter and destroys the threads
used by this Server. This instance can no longer be used.
Note that this does not stop the supplied Remote Adapter, as no close
method is available in the Remote Adapter interface. If the process is
not terminating, then the Remote Adapter cleanup should be performed by
accessing the supplied Adapter instance directly and calling custom
methods.
"""
self._request_receiver.quit()
self._executor.shutdown()
self._server_sock.close()
def on_received_request(self, request):
"""Invoked when the RequestReciver gets a new request coming from the
Proxy Adapter.
This method takes the responsibility to proceed with a first
coarse-grained parsing, to identify the three main components of the
packet structure, as follows:
<ID>|<method>|<data>
| | |
| | The arguments to be passed to the method
| |
| The method to invoke on the Remote Adapter
|
The Request Id
Once parsed, the request is then dispatched to the subclass for later
management.
"""
try:
parsed_request = protocol.parse_request(request)
if parsed_request is None:
self._log.warning("Discarding malformed request: %s", request)
return
request_id = parsed_request["id"]
method_name = parsed_request["method"]
data = parsed_request["data"]
self._handle_request(request_id, data, method_name)
except RemotingException as err: