This article collects typical usage examples of the Python method multiprocessing.Process.is_alive. If you are wondering what Process.is_alive does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the class it belongs to, multiprocessing.Process.
Shown below are 15 code examples of Process.is_alive, sorted by popularity by default.
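Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of the most common pattern around Process.is_alive: start a child process, wait with a timeout, and use is_alive() to decide whether the child must be terminated.

# Minimal illustrative sketch: join with a timeout, then check is_alive() before terminating.
import time
from multiprocessing import Process

def slow_task():
    time.sleep(10)  # stand-in for long-running work

if __name__ == '__main__':
    p = Process(target=slow_task)
    p.start()
    p.join(timeout=2)      # wait at most 2 seconds for the child
    if p.is_alive():       # still running after the timeout
        p.terminate()      # ask the OS to stop it (SIGTERM on Unix)
        p.join()           # reap it so it does not linger as a zombie
    print('exit code:', p.exitcode)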
Example 1: mixer_and_priority_synthesis
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def mixer_and_priority_synthesis(s_ast, synrkt, bugs, mutators, score):
    queue = Queue()
    mixer = Process(target=mixer_synthesis, args=(s_ast, synrkt, bugs,
                                                  mutators, True, queue))
    priority = Process(target=priority_synthesis, args=(s_ast, synrkt, bugs,
                                                        mutators, score, True, queue))
    mixer.start()
    priority.start()
    while True:
        if not queue.empty():
            # terminate all processes
            if mixer.is_alive():
                mixer.terminate()
            if priority.is_alive():
                priority.terminate()
            # display results
            (fixes, synthesizer) = queue.get()
            if fixes:
                for fix in fixes:
                    print "At line " + str(fix.lineno) + " and offset " + str(fix.col_offset)
                    print "\t " + SourceVisitor().visit(fix)
            else:
                print "No solution found!"
            print "Winner: " + synthesizer
            break
        time.sleep(1)
Example 2: gentree_multi
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def gentree_multi(options, familylist, gene_sp):
    '''
    Run "pli2tree" on multiple threads.
    '''
    global q
    for fam in familylist:
        q.put(fam)
    global n
    print "# Building tree from multiple alignment for the following families: \n"
    for i in range(options.threads):
        p = Process(target=pli2tree, name='%i' % (i+1), args=(options, gene_sp))
        p.start()
        sleep(options.threads*0.05)
    q.join()
    while n.qsize() > 0:
        for _ in range(10):
            if n.qsize() > 0:
                print n.get(),
        print ""
        sleep(options.threads*0.05)
    if p.is_alive() and q.empty():
        sleep(options.threads*0.2)
        if p.is_alive() and q.empty():
            p.terminate()
    return None
Example 3: collect
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def collect(self, targetdir, timestamp, delta):
    from logging import root
    from multiprocessing import Process
    from os import getpid
    from .. import multiprocessing_logger
    # We want to copy the files in a child process, so in case the filesystem is stuck, we won't get stuck too
    kwargs = dict(targetdir=targetdir, timestamp=timestamp, delta=delta)
    try:
        [logfile_path] = [
            handler.target.baseFilename for handler in root.handlers if self._is_my_kind_of_logging_handler(handler)
        ] or [None]
    except ValueError:
        logfile_path = None
    subprocess = Process(
        target=multiprocessing_logger,
        args=(logfile_path, getpid(), Windows_Event_Logs.collect_process),
        kwargs=kwargs,
    )
    subprocess.start()
    subprocess.join(self.timeout_in_seconds)
    if subprocess.is_alive():
        msg = "Did not finish collecting {!r} within the {} seconds timeout_in_seconds"
        logger.error(msg.format(self, self.timeout_in_seconds))
        subprocess.terminate()
        if not subprocess.is_alive():
            logger.info("Subprocess {!r} terminated".format(subprocess))
        else:
            logger.error("Subprocess {!r} is stuck".format(subprocess))
        raise TimeoutError()
    elif subprocess.exitcode:
        logger.error("Subprocess {!r} returned non-zero exit code".format(subprocess))
        raise RuntimeError(subprocess.exitcode)
Example 4: runFunction
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def runFunction(self, func, args, timeout=3600):
    def target(func, args, retQ):
        ret = func(*args)
        retQ.put(ret)
    retQ = Queue()
    process = Process(target=target, args=(func, args, retQ))
    process.start()
    try:
        ret = retQ.get(block=True, timeout=timeout)
    except Empty:
        ret = (-1, "function timeout, killed")
    try:
        if process.is_alive():
            process.terminate()
            process.join(2)
            if process.is_alive():
                os.kill(int(process.pid), signal.SIGKILL)
                process.join(2)
    except:
        if process.is_alive():
            try:
                os.kill(int(process.pid), signal.SIGKILL)
            except:
                pass
            process.join(2)
    return ret
Example 5: run
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def run(self, output_prefix, run_dict, cmd_dict):
    self.set_params(run_dict)
    self.cmd_dict = cmd_dict
    timeout = None
    try:
        timeout = self.timeout
    except:
        pass
    sleeptime = 5
    p = Process(target=self.run_)
    p.start()
    if not timeout:
        p.join()
        status = 'normal'
    else:
        total_sleep = 0
        while total_sleep < timeout:
            time.sleep(sleeptime)
            total_sleep = total_sleep + sleeptime
            if not p.is_alive():
                p.join()
                status = 'normal'
                break
        if p.is_alive():
            p.terminate()
            status = 'timeout'
        else:
            p.join()
            status = 'normal'
    return status
Example 6: run
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def run(self):
    timeout = None
    try:
        timeout = self.timeout
    except:
        pass
    sleeptime = 5
    p = Process(target=self.run_)
    p.start()
    if not timeout:
        p.join()
        status = 'normal'
    else:
        total_sleep = 0
        while total_sleep < timeout:
            time.sleep(sleeptime)
            total_sleep = total_sleep + sleeptime
            if not p.is_alive():
                p.join()
                status = 'normal'
                break
        if p.is_alive():
            p.terminate()
            status = 'timeout'
        else:
            p.join()
            status = 'normal'
    return status
Example 7: main
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def main():
    """
    Creates instances of the above methods and occasionally checks for crashed
    worker processes & relaunches.
    """
    worker_process = list()
    get_update_process = Process(target=get_updates)
    get_update_process.start()
    for i in range(0, int(CONFIG['BOT_CONFIG']['workers'])):
        worker_process.append(Process(target=process_updates))
        worker_process[i].start()
    time_worker = Process(target=check_time_args)
    time_worker.start()
    while RUNNING.value:
        time.sleep(30)
        for index, worker in enumerate(worker_process):
            if not worker.is_alive():
                del worker_process[index]
                worker_process.append(Process(target=process_updates))
                worker_process[-1].start()
        if not time_worker.is_alive():
            time_worker = Process(target=check_time_args)
            time_worker.start()
        if not get_update_process.is_alive():
            get_update_process = Process(target=get_updates)
            get_update_process.start()
    get_update_process.join()
    time_worker.join()
    for worker in worker_process:
        worker.join()
Example 8: LiveviewServerProtocol
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
class LiveviewServerProtocol(WebSocketServerProtocol):
    def __init__(self, endpoint_url):
        print("LiveviewServerProtocol init.")
        self.endpoint_url = endpoint_url

    def onConnect(self, request):
        print("Client connecting: {0}".format(request.peer))
        self.queue = Queue()
        self.process = Process(target=liveview_main, args=(self.endpoint_url, self.queue))
        self.process.start()

    # @asyncio.coroutine
    def onOpen(self):
        while True:
            self.sendMessage("UNCHI".encode("utf-8"), False)
            time.sleep(0.001)
        print("WebSocket connection open.")
        if self.process and self.process.is_alive():
            while True:
                if not self.queue.empty():
                    payload = self.queue.get()
                    print(payload)
                    self.sendMessage(payload)
                # yield from asyncio.sleep(0.001)

    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))
        if self.process and self.process.is_alive():
            self.process.terminate()

    # this is a hack that enables dynamic protocol initialization
    def __call__(self):
        return self
Example 9: main
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def main(configs, timeout=10 * 60):
    makedirs('data/results', exist_ok=True)
    for config in configs:
        outpath = resultpath(config)
        if path.isfile(outpath):
            continue
        out_queue = Queue()

        def worker():
            result = None
            try:
                result = run_classification(config)
            except BaseException as exc:
                print(traceback.format_exc(), file=stderr)
                result = {'error': repr(exc)}
            out_queue.put(result)

        p = Process(target=worker)
        try:
            p.start()
            p.join(timeout)
            if not p.is_alive():
                result = out_queue.get()
            else:
                result = {'error': 'timed out'}
            out_obj = {'config': config,
                       'result': result}
            with gzip.open(outpath, mode='xt') as f:
                json.dump(utils.namedtuples_replaced(out_obj), f, cls=NamedtupleJSONEncoder, indent=4)
        finally:
            if p.is_alive():
                p.terminate()
Example 10: build_rec_multi
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def build_rec_multi(options, familylist):
    '''
    Run "build_reconciled_tree" on multiple threads.
    '''
    global q
    for fam in familylist:
        q.put(fam)
    global n
    for i in range(options.threads):
        p = Process(target=build_reconciled_tree, name='%i' % (i+1), args=(options,))
        p.start()
        sleep(options.threads*0.05)
    q.join()
    while n.qsize() > 0:
        for _ in range(10):
            if n.qsize() > 0:
                print n.get(),
        print ""
        sleep(options.threads*0.1)
    if p.is_alive() and q.empty():
        sleep(options.threads*0.2)
        if p.is_alive() and q.empty():
            p.terminate()
    return None
Example 11: create_tables_force
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def create_tables_force(engine, delay, retries):
    """Create the tables and **KILL ANY BLOCKING PROCESSES**.

    This command will spawn a process to create the new tables in
    order to find out which process is blocking us. If we didn't do
    this concurrently, then the table creation will have disappeared
    by the time we tried to find its blocker in the postgres backend
    tables.
    """
    logger.info('Running table creator named %s', app_name)
    logger.warning('Running with force=True option %s', app_name)
    from multiprocessing import Process
    p = Process(target=create_graph_tables, args=(engine, delay))
    p.start()
    time.sleep(delay)
    if p.is_alive():
        logger.warning('Table creation blocked!')
        kill_blocking_psql_backend_processes(engine)
        # Wait some time for table creation to proceed
        time.sleep(4)
        if p.is_alive():
            if retries <= 0:
                raise RuntimeError('Max retries exceeded.')
            logger.warning('Table creation failed, retrying.')
            return create_tables_force(engine, delay, retries-1)
Example 12: UDPPipe
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
class UDPPipe(Pipe):
    class PipeHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            data = self.request[0]
            print("Got data from " + str(self.client_address) + ": " + str(data))
            socket = self.request[1]
            socket.sendto(data, (self.server.reader_ip, self.server.reader_port))

    def __init__(self, host, port):
        self.server = SocketServer.UDPServer((host, port), UDPPipe.PipeHandler)
        self.server.writer_ip = None
        self.server.reader_ip = None
        self.server.reader_port = None
        self.server_proc = Process(target=self.server.serve_forever)

    @property
    def writer_ip(self):
        return self.server.writer_ip

    @writer_ip.setter
    def writer_ip(self, ip):
        if self.server_proc.is_alive():
            raise Exception('Can not modify Pipe writer_ip when server is working')
        self.server.writer_ip = ip

    @property
    def reader_ip(self):
        return self.server.reader_ip

    @reader_ip.setter
    def reader_ip(self, ip):
        if self.server_proc.is_alive():
            raise Exception('Can not modify Pipe reader_ip when server is working')
        self.server.reader_ip = ip

    @property
    def reader_port(self):
        return self.server.reader_port

    @reader_port.setter
    def reader_port(self, port):
        if self.server_proc.is_alive():
            raise Exception("Can not modify Pipe reader_port when server is running")
        self.server.reader_port = port

    def run(self):
        if (self.writer_ip is None) or (self.reader_ip is None):
            raise Exception('You must specify writer and reader ip before running Pipe')
        self.server_proc.start()

    def stop(self):
        self.server.shutdown()
        self.server.server_close()
        self.server_proc.join()
Example 13: test_kills_process
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def test_kills_process(self):
    p = Process(target=time.sleep, args=(100,))
    p.start()
    self.assertTrue(p.is_alive())
    kill_tree(p.pid)
    p.join(1)
    self.assertFalse(p.is_alive())
Example 14: data_handler
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def data_handler(spigot_data: SpigotData, lock: threading.Lock):
    """
    Main thread for reading output from spigot IO worker.
    Also interprets data.

    spigot_data - shared SpigotData object
    """
    client, child = Pipe()
    t = Process(target=r_w_worker, args=(child, spigot_data.close_event))
    t.start()
    spigot_data.game.status = SpigotState.RUNNING
    running = True
    while running:
        while t.is_alive():
            if client.poll(0.3):
                buf = client.recv()
                parse_event(spigot_data, buf)
                spigot_data.add_message(buf)
                logging.debug('<<OUTPUT>> {}'.format(buf))
            while not spigot_data.commands.empty():
                command = spigot_data.commands.get()
                client.send(command)
                logging.debug('<<COMMAND>> {}'.format(command))
        # After java process is dead
        spigot_data.status = SpigotState.STOPPED
        spigot_data.add_message(info_message("""The server has stopped. Type 'start' to start. """
                                             """Type 'quit' to close Spigot Monitor"""))  # PEP8ers gonna hate
        spigot_data.game.players = {}  # No players are available on a stopped server...
        while True and not AUTO_RESTART:
            command = spigot_data.commands.get().strip()  # strip because commands have newline appended
            if command.lower() == 'start':
                t = Process(target=r_w_worker, args=(child, spigot_data.close_event))
                t.start()
                logging.debug('Thread created.')
                break
            elif command.lower() == 'quit' or command.lower() == 'stop':
                message = info_message("KTHXBAI")
                spigot_data.add_message(message)
                logging.debug('Quitting program.')
                break
        if AUTO_RESTART:
            t = Process(target=r_w_worker, args=(child, spigot_data.close_event))
            t.start()
            logging.debug('Thread created.')
        if not t.is_alive():  # thread hasn't started again
            running = False
    print('exiting data_handler loop')
Example 15: test_does_not_fail
# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import is_alive [as alias]
def test_does_not_fail(self):
    p = Process(target=child_process)
    p.start()
    self.assertTrue(p.is_alive())
    time.sleep(.1)
    kill_tree(p.pid)
    p.join(1)
    self.assertFalse(p.is_alive())