This article collects typical usage examples of the daemon attribute of Python's multiprocessing.Process, drawn from real projects. If you have been wondering what Process.daemon is for, how to use it, or what it looks like in practice, the curated examples below may help. You can also read more about the class it belongs to, multiprocessing.Process.
The following 15 code examples of Process.daemon are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
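Before the examples, a quick reminder of what the flag actually does: daemon is an attribute (not a method) of multiprocessing.Process; a process with daemon set to True is terminated automatically when the parent process exits, and the attribute must be set before start() is called. A minimal, self-contained sketch (the heartbeat worker is illustrative, not taken from any example below):

import time
from multiprocessing import Process

def heartbeat():
    # would run forever if it were not a daemon
    while True:
        print('still alive')
        time.sleep(1)

if __name__ == '__main__':
    p = Process(target=heartbeat)
    p.daemon = True   # must be set before start(); setting it afterwards raises an error
    p.start()
    time.sleep(3)
    # no join(): the daemon child is killed as soon as the parent exits here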
Example 1: setup
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def setup(self):
    create_link('dummyX', 'dummy')
    t_url = 'unix://\0%s' % (uuid.uuid4())
    p_url = 'unix://\0%s' % (uuid.uuid4())
    self.connect = Event()
    self.release = Event()
    target = Process(target=_run_remote_uplink,
                     args=(t_url, self.connect, self.release))
    target.daemon = True
    target.start()
    self.connect.wait()
    self.connect.clear()
    proxy = Process(target=_run_remote_uplink,
                    args=(p_url, self.connect, self.release))
    proxy.daemon = True
    proxy.start()
    self.connect.wait()
    self.connect.clear()
    self.ip = IPRoute(do_connect=False)
    link, proxy = self.ip.connect(p_url)
    self.ip.register('bala', proxy)
    link, host = self.ip.connect(t_url, addr=proxy)
    service = self.ip.discover(self.ip.default_target, addr=host)
    self.ip.default_peer = host
    self.ip.default_dport = service
    self.dev = self.ip.link_lookup(ifname='dummyX')
Example 2: main
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def main():
    d = Manager().dict()
    a = Process(target=processT, args=(1, d))
    b = Process(target=processT, args=(2, d))
    a.daemon = False
    b.daemon = False
    a.start()
    b.start()
    time.sleep(2)
    d[1] = False
    print(d)
    time.sleep(5)
    d[1] = True
    print(d)
    time.sleep(2)
    d[2] = False
    print(d)
    time.sleep(5)
    d[1] = False
Example 3: p3_add
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def p3_add():
    conn = Connection()
    db = conn.wc
    InvertHour = returnInvertedHour(HOUR)
    for lang in LANGLIST:
        if WEEKDAY == '5':
            HOURDAYDB = str(lang) + '_hitshourlydaily'
            db[HOURDAYDB].update({str(InvertHour): {'$exists': True}},
                                 {'$set': {str(InvertHour): 0}},
                                 False, {'multi': True})
        ruFILE1 = "/tmp/" + str(lang) + "_action/q1_pagecounts.*"
        ruFILE2 = "/tmp/" + str(lang) + "_action/q2_pagecounts.*"
        ruFILE3 = "/tmp/" + str(lang) + "_action/q3_pagecounts.*"
        ruFILE4 = "/tmp/" + str(lang) + "_action/q4_pagecounts.*"
        t = Process(target=UpdateHits, args=(ruFILE1, HOUR, DAY, MONTH, YEAR, lang))
        u = Process(target=UpdateHits, args=(ruFILE2, HOUR, DAY, MONTH, YEAR, lang))
        v = Process(target=UpdateHits, args=(ruFILE3, HOUR, DAY, MONTH, YEAR, lang))
        w = Process(target=UpdateHits, args=(ruFILE4, HOUR, DAY, MONTH, YEAR, lang))
        t.daemon = True
        u.daemon = True
        v.daemon = True
        w.daemon = True
        t.start()
        u.start()
        v.start()
        w.start()
        t.join()
        u.join()
        v.join()
        w.join()
Example 4: _start_continuous_write_and_verify
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def _start_continuous_write_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
    """
    Starts a writer process, a verifier process, a queue to track writes,
    and a queue to track successful verifications (which are rewrite candidates).
    wait_for_rowcount provides a number of rows to write before unblocking and continuing.
    Returns the writer process, verifier process, and the to_verify_queue.
    """
    # queue of writes to be verified
    to_verify_queue = Queue()
    # queue of verified writes, which are update candidates
    verification_done_queue = Queue(maxsize=500)
    writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
    # daemon subprocesses are killed automagically when the parent process exits
    writer.daemon = True
    self.subprocs.append(writer)
    writer.start()
    if wait_for_rowcount > 0:
        self._wait_until_queue_condition('rows written (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
    verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
    # daemon subprocesses are killed automagically when the parent process exits
    verifier.daemon = True
    self.subprocs.append(verifier)
    verifier.start()
    return writer, verifier, to_verify_queue
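This writer/verifier pattern (example 7 below repeats it for counters) relies on daemon processes so the parent never has to clean them up explicitly. A stripped-down, self-contained sketch of the same idea, with hypothetical stand-ins for data_writer and data_checker, might look like this:

from multiprocessing import Process, Queue

def data_writer(to_verify, done):
    # hypothetical writer: pushes keys that still need verification
    for key in range(100):
        to_verify.put(key)

def data_checker(to_verify, done):
    # hypothetical verifier: pulls keys and records successful checks
    for _ in range(100):
        done.put(to_verify.get())

if __name__ == '__main__':
    to_verify_queue = Queue()
    verification_done_queue = Queue(maxsize=500)
    writer = Process(target=data_writer, args=(to_verify_queue, verification_done_queue))
    writer.daemon = True   # killed automatically if the parent dies early
    writer.start()
    verifier = Process(target=data_checker, args=(to_verify_queue, verification_done_queue))
    verifier.daemon = True
    verifier.start()
    verifier.join()
    verified = [verification_done_queue.get() for _ in range(100)]
    print('%d writes verified' % len(verified))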
Example 5: main
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def main():
    inifile = ConfigParser.SafeConfigParser()
    inifile.read("polecolor.ini")
    teamcolor = str(inifile.get("team", "color"))
    abe = absclass.AbsEncoder()
    abe.SetOffset()
    pole = poleclass.Pole("polecolor.ini")
    q_turret = Queue(maxsize=1)
    q_debug = Queue(maxsize=5)
    p_send = Process(target=sendData, args=(q_turret,))
    p_debug = Process(target=debug, args=(q_debug,))
    p_send.daemon = True
    p_debug.daemon = True
    p_debug.start()
    p_send.start()
    while True:
        while gpio.input(19):
            pass
        #q_debug.put(["not auto"])
        for i in [3, 2, 1, 3, 2, 1]:
            autoFire(abe, pole, q_debug, q_turret, teamcolor, i)
            time.sleep(5)
        q_turret.put("notauto")
        while not gpio.input(19):
            q_debug.put(["back please"])
Example 6: parallel_precompute
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def parallel_precompute(global_conf_file=None):
    # Define queues
    queueIn = Queue(nb_workers+2)
    queueOut = Queue(nb_workers+8)
    queueProducer = Queue()
    queueFinalizer = Queue()
    queueConsumer = Queue(nb_workers)
    # Start finalizer
    t = Process(target=finalizer, args=(global_conf_file, queueOut, queueFinalizer))
    t.daemon = True
    t.start()
    # Start consumers
    for i in range(nb_workers):
        t = Process(target=consumer, args=(global_conf_file, queueIn, queueOut, queueConsumer))
        t.daemon = True
        t.start()
    # Start producer
    t = Process(target=producer, args=(global_conf_file, queueIn, queueProducer))
    t.daemon = True
    t.start()
    # Wait for everything to be started properly
    producerOK = queueProducer.get()
    finalizerOK = queueFinalizer.get()
    for i in range(nb_workers):
        consumerOK = queueConsumer.get()
    print("[parallel_precompute: log] All workers are ready.")
    sys.stdout.flush()
    # Wait for everything to be finished
    finalizerEnded = queueFinalizer.get()
    print("[parallel_precompute: log] Done at {}".format(get_now()))
    return
Example 7: _start_continuous_counter_increment_and_verify
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def _start_continuous_counter_increment_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
    """
    Starts a counter incrementer process, a verifier process, a queue to track writes,
    and a queue to track successful verifications (which are re-increment candidates).
    Returns the writer process, verifier process, and the to_verify_queue.
    """
    # queue of writes to be verified
    to_verify_queue = Queue()
    # queue of verified writes, which are update candidates
    verification_done_queue = Queue(maxsize=500)
    incrementer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
    # daemon subprocesses are killed automagically when the parent process exits
    incrementer.daemon = True
    self.subprocs.append(incrementer)
    incrementer.start()
    if wait_for_rowcount > 0:
        self._wait_until_queue_condition('counters incremented (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
    count_verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
    # daemon subprocesses are killed automagically when the parent process exits
    count_verifier.daemon = True
    self.subprocs.append(count_verifier)
    count_verifier.start()
    return incrementer, count_verifier, to_verify_queue
Example 8: serve
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def serve():
    global serving
    if serving:
        return
    serving = True

    def _serve(dir, port, bad_ssl_cert=False):
        base_dir = os.path.join('tests', dir)
        os.chdir(base_dir)
        server = HTTPServer(('', port), SimpleHTTPRequestHandler)
        if bad_ssl_cert:
            server.socket = ssl.wrap_socket(server.socket,
                                            server_side=True,
                                            certfile=os.path.join('..', 'cert.pem'))
        server.serve_forever()

    proc_site = Process(target=_serve, args=('site', 8000))
    proc_site.daemon = True
    proc_site.start()
    proc_external_site = Process(target=_serve, args=('external-site', 8001))
    proc_external_site.daemon = True
    proc_external_site.start()
    proc_bad_ssl = Process(target=_serve, args=('one-page-site', 8002, True))
    proc_bad_ssl.daemon = True
    proc_bad_ssl.start()
Example 9: mp_extract
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def mp_extract(jobs, nWorkers):
    procs = []
    ctarget = len(jobs)
    count = Value("i", 0)
    q_read = Queue(5)
    q_work = Queue()
    # start the reading process
    p = Process(target=read, args=[jobs, q_read])
    p.daemon = True
    p.start()
    # start the worker processes
    for i in range(nWorkers):
        p = Process(target=work, args=[q_read, q_work, count, ctarget])
        p.daemon = True
        p.start()
        procs.append(p)
    # start the saver process
    p = Process(target=save, args=[q_work, ctarget])
    p.daemon = True
    p.start()
    p.join()
    for p in procs:
        p.join()
Example 10: run
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def run(self):
    tsdb_type = config.get('TSDB', 'tsdtype')
    if tsdb_type in ('OddEye', 'InfluxDB', 'KairosDB', 'OpenTSDB'):
        def run_normal():
            while True:
                run_scripts()
                run_shell_scripts()
                time.sleep(cron_interval)

        def run_cache():
            while True:
                upload_cache()
                time.sleep(cron_interval)

        from multiprocessing import Process
        p1 = Process(target=run_normal)
        p1.daemon = True
        p1.start()
        # pass the function itself, not its result: calling run_cache() here
        # would loop forever in the parent and never spawn the child
        p2 = Process(target=run_cache)
        if not p2.is_alive():
            p2.daemon = True
            p2.start()
        p2.join()
        p1.join()
    else:
        while True:
            run_scripts()
            run_shell_scripts()
            time.sleep(cron_interval)
Example 11: run
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def run(self):
    '''
    Run jobs in parallel.
    jobs: an iterator or generator
    '''
    workers = []
    # add jobs to the queue
    worker = Process(target=ADD_JOB, args=(self.job_queue,))
    worker.daemon = True
    worker.start()
    workers.append(worker)
    for i in range(self.process_num):
        worker = Process(target=manual_function, args=(self.function, self.job_queue, self.result_queue))
        worker.daemon = True
        worker.start()
        workers.append(worker)
    try:
        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        print('parent received ctrl-c')
        for worker in workers:
            worker.terminate()
        for worker in workers:
            worker.join()
Example 12: setup
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def setup():
    r = requests.get('http://localhost:8888/sign_in?ocupus_orchestrator')
    peers = dict()
    connected = r.text.split("\n")
    my_info = connected[0].split(",")
    my_id = int(my_info[1])
    messages = Queue()
    initial_peer_id = -1
    for l in connected[1:]:
        info = l.split(",")
        if len(info) > 1:
            if info[0] == "receiver":
                initial_peer_id = int(info[1])
    t = threading.Thread(target=hanging_get, args=(my_id, messages, initial_peer_id))
    t.daemon = True
    t.start()
    print("Started hanging get thread")
    p = Process(target=from_remote_server, args=(5554, messages))
    p.daemon = True
    p.start()
    sysproc = Process(target=system_utilities.power_control_listener)
    sysproc.daemon = True
    sysproc.start()
    print("Started xmq process")
Example 13: main
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def main():
    logging.basicConfig(level=logging.DEBUG)
    event_queue = Queue()
    led_control_queue = Queue()
    monitor = Process(target=button_monitor.run, args=(event_queue,))
    monitor.daemon = True
    monitor.start()
    lcon = Process(target=led_controller.run, args=(
        event_queue, led_control_queue))
    lcon.daemon = True
    lcon.start()
    eloop = Process(target=dispatcher.run, args=({
        'events': event_queue,
        'led_control': led_control_queue,
    },))
    eloop.daemon = True
    eloop.start()
    try:
        monitor.join()
        eloop.join()
        lcon.join()
    except KeyboardInterrupt:
        pass
    logging.info('Successfully quit')
Example 14: spawn_process
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def spawn_process(self, target, *args):
    """
    :type target: function or class
    """
    p = Process(target=target, args=args)
    p.daemon = True
    if target == worker:
        p.daemon = Conf.DAEMONIZE_WORKERS
        p.timer = args[2]
    self.pool.append(p)
    p.start()
    return p
Example 15: me_multiprocessing_need_queue
# Required import: from multiprocessing import Process
# Attribute used: Process.daemon
def me_multiprocessing_need_queue():
    # For other multiprocessing usage, see recommending/init_index.py
    # A Queue can be used directly to synchronize data between processes
    p_fi = Process(target=me_threading, args=(chr_need_print_2[0],))
    p_fi.daemon = True
    p_fi.start()
    p_se = Process(target=me_threading, args=(chr_need_print_2[1],))
    p_se.daemon = True
    p_se.start()
    p_fi.join()
    p_se.join()
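The comment in the last example notes that a Queue can be used directly to synchronize data between processes. A minimal sketch of that idea, with an illustrative square worker and a None stop sentinel (neither comes from the original project):

from multiprocessing import Process, Queue

def square(numbers, results):
    # read from one queue, write to the other, stop on the None sentinel
    for n in iter(numbers.get, None):
        results.put(n * n)

if __name__ == '__main__':
    numbers, results = Queue(), Queue()
    p = Process(target=square, args=(numbers, results))
    p.daemon = True
    p.start()
    for n in range(5):
        numbers.put(n)
    numbers.put(None)  # stop sentinel
    print([results.get() for _ in range(5)])  # -> [0, 1, 4, 9, 16]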