This article collects typical usage examples of the multiprocessing.Event class in Python. If you have been wondering what the Event class actually does, or how to use it, the curated class code examples below may help.
A total of 15 code examples of the Event class are shown here, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
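Before looking at the project excerpts, here is a minimal, self-contained sketch of the basic pattern most of them build on: one process blocks on wait() until another process calls set(). The worker function, event names, and timing values below are illustrative only and are not taken from any of the examples that follow.

import time
from multiprocessing import Event, Process

def worker(ready, stop):
    # Illustrative worker: announce readiness, then idle until told to stop.
    ready.set()                # tell the parent we are running
    while not stop.is_set():
        stop.wait(0.1)         # wake up periodically so shutdown stays prompt

if __name__ == '__main__':
    ready = Event()
    stop = Event()
    p = Process(target=worker, args=(ready, stop))
    p.start()
    ready.wait()               # block until the worker has started
    time.sleep(0.5)            # stand-in for real work
    stop.set()                 # signal the worker to exit
    p.join()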
Example 1: __init__
class fmanager:
    def __init__(self, data, fn):
        self.sf = Event()
        self.sf.clear()
        self.nproc = cpu_count()
        self.pipes = [Pipe() for i in range(self.nproc)]
        self.e = [evaluator(self.pipes[i][1], self.sf, data, fn)
                  for i in range(self.nproc)]
        null = [i.start() for i in self.e]
        return

    def __del__(self):
        self.sf.set()
        null = [i.join() for i in self.e]
        null = [i.terminate() for i in self.e]
        return

    def eval(self, x):
        nd = len(x)
        for i in range(nd):
            self.pipes[i % self.nproc][0].send([i, x[i]])

        solns = []
        while len(solns) < nd:
            for i in range(self.nproc):
                if self.pipes[i][0].poll(0.005):
                    solns.append(self.pipes[i][0].recv())

        solns.sort(key=lambda i: i[0])
        return [i[1] for i in solns]
Example 2: execute_action
def execute_action(self, action):
    event = Event()
    queue = Queue()
    proc = Process(
        target=execute_action_proc,
        args=(self.execute, action, event, queue))
    proc.start()

    # Send heartbeat.
    heartbeat_retry = 0
    while not event.is_set():
        event.wait(config.ACTIVITY_HEARTBEAT_INTERVAL)
        try:
            res = self.heartbeat(self.task_token)
            if res['cancelRequested']:
                proc.terminate()
                proc.join()
                return Result('cancelled', -1, '', '', '', -1)
        except Exception as err:
            if heartbeat_retry <= config.ACTIVITY_HEARTBEAT_MAX_RETRY:
                heartbeat_retry += 1
                continue
            else:
                proc.terminate()
                proc.join()
                raise

    # Evaluate the result.
    result = queue.get_nowait()
    proc.join()
    return result
Example 3: New_Process_Actor
class New_Process_Actor(Actor):
    '''Create an Actor in a new process. Connected as usual with scipysim
    channels. When this Actor is started, it launches a new process, creates
    an instance of the Actor class passed to it in a second thread, and starts
    that actor.
    '''
    def __init__(self, cls, *args, **kwargs):
        super(New_Process_Actor, self).__init__()
        self.cls = cls
        self.args = list(args)
        self.kwargs = kwargs
        self.mqueue = MQueue()
        self.mevent = MEvent()

        if 'input_channel' not in kwargs:
            kwargs['input_channel'] = self.args[0]

        chan = kwargs['input_channel']
        kwargs['input_channel'] = self.mqueue

        print('chan: ', chan)
        self.c2p = Channel2Process(chan, self.mevent, self.mqueue)
        self.c2p.start()

    def run(self):
        self.t = Process(target=target, args=(self.cls, self.args, self.kwargs))
        self.t.start()
        self.mevent.set()  # signal that process is ready to receive
        self.c2p.join()
        self.t.join()
Example 4: test_play_and_record
def test_play_and_record(self):
    """
    Verifies that a Device can record and play back prerecorded events.
    """
    device = evemu.Device(self.get_device_file())
    devnode = device.devnode
    events_file = self.get_events_file()
    # device.record() calls evemu_record() and is thus missing the
    # description that the input file has
    with open(events_file) as e:
        indata = extract_events(strip_comments(e.readlines()))

    recording_started = Event()
    q = Queue()
    record_process = Process(target=record,
                             args=(recording_started, devnode, q))
    record_process.start()
    recording_started.wait(100)
    device.play(open(events_file))

    outdata = strip_comments(q.get())
    record_process.join()
    self.assertEquals(len(indata), len(outdata))

    fuzz = re.compile(r"E: \d+\.\d+ (.*)")
    for i in range(len(indata)):
        lhs = fuzz.match(indata[i])
        self.assertTrue(lhs)
        rhs = fuzz.match(outdata[i])
        self.assertTrue(rhs)
        self.assertEquals(lhs.group(1), rhs.group(1))
Example 5: initProcessCommunications
def initProcessCommunications(self):
    # Queues and events for the acquisition processes
    self.newFreqEvent = Event()
    self.controlEvent = Event()
    self.newScanEvent = Event()
    self.captureRunningEvent = Event()
    self.recordingEvent = Event()

    self.currentVolt = Value('d', 0.0)
    self.currentSamples = Value('i', 0)
    self.currentFreq = Value('d', 0.0)
    self.currentThick = Value('d', 0.0)
    self.currentThin = Value('d', 0.0)
    self.currentPower = Value('d', 0.0)
    self.currentLW = Value('d', 0.0)
    self.currentCycle = Value('i', 0)
    self.protonsPerCycle = Value('i', 0)
    self.protonsForHRS = Value('i', 0)
    self.protonPulse = Value('b', False)
    self.iscool = Value('d', 0.0)

    self.dataQueue = Queue()
    self.freqQueue = Queue()
    self.errorQueue = Queue()
    self.messageQueue = Queue()
    self.dataStreamQueue = Queue()
Example 6: test_sentinel
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, broker=get_broker('sentinel_test:q'))
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Example 7: test_sentinel
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, list_key='sentinel_test:q')
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Example 8: DeviceServer
class DeviceServer(ThreadedTCPServer, Process):
    # causes handle_request to return
    timeout = 1

    def __init__(self, mux, muxdevice, server_address, RequestHandlerClass):
        Process.__init__(self)
        ThreadedTCPServer.__init__(self, server_address, RequestHandlerClass)
        self.mux = mux
        self.muxdev = muxdevice
        self._stop = Event()

    def stop(self):
        self._stop.set()

    def stopped(self):
        return self._stop.is_set()

    def run(self):
        if self.stopped():
            _LOGGER.warning("Thread already stopped")

        while not self.stopped():
            self.handle_request()

        self.socket.close()
        _LOGGER.debug("%s will exit now" % (str(self)))
Example 9: Logger
class Logger(object):
    def __init__(self, filename):
        self.qtag = Queue()
        self.done = Event()
        self.tag = None
        self.filename = filename
        self.file = None

    def start(self):
        self.file = open(self.filename, 'w')
        print('Opened', self.filename, 'for writing.')

    def set_tag(self, tag):
        self.qtag.put(tag)

    def set_done(self):
        self.done.set()

    def log(self, nodeid, msgid, data):
        if not self.qtag.empty():
            self.tag = self.qtag.get()
        if self.done.is_set():
            self.done.clear()
            return True
        L = ['%f' % time.time(), '%d' % nodeid, '%d' % msgid] + list(map(str, data))
        if self.tag:
            L.append(self.tag)
        print(','.join(L), file=self.file)
        self.file.flush()

    def close(self):
        if self.file:
            self.file.close()
            print('File closed.')
Example 10: main
def main():
    # Use alphazero self-play for data generation
    agents_meta = parse_schedule()

    # worker variables of the main process
    board = Board()
    sigexit = Event()
    sigexit.set()  # pre-set signal so main proc generator will iterate only once

    # subprocess data generator
    helper = DataHelper(data_files=[])
    helper.set_agents_meta(agents_meta=agents_meta)
    generator = helper.generate_batch(TRAINING_CONFIG["batch_size"])

    # start generating
    with h5py.File(f"{DATA_CONFIG['data_path']}/latest.train.hdf5", 'a') as hf:
        for state_batch, value_batch, probs_batch in generator:
            for batch_name in ("state_batch", "value_batch", "probs_batch"):
                if batch_name not in hf:
                    shape = locals()[batch_name].shape
                    hf.create_dataset(batch_name, (0, *shape), maxshape=(None, *shape))
                hf[batch_name].resize(hf[batch_name].shape[0] + 1, axis=0)
                hf[batch_name][-1] = locals()[batch_name]

            # prevent the main proc from generating data too quickly;
            # since sigexit has been set, the proc will iterate only once
            run_proc(helper.buffer, helper.buffer_size, helper.lock,
                     sigexit, agents_meta, board)
            board.reset()
Example 11: __init__
def __init__(self, id, config, sequence, hist_obj, results_path, log_id):
    Process.__init__(self)
    self.id = id
    self.config = config
    self.sequence = sequence
    self.hist_obj = hist_obj
    self.agent = Agent(self.id, config, sequence)
    self.results_path = results_path
    self.log_id = log_id

    self.leader_send = None
    self.leader_recv = None
    self.support_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
    self.support_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None

    self.root_div_send = None
    self.leader_div_send = None
    self.agent_div_recv = [None for i in range(1, self.config.num_agents)] if self.agent.id_leader == None else None
    self.support_div_recv = [None for i in range(1, self.config.num_sup + 1)] if self.agent.id_supporters else None

    self.leader_reset_send = None
    self.leader_reset_recv = None
    self.support_reset_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
    self.support_reset_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None

    self.event_restart = Event()
    self.stop_event = Event()
    self.support_stop_event = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None

    self.energy_number = Queue(1)
    self.support_energy_number = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
Example 12: __init__
class SubProcessWrapper:
    cname = __name__ + '.SubProcessWrapper'

    def __init__(self, target, name=None):
        self.target = target
        self.running = False
        self.name = name if name else target.task_name()
        self.kill_event = Event()
        self.logger = logging.getLogger(self.cname)

    def run(self):
        self.logger.info("starting SubProcessTask: {}".format(self.target.task_name()))
        th = Thread(target=self.target, name=self.target.task_name())
        th.start()
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        self.kill_event.wait()
        self.logger.info("stopping SubProcessTask: {}".format(self.target.task_name()))
        self.target.stop()
        th.join()
        self.logger.info("Stopped SubProcessTask: {}".format(self.target.task_name()))

    def __call__(self):
        self.run()

    def get_kill_event(self):
        return self.kill_event
Example 13: __init__
class QueueTask:
    def __init__(self):
        self.queue = JoinableQueue()
        self.event = Event()
        atexit.register(self.queue.join)

        process = Process(target=self.work)
        process.daemon = True
        process.start()

    def work(self):
        while True:
            func, args, wait_for = self.queue.get()

            for evt in wait_for:
                evt.wait()
            func(*args)
            self.event.set()

            self.queue.task_done()

    def enqueue(self, func, args=[], wait_for=[]):
        self.event.clear()
        self.queue.put((func, args, wait_for))
        return self.event
Example 14: ClassifierWorkerPool
class ClassifierWorkerPool(object):
    def __init__(self):
        self.queue = Queue(100)
        self.workers = []
        self.stop = Event()
        self.stop.clear()
        self.queue_feeder = QueueFeeder(self.queue, self.stop)

        row = TrainedClassifiers.objects(name=config.classifier).first()
        if not row:
            raise Exception("Classifier %s does not exist" % config.classifier)

        self.trained_classifier = row.get_classifier()

    def start(self):
        self.queue_feeder.start()

        for i in range(0, config.classifier_pool_size):
            worker = ClassifierWorker(self.trained_classifier, self.queue, self.stop)
            worker.start()
            self.workers.append(worker)

    def terminate(self):
        self.stop.set()
        self.queue_feeder.join()

        for w in self.workers:
            w.join()
Example 15: DataLoaderOnTheFly
class DataLoaderOnTheFly():
    def __init__(self, config):
        default_config = Config(proc_count=4, limit_batch_count=None)
        self.config = default_config(**config)
        self.exit = Event()
        self.batch_queue = Queue(maxsize=10)
        if self.config.limit_batch_count is None:
            self.limited = False
        else:
            self.limited = True
        self.batch_list = []
        self.index = -1

        self.workers = []
        for _ in range(self.config.proc_count):
            self.workers.append(Process(target=config.worker, args=(self,)))
        for w in self.workers:
            w.daemon = True
            w.start()

    def next_batch(self):
        if self.limited:
            if len(self.batch_list) < self.config.limit_batch_count:
                self.batch_list.append(Config(self.batch_queue.get()))
            self.index = (self.index + 1) % self.config.limit_batch_count
            return Config(self.batch_list[self.index])
        else:
            return Config(self.batch_queue.get())

    def __del__(self):
        self.exit.set()
        for w in self.workers:
            w.join()