This article collects typical usage examples of the Python method multiprocessing.Condition.notify_all. If you are unsure what Condition.notify_all does, how to use it, or where it is useful, the curated code examples below should help. You can also read further about the class it belongs to, multiprocessing.Condition.
The following presents 11 code examples of Condition.notify_all, sorted by popularity by default.
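Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: waiting processes sleep inside wait() until another process calls notify_all(). The names waiter and ready are illustrative, and the predicate loop guards against early or spurious wakeups.

from multiprocessing import Condition, Process, Value

def waiter(cond, ready):
    with cond:
        while not ready.value:   # re-check the predicate after every wakeup
            cond.wait()
    print('released')

if __name__ == '__main__':
    cond = Condition()
    ready = Value('b', 0)
    procs = [Process(target=waiter, args=(cond, ready)) for _ in range(3)]
    for p in procs:
        p.start()
    with cond:
        ready.value = 1
        cond.notify_all()        # wake all three waiters at once
    for p in procs:
        p.join()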
Example 1: SynchronizingBus
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
class SynchronizingBus(Bus):
    def __init__(self, sync_delay=1):
        Bus.__init__(self)
        self.sync_delay = sync_delay
        self.condition = Condition()

    def start(self):
        import time
        time.sleep(self.sync_delay)
        self.log("Releasing children")
        self.condition.acquire()
        self.condition.notify_all()
        self.condition.release()
        Bus.start(self)
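The waiting side is not shown above. As a hedged sketch (the child function and the way the bus object is shared with the children are assumptions, not part of the original example), subscribers would block on the same condition until start() releases them:

def child(bus):
    # Blocks until SynchronizingBus.start() calls notify_all().
    with bus.condition:
        bus.condition.wait()
    # ... continue only after the bus has started ...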
Example 2: Barrier
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
class Barrier(object):
    def __init__(self, num_threads):
        self.num_threads = num_threads
        self.threads_left = Value('i', num_threads, lock=True)
        self.mutex = Lock()
        self.waitcond = Condition(self.mutex)

    def wait(self):
        self.mutex.acquire()
        self.threads_left.value -= 1
        if self.threads_left.value == 0:
            self.threads_left.value = self.num_threads
            self.waitcond.notify_all()
            self.mutex.release()
        else:
            self.waitcond.wait()
            self.mutex.release()
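A hedged usage sketch for the Barrier above; the process count and the staggered delays are illustrative only, and it assumes the imports already required by the example (Value, Lock, Condition) plus Process:

import os
import time
from multiprocessing import Process

def worker(barrier, delay):
    time.sleep(delay)
    barrier.wait()   # all three processes pass this point together
    print('process %d passed the barrier' % os.getpid())

if __name__ == '__main__':
    barrier = Barrier(3)
    procs = [Process(target=worker, args=(barrier, 0.1 * i)) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()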
Example 3: OrderedQueue
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
class OrderedQueue(object):
    def __init__(self, maxsize):
        self.queue = Queue(maxsize=maxsize)
        self.lock = Lock()
        self.getlock = Lock()
        self.putcounter = Value('i', -1)
        self.getcounter = Value('i', 0)
        self.cond = Condition(self.lock)
        self.manager = Manager()
        self.getlist = self.manager.list()

    def put(self, index, elem):
        with self.lock:
            while index != self.putcounter.value + 1:
                self.cond.wait()
            self.queue.put((index, elem))
            # sys.stderr.write("right after adding data with SEED %i. Queue size is now %i\n" % (index, self.queue.qsize()))
            self.putcounter.value += 1
            self.cond.notify_all()

    def get(self):
        with self.getlock:
            for i, element in enumerate(self.getlist):
                index, elem = element
                if index == self.getcounter.value:
                    self.getcounter.value += 1
                    del self.getlist[i]
                    return (index, elem)
            while True:
                index, elem = self.queue.get()
                if index == self.getcounter.value:
                    self.getcounter.value += 1
                    return (index, elem)
                else:
                    self.getlist.append((index, elem))

    def close(self):
        return self.queue.close()

    def qsize(self):
        return self.queue.qsize()
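A hedged usage sketch: producers insert out of order, but put() makes each producer wait for its turn, so the consumer receives items in index order. It assumes the fork start method (the default on Linux), since the OrderedQueue instance is shared with the children directly.

from multiprocessing import Process

def produce(q, index):
    q.put(index, 'item-%d' % index)

if __name__ == '__main__':
    q = OrderedQueue(maxsize=10)
    procs = [Process(target=produce, args=(q, i)) for i in (2, 0, 1)]
    for p in procs:
        p.start()
    for _ in range(3):
        print(q.get())   # -> (0, 'item-0'), (1, 'item-1'), (2, 'item-2')
    for p in procs:
        p.join()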
Example 4: CountBucket
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
class CountBucket(Query):
    """
    Class for registering callbacks on counts of packets sent to
    the controller.
    """
    def __init__(self):
        super(CountBucket, self).__init__()
        self.matches = set([])
        self.runtime_stats_query_fun = None
        self.outstanding_switches = []
        self.packet_count = 0
        self.byte_count = 0
        self.packet_count_persistent = 0
        self.byte_count_persistent = 0
        self.in_update_cv = Condition()
        self.in_update = False
        self._classifier = self.generate_classifier()

    def __repr__(self):
        return "CountBucket"

    def eval(self, pkt):
        """
        Evaluate this policy on a single packet.

        :param pkt: the packet on which to be evaluated
        :type pkt: Packet
        :rtype: set Packet
        """
        return set()

    def generate_classifier(self):
        return Classifier([Rule(identity, {self})])

    def apply(self):
        with self.bucket_lock:
            for pkt in self.bucket:
                self.packet_count_persistent += 1
                self.byte_count_persistent += pkt['header_len'] + pkt['payload_len']
            self.bucket.clear()

    def start_update(self):
        """
        Use a condition variable to mediate access to bucket state as it is
        being updated.

        Why condition variables and not locks? The main reason is that the
        state update doesn't happen in just a single function call here,
        since the runtime processes the classifier rule by rule and buckets
        may be touched in arbitrary order depending on the policy. They're
        not all updated in a single function call. In that case,

        (1) Holding locks *across* function calls seems dangerous and
        non-modular (in my opinion), since we need to be aware of this
        across a large function, and acquiring locks in different orders
        at different points in the code can result in tricky deadlocks
        (there is another lock involved in protecting bucket updates in
        the runtime).

        (2) The "with" semantics in Python are clean, and splitting that
        into lock.acquire() and lock.release() calls results in possibly
        replicated failure-handling code that is boilerplate.
        """
        with self.in_update_cv:
            self.in_update = True
            self.matches = set([])
            self.runtime_stats_query_fun = None
            self.outstanding_switches = []

    def finish_update(self):
        with self.in_update_cv:
            self.in_update = False
            self.in_update_cv.notify_all()

    def add_match(self, m):
        """
        Add a match m to the list of classifier rules to be queried for
        counts.
        """
        if m not in self.matches:
            self.matches.add(m)

    def add_pull_stats(self, fun):
        """
        Point to the function that issues stats queries in the
        runtime.
        """
        if not self.runtime_stats_query_fun:
            self.runtime_stats_query_fun = fun

    def pull_stats(self):
        """Issue stats queries from the runtime"""
        queries_issued = False
        with self.in_update_cv:
            while self.in_update:  # ensure buckets not updated concurrently
                self.in_update_cv.wait()
            if self.runtime_stats_query_fun is not None:
                self.outstanding_switches = []
                queries_issued = True
                self.runtime_stats_query_fun()
# ... (remainder of this example omitted) ...
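The start_update/finish_update/pull_stats trio above is an "update in progress" guard. Here is a minimal sketch of the same pattern in isolation; begin_update, end_update and wait_until_stable are illustrative names, not part of the original code.

from multiprocessing import Condition, Value

cv = Condition()
in_update = Value('b', 0, lock=False)   # shared flag, protected by cv itself

def begin_update():
    with cv:
        in_update.value = 1

def end_update():
    with cv:
        in_update.value = 0
        cv.notify_all()          # wake everyone blocked in wait_until_stable()

def wait_until_stable():
    with cv:
        while in_update.value:   # loop guards against spurious wakeups
            cv.wait()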
Example 5: DownloadManager
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
# ... (beginning of this example omitted) ...
        def _fetch(addr, indices, bit_map):
            sock = self.ctx.socket(zmq.REQ)
            try:
                sock.setsockopt(zmq.LINGER, 0)
                sock.connect(addr)
                sock.send_pyobj((SERVER_FETCH, (uuid, indices, self.server_addr)))
                avail = sock.poll(1 * 1000, zmq.POLLIN)
                check_sock = None
                if not avail:
                    try:
                        check_sock = socket.socket()
                        addr_list = addr[len('tcp://'):].split(':')
                        addr_list[1] = int(addr_list[1])
                        check_sock.connect(tuple(addr_list))
                    except Exception as e:
                        logger.warning('connect the addr %s failed with exception %s',
                                       addr, e)
                        _report_bad(addr)
                    else:
                        logger.debug("%s recv broadcast %s from %s timeout",
                                     self.server_addr, str(indices), addr)
                    finally:
                        if check_sock:
                            check_sock.close()
                    return
                result, msg = sock.recv_pyobj()
                if result == SERVER_FETCH_FAIL:
                    _report_bad(addr)
                    return
                if result == SERVER_FETCH_OK:
                    indices, blocks = msg
                    for rank, index in enumerate(indices):
                        if blocks[rank] is not None:
                            write_mmap_handler.seek(bit_map[index][0])
                            write_mmap_handler.write(blocks[rank])
                            bitmap[index] = bit_map[index]
                else:
                    raise RuntimeError('Unknown server response: %s %s' % (result, msg))
            finally:
                sock.close()

        final_path = env.workdir.alloc_tmp_file("broadcast")
        self.uuid_state_dict[uuid] = (final_path, False)
        fp = open(final_path, 'wb')
        fp.truncate(compressed_size)
        fp.close()
        fd = os.open(final_path, os.O_RDWR)
        write_mmap_handler = mmap.mmap(fd, 0, access=ACCESS_WRITE)
        os.close(fd)
        while not all(bitmap):
            remote = []
            for _addr, _bitmap in six.iteritems(sources):
                if block_num == 0:
                    block_num = len(_bitmap)
                    bitmap = [0] * block_num
                    self.uuid_map_dict[uuid] = bitmap
                if not _addr.startswith('tcp://%s:' % self.host):
                    remote.append((_addr, _bitmap))
            self.random_inst.shuffle(remote)
            for _addr, _bitmap in remote:
                _indices = [i for i in range(block_num) if not bitmap[i] and _bitmap[i]]
                if _indices:
                    self.random_inst.shuffle(_indices)
                    _fetch(_addr, _indices[:BATCHED_BLOCKS], _bitmap)
                    self._update_sources(uuid, bitmap, download_guide_sock)
            sources = self._get_sources(uuid, download_guide_sock)
        write_mmap_handler.flush()
        write_mmap_handler.close()
        self.shared_uuid_map_dict[uuid] = bitmap
        self.shared_uuid_fn_dict[uuid] = self.uuid_state_dict[uuid][0]
        self.uuid_state_dict[uuid] = self.uuid_state_dict[uuid][0], True
        download_guide_sock.close()
        with self.download_cond:
            self.download_cond.notify_all()

    def clear(self, uuid):
        if uuid in self.master_broadcast_blocks:
            del self.master_broadcast_blocks[uuid]
            del self.shared_master_blocks[uuid]
        if uuid in self.uuid_state_dict:
            del self.uuid_state_dict[uuid]
        if uuid in self.shared_uuid_fn_dict:
            del self.shared_uuid_fn_dict[uuid]
            del self.shared_uuid_map_dict[uuid]

    def shutdown(self):
        if not self._started:
            return
        self._started = False
        if self.server_thread and self.server_addr.startswith('tcp://%s:' % socket.gethostname()):
            for _, th in six.iteritems(self.download_threads):
                th.join(timeout=0.1)  # only in executor, not needed
            self.server_thread.join(timeout=1)
            if self.server_thread.is_alive():
                logger.warning("Download manager server_thread not stopped.")
            self.manager.shutdown()  # shutdown will try join and terminate server process
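This example only shows the notifying side: once the bitmap is complete, the downloader records the final state and calls download_cond.notify_all(). A hedged sketch of the corresponding consumer (the function name is illustrative; the attributes mirror those used above) would be:

def wait_until_downloaded(manager, uuid, timeout=None):
    with manager.download_cond:
        # uuid_state_dict[uuid] becomes (path, True) once the file is complete
        while not (uuid in manager.uuid_state_dict
                   and manager.uuid_state_dict[uuid][1]):
            manager.download_cond.wait(timeout)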
Example 6: DBPipeline
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
# ... (beginning of this example omitted) ...
        return self._table_prefix

    def __len__(self):
        return self._table_map['job'].count().execute().fetchone()[0]

    def __contains__(self, item):
        return self.has_id(id(item))

    def get_from_name(self, name):
        """
        Returns the item with the given name, or None if no such item
        is known.
        """
        with self.condition:
            tbl_j = self._table_map['job']
            query = tbl_j.select(tbl_j.c.name == name)
            row = query.execute().fetchone()
            if row is None:
                return None
            return row.job

    def has_id(self, item_id):
        """
        Returns True if the queue contains an item with the given id.
        """
        tbl_j = self._table_map['job']
        query = tbl_j.select(tbl_j.c.id == item_id).count()
        return query.execute().fetchone()[0] > 0

    def task_done(self, item):
        with self.condition:
            self.working.remove(item)
            self.all.remove(id(item))
            self.condition.notify_all()

    def append(self, item):
        with self.condition:
            self.queue.append(item)
            self.all.add(id(item))
            self.condition.notify_all()

    def appendleft(self, item, force=False):
        with self.condition:
            if force:
                self.force.append(item)
            else:
                self.queue.appendleft(item)
            self.all.add(id(item))
            self.condition.notify_all()

    def prioritize(self, item, force=False):
        """
        Moves the item to the very left of the queue.
        """
        with self.condition:
            # If the job is already running (or about to be forced),
            # there is nothing to be done.
            if item in self.working or item in self.force:
                return
            self.queue.remove(item)
            self.appendleft(item, force)
            self.condition.notify_all()

    def clear(self):
        with self.condition:
            self.queue = deque()
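Because append(), appendleft() and task_done() all call notify_all(), a consumer can block on the same condition until the pipeline reaches a state it cares about. A hedged sketch (wait_while_working is an illustrative helper, not part of the original class):

def wait_while_working(pipeline, item):
    with pipeline.condition:
        while item in pipeline.working:   # re-check after every wakeup
            pipeline.condition.wait()     # woken by task_done()/append()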
Example 7: Condition
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
import os
import time
from multiprocessing import Process, Condition

# NOTE: sharing a module-level Condition like this relies on fork-style
# process creation (the default on Linux); under 'spawn' each child would
# get its own copy.
cond = Condition()

def wait_condition():
    cond.acquire()
    cond.wait()
    print('[%d] woken!' % os.getpid())
    cond.release()

if __name__ == '__main__':
    for i in range(3):
        Process(target=wait_condition).start()
    time.sleep(1)
    print('notify!')
    cond.acquire()
    cond.notify_all()
    cond.release()
Example 8: solve
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
def solve(max_level, goal, num_workers):
    # prepare message queue shared with workers
    tasks = Queue()
    task_lock = Lock()
    task_cv = Condition(lock=task_lock)

    # create and start workers
    workers = []
    for i in range(0, num_workers):
        parent_conn, child_conn = Pipe()
        worker = Process(target=run_worker,
                         args=(child_conn, goal, max_level, tasks,
                               task_lock, task_cv))
        worker.start()
        workers.append((worker, parent_conn))

    # Find all possible sequences: [n0, n1, n2, ..., nM] (M=max_level)
    # where nX is the number of binary operators so that
    # '1 <n0 ops> 2 <n1 ops> 3 <n2 ops> ... M+1 <nM ops>' can be a valid
    # Reverse Polish Notation.  Key conditions are:
    # 1. n0 + n1 + ... + nM = M
    # 2. for any X, n0 + n1 + ... + nX <= X
    # (Note that from condition #2 n0 is always 0.)
    # We'll build the sequences in 'numops_list' below while exploring cases
    # in a BFS-like (or DP-like) manner.

    # This is a queue to maintain outstanding search results.  Each of its
    # elements is a tuple of 2 items: 'numops_list', 'total_ops'.
    # A tuple of (N, T) means:
    # - N = [n0, n1, ..., nX]
    # - T = sum(N)
    # (Note that we don't necessarily have to keep T as it can be derived
    # from N.  But we do this for efficiency.)
    # The search is completed when len(N) reaches M (i.e., X=M-1) by appending
    # the last item of nM = M - (n0 + n1 + ... + nX) = M - T (see condition #1).
    tmp = [([0], 0)]
    while tmp:
        numops_list, total_ops = tmp.pop(0)
        level = len(numops_list)
        if level < max_level:
            # Expand the sequence with all possible numbers of operators at
            # the current level so we can explore the next level for each of
            # them.
            for i in range(0, level - total_ops + 1):  # see condition #2
                tmp.append((numops_list + [i], total_ops + i))
        else:
            # Found one valid RPN template.  Pass it to workers and have them
            # work on it.
            numops_list.append(max_level - total_ops)
            with task_lock:
                tasks.put(numops_list)
                task_cv.notify()

    # Tell workers all data have been passed.
    solutions = set()
    with task_lock:
        for _ in workers:
            tasks.put(None)
        task_cv.notify_all()

    # Wait until all workers complete the tasks, while receiving any
    # intermediate and last solutions.  The received solutions may not
    # necessarily be fully unique, so we have to unify them here, again.
    # Received data of 'None' means the corresponding worker has completed
    # its task.
    # Note: here we assume all workers are reasonably equally active in
    # sending data, so we simply perform blocking receive.
    conns = set([w[1] for w in workers])
    while conns:
        for c in conns.copy():
            worker_data = c.recv()
            if worker_data is None:
                conns.remove(c)
                continue
            for solution in worker_data:
                if solution not in solutions:
                    solutions.add(solution)

    # All workers have completed.  Clean them up and print the final unified
    # results.  If we are to show all expressions (i.e. goal is None), sort
    # results by the expressions' values (listing integers followed by all
    # non-integers, followed by 'divided by 0' cases).
    for w in workers:
        w[0].join()
    if goal is None:
        l = list(solutions)
        l.sort(key=lambda x: (0, x[0]) if type(x[0]) == int else (1, str(x[0])))
        for solution in l:
            print('%s = %s' % (solution[1], str(solution[0])))
    else:
        for solution in solutions:
            print(solution)
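The BFS enumeration in the first half of solve() is easy to check in isolation. A hedged sketch (rpn_templates is an illustrative name) that builds just the operator-count templates:

def rpn_templates(max_level):
    tmp = [([0], 0)]
    out = []
    while tmp:
        numops_list, total_ops = tmp.pop(0)
        level = len(numops_list)
        if level < max_level:
            for i in range(level - total_ops + 1):             # condition #2
                tmp.append((numops_list + [i], total_ops + i))
        else:
            out.append(numops_list + [max_level - total_ops])  # condition #1
    return out

print(rpn_templates(3))
# -> [[0, 0, 0, 3], [0, 0, 1, 2], [0, 0, 2, 1], [0, 1, 0, 2], [0, 1, 1, 1]]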
Example 9: Pipeline
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
class Pipeline(object):
    """
    A collection that is similar to Python's Queue object, except
    it also tracks items that are currently sleeping or in progress.
    """
    def __init__(self, max_working=1):
        self.condition = Condition(RLock())
        self.max_working = max_working
        self.running = True
        self.paused = False
        self.queue = None
        self.force = None
        self.sleeping = None
        self.working = None
        self.item2id = None
        self.id2item = None  # for performance reasons
        self.name2id = None
        self.id2name = None
        self.clear()

    def __len__(self):
        with self.condition:
            return len(self.id2item)

    def __contains__(self, item):
        with self.condition:
            return item in self.item2id

    def _register_item(self, name, item):
        uuid = uuid4().hex
        self.id2item[uuid] = item
        self.item2id[item] = uuid
        if name is None:
            return uuid
        if name in self.name2id:
            msg = 'an item named %s is already queued' % repr(name)
            raise AttributeError(msg)
        self.name2id[name] = uuid
        self.id2name[uuid] = name
        return uuid

    def get_from_name(self, name):
        """
        Returns the item with the given name, or None if no such item
        is known.
        """
        with self.condition:
            try:
                item_id = self.name2id[name]
            except KeyError:
                return None
            return self.id2item[item_id]

    def has_id(self, item_id):
        """
        Returns True if the queue contains an item with the given id.
        """
        return item_id in self.id2item

    def task_done(self, item):
        with self.condition:
            try:
                self.working.remove(item)
            except KeyError:
                # This may happen if we receive a notification from a
                # thread that was previously enqueued, but then the
                # workqueue was forcefully stopped without waiting for
                # child threads to complete.
                self.condition.notify_all()
                return
            item_id = self.item2id.pop(item)
            self.id2item.pop(item_id)
            try:
                name = self.id2name.pop(item_id)
            except KeyError:
                pass
            else:
                self.name2id.pop(name)
            self.condition.notify_all()

    def append(self, item, name=None):
        """
        Adds the given item to the end of the pipeline.
        """
        with self.condition:
            self.queue.append(item)
            uuid = self._register_item(name, item)
            self.condition.notify_all()
            return uuid

    def appendleft(self, item, name=None, force=False):
        with self.condition:
            if force:
                self.force.append(item)
            else:
                self.queue.appendleft(item)
            uuid = self._register_item(name, item)
            self.condition.notify_all()
            return uuid
# ... (remainder of this example omitted) ...
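A hedged usage sketch exercising only the methods shown above; it assumes the omitted clear() method initialises the queue and the bookkeeping maps, as it does in the full class.

pipe = Pipeline(max_working=2)
pipe.append('job-1', name='first')
pipe.appendleft('job-0', force=True)
print(len(pipe))                    # -> 2
print(pipe.get_from_name('first'))  # -> 'job-1'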
Example 10: WaitableQueue
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
class WaitableQueue(Queue):
    """Queue that uses a semaphore to reliably count items in it"""

    class Vacuum(ThreadLoop):
        def __init__(self, q, l):
            def callback():
                q.wait_notempty(0.1)
                while True:
                    try:
                        val = q.get(False)
                        l.append(val)
                    except Empty:
                        break
            ThreadLoop.__init__(self, callback)

    def __init__(self, maxsize=0):
        self.cond_empty = Condition()
        self.cond_notempty = Condition()
        self._put_counter = Value('i', 0)
        Queue.__init__(self, maxsize)

    def put(self, obj, block=True, timeout=None):
        Queue.put(self, obj, block, timeout)
        self._put_counter.value += 1
        if self.qsize() != 0:
            self.cond_notempty.acquire()
            try:
                self.cond_notempty.notify_all()
            finally:
                self.cond_notempty.release()

    @property
    def put_counter(self):
        return self._put_counter.value

    def get(self, block=True, timeout=None):
        ret = Queue.get(self, block, timeout)
        if self.qsize() == 0:
            self.cond_empty.acquire()
            try:
                self.cond_empty.notify_all()
            finally:
                self.cond_empty.release()
        return ret

    def wait_empty(self, timeout=None):
        """Wait for all items to be got"""
        self.cond_empty.acquire()
        try:
            if self.qsize():
                self.cond_empty.wait(timeout)
        finally:
            self.cond_empty.release()

    def wait_notempty(self, timeout=None):
        """Wait for at least one item to be put"""
        self.cond_notempty.acquire()
        try:
            if self.qsize() == 0:
                self.cond_notempty.wait(timeout)
        finally:
            self.cond_notempty.release()
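A hedged usage sketch: the producer blocks in wait_empty() until the consumer has drained the queue. It assumes the snippet's Queue base class (an older multiprocessing-style Queue that can be subclassed this way) and the fork start method.

from multiprocessing import Process

def consume(q, n):
    for _ in range(n):
        print('got', q.get())

if __name__ == '__main__':
    q = WaitableQueue()
    p = Process(target=consume, args=(q, 3))
    p.start()
    for i in range(3):
        q.put(i)
    q.wait_empty(timeout=5)   # returns once the last item has been taken
    p.join()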
Example 11: RWLock
# Required import: from multiprocessing import Condition [as alias]
# Or: from multiprocessing.Condition import notify_all [as alias]
class RWLock():
    """A Readers-Writer lock.

    Allows for multiple readers or one writer. Writers will not starve.

    Attributes:
        for_reading (RWLock.ReadLock): A lock-like object with appropriate
            `acquire`, `release`, `__enter__` and `__exit__` methods pointed
            to the *read methods of the RWLock. Chiefly for use with the
            `with` statement.
        for_writing (RWLock.WriteLock): A lock-like object with appropriate
            `acquire`, `release`, `__enter__` and `__exit__` methods pointed
            to the *write methods of the RWLock. Chiefly for use with the
            `with` statement.
    """

    class ReadLock():
        def __init__(self, rw):
            self._rw = rw
            self.acquire = rw.acquire_read
            self.release = rw.release_read

        def __enter__(self):
            self.acquire()

        def __exit__(self, exception_type, exception_value, traceback):
            self.release()

    class WriteLock():
        def __init__(self, rw):
            self._rw = rw
            self.acquire = rw.acquire_write
            self.release = rw.release_write

        def __enter__(self):
            self.acquire()

        def __exit__(self, exception_type, exception_value, traceback):
            self.release()

    def __init__(self):
        """Initialises the RWLock."""
        self._condition = Condition()
        self._readers = Value(c_uint64, 0, lock=False)
        self._writers_waiting = Value(c_uint64, 0, lock=False)
        self.for_reading = self.ReadLock(self)
        self.for_writing = self.WriteLock(self)

    def acquire_read(self):
        """Acquire a read lock.

        Blocks if a thread has acquired the write lock or is waiting to
        acquire the write lock.
        """
        with self._condition:
            while self._writers_waiting.value:
                self._condition.wait()
            self._readers.value += 1

    def release_read(self):
        """Release a read lock."""
        with self._condition:
            self._readers.value -= 1
            if not self._readers.value:
                self._condition.notify_all()

    def acquire_write(self):
        """Acquire a write lock.

        Blocks until there are no acquired read or write locks.
        """
        self._condition.acquire()
        self._writers_waiting.value += 1
        while self._readers.value:
            self._condition.wait()
        self._writers_waiting.value -= 1

    def release_write(self):
        """Release a write lock."""
        # Wake any readers that went to sleep while this writer was pending,
        # then hand back the underlying condition lock.
        self._condition.notify_all()
        self._condition.release()
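A hedged usage sketch for RWLock: several readers may hold the lock simultaneously, while a writer gets exclusive access. It relies on the fork start method so the lock can be shared with the children, and the worker functions are illustrative.

from multiprocessing import Process, Value

def reader(rw, shared):
    with rw.for_reading:
        print('reader sees', shared.value)

def writer(rw, shared):
    with rw.for_writing:
        shared.value += 1

if __name__ == '__main__':
    rw = RWLock()
    shared = Value('i', 0)
    procs = [Process(target=reader, args=(rw, shared)) for _ in range(3)]
    procs.append(Process(target=writer, args=(rw, shared)))
    for p in procs:
        p.start()
    for p in procs:
        p.join()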