本文整理汇总了Python中weakref.WeakValueDictionary.keys方法的典型用法代码示例。如果您正苦于以下问题:Python WeakValueDictionary.keys方法的具体用法?Python WeakValueDictionary.keys怎么用?Python WeakValueDictionary.keys使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类weakref.WeakValueDictionary的用法示例。
在下文中一共展示了WeakValueDictionary.keys方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: LockManager
# 需要导入模块: from weakref import WeakValueDictionary [as 别名]
# 或者: from weakref.WeakValueDictionary import keys [as 别名]
class LockManager(object):
    """
    Thread safe lock manager class.

    Locks are kept in a WeakValueDictionary, so a lock disappears from the
    store automatically once no caller holds a reference to it anymore.
    """

    def __init__(self):
        # Guards access to the lock store itself.
        self._lock = _get_lock()
        self._lock_store = WeakValueDictionary()

    def get_lock(self, lockname, reentrant=False):
        """
        Create or return an existing lock identified by lockname.

        :param lockname: key identifying the lock.
        :param reentrant: if True, a *newly created* lock is reentrant
            (ignored when the lock already exists).
        :return: the lock object.
        """
        with self._lock:
            try:
                lock = self._lock_store[lockname]
                logging.debug("LockManager.get_lock: existing lock: %s, %s",
                              lockname, lock)
            except KeyError:
                lock = _get_lock(reentrant)
                self._lock_store[lockname] = lock
                logging.debug("LockManager.get_lock: new lock: %s, %s",
                              lockname, lock)
            logging.debug("LockManager existing locks in store: %s",
                          self._list_locks())
            return lock

    def _list_locks(self):
        # Snapshot into a real list: a WeakValueDictionary keys() view may
        # change size during iteration as locks get garbage collected, and
        # callers (and the docstring of list_locks) expect a list.
        return list(self._lock_store.keys())

    def list_locks(self):
        """
        Return a list of existing lock names in lock store.
        """
        with self._lock:
            return self._list_locks()
示例2: WrappedThreadPoolExecutor
# 需要导入模块: from weakref import WeakValueDictionary [as 别名]
# 或者: from weakref.WeakValueDictionary import keys [as 别名]
class WrappedThreadPoolExecutor(ThreadPoolExecutor, EventReactorMixin):
    '''Wraps a :class:`.ThreadPoolExecutor` that listens to a stop event.

    :class:`Task` instances passed to :meth:`submit` are tracked weakly so
    they can be stopped when the reactor fires its STOP event.
    '''

    def __init__(self, max_workers, event_reactor):
        ThreadPoolExecutor.__init__(self, max_workers)
        EventReactorMixin.__init__(self, event_reactor)
        event_reactor.register_handler(EventReactor.STOP_ID, self._stop_cb)
        # Weak values: a finished Task is dropped automatically once nothing
        # else references it.
        self._task_map = WeakValueDictionary()

    def _stop_cb(self, event_id):
        _logger.debug('WrappedThreadPoolExecutor stopping everything')
        # Snapshot the values first. Iterating keys() and indexing the weak
        # dict again is racy: an entry may be garbage collected between the
        # two steps (KeyError) and the dict may change size mid-iteration
        # (RuntimeError on Python 3).
        for task in list(self._task_map.values()):
            task.stop()
        self.shutdown(wait=False)

    def submit(self, fn, *args, **kwargs):
        # Only Task instances are tracked; arbitrary callables pass through.
        if isinstance(fn, Task):
            self._task_map[id(fn)] = fn
        return ThreadPoolExecutor.submit(self, fn, *args, **kwargs)
示例3: BufferManager
# 需要导入模块: from weakref import WeakValueDictionary [as 别名]
# 或者: from weakref.WeakValueDictionary import keys [as 别名]
class BufferManager(object):
    """Manage pyopencl device buffers mirroring host numpy arrays.

    Arrays are tracked weakly, keyed by ``id(array)``, so buffers whose host
    array has been garbage collected can be purged.
    """

    MEMFLAGS = mf.READ_WRITE | mf.COPY_HOST_PTR
    READ = 0
    WRITE = 1
    DEBUG = True

    def __init__(self, engine=None, context=None, queue=None):
        """Build context/queue from an engine index, an existing context,
        or the ENGINE environment variable."""
        if engine is None and context is None:
            # Look in environment for engine selection
            engine = os.environ.get("ENGINE", None)
            assert engine is not None
            # Environment values are strings; a device *index* is needed
            # below, so convert explicitly.
            engine = int(engine)
        self.ctx = context
        self.queue = queue
        if self.ctx is None:
            self.ctx = pyopencl.Context(
                [pyopencl.get_platforms()[0].get_devices()[engine]])
            self.queue = None
        if self.queue is None:
            self.queue = pyopencl.CommandQueue(self.ctx)
        # Buffer management bookkeeping.
        self.arrays = Weak()   # weak map: id(array) -> array
        self.buffers = {}      # strong map: id(array) -> pyopencl.Buffer
        self.hits = self.misses = 0
        self.purged = 0

    def purgeBuffers(self):
        """Drop buffers whose host array is no longer referenced."""
        live = set(self.arrays.keys())
        # Iterate over a snapshot: deleting from a dict while iterating its
        # live keys() view raises RuntimeError on Python 3.
        for aid in list(self.buffers.keys()):
            if aid not in live:
                del self.buffers[aid]
                self.purged += 1

    def makeBuffer(self, a):
        """Create, register and return a device buffer initialized from a."""
        buf = pyopencl.Buffer(self.ctx, self.MEMFLAGS, hostbuf=a)
        aid = id(a)
        self.arrays[aid] = a
        self.buffers[aid] = buf
        return buf

    def ensureBuffer(self, a):
        """Return the buffer for a, creating one if necessary."""
        buf = self.findBuffer(a, self.WRITE)
        if buf is None:
            buf = self.makeBuffer(a)
        return buf

    def readBuffer(self, a):
        """Copy the device buffer for a back into the host array a."""
        buf = self.findBuffer(a, self.READ)
        # Temporarily flatten so the copy sees a contiguous 1-D view;
        # restored afterwards.
        shape = a.shape
        strides = a.strides
        a.shape = (a.size,)
        a.strides = (strides[-1],)
        pyopencl.enqueue_copy(self.queue, a, buf).wait()
        a.shape = shape
        a.strides = strides
        return buf

    def writeBuffer(self, a):
        """Copy the host array a into its device buffer (created if needed)."""
        buf = self.ensureBuffer(a)
        shape = a.shape
        strides = a.strides
        a.shape = (a.size,)
        a.strides = (strides[-1],)
        pyopencl.enqueue_copy(self.queue, buf, a).wait()
        a.shape = shape
        a.strides = strides
        return buf

    def findBuffer(self, a, op):
        """Find an appropriate buffer for a. Tricky.

        :raises ValueError: when reading back an array that has no
            registered buffer (op == READ with no match).
        """
        assert op in (self.READ, self.WRITE)
        self.purgeBuffers()
        aid = id(a)
        if aid in self.buffers:
            # Complete match, easy decision.
            self.hits += 1
            return self.buffers[aid]
        self.misses += 1
        # No match at all: reading an array back with no matching buffer is
        # fatal; writing just means a buffer must be created by the caller.
        if op == self.READ:
            raise ValueError("Array not in yapocis management, you may have written to it, or be using an assigned or .copy")
        return None
示例4: restartProcessGroup
# 需要导入模块: from weakref import WeakValueDictionary [as 别名]
# 或者: from weakref.WeakValueDictionary import keys [as 别名]
def restartProcessGroup(self, name):
'''Restart all procs in supervisor process group .. rapidly!
Returns a list of rpc faults if an error occurs.
@param string name name of process group to restart
@return boolean result true if successful
'''
self._update('restartProcessGroup')
group = self.supervisord.process_groups.get(name)
if group is None:
raise RPCError(RestarterFaults.BAD_GROUP)
transit_states = (STARTING,STOPPING)
processes = WeakValueDictionary((p.config.name,p) for p in group.processes.itervalues())
allprocs = set(processes.keys())
procnames = [p.config.name for p in group.get_unstopped_processes()]
unstopped = set(procnames)
started = set()
ignore = set()
errs = list()
timer = Timer()
def get_proc(name):
try:
return processes[name]
except KeyError:
if name in procnames:
procnames.remove(name)
unstopped.discard(name)
started.discard(name)
ignore.discard(name)
def restartem():
loop_count = timer.inc_counter()
# stagger_factor is how "often" we stop procs
# 2 = every other call
# 3 = every third call, etc
stagger = min(self.stagger_factor or 1,len(unstopped) or 1)
stop_modulus = loop_count % stagger
if not timer.is_started():
timer.start()
elif timer.elapsed() > self.timeout:
nremaining = (len(allprocs) - len(started.union(ignore))) + len(unstopped)
e = RPCError(RestarterFaults.TIMEOUT,
'timeout expired after %.1f seconds, loop count %d, %d procs pending restart' % \
(timer.elapsed(),loop_count,nremaining))
if errs:
errs.append(e)
return errs
raise e
for name in sorted(allprocs):
p = get_proc(name)
if p is None:
continue
if name not in unstopped and name not in started and name not in ignore:
state = p.get_state()
if state == BACKOFF:
if loop_count > 0:
errs.append(RPCError(RestarterFaults.START_FAILED,
'%s: process failing startup, in backoff mode' % (name,)))
ignore.add(name)
else:
msg = p.stop()
if msg is not None:
errs.append(RPCError(RestarterFaults.STOP_FAILED,'BACKOFF/%s: %s' % (name,msg)))
ignore.add(name)
elif state != STARTING and state in RUNNING_STATES:
started.add(name)
elif state in STOPPED_STATES:
p.spawn()
if p.spawnerr:
errs.append(RPCError(Faults.SPAWN_ERROR,name))
ignore.add(name)
elif state not in transit_states:
errs.append(RPCError(RestarterFaults.BAD_STATE,
'%s: bad state during start [%s]' % (name,_get_state_desc(state))))
ignore.add(name)
for i,name in enumerate(sorted(unstopped,reverse=True)):
if loop_count < stagger and (i % stagger) != stop_modulus:
continue
p = get_proc(name)
if p is None:
continue
state = p.get_state()
unstopped.discard(name)
if state in RUNNING_STATES:
msg = p.stop()
if msg is not None:
errs.append(RPCError(RestarterFaults.STOP_FAILED,'%s: %s' % (name,msg)))
ignore.add(name)
elif state not in STOPPED_STATES and state not in transit_states:
errs.append(RPCError(Faults.BAD_STATE,
'%s: bad state during stop [%s]' % (name,_get_state_desc(state))))
ignore.add(name)
if not unstopped and started.union(ignore) == allprocs:
if errs:
return errs
return True
#.........这里部分代码省略.........
示例5: Memory
# 需要导入模块: from weakref import WeakValueDictionary [as 别名]
# 或者: from weakref.WeakValueDictionary import keys [as 别名]
#.........这里部分代码省略.........
#remove m pages from the page2maps..
for p in xrange(self._page(m.start), self._page(m.end)):
del self.page2map[p]
#remove m from the maps set
self.maps.remove(m)
#unmap the range from m possibly generating 0, 1 or 2 new maps
new_maps += m.unmap(start, size)
#reattach the newly generated maps (it may be none)
for nm in new_maps:
self.maps.add(nm)
for p in xrange(self._page(nm.start), self._page(nm.end)):
self.page2map[p] = nm
logger.debug("Unmap memory @%x size:%x", start, size)
def mprotect(self, start, size, perms):
    '''
    Changes the access permissions to the memory mapped in the specified range.

    @param start: start range address.
    @param size: size of the range.
    @param perms: new permissions for the memory within the range.
    @todo: fix when fail return True./False/Exception?
    @todo: check perms and what happens if the same of existent perms.
    '''
    # Align the requested range to page boundaries.
    start = self._floor(start)
    end = self._ceil(start+size-1)
    size = end-start
    # Select all mappings that have at least 1 byte inside the range.
    affected = set()
    p = self._page(start)
    while p < self._page(end):
        # Test membership on the dict directly; materializing .keys()
        # first builds a throwaway list on every iteration (Python 2).
        if p in self.page2map:
            m = self.page2map[p]
            affected.add(m)
            # Jump past this map instead of walking page by page.
            p = self._page(m.end)
        else:
            p += 1
    new_maps = []
    for m in affected:
        # Detach m's pages from page2map...
        # NOTE(review): the upper bound here is self._page(m.end-1) while
        # the analogous loop in munmap uses self._page(m.end) — confirm
        # which is intended; this may leave m's last page registered.
        for p in xrange(self._page(m.start), self._page(m.end-1)):
            del self.page2map[p]
        # ...and remove m from the maps set.
        self.maps.remove(m)
        # Re-protect the range within m, possibly generating 0, 1 or 2 new maps.
        new_maps += m.mprotect(start, size, perms)
    # Reattach the newly generated maps (there may be none).
    for nm in new_maps:
        self.maps.add(nm)
        for p in xrange(self._page(nm.start), self._page(nm.end)):
            self.page2map[p] = nm
    logger.debug("Change perms to memory @%x size:%x newperms: %s", start, size, perms)
def _getMap(self, address):
"""
Returns the L{MMap} object containing the address.
@rtype: L{MMap}
@param address: the address to obtain its mapping.
示例6: MessageFactory
# 需要导入模块: from weakref import WeakValueDictionary [as 别名]
# 或者: from weakref.WeakValueDictionary import keys [as 别名]
#.........这里部分代码省略.........
def set_frozen(self):
    """Freeze the factory: no further message registration is allowed.

    Freezing is what makes hash generation possible.
    """
    self._frozen = True
def reset_context(self, context):
    """Prepare *context* to serve as state for stream unpacking.

    :param context: object which will be prepared
    """
    # Give the context a fresh unpacker from the serialization adapter.
    context._unpacker = self.s_adapter.unpacker()
def _process_message(self, message):
    """Instantiate the registered message class for a packed message.

    ``message`` is a sequence whose first element is the type_id and whose
    remaining elements are the message fields.

    :return: the constructed message, or None (after logging) on error
    """
    # Pre-bind so the KeyError handler never hits an unbound local if
    # message[0] itself raises KeyError (e.g. a mapping was passed in).
    type_id = None
    try:
        type_id = message[0]
        return self._message_types[type_id](*message[1:])
    except KeyError:
        _logger.error('Unknown message type_id: %s', type_id)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # propagate instead of being logged as an unpacking error.
        _logger.error('Message unpacking error: %s', message)
def unpack(self, data):
    """Unpack a single message from a string.

    :param data: packed message data as a string
    :return: message, or None if the data could not be decoded
    """
    _logger.debug("Unpacking message (length: %d)", len(data))
    message = self.s_adapter.unpack(data)
    # Guard clause: a failed decode yields None from the adapter.
    if message is None:
        _logger.error('Data corrupted')
        _logger.debug('Data: %r', data)
        return None
    return self._process_message(message)
def unpack_all(self, data, context):
    """Feed unpacker with data from stream and unpack all messages.

    :param data: packed message(s) data as a string
    :param context: object previously prepared with :meth:`reset_context`
    :return: iterator over messages
    """
    _logger.debug("Unpacking data (length: %d)", len(data))
    context._unpacker.feed(data)
    try:
        for message in context._unpacker:
            yield self._process_message(message)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed as data corruption.
        _logger.error('Data corrupted')
        # NOTE(review): _reset_unpacker is not defined anywhere visible in
        # this class; reset_context(context) may be what was intended —
        # confirm before relying on this recovery path.
        self._reset_unpacker()  # prevent from corrupting next data
        return
def get_by_name(self, name):
    """Look up a registered message class by its name.

    :param name: name of message
    :return: message class (namedtuple)
    """
    return self._message_names[name]
def get_by_type(self, type_id):
    """Look up a registered message class by its type identifier.

    :param type_id: type identifier of message
    :return: message class (namedtuple)
    """
    return self._message_types[type_id]
def get_params(self, message_cls):
    """Return the tuple of (type_id, sending keyword arguments).

    :param message_cls: message class created by register
    :return: int, dict
    """
    return self._message_params[message_cls]
def get_hash(self):
    """Calculate and return hash.

    Hash depends on registered messages and used serializing library.
    Only available once the factory has been frozen; otherwise a warning
    is logged and None is returned.

    :return: int
    """
    if not self._frozen:
        _logger.warning('Attempt to get hash of not frozen MessageFactory')
        return None
    if self._hash is None:
        # sorted() instead of keys().sort(): on Python 3 dict.keys() is a
        # view with no .sort() method, so the original list.sort() crashed.
        ids = sorted(self._message_types)
        l = list()
        a = getattr(self.s_adapter, 'selected_adapter', self.s_adapter)
        l.append(a.__name__)
        for i in ids:
            p = self._message_types[i]
            l.append((i, p.__name__, p._fields))
        # mask so the value is the same on 32 & 64 bit platforms
        self._hash = hash(tuple(l)) & 0xffffffff
    return self._hash
示例7: __init__
# 需要导入模块: from weakref import WeakValueDictionary [as 别名]
# 或者: from weakref.WeakValueDictionary import keys [as 别名]
class Entity:
    """A node in a field graph, weakly linked to other entities via edges."""

    def __init__(self, entType, entValue, entField):
        # Guard clause: fail fast on a bad field argument.
        if not isinstance(entField, Field):
            raise TypeError("Invalid field argument, field instance expected!")
        self.type = entType
        self.value = entValue
        self.field = entField
        self.group = None
        # Weak values: this mapping must not keep linked entities alive.
        self.links = WeakValueDictionary()  # dict of linked entities
        self.field.registerEntity(self)  # update the entity registry

    def linkTo(self, eTwo):
        ''' Linking operation is bi-directional, affects both entities equally.'''
        # Membership test directly on the mapping — no need for .keys().
        if Edge.linkId(self, eTwo) not in self.links:
            # create a new edge and update both entities' link tables
            newlink = Edge(self, eTwo, self.field)
            self.links[newlink.id] = eTwo
            eTwo.links[newlink.id] = self
            # case when the first entity's group is not set
            if self.group is None:
                # assuming the second entity has already a group assigned
                try:
                    eTwo.group.addMember(self)
                # except the second entity has no group: create one for both
                except AttributeError:
                    newGroup = Group(self.field)
                    newGroup.addMember(self)
                    newGroup.addMember(eTwo)
            # case when only the first entity has a group
            elif eTwo.group is None:
                self.group.addMember(eTwo)
            # case when both entities have different groups set
            elif self.group.name != eTwo.group.name:
                if self.group.size > eTwo.group.size:
                    # first group wins
                    self.group.annexMembers(eTwo.group)
                else:
                    # second group wins
                    eTwo.group.annexMembers(self.group)

    def getLinks(self):
        ''' Return the entities directly linked to this one.'''
        return self.links.values()

    def removeLink(self, eTwo):
        ''' Remove linked entity.'''
        linkId = Edge.linkId(self, eTwo)
        self.links.pop(linkId)

    def __repr__(self):
        return repr(self.value)

    def __del__(self):
        ''' Delete the edges to linked entities.'''
        # Snapshot the keys: eliminateEdge presumably mutates link tables,
        # and a weak dict can also shrink during iteration — iterating the
        # live keys() view while that happens raises RuntimeError.
        for linkId in list(self.links.keys()):
            self.field.eliminateEdge(linkId)
示例8: PersistentDict
# 需要导入模块: from weakref import WeakValueDictionary [as 别名]
# 或者: from weakref.WeakValueDictionary import keys [as 别名]
class PersistentDict(object):
"""
Mapping object that is persistently stored
:param store_uri: URI for storing buckets; see :py:class:`~BaseBucketStore`
:type store_uri: :py:class:`str`
:param bucket_count: number of buckets to use for storing data
:type bucket_count: :py:class:`int`
:param bucket_salt: salt for finding buckets to store data
:type bucket_salt: :py:class:`int`
:param cache_size: number of buckets to LRU-cache in memory
:type cache_size: :py:class:`int`
:param cache_keys: whether to cache all keys in memory
:type cache_keys: :py:class:`bool`
"""
persistent_defaults = {
'bucket_count': 32,
'bucket_salt': 0,
}
def __init__(self, store_uri, bucket_count=NOTSET, bucket_salt=NOTSET, cache_size=3, cache_keys=True):
    """Initialize the persistent mapping from its backing bucket store."""
    self._bucket_store = BaseBucketStore.from_uri(store_uri=store_uri, default_scheme='file')
    # set empty fields first so loading the stored head can overwrite them
    self._bucket_count = None
    self._bucket_salt = None
    self._bucket_keys = set()
    self.bucket_key_fmt = None
    self._keys_cache = None
    self._bucket_cache = None
    self._cache_size = None
    # load current settings from the store's head bucket, if one exists
    try:
        for attr, value in self._bucket_store.fetch_head().items():
            setattr(self, attr, value)
        self._update_bucket_key_fmt()
    except BucketNotFound:
        # no head stored yet: keep the empty defaults, finalized below
        pass
    # apply new settings — these presumably go through property setters
    # (bucket_count setter not visible here) that validate/normalize and
    # keep stored values when NOTSET is passed; confirm against the class
    self.bucket_count = bucket_count
    self.bucket_salt = bucket_salt
    # LRU store for objects fetched from disk
    self.cache_size = cache_size
    # weakref store for objects still in use
    self._active_buckets = WeakValueDictionary()
    self._active_items = WeakValueDictionary()
    # store new settings
    self._store_head()
    # cache keys in memory
    self.cache_keys = cache_keys
@property
def store_uri(self):
    """URI of the underlying bucket store."""
    return self._bucket_store.store_uri
# Settings
def _store_head(self):
    """
    Store the meta-information of the dict
    """
    # Work directly on the internal attributes; the property setters have
    # already run as part of initialization.
    head = {}
    for attr in ('_bucket_count', '_bucket_salt', '_bucket_keys'):
        head[attr] = getattr(self, attr)
    self._bucket_store.store_head(head)
def _bucket_fmt_digits(self, bucket_count=None):
    """Return the number of hex digits required for the bucket name"""
    if not bucket_count:
        # fall back to this mapping's configured bucket count
        bucket_count = self._bucket_count
    digits = int(math.ceil(math.log(bucket_count, 16)))
    # never fewer than one digit
    return digits if digits > 1 else 1
# exposed settings
@property
def cache_size(self):
return self._cache_size
@cache_size.setter
def cache_size(self, value):
self._cache_size = int(value or 1)
self._bucket_cache = deque(maxlen=self.cache_size)
@property
def bucket_salt(self):
    """
    Get/Set the ``bucket_salt`` of the persistent mapping

    :note: Assigning a new ``bucket_salt`` forces **all** data buckets to be
           recreated; until those new buckets exist, changes to the mapping
           content may be silently dropped.
    """
    return self._bucket_salt
@bucket_salt.setter
def bucket_salt(self, value):
# default if unset
if value == NOTSET:
if self._bucket_salt is not None:
return
self._bucket_salt = self.persistent_defaults['bucket_salt']
#.........这里部分代码省略.........