This article collects typical usage examples of the SortedDict.popitem method from the Python sortedcontainers package. If you have been asking yourself what exactly SortedDict.popitem does and how to use it, the curated code examples below may help. You can also read more about the containing class, sortedcontainers.SortedDict.
The following shows 9 code examples of SortedDict.popitem, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
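Before the examples, here is a minimal sketch of the method itself. Note that the signature changed between library versions: sortedcontainers 1.x used popitem(last=True), while 2.x uses popitem(index=-1); both styles appear in the snippets below. The expected outputs in the comments assume the 2.x API.

from sortedcontainers import SortedDict

sd = SortedDict({'b': 2, 'a': 1, 'c': 3})
print(sd.popitem())   # ('c', 3) -- by default the item with the greatest key is removed
print(sd.popitem(0))  # ('a', 1) -- index 0 removes the item with the smallest key
print(sd)             # SortedDict({'b': 2})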
Example 1: arrayRDP
# Required module: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import popitem [as alias]
# Also requires: numpy (as np), sortedcontainers.SortedList, and the helpers
# max_vdist and _aRDP from the snippet's source module
def arrayRDP(arr, epsilon=0.0, n=None):
    """
    This is a slightly modified version of the _aRDP function that accepts
    as arguments both the distance tolerance and the maximum number of
    points the algorithm may select.

    **Note:** The results of this algorithm should be identical to those of
    the _aRDP function if the *n* parameter is not specified. In that case
    the performance is slightly worse, although the asymptotic complexity is
    the same. For this reason, this function delegates to _aRDP when the *n*
    parameter is missing.

    Parameters
    ----------
    arr:
        Array of values of consecutive points.
    epsilon:
        Maximum difference allowed in the simplification process.
    n:
        Maximum number of points in the resulting simplified array.

    Returns
    -------
    out:
        Array of indices of the selected points.
    """
    if n is None:
        return _aRDP(arr, epsilon)
    if epsilon <= 0.0:
        raise ValueError('Epsilon must be > 0.0')
    n = n or len(arr)
    if n < 3:
        return arr
    fragments = SortedDict()
    # Distances are stored negated so that the fragment with the largest
    # distance sorts first (SortedDict orders its keys ascending).
    dist, idx = max_vdist(arr, 0, len(arr) - 1)
    fragments[(-dist, idx)] = (0, len(arr) - 1)
    while len(fragments) < n - 1:
        (dist, idx), (first, last) = fragments.popitem(last=False)
        if -dist <= epsilon:
            # Put the item back so it is not lost.
            fragments[(dist, idx)] = (first, last)
            break
        else:
            # Split the fragment at the selected index.
            dist, newidx = max_vdist(arr, first, idx)
            fragments[(-dist, newidx)] = (first, idx)
            dist, newidx = max_vdist(arr, idx, last)
            fragments[(-dist, newidx)] = (idx, last)
    # Collect the first index of every fragment, in order.
    result = SortedList(i[0] for i in fragments.values())
    result.add(len(arr) - 1)
    return np.array(result)
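A hypothetical invocation of the function above might look as follows; max_vdist and _aRDP are helpers from the snippet's source module and are assumed to be available alongside it, and the input signal is made up for illustration.

import numpy as np

signal = np.cumsum(np.random.randn(100))       # a noisy 100-sample curve
indices = arrayRDP(signal, epsilon=0.5, n=10)  # keep at most 10 significant points
print(signal[indices])                         # values of the simplified polyline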
Example 2: BaseNode
# Required module: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import popitem [as alias]
# Also requires: LazyNode, add_integrity and encode from the snippet's repository
class BaseNode(object):
    def __init__(self, tree):
        self.tree = tree
        self.bucket = SortedDict()
        self.changed = False

    def _split(self):
        """
        Creates a new node of the same type and splits the contents of the
        bucket into two parts of equal size. The lower keys are kept in the
        bucket of the current node; the higher keys are stored in the bucket
        of the new node. Afterwards, the new node is returned.
        """
        other = self.__class__(tree=self.tree)
        size = len(self.bucket)
        for i in range(size // 2):
            # popitem() removes the greatest remaining key, so the upper
            # half of the bucket migrates to the new node.
            key, value = self.bucket.popitem()
            other.bucket[key] = value
        # print("New node created: " + str(other))
        return LazyNode(node=other, tree=self.tree)

    def _insert(self, key, value):
        """
        Inserts the key and value into the bucket. If the bucket has grown
        too large, the node is split into two nodes.
        """
        self.bucket[key] = value
        self.changed = True
        # print(str(key) + " inserted into: " + str(self.bucket))
        if len(self.bucket) > self.tree.max_size:
            new_node = self._split()
            new_node.node.changed = True
            return new_node

    def _get_data(self):
        """
        Returns the encoded data of the leaf node, containing its type and
        the key/value pairs. These values will eventually be the offsets of
        the documents.
        """
        # print("Leaf committed: " + str(self) + " bucketsize: " +
        #       str(len(self.bucket)))
        data = {"type": "Leaf", "entries": self.bucket}
        # print("Leaf data: " + str(data))
        return add_integrity(encode(data))
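The popitem() loop in _split is the heart of the node split: each call removes the greatest remaining key, so the upper half of the bucket migrates to the new node. A standalone sketch of that pattern, independent of the tree classes above:

from sortedcontainers import SortedDict

bucket = SortedDict({'a': 1, 'b': 2, 'c': 3, 'd': 4})
other = SortedDict()
for _ in range(len(bucket) // 2):
    key, value = bucket.popitem()  # pops ('d', 4), then ('c', 3)
    other[key] = value
print(bucket)  # SortedDict({'a': 1, 'b': 2}) -- lower keys stay
print(other)   # SortedDict({'c': 3, 'd': 4}) -- higher keys move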
Example 3: len
# Required module: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import popitem [as alias]
# (excerpt from a clustering script; upwords, tempp, limit, a, l and l1 are
# defined in the omitted part of the file)
sys.stdout.flush()
start = time.clock()
while len(upwords) < l - 2:
    for i in range(0, l):
        if i in upwords:
            continue
        for j in range(i + 1, l):
            if j in upwords:
                continue
            # Keep only the `limit` smallest cosine distances: popitem()
            # evicts the largest key once the dict grows too big.
            dist = spatial.distance.cosine(a[i], a[j])
            tempp[dist] = (i, j)
            if len(tempp) > limit:
                tempp.popitem()
    dicl = len(tempp)
    for i in range(0, dicl):
        # Drain the smallest distances first and pair up unused words.
        tmp = tempp.popitem(last=False)
        if tmp[1][0] not in upwords and tmp[1][1] not in upwords:
            l1.append(tmp[1])
            upwords.add(tmp[1][0])
            upwords.add(tmp[1][1])
print('l1len:', len(l1))
sys.stdout.flush()
l = len(l1)
upwords = set()
while len(upwords) < l - 2:
    for i in range(0, l):
# ......... remainder of the snippet omitted .........
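The snippet above uses a SortedDict keyed by distance as a bounded "k smallest" container: whenever it grows past limit, popitem() evicts the entry with the largest distance, and draining from the front later yields the smallest distances first. A self-contained sketch of that pattern with made-up data (note that equal distances overwrite each other, a caveat the original shares):

from sortedcontainers import SortedDict

limit = 3
closest = SortedDict()  # distance -> pair of indices
for dist, pair in [(0.9, (0, 1)), (0.2, (0, 2)), (0.5, (1, 2)),
                   (0.1, (1, 3)), (0.7, (2, 3))]:
    closest[dist] = pair
    if len(closest) > limit:
        closest.popitem()  # evict the current largest distance
while closest:
    print(closest.popitem(0))  # (0.1, (1, 3)), (0.2, (0, 2)), (0.5, (1, 2))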
Example 4: test_popitem2
# Required module: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import popitem [as alias]
def test_popitem2():
    temp = SortedDict()
    # popitem() on an empty SortedDict raises KeyError; the original test
    # presumably asserts that exception via a decorator not shown here
    # (compare Example 8, which wraps the call in pytest.raises).
    temp.popitem()
Example 5: test_popitem
# Required module: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import popitem [as alias]
# Also requires: string (standard library)
def test_popitem():
    mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
    temp = SortedDict(mapping)
    # By default, popitem() removes and returns the item with the greatest key.
    assert temp.popitem() == ('z', 25)
Example 6: SortedList
# Required module: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import popitem [as alias]
# http://www.grantjenks.com/docs/sortedcontainers/
from sortedcontainers import SortedList
sl = SortedList(['e', 'a', 'c', 'd', 'b'])
print(sl)             # SortedList(['a', 'b', 'c', 'd', 'e'])
sl *= 10000000
print(sl.count('c'))  # 10000000
print(sl[-3:])        # ['e', 'e', 'e']

from sortedcontainers import SortedDict
sd = SortedDict({'c': 3, 'a': 1, 'b': 2})
print(sd)             # SortedDict({'a': 1, 'b': 2, 'c': 3})
print(sd.popitem())   # ('c', 3)

from sortedcontainers import SortedSet
ss = SortedSet('abracadabra')
print(ss)                   # SortedSet(['a', 'b', 'c', 'd', 'r'])
print(ss.bisect_left('c'))  # 2
Example 7: StreamChangeCache
# Required module: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import popitem [as alias]
# ......... some of the class's code is omitted here .........
    def has_entity_changed(self, entity, stream_pos):
        """Returns True if the entity may have been updated since stream_pos
        """
        assert type(stream_pos) in integer_types
        if stream_pos < self._earliest_known_stream_pos:
            self.metrics.inc_misses()
            return True
        latest_entity_change_pos = self._entity_to_key.get(entity, None)
        if latest_entity_change_pos is None:
            self.metrics.inc_hits()
            return False
        if stream_pos < latest_entity_change_pos:
            self.metrics.inc_misses()
            return True
        self.metrics.inc_hits()
        return False

    def get_entities_changed(self, entities, stream_pos):
        """
        Returns the subset of entities that have had new things since the
        given position. Entities unknown to the cache will be returned. If
        the position is too old it will just return the given list.
        """
        assert type(stream_pos) is int
        if stream_pos >= self._earliest_known_stream_pos:
            changed_entities = {
                self._cache[k] for k in self._cache.islice(
                    start=self._cache.bisect_right(stream_pos),
                )
            }
            result = changed_entities.intersection(entities)
            self.metrics.inc_hits()
        else:
            result = set(entities)
            self.metrics.inc_misses()
        return result

    def has_any_entity_changed(self, stream_pos):
        """Returns whether any entity has changed
        """
        assert type(stream_pos) is int
        if not self._cache:
            # If we have no cache, nothing can have changed.
            return False
        if stream_pos >= self._earliest_known_stream_pos:
            self.metrics.inc_hits()
            return self._cache.bisect_right(stream_pos) < len(self._cache)
        else:
            self.metrics.inc_misses()
            return True

    def get_all_entities_changed(self, stream_pos):
        """Returns all entities that have had new things since the given
        position. If the position is too old it will return None.
        """
        assert type(stream_pos) is int
        if stream_pos >= self._earliest_known_stream_pos:
            return [self._cache[k] for k in self._cache.islice(
                start=self._cache.bisect_right(stream_pos))]
        else:
            return None

    def entity_has_changed(self, entity, stream_pos):
        """Informs the cache that the entity has been changed at the given
        position.
        """
        assert type(stream_pos) is int
        if stream_pos > self._earliest_known_stream_pos:
            old_pos = self._entity_to_key.get(entity, None)
            if old_pos is not None:
                stream_pos = max(stream_pos, old_pos)
                self._cache.pop(old_pos, None)
            self._cache[stream_pos] = entity
            self._entity_to_key[entity] = stream_pos
            while len(self._cache) > self._max_size:
                # Evict the entry with the smallest (i.e. oldest) stream
                # position until the cache fits again.
                k, r = self._cache.popitem(0)
                self._earliest_known_stream_pos = max(
                    k, self._earliest_known_stream_pos,
                )
                self._entity_to_key.pop(r, None)

    def get_max_pos_of_last_change(self, entity):
        """Returns an upper bound of the stream id of the last change to an
        entity.
        """
        return self._entity_to_key.get(entity, self._earliest_known_stream_pos)
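The eviction loop in entity_has_changed above is the key popitem use in this class: the cache maps stream positions to entities in sorted order, so popitem(0) always removes the oldest position first. A minimal sketch of that size-bounded eviction with hypothetical entity names:

from sortedcontainers import SortedDict

max_size = 3
cache = SortedDict()  # stream position -> entity
for pos, entity in enumerate(['@a:hs', '@b:hs', '@c:hs', '@d:hs', '@e:hs'], start=1):
    cache[pos] = entity
    while len(cache) > max_size:
        oldest_pos, evicted = cache.popitem(0)  # smallest key = oldest change
        print('evicted', evicted, 'at position', oldest_pos)
print(cache)  # SortedDict({3: '@c:hs', 4: '@d:hs', 5: '@e:hs'})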
Example 8: test_popitem2
# Required module: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import popitem [as alias]
# Also requires: pytest
def test_popitem2():
    temp = SortedDict()
    with pytest.raises(KeyError):
        temp.popitem()
Example 9: __init__
# Required module: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import popitem [as alias]
# (op, simpleMemory and cluster are modules from the snippet's repository)
class LayerDirector:
    verboseUnits = True

    def __init__(self, nClusters, nTotalUnits, Ti, Tn, NBin_nEntries, ZF):
        # scheduling state
        self.wakeQ = SortedDict()
        self.now = 0
        self.ZF = ZF
        self.VERBOSE = op.verboseDirector
        self.nClusters = nClusters
        self.Tn = Tn  # used when assigning filters to clusters
        self.nUnitsCluster = nTotalUnits // nClusters
        # components
        self.centralMem = simpleMemory.SimpleMemory(self, op.CM_size, op.CM_nPorts, op.CM_bytesCyclePort)
        self.clusters = []
        self.coordsWindow = {}
        self.clustersProcWindow = {}  # [windowID] -> count of clusters processing this window
        self.filtersPending = {}
        self.clustersReadingWindow = {}
        self.output = []
        for i in range(nClusters):
            self.clusters.append(cluster.Cluster(self, i, self.nUnitsCluster, Ti, Tn, NBin_nEntries,
                                                 op.SB_size_per_cluster, self.cbClusterDoneReading, self.cbClusterDone))

    def schedule(self, entity, when=1):
        when += self.now
        if when in self.wakeQ:
            self.wakeQ[when].add(entity)
        else:
            self.wakeQ[when] = set([entity])

    def wakeup(self):
        if self.VERBOSE > 1:
            print("layerdirector, cycle", self.now, len(self.wakeQ))
        entities = []
        if len(self.wakeQ) != 0:
            # popitem(False) pops the earliest scheduled time and its entities.
            aux, entities = self.wakeQ.popitem(False)
            if self.VERBOSE > 1:
                print("layerdirector, cycle", self.now, aux, len(entities), "objects to wakeup")
            self.now = aux
        for obj in entities:
            obj.wakeup()

    ##################################################################################
    ###
    ### This function copies the filter weights into the unit eDRAM.
    ###
    ### weights: ndarray containing the weights of the filters
    ###
    ##################################################################################
    def initializeLayer(self, weights):
        self.nTotalFilters = weights.shape[0]
        # how many filters go to each cluster
        nFiltersPerCluster = self.nTotalFilters // self.nClusters
        if self.VERBOSE:
            print("%d filters per cluster" % (nFiltersPerCluster))
        # in case the total number of filters is not a multiple of nClusters
        nAdditionalFilters = self.nTotalFilters - (nFiltersPerCluster * self.nClusters)
        if self.VERBOSE:
            print("plus %d additional filters" % (nAdditionalFilters))
        # send the units the size of the filters so they can configure SB properly (simulation)
        for i in range(self.nClusters):
            self.clusters[i].initialize(weights[0].size)
        ##for idxFilter in range(nTotalFilters):
        ##    self.clusters[(idxFilter / self.nClusters) % self.nClusters].fill_SB(weights[idxFilter], idxFilter)
        filtersAssignedSoFar = 0
        idxFilter = 0
        cntCluster = 0
        while filtersAssignedSoFar < nFiltersPerCluster:
            cntFilterCluster = 0
            listFilterData = []
            listFilterIdxs = []
            while cntFilterCluster < min(self.Tn, nFiltersPerCluster - filtersAssignedSoFar):
                listFilterData.append(weights[idxFilter])
                listFilterIdxs.append(idxFilter)
                cntFilterCluster += 1
                idxFilter += 1
            self.clusters[cntCluster].fill_SB(listFilterData, listFilterIdxs)
            cntCluster += 1
            if cntCluster == self.nClusters:
                filtersAssignedSoFar += cntFilterCluster
                cntCluster = 0
        #print('%d %d %d %d %d' % (nFiltersPerCluster, filtersAssignedSoFar, cntFilterCluster, self.Tn, self.nClusters))

    ##################################################################################
    # in:
    #   data: numpy 3D ndarray of dimensions i * Wx * Wy, i = # input features, Wx = Wy = size of the input
    #   filters: a list with two elements; we will use the field "data" of both:
    #     filters[0].data = numpy 4D ndarray of dimensions N * i * Fx * Fy with the filter values
    #     filters[1].data = numpy 1D vector with the N biases
    #     N = # filters, Fx = Fy = filter size
    ##################################################################################
    def computeConvolutionalLayer(self, data, filters, stride, padding, group):
        # ......... remainder of the code omitted .........
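The wakeQ/wakeup pair at the top of this class is a small discrete-event scheduler: keys are wake-up times, values are sets of entities, and popitem(False) (equivalently index 0) always yields the earliest pending time. A stripped-down sketch of the same loop with hypothetical event objects:

from sortedcontainers import SortedDict

class Event:
    def __init__(self, name):
        self.name = name
    def wakeup(self):
        print(self.name, 'woke up')

wake_q = SortedDict()  # time -> set of entities to wake

def schedule(entity, when):
    wake_q.setdefault(when, set()).add(entity)

schedule(Event('b'), 5)
schedule(Event('a'), 2)
schedule(Event('c'), 5)
while wake_q:
    now, entities = wake_q.popitem(0)  # earliest scheduled time first
    for obj in entities:
        obj.wakeup()  # 'a' at t=2, then 'b' and 'c' at t=5 (order within a tick is arbitrary)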