This article collects typical usage examples of the Python lru.LRU.values method. If you have been wondering how to call LRU.values, how it behaves, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples of the containing class, lru.LRU.

The following presents 5 code examples of the LRU.values method, sorted by popularity.
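Before diving into the examples, it helps to know what LRU.values actually returns. With the lru-dict package (which all of the examples below use), values() returns a plain list ordered from most recently used to least recently used, pairing index-by-index with keys(). A minimal sketch:

from lru import LRU

l = LRU(3)          # cache that holds at most 3 items
l['a'] = 1
l['b'] = 2
l['c'] = 3
_ = l['a']          # reading 'a' makes it the most recently used
print(l.values())   # [1, 3, 2] -- most recently used first
print(l.keys())     # ['a', 'c', 'b'] -- same order as values()
l['d'] = 4          # at capacity, so the least recently used key 'b' is evicted
print(l.values())   # [4, 1, 3]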
Example 1: __init__
# Required import: from lru import LRU [as alias]
# Or: from lru.LRU import values [as alias]
class topic4:
    def __init__(self, c_hash, c_user, c_words):
        self.topic_count = 1
        self.l1 = LRU(c_hash)
        self.l2 = LRU(c_user)

    def set_hashLRU(self, l):
        self.set(self.l1, l)

    def set_userLRU(self, l):
        self.set(self.l2, l)

    def set(self, lru, l):
        for k in l:
            v = lru.get(k, 0)
            lru[k] = v + 1

    def set_cluster(self, hashtags, users, words):
        for k in hashtags:
            self.l1[k] = self.l1.get(k, 0) + 1
        for k in users:
            self.l2[k] = self.l2.get(k, 0) + 1
        self.topic_count += 1

    def get_similarity(self, hashtags, users, words):
        h_sum = 1
        u_sum = 1
        w_sum = 1
        h_match = 0
        h_ind = 0
        u_ind = 0
        w_ind = 0
        c = 0
        h1 = self.l1.get_size()
        u1 = self.l2.get_size()
        for h in hashtags:
            # l1_items = zip(*self.l1.items())
            h_sum += self.l1.get(h, 0)
            if self.l1.has_key(h):
                ind = self.l1.keys().index(h)
                h_ind += h1 - ind
                h_match += 1 if ind < 250 else 0
        for u in users:
            u_sum += self.l2.get(u, 0)
            if self.l2.has_key(u):
                u_ind += u1 - self.l2.keys().index(u)
        if h_match != 0:
            c = h_match - 1
        # print(h_ind, h1, u_ind, u1, w_ind, w1, h_sum, w_sum)
        similarity = ((h_ind / (h1 + 1)) * (h_sum / sum(self.l1.values() + [1]))
                      + (u_ind / (u1 + 1)) * (u_sum / sum(self.l2.values() + [1]))
                      + c)
        return similarity
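This class keeps one bounded LRU counter per feature type, so hot hashtags and users stay cached while stale ones age out; values() feeds the normalization terms in the similarity score. A hypothetical driver (the capacities and token lists below are made up for illustration, not part of the original source):

# Hypothetical usage of the topic4 class above.
topic = topic4(c_hash=500, c_user=500, c_words=3500)
topic.set_cluster(['python', 'lru'], ['alice'], ['cache'])
topic.set_cluster(['python'], ['bob'], ['eviction'])
print(topic.get_similarity(['python'], ['alice'], ['cache']))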
Example 2: test_callback
# Required import: from lru import LRU [as alias]
# Or: from lru.LRU import values [as alias]
def test_callback(self):
    counter = [0]

    first_key = 'a'
    first_value = 1

    def callback(key, value):
        self.assertEqual(key, first_key)
        self.assertEqual(value, first_value)
        counter[0] += 1

    l = LRU(1, callback=callback)
    l[first_key] = first_value
    l['b'] = 1              # evicts first_key, calling the callback
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), ['b'])

    l['b'] = 2              # overwriting an existing key doesn't call the callback
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), ['b'])
    self.assertEqual(l.values(), [2])

    l = LRU(1, callback=callback)
    l[first_key] = first_value
    l.set_callback(None)
    l['c'] = 1              # callback cleared, so this eviction doesn't call it
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), ['c'])

    l.set_callback(callback)
    del l['c']              # explicit deletion doesn't call the callback
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), [])

    l = LRU(2, callback=callback)
    l['a'] = 1              # no eviction yet, so no callback
    l['b'] = 2
    self.assertEqual(counter[0], 1)
    self.assertEqual(l.keys(), ['b', 'a'])

    l.set_size(1)
    self.assertEqual(counter[0], 2)     # shrinking evicts 'a'; callback invoked
    self.assertEqual(l.keys(), ['b'])
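The test above pins down exactly when the eviction callback fires: on an insertion that pushes an item out, and on set_size() shrinking the cache, but not on overwriting an existing key or on explicit deletion. A standalone sketch of those rules (assuming lru-dict):

from lru import LRU

evicted = []
l = LRU(2, callback=lambda key, value: evicted.append((key, value)))
l['a'] = 1
l['b'] = 2
l['c'] = 3          # capacity exceeded: 'a' is evicted and the callback fires
print(evicted)      # [('a', 1)]
l['c'] = 30         # overwriting an existing key does not fire it
del l['b']          # neither does explicit deletion
print(evicted)      # still [('a', 1)]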
Example 3: test_empty
# Required import: from lru import LRU [as alias]
# Or: from lru.LRU import values [as alias]
def test_empty(self):
    l = LRU(1)
    self.assertEquals([], l.keys())
    self.assertEquals([], l.values())
Example 4: __init__
# Required import: from lru import LRU [as alias]
# Or: from lru.LRU import values [as alias]
class topic4:
    def __init__(self, c_hash, c_user, c_words):
        self.topic_count = 1
        # self.time = (self.first, self.last)
        self.l1 = LRU(c_hash)
        self.first = ""
        self.last = ""
        self.lats = []
        self.longs = []
        self.l2 = LRU(c_user)
        self.l3 = LRU(c_words)
        self.l4 = LRU(400)

    def set_hashLRU(self, l):
        self.set(self.l1, l)

    def set_userLRU(self, l):
        self.set(self.l2, l)

    def set_wordLRU(self, l):
        self.set(self.l3, l)

    def set(self, lru, l):
        for k in l:
            v = lru.get(k, 0)
            lru[k] = v + 1

    def set_cluster(self, hashtags, users, words, links, cords):
        for k in hashtags:
            self.l1[k] = self.l1.get(k, 0) + 1
        for k in users:
            self.l2[k] = self.l2.get(k, 0) + 1
        for k in words:
            self.l3[k] = self.l3.get(k, 0) + 1
        for k in links:
            self.l4[k] = self.l4.get(k, 0) + 1
        if cords is not None:
            self.lats.append(cords["coordinates"][1])
            self.longs.append(cords["coordinates"][0])
        self.topic_count += 1

    def get_similarity(self, hashtags, users, words):
        h_sum = 1
        u_sum = 1
        w_sum = 1
        h_match = 0
        h_ind = 0
        u_ind = 0
        w_ind = 0
        c = 0
        h1 = self.l1.get_size()
        u1 = self.l2.get_size()
        w1 = self.l3.get_size()
        for h in hashtags:
            # l1_items = zip(*self.l1.items())
            h_sum += self.l1.get(h, 0)
            if self.l1.has_key(h):
                ind = self.l1.keys().index(h)
                h_ind += h1 - ind
                h_match += 1 if ind < 250 else 0
        for u in users:
            u_sum += self.l2.get(u, 0)
            if self.l2.has_key(u):
                u_ind += u1 - self.l2.keys().index(u)
        for w in words:
            w_sum += self.l3.get(w, 0)
            if self.l3.has_key(w):
                w_ind += w1 - self.l3.keys().index(w)
        if h_match != 0:
            c = h_match - 1
        # print(h_ind, h1, u_ind, u1, w_ind, w1, h_sum, w_sum)
        similarity = ((h_ind / (h1 + 1)) * (h_sum / sum(self.l1.values() + [1]))
                      + (u_ind / (u1 + 1)) * (u_sum / sum(self.l2.values() + [1]))
                      + (w_ind / (w1 + 1)) * (w_sum / sum(self.l3.values() + [1]))
                      + c)
        return similarity

    def flush1(self, cache, size):
        # Keep only the five most-recently-used keys; keys() is MRU-first.
        if len(cache.keys()) > 5:
            tokens = reversed(cache.keys()[:5])
            cache.clear()
            for i in tokens:
                cache[i] = 1

    def flush(self):
        self.flush1(self.l1, 500)
        self.flush1(self.l2, 500)
        self.flush1(self.l3, 3500)
        self.topic_count = 1
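flush1 relies on keys() being ordered from most to least recently used, so cache.keys()[:5] are the five hottest entries; re-inserting them in reverse restores their relative recency after clear(). A small sketch of that invariant (assuming lru-dict):

from lru import LRU

cache = LRU(10)
for i in range(8):
    cache[i] = 1
print(cache.keys()[:5])      # [7, 6, 5, 4, 3] -- five hottest, most recent first

survivors = list(reversed(cache.keys()[:5]))
cache.clear()
for k in survivors:          # oldest survivor is inserted first ...
    cache[k] = 1
print(cache.keys())          # [7, 6, 5, 4, 3] -- ... so the order is preserved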
Example 5: FCP
# Required import: from lru import LRU [as alias]
# Or: from lru.LRU import values [as alias]
class FCP(BaseTask):
    def __init__(self, circle, src, dest,
                 treewalk=None,
                 totalsize=0,
                 hostcnt=0,
                 prune=False,
                 verify=False,
                 resume=False,
                 workq=None):
        BaseTask.__init__(self, circle)
        self.circle = circle
        self.treewalk = treewalk
        self.totalsize = totalsize
        self.prune = prune
        self.workq = workq
        self.resume = resume
        self.checkpoint_file = None
        self.src = src
        self.dest = os.path.abspath(dest)

        # cache, keep the size conservative
        # TODO: we need a more portable LRU size
        if hostcnt != 0:
            max_ofile, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
            procs_per_host = self.circle.size / hostcnt
            self._read_cache_limit = ((max_ofile - 64) / procs_per_host) / 3
            self._write_cache_limit = ((max_ofile - 64) / procs_per_host) * 2 / 3

        if self._read_cache_limit <= 0 or self._write_cache_limit <= 0:
            self._read_cache_limit = 1
            self._write_cache_limit = 8

        self.rfd_cache = LRU(self._read_cache_limit)
        self.wfd_cache = LRU(self._write_cache_limit)

        self.cnt_filesize_prior = 0
        self.cnt_filesize = 0
        self.blocksize = 1024 * 1024
        self.chunksize = 1024 * 1024

        # debug
        self.d = {"rank": "rank %s" % circle.rank}
        self.wtime_started = MPI.Wtime()
        self.wtime_ended = None
        self.workcnt = 0        # this is the cnt for the enqueued items
        self.reduce_items = 0   # this is the cnt for processed items
        if self.treewalk:
            log.debug("treewalk files = %s" % treewalk.flist, extra=self.d)

        # fini_check
        self.fini_cnt = Counter()

        # verify
        self.verify = verify
        self.chunksums = []

        # checkpointing
        self.checkpoint_interval = sys.maxsize
        self.checkpoint_last = MPI.Wtime()

        if self.circle.rank == 0:
            print("Start copying process ...")

    def rw_cache_limit(self):
        return (self._read_cache_limit, self._write_cache_limit)

    def set_fixed_chunksize(self, sz):
        self.chunksize = sz

    def set_adaptive_chunksize(self, totalsz):
        self.chunksize = utils.calc_chunksize(totalsz)
        if self.circle.rank == 0:
            print("Adaptive chunksize: %s" % bytes_fmt(self.chunksize))

    def cleanup(self):
        for f in self.rfd_cache.values():
            try:
                os.close(f)
            except OSError as e:
                pass

        for f in self.wfd_cache.values():
            try:
                os.close(f)
            except OSError as e:
                pass

        # remove checkpoint file
        if self.checkpoint_file and os.path.exists(self.checkpoint_file):
            os.remove(self.checkpoint_file)

        # if the last job didn't finish cleanly, fwalk files may be left
        # over; if fcp cleanup gets a chance, it should remove them
        fwalk = "%s/fwalk.%s" % (self.circle.tempdir, self.circle.rank)
        if os.path.exists(fwalk):
            os.remove(fwalk)
#......... part of the code is omitted here .........
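FCP bounds its open file descriptors with the rfd_cache and wfd_cache LRUs, and cleanup() iterates their values() to close whatever is still cached when the copy finishes. A related pattern worth knowing, shown here as a sketch rather than as the original FCP code (the helper name cached_open and the capacity are hypothetical), is to close descriptors the moment the LRU evicts them by registering a callback:

import os
from lru import LRU

def _close_fd(path, fd):
    # Eviction callback: close the descriptor that just fell out of the cache.
    try:
        os.close(fd)
    except OSError:
        pass

read_cache = LRU(8, callback=_close_fd)   # capacity is illustrative

def cached_open(path):
    # Reuse an already-open descriptor when possible; otherwise open and
    # cache it, letting the LRU close whatever it evicts.
    if path in read_cache:
        return read_cache[path]
    fd = os.open(path, os.O_RDONLY)
    read_cache[path] = fd
    return fd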