This article collects and organizes typical usage examples of the heapq.nsmallest method in Python. If you have been wondering how exactly heapq.nsmallest is used, or what real code that calls it looks like, the curated method examples here may help. You can also explore further usage examples of the heapq module that the method belongs to.
The following presents 15 code examples of heapq.nsmallest, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
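For orientation: heapq.nsmallest(n, iterable, key=None) returns a list of the n smallest elements of the iterable, optionally ranked by a key function, which is the call every example below builds on. A minimal demonstration:

import heapq

nums = [5, 1, 9, 3, 7]
print(heapq.nsmallest(3, nums))                       # [1, 3, 5]
pairs = [('a', 2.5), ('b', 0.4), ('c', 1.1)]
print(heapq.nsmallest(2, pairs, key=lambda p: p[1]))  # [('b', 0.4), ('c', 1.1)]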
Example 1: _similar_names
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def _similar_names(owner, attrname, distance_threshold, max_choices):
"""Given an owner and a name, try to find similar names
The similar names are searched given a distance metric and only
a given number of choices will be returned.
"""
possible_names = []
names = _node_names(owner)
for name in names:
if name == attrname:
continue
distance = _string_distance(attrname, name)
if distance <= distance_threshold:
possible_names.append((name, distance))
    # Now pick the names with minimal distance, up to the given
    # limit of choices.
picked = [name for (name, _) in
heapq.nsmallest(max_choices, possible_names,
key=operator.itemgetter(1))]
return sorted(picked)
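_node_names and _string_distance are project-internal helpers not shown in this snippet; the sketch below reproduces the same select-the-k-closest pattern self-contained, with a toy distance function standing in for the real one:

import heapq
import operator

def toy_distance(a, b):
    # count of mismatched positions plus the length difference
    # (a crude stand-in for a real edit distance)
    return sum(x != y for x, y in zip(a, b)) + abs(len(a) - len(b))

names = ['value', 'values', 'valid', 'vector']
scored = [(n, toy_distance('vale', n)) for n in names]
picked = [n for n, _ in heapq.nsmallest(2, scored, key=operator.itemgetter(1))]
print(sorted(picked))  # ['valid', 'value']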
Example 2: search
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def search(self, text, num=20):
        '''
        Recommend experts working in the given text's field.
        :param text: The query text.
        :param num: The number of experts to recommend.
        :return: A list of dictionaries:
            {
                'id': The expert's ID in AMiner (http://www.aminer.cn/),
                'url': The expert's AMiner homepage,
                'L2 distance': Similarity score; the smaller the L2 distance,
                    the more likely the expert is interested in the given
                    text's field.
            }
        '''
vec = self.doc2vec(text)
dist_mat = self._index_mat - vec.T
dist = np.linalg.norm(dist_mat, axis=1)
ret = [{
'id': self._id2person[i],
'url': self.base_url.format(self._id2person[i]),
'L2 distance': d
} for i, d in enumerate(dist)]
return heapq.nsmallest(num, ret, lambda x: x['L2 distance'])
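self._index_mat and doc2vec are model state not shown here; assuming _index_mat is an (N, d) matrix of document vectors and the query vec has shape (d, 1), the distance-and-rank step reduces to this self-contained sketch:

import heapq
import numpy as np

index_mat = np.random.rand(100, 8)  # stand-in for self._index_mat
vec = np.random.rand(8, 1)          # stand-in for self.doc2vec(text)

dist = np.linalg.norm(index_mat - vec.T, axis=1)  # one L2 distance per row
ret = [{'id': i, 'L2 distance': d} for i, d in enumerate(dist)]
print(heapq.nsmallest(5, ret, key=lambda x: x['L2 distance']))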
Example 3: pickBestPatch
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def pickBestPatch(self, ty, tx, coords):
"""Iterate over a random selection of patches (e.g. 100) and pick a random
sample of the best (e.g. top 5). Distance metric is used to rank the patches.
"""
results = []
for sy, sx in random.sample(list(coords), min(len(coords), PATCH_COUNT)):
d = self.D(sy, sx, ty, tx)
heapq.heappush(results, (d, len(results), (sy,sx)))
# Some unlucky cases with special images cause no patches to be found
# at all, in this case we just bail out.
if not results:
return -1, -1
choices = heapq.nsmallest(BEST_COUNT, results)
return random.choice(choices)[2]
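Note the len(results) tie-breaker in the pushed tuples: when two patches have equal distance, heapq compares the second element instead of the payloads, so the payloads never need to be comparable. The pattern in isolation:

import heapq
import random

results = []
for d, patch in [(0.3, (4, 2)), (0.3, (1, 7)), (0.1, (0, 5))]:
    # the middle counter breaks ties between equal distances
    heapq.heappush(results, (d, len(results), patch))

choices = heapq.nsmallest(2, results)  # the two closest entries
print(random.choice(choices)[2])       # a random patch among the best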
Example 4: prune_conv_layers
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def prune_conv_layers(self, num=1):
"""Prune one conv2d filter.
"""
self.register_conv_hooks()
before_loss, before_accuracy = self.train_fun(self.model)
ranks = []
for path, output in self.outputs.items():
output = output.data
grad = self.grads[path].data
v = grad * output
            v = v.sum(0).sum(1).sum(1)  # reduce batch and spatial axes, leaving one value per channel.
v = torch.abs(v)
v = v / torch.sqrt(torch.sum(v * v)) # normalize
for i, e in enumerate(v):
ranks.append((path, i, e))
to_prune = nsmallest(num, ranks, key=lambda t: t[2])
        to_prune = sorted(to_prune, key=lambda t: (t[0], -t[1]))  # prune filters with larger indices first so removals do not shift the remaining indices.
for path, filter_index, value in to_prune:
self.remove_conv_filter(path, filter_index)
self.deregister_hooks()
after_loss, after_accuracy = self.train_fun(self.model)
return after_loss - before_loss, after_accuracy - before_accuracy
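self.outputs and self.grads are populated by the hooks registered above; with random stand-ins of shape (batch, channels, H, W), the per-channel ranking step looks like the following sketch (the ranking only, not the full pruner):

import torch
from heapq import nsmallest

outputs = {'conv1': torch.randn(4, 8, 16, 16)}  # stand-in for hook captures
grads = {'conv1': torch.randn(4, 8, 16, 16)}

ranks = []
for path, output in outputs.items():
    v = (grads[path] * output).sum(0).sum(1).sum(1)  # (N, C, H, W) -> (C,)
    v = torch.abs(v)
    v = v / torch.sqrt(torch.sum(v * v))  # normalize
    ranks.extend((path, i, e.item()) for i, e in enumerate(v))

print(nsmallest(2, ranks, key=lambda t: t[2]))  # the two weakest filters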
Example 5: prune_linear_layers
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def prune_linear_layers(self, num=1):
self.register_linear_hooks()
before_loss, before_accuracy = self.train_fun(self.model)
ranks = []
for path, output in self.outputs.items():
output = output.data
grad = self.grads[path].data
v = grad * output
            v = v.sum(0)  # reduce the batch axis, leaving one value per feature.
v = torch.abs(v)
v = v / torch.sqrt(torch.sum(v * v)) # normalize
for i, e in enumerate(v):
ranks.append((path, i, e))
to_prune = nsmallest(num, ranks, key=lambda t: t[2])
to_prune = sorted(to_prune, key=lambda t: (t[0], -t[1]))
for path, feature_index, value in to_prune:
self.remove_linear_feature(path, feature_index)
self.deregister_hooks()
after_loss, after_accuracy = self.train_fun(self.model)
return after_loss - before_loss, after_accuracy - before_accuracy
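Example 5 is the fully connected counterpart of Example 4: linear activations have shape (batch, features), so a single v.sum(0) leaves one value per feature, where the conv version needed three sums to collapse the batch and both spatial axes.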
Example 6: takeOrdered
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
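The same top-N merge works without Spark; a minimal sketch in which plain lists stand in for the RDD partitions:

import heapq
from functools import reduce

def take_ordered(partitions, num, key=None):
    # nsmallest per partition, then pairwise merges of the partial results,
    # mirroring mapPartitions(...).reduce(merge) above
    tops = (heapq.nsmallest(num, part, key) for part in partitions)
    return reduce(lambda a, b: heapq.nsmallest(num, a + b, key), tops)

print(take_ordered([[10, 1, 2, 9], [3, 4, 5, 6, 7]], 6))  # [1, 2, 3, 4, 5, 6]
print(take_ordered([[10, 1, 2, 9], [3, 4, 5, 6, 7]], 3, key=lambda x: -x))  # [10, 9, 7]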
Example 7: search
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def search(lattice, ngrams, queues, beam_size, viterbi_size):
for i in range(len(lattice)):
for j in range(len(lattice[i])):
for target, source in lattice[i][j]:
word_queue = []
for previous_cost, previous_history in queues[j]:
history = previous_history + [(target, source)]
cost = previous_cost + get_ngram_cost(ngrams, tuple(history[-3:]))
hypothesis = (cost, history)
word_queue.append(hypothesis)
# prune word_queue to viterbi size
if viterbi_size > 0:
word_queue = heapq.nsmallest(viterbi_size, word_queue, key=operator.itemgetter(0))
queues[i] += word_queue
# prune queues[i] to beam size
if beam_size > 0:
queues[i] = heapq.nsmallest(beam_size, queues[i], key=operator.itemgetter(0))
return queues
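Both pruning steps are ordinary beam pruning: keep only the k cheapest hypotheses of a queue, ranked by accumulated cost. In isolation:

import heapq
import operator

beam = [(2.5, ['b']), (0.7, ['a']), (1.9, ['c']), (3.1, ['d'])]
beam_size = 2
beam = heapq.nsmallest(beam_size, beam, key=operator.itemgetter(0))
print(beam)  # [(0.7, ['a']), (1.9, ['c'])]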
Example 8: update
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def update(self, key=None, l_priority=None, full_sort=False, keep_top_k=None):
"""
Sort the petridish queue using func
Args
func : a function that maps from (idx, petridish_queue_entry) to a float; the default
is lambda i, _ : i, which means we follow the FIFO order.
"""
if len(self.entries) == 0:
return
assert bool(key) != bool(l_priority), "only one option should be used for updating priority"
if key:
for i in range(self.size()):
self.entries[i][IDX_PV] = key(self.entries[i][IDX_PQE])
else:
for i in range(self.size()):
self.entries[i][IDX_PV] = l_priority[i]
if full_sort:
self.entries.sort()
if keep_top_k is not None:
self.entries[keep_top_k:] = []
elif keep_top_k is not None:
self.entries = heapq.nsmallest(keep_top_k, self.entries)
else:
self._update()
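The keep_top_k branch without full_sort is a deliberate trade-off: heapq.nsmallest(k, entries) runs in O(n log k) and never orders the discarded entries, whereas the full_sort path pays O(n log n) to keep the entire queue sorted.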
Example 9: kNN_entity
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def kNN_entity(self, vec, topk=10, method=0, self_vec_id=None):
q = []
for i in range(len(self.vec_e)):
            # skip self
            if self_vec_id is not None and i == self_vec_id:
                continue
if method == 1:
dist = SP.distance.cosine(vec, self.vec_e[i])
else:
dist = LA.norm(vec - self.vec_e[i])
if len(q) < topk:
HP.heappush(q, self.index_dist(i, dist))
else:
                # nsmallest(1) actually fetches the current largest distance,
                # i.e. the worst of the top-k neighbours kept so far
                tmp = HP.nsmallest(1, q)[0]
                if tmp.dist > dist:
                    HP.heapreplace(q, self.index_dist(i, dist))
rst = []
while len(q) > 0:
item = HP.heappop(q)
rst.insert(0, (self.vocab_e[self.vec2e[item.index]], item.dist))
return rst
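index_dist is a project-specific record type; judging by the comment in the code, it compares in reverse order, so the heap root returned by nsmallest(1, q) is the current worst neighbour. The same bounded-heap kNN pattern can be written self-contained by negating the distances instead:

import heapq

def knn_1d(query, values, topk=3):
    q = []  # heap root holds the *largest* distance, because distances are negated
    for i, v in enumerate(values):
        dist = abs(query - v)
        if len(q) < topk:
            heapq.heappush(q, (-dist, i))
        elif -q[0][0] > dist:
            heapq.heapreplace(q, (-dist, i))
    return sorted((-d, i) for d, i in q)

print(knn_1d(5.0, [1.0, 4.5, 9.0, 5.25, 7.0]))  # [(0.25, 3), (0.5, 1), (2.0, 4)]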
Example 10: kNN_relation
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def kNN_relation(self, vec, topk=10, method=0, self_vec_id=None):
q = []
for i in range(len(self.vec_r)):
            # skip self
            if self_vec_id is not None and i == self_vec_id:
                continue
if method == 1:
dist = SP.distance.cosine(vec, self.vec_r[i])
else:
dist = LA.norm(vec - self.vec_r[i])
if len(q) < topk:
HP.heappush(q, self.index_dist(i, dist))
else:
                # nsmallest(1) actually fetches the current largest distance,
                # i.e. the worst of the top-k neighbours kept so far
                tmp = HP.nsmallest(1, q)[0]
                if tmp.dist > dist:
                    HP.heapreplace(q, self.index_dist(i, dist))
rst = []
while len(q) > 0:
item = HP.heappop(q)
rst.insert(0, (self.vocab_r[self.vec2r[item.index]], item.dist))
return rst
Example 11: new_wrapper
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def new_wrapper(cls, func, cache):
'''Create a new wrapper that will determine the correct function to call.'''
# define the wrapper...
def F(*arguments, **keywords):
heap = [res for _, res in heapq.nsmallest(len(cache), cache)]
f, (a, w, k) = cls.match((arguments[:], keywords), heap)
return f(*arguments, **keywords)
#return f(*(arguments + tuple(w)), **keywords)
# swap out the original code object with our wrapper's
f, c = F, F.func_code
cargs = c.co_argcount, c.co_nlocals, c.co_stacksize, c.co_flags, \
c.co_code, c.co_consts, c.co_names, c.co_varnames, \
c.co_filename, '.'.join((func.__module__, func.func_name)), \
c.co_firstlineno, c.co_lnotab, c.co_freevars, c.co_cellvars
newcode = types.CodeType(*cargs)
res = types.FunctionType(newcode, f.func_globals, f.func_name, f.func_defaults, f.func_closure)
res.func_name, res.func_doc = func.func_name, func.func_doc
# assign the specified cache to it
setattr(res, cls.cache_name, cache)
# ...and finally add a default docstring
setattr(res, '__doc__', '')
return res
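Note that this example is Python 2: func_code, func_name, func_globals, func_defaults and func_closure became __code__, __name__, __globals__, __defaults__ and __closure__ in Python 3, and types.CodeType has since gained extra constructor arguments, so the snippet will not run unmodified on a modern interpreter.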
Example 12: lookup
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def lookup(self, key: Any, func: Callable[[Any], Any]) -> Any:
""" Lookup a key in the cache, calling func(key)
to obtain the data if not already there """
with self.lock:
self.use_count[key] += 1
# Get cache entry or compute if not found
try:
result = self.cache[key]
self.hits += 1
except KeyError:
result = func(key)
self.cache[key] = result
self.misses += 1
# Purge the 10% least frequently used cache entries
if len(self.cache) > self.maxsize:
                for key, _ in nsmallest(self.maxsize // 10,
                                        self.use_count.items(), key=itemgetter(1)):
del self.cache[key], self.use_count[key]
return result
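The eviction policy is the interesting part: on overflow, the 10% least frequently used keys (the smallest use counts) are dropped in a single nsmallest pass. Isolated from the class:

from heapq import nsmallest
from operator import itemgetter

use_count = {'a': 9, 'b': 1, 'c': 4, 'd': 2}
cache = {k: k.upper() for k in use_count}

# evict the 2 least frequently used entries, as the method above does
for key, _ in nsmallest(2, use_count.items(), key=itemgetter(1)):
    del cache[key], use_count[key]
print(sorted(cache))  # ['a', 'c']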
Example 13: lowest_ranking_filters
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def lowest_ranking_filters(self, num):
data_1 = []
for i in sorted(self.filter_ranks_1.keys()):
for j in range(self.filter_ranks_1[i].size(0)):
data_1.append((self.activation_to_layer_1[i], j, self.filter_ranks_1[i][j]))
data_2 = []
for i in sorted(self.filter_ranks_2.keys()):
for j in range(self.filter_ranks_2[i].size(0)):
data_2.append((self.activation_to_layer_2[i], j, self.filter_ranks_2[i][j]))
data_3 = []
data_3.extend(data_1)
data_3.extend(data_2)
        # deduplicate (layer, filter) pairs, keeping the smaller rank value
        dic = {}
        c = nsmallest(num * 2, data_3, itemgetter(2))
        for layer, filt, val in c:
            nm = '{}_{}'.format(layer, filt)
            if nm in dic:
                dic[nm] = min(dic[nm], val.item())
            else:
                dic[nm] = val.item()
        newc = []
        for nm, val in dic.items():
            layer, filt = nm.split('_')
            newc.append((int(layer), int(filt), torch.tensor(val)))
        return nsmallest(num, newc, itemgetter(2))
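Taking num*2 candidates before deduplication appears to be a safety margin: the same (layer, filter) pair can show up in both filter_ranks_1 and filter_ranks_2, and over-selecting ensures that at least num distinct pairs survive the merge before the final nsmallest(num, ...) call.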
Example 14: _similar_names
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def _similar_names(owner, attrname, distance_threshold, max_choices):
"""Given an owner and a name, try to find similar names
The similar names are searched given a distance metric and only
a given number of choices will be returned.
"""
possible_names = []
names = _node_names(owner)
for name in names:
if name == attrname:
continue
distance = _string_distance(attrname, name)
if distance <= distance_threshold:
possible_names.append((name, distance))
    # Now pick the names with minimal distance, up to the given
    # limit of choices.
picked = [
name
for (name, _) in heapq.nsmallest(
max_choices, possible_names, key=operator.itemgetter(1)
)
]
return sorted(picked)
Example 15: _gen_loopblocking_perprocess
# Required import: import heapq [as alias]
# Or: from heapq import nsmallest [as alias]
def _gen_loopblocking_perprocess(
nested_loop_desc, resource, bufshr, constraint, cost, options,
gen_tifm, gen_tofm, gen_tbat, gen_ords):
def _gen_bl_ts():
'''
Generator for blocking factors.
Transpose LoopEnum-major to BL-major.
'''
gen_lp_ts = [None] * le.NUM
gen_lp_ts[le.IFM], gen_lp_ts[le.OFM], gen_lp_ts[le.BAT] = \
constraint.filter_gen_ts(gen_tifm, gen_tofm, gen_tbat)
for lp_ts in itertools.product(*gen_lp_ts):
bl_ts = tuple(zip(*lp_ts))
yield bl_ts
def _sweep():
''' Sweep all. '''
is_conv_loops = (nested_loop_desc.data_loops == ConvLayer.data_loops())
for bl_ts, bl_ords in itertools.product(_gen_bl_ts(), gen_ords):
if is_conv_loops and skip_conv(bl_ts, bl_ords):
continue
if not constraint.is_valid_top_bl(bl_ts[0], bl_ords[0]):
continue
lbs = LoopBlockingScheme(
nested_loop_desc, bl_ts, bl_ords, resource, bufshr,
options)
yield lbs
return heapq.nsmallest(options.ntops, _sweep(),
key=_loop_blocking_cmp_key(options, cost))
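Because _sweep() is a generator, heapq.nsmallest consumes the candidate LoopBlockingScheme objects lazily and keeps at most options.ntops of them at a time, instead of materializing the whole design-space sweep before selecting the best schemes.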