This article collects and summarizes typical usage examples of the heapq.nlargest method in Python. If you are wrestling with questions like: What exactly does Python's heapq.nlargest do? How is it used? What does real heapq.nlargest code look like? — then the curated examples below may help. You can also explore further usage examples from heapq, the module this method belongs to.
The following shows 15 code examples of heapq.nlargest, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
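Before diving in, here is a minimal self-contained sketch (not drawn from any of the projects below) of the two calling patterns that recur throughout these examples: ranking a plain list, and ranking a dict's keys by their values via key=dict.get (the idiom used in Examples 1 and 7).

import heapq

# Top 3 values from a plain list.
scores = [10, 4, 2, 12, 3]
print(heapq.nlargest(3, scores))  # [12, 10, 4]

# Top 2 dict keys ranked by their values; iterating a dict yields its keys.
word_freq = {'python': 5, 'heapq': 9, 'nlargest': 7, 'top': 2}
print(heapq.nlargest(2, word_freq, key=word_freq.get))  # ['heapq', 'nlargest']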
Example 1: get_summary
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def get_summary(self, number_of_sentences=5):
    '''
    generates summary based on weighted word frequencies
    :param number_of_sentences: total number of sentences to return in summary
    :return: string of summary
    '''
    sentence_value = {}
    for sentence in self.__sentence:
        for word in self.__word_freq.keys():
            if word in word_tokenize(sentence.lower()):
                if sentence in sentence_value:
                    sentence_value[sentence] += self.__word_freq.get(word)
                else:
                    sentence_value[sentence] = self.__word_freq.get(word, 0)
    summary_sentences = heapq.nlargest(number_of_sentences, sentence_value, key=sentence_value.get)
    summary = ' '.join(summary_sentences)
    return summary
Example 2: largest_export_versions
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def largest_export_versions(n):
    """Creates a filter that keeps the largest n export versions.

    Args:
        n: number of versions to keep.

    Returns:
        A filter function that keeps the n largest paths.
    """
    def keep(paths):
        heap = []
        for idx, path in enumerate(paths):
            if path.export_version is not None:
                heapq.heappush(heap, (path.export_version, idx))
        keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
        return sorted(keepers)
    return keep
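A small design note: the heappush loop is not strictly required for correctness, since heapq.nlargest accepts any iterable; a plain list of (export_version, idx) tuples would work just as well. The tuples compare by export version first, with the enumeration index as a tie-breaker.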
Example 3: build_dictionary
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def build_dictionary(sentences, size):
    """
    Create a dictionary containing the most frequent words in the sentences
    :param sentences: sequence of sentences that contain words
        Caution: the sequence might be exhausted after calling this function!
    :param size: size of dictionary you want
    :return: dictionary that maps word to index (starting from 1)
    """
    dictionary = defaultdict(int)
    for sentence in sentences:
        for token in sentence:
            dictionary[token] += 1
    frequent_pairs = nlargest(size, dictionary.items(), itemgetter(1))
    words, frequencies = zip(*frequent_pairs)
    result = {word: index + 1 for index, word in enumerate(words)}
    return result
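As an aside, the same top-size vocabulary can be built with collections.Counter, whose most_common method is itself implemented with heapq.nlargest (see Examples 11, 12 and 14 below). A minimal equivalent sketch:

from collections import Counter

def build_dictionary_with_counter(sentences, size):
    # Counter.most_common(size) calls heapq.nlargest under the hood.
    counts = Counter(token for sentence in sentences for token in sentence)
    return {word: index + 1 for index, (word, _) in enumerate(counts.most_common(size))}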
Example 4: select_slaves
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def select_slaves(self, candidates, P_U_S, class_vector, X):
    R = []
    for candidate in candidates:
        aggregates = []
        for other in P_U_S:
            if not _tree_contains(candidate[1], other):
                aggregates.extend([Inner(a, [candidate[1], other]) for a in OPERATORS])
        R.extend(_select_candidates(aggregates, self.num_slaves, class_vector, self.similarity_measure, X))
    R = sorted(R, key=lambda x: x[0])
    RR = []
    used_nodes = set()
    for candidate in R:
        inner_node = candidate[1]
        found = False
        for tree in inner_node.branches_:
            if tree in used_nodes:
                found = True
        if not found:
            used_nodes.update(inner_node.branches_)
            RR.append(candidate)
    return heapq.nlargest(self.num_slaves, RR, key=lambda x: x[0])
Example 5: get_onehot_label_topk
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def get_onehot_label_topk(scores, top_num=1):
    """
    Get the predicted one-hot labels based on the top-K number.

    Args:
        scores: The predicted scores for all classes, provided by the network
        top_num: The max top-K number (default: 1)
    Returns:
        predicted_onehot_labels: The predicted labels (one-hot)
    """
    predicted_onehot_labels = []
    scores = np.ndarray.tolist(scores)
    for score in scores:
        onehot_labels_list = [0] * len(score)
        max_num_index_list = list(map(score.index, heapq.nlargest(top_num, score)))
        for i in max_num_index_list:
            onehot_labels_list[i] = 1
        predicted_onehot_labels.append(onehot_labels_list)
    return predicted_onehot_labels
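One caveat with this example: score.index returns the position of the first occurrence of a value, so if two classes tie on the same score, both lookups resolve to the same index and fewer than top_num labels get set. A duplicate-safe variant (a hypothetical rewrite, not from the original project) asks nlargest for the indices directly:

# Rank indices by their scores instead of looking the values back up,
# so tied scores still yield top_num distinct positions.
max_num_index_list = heapq.nlargest(top_num, range(len(score)), key=score.__getitem__)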
Example 6: tag
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def tag(self, data):
    now = [(('BOS', 'BOS'), 0.0, [])]
    for w in data:
        stage = {}
        samples = self.status
        if w in self.word:
            samples = self.word[w]
        for s in samples:
            wd = log(self.wd.get((s, w))[1]) - log(self.uni.get(s)[1])
            for pre in now:
                p = pre[1] + wd + self.trans[(pre[0][0], pre[0][1], s)]
                if (pre[0][1], s) not in stage or p > stage[(pre[0][1], s)][0]:
                    stage[(pre[0][1], s)] = (p, pre[2] + [s])
        stage = list(map(lambda x: (x[0], x[1][0], x[1][1]), stage.items()))
        now = heapq.nlargest(self.N, stage, key=lambda x: x[1])
    now = heapq.nlargest(1, stage, key=lambda x: x[1] + self.geteos(x[0][1]))
    return zip(data, now[0][2])
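Note the two-stage selection here: inside the loop, nlargest(self.N, ...) keeps a beam of the N best partial tag sequences at each position, while the final nlargest(1, ...) re-scores the last stage with an end-of-sentence term (self.geteos) before picking the single best path.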
Example 7: eval_one_rating
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def eval_one_rating(idx):
    # obtain test items and users
    rating = _testRatings[idx]
    items = _testNegatives[idx]
    u = rating[0]
    gtItem = rating[1]
    items.append(gtItem)
    users = np.full(len(items), u, dtype='int32')
    # obtain prediction scores
    map_item_score = {}
    predictions = _model.predict([users, np.array(items)], batch_size=100, verbose=0)
    for i in range(len(items)):
        item = items[i]
        map_item_score[item] = predictions[i]
    items.pop()
    # evaluate topk list
    ranklist = heapq.nlargest(_K, map_item_score, key=map_item_score.get)
    hr = getHitRatio(ranklist, gtItem)
    ndcg = getNDCG(ranklist, gtItem)
    return (hr, ndcg)
Example 8: estimate
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def estimate(self, u, i):
    if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
        raise PredictionImpossible('User and/or item is unknown.')

    x, y = self.switch(u, i)
    neighbors = [(self.sim[x, x2], r) for (x2, r) in self.yr[y]]
    k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[0])

    # compute weighted average
    sum_sim = sum_ratings = actual_k = 0
    for (sim, r) in k_neighbors:
        if sim > 0:
            sum_sim += sim
            sum_ratings += sim * r
            actual_k += 1

    if actual_k < self.min_k:
        raise PredictionImpossible('Not enough neighbors.')

    est = sum_ratings / sum_sim
    details = {'actual_k': actual_k}
    return est, details
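The weighted average computed here is est = Σ(sim · r) / Σ(sim), taken over the at most k nearest neighbours with strictly positive similarity. This is safe as long as min_k >= 1: whenever actual_k >= 1, sum_sim is positive, so the division cannot fail.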
Example 9: _get_top_eigen_vectors
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def _get_top_eigen_vectors(data: ndarray, n_components: int) -> ndarray:
    """Get the eigenvectors corresponding to the n_components largest eigenvalues.

    Arguments:
        data {ndarray} -- Training data.
        n_components {int} -- Number of components to keep.

    Returns:
        ndarray -- eigen vectors with shape (n_cols, n_components).
    """
    # Calculate eigenvalues and eigenvectors of the covariance matrix.
    eigen_values, eigen_vectors = eig(data)
    # The indexes of the n_components largest eigenvalues.
    _indexes = heapq.nlargest(n_components, enumerate(eigen_values),
                              key=lambda x: x[1])
    indexes = [x[0] for x in _indexes]
    return eigen_vectors[:, indexes]
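For context, eig here is presumably numpy.linalg.eig, and although the docstring calls data "training data", the inline comment suggests the caller is expected to pass a covariance matrix. A hedged usage sketch under those assumptions:

import numpy as np
from numpy.linalg import eig

X = np.random.randn(100, 5)    # 100 samples, 5 features
cov = np.cov(X, rowvar=False)  # 5 x 5 covariance matrix
top_vectors = _get_top_eigen_vectors(cov, 2)  # shape (5, 2)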
Example 10: top
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def top(self, num, key=None):
    """
    Get the top N elements from an RDD.

    .. note:: This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    .. note:: It returns the list sorted in descending order.

    >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
    [12]
    >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
    [6, 5]
    >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
    [4, 3, 2]
    """
    def topIterator(iterator):
        yield heapq.nlargest(num, iterator, key=key)

    def merge(a, b):
        return heapq.nlargest(num, a + b, key=key)

    return self.mapPartitions(topIterator).reduce(merge)
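The pattern generalizes beyond Spark: take the num largest items per partition, then merge the short partial lists with another nlargest, so the driver never holds more than a few lists of length num. A minimal local sketch of the same idea (the names are illustrative, not Spark API):

import heapq
from functools import reduce

def top_of_chunks(chunks, num, key=None):
    # Per-chunk top-num lists, then pairwise merges of those short lists.
    partials = [heapq.nlargest(num, chunk, key=key) for chunk in chunks]
    return reduce(lambda a, b: heapq.nlargest(num, a + b, key=key), partials, [])

print(top_of_chunks([[10, 4, 2], [12, 3], [7, 8]], 2))  # [12, 10]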
Example 11: most_common
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def most_common(self, n=None):
    '''List the n most common elements and their counts from the most
    common to the least. If n is None, then list all element counts.

    >>> Counter('abcdeabcdabcaba').most_common(3)
    [('a', 5), ('b', 4), ('c', 3)]

    '''
    # Emulate Bag.sortedByCount from Smalltalk
    if n is None:
        return sorted(self.items(), key=_itemgetter(1), reverse=True)
    return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
Example 12: most_common
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def most_common(self, n=None):
    '''List the n most common elements and their counts from the most
    common to the least. If n is None, then list all element counts.

    >>> Counter('abracadabra').most_common(3)
    [('a', 5), ('r', 2), ('b', 2)]

    '''
    if n is None:
        return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
    return nlargest(n, self.iteritems(), key=itemgetter(1))
Example 13: nearest
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def nearest(self, v, n=10, exclude=None, candidates=None):
    """Return nearest n words and similarities for given word or vector,
    excluding given words.

    If v is a string, look up the corresponding word vector.
    If exclude is None and v is a string, exclude v.
    If candidates is not None, only consider (word, vector)
    values from iterable candidates.

    Return value is a list of (word, similarity) pairs.
    """
    if isinstance(v, StringTypes):
        v, w = self.word_to_unit_vector(v), v
    else:
        v, w = v / numpy.linalg.norm(v), None
    if exclude is None:
        exclude = [] if w is None else set([w])
    if not self._normalized:
        sim = partial(self._item_similarity, v=v)
    else:
        sim = partial(self._item_similarity_normalized, v=v)
    if candidates is None:
        candidates = self.word_to_vector_mapping().iteritems()
    nearest = heapq.nlargest(n + len(exclude), candidates, sim)
    wordsim = [(p[0], sim(p)) for p in nearest if p[0] not in exclude]
    return wordsim[:n]
Example 14: most_common
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def most_common(self, n=None):
    '''List the n most common elements and their counts from the most
    common to the least. If n is None, then list all element counts.

    >>> Counter('abcdeabcdabcaba').most_common(3)
    [('a', 5), ('b', 4), ('c', 3)]

    '''
    # Emulate Bag.sortedByCount from Smalltalk
    if n is None:
        return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
    return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
Example 15: reg_get_last_modified
# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def reg_get_last_modified(self, hive_name, count=1, user=None, start=None, end=None, reg=False):
    '''
    Wrapper function using reg_get_all_keys. These functions can take a WHILE since all
    subkeys have to be collected before you can compare lastwrite times.
    '''
    data = nlargest(count, self.reg_get_all_keys(hive_name, user, start, end, reg))
    if reg:
        for t, regname, name in data:
            yield (t, regname, name)
    else:
        for t, name in data:
            yield (t, name)
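Since no key function is given, nlargest here relies on plain tuple comparison: each item yielded by reg_get_all_keys starts with a lastwrite timestamp, so the count most recently modified keys sort to the front.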