

Python heapq.nlargest Method Code Examples

This article collects typical usage examples of Python's heapq.nlargest method. If you are wondering what heapq.nlargest does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples from the heapq module.


The 15 heapq.nlargest code examples below are sorted by popularity by default.
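
Before diving in, here is a minimal, self-contained sketch of what heapq.nlargest does; the data below is invented purely for illustration:

import heapq

numbers = [12, 3, 45, 7, 28]
# The three largest values, in descending order.
print(heapq.nlargest(3, numbers))          # [45, 28, 12]

# With a key function: the three longest words.
words = ['pear', 'fig', 'watermelon', 'kiwi']
print(heapq.nlargest(3, words, key=len))   # ['watermelon', 'pear', 'kiwi']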

Example 1: get_summary

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def get_summary(self, number_of_sentences=5):
        '''
            generates summary based on weighted word frequencies

            :param number_of_sentences: total number of sentences to return in summary
            :return: string of summary
        '''
        sentence_value = {}
        for sentence in self.__sentence:
            for word in self.__word_freq.keys():
                if word in word_tokenize(sentence.lower()):
                    if sentence in sentence_value:
                        sentence_value[sentence] += self.__word_freq[word]
                    else:
                        sentence_value[sentence] = self.__word_freq[word]
        
        summary_sentences = heapq.nlargest(number_of_sentences, sentence_value, key=sentence_value.get)
        summary = ' '.join(summary_sentences)
        return summary 
Author: OmkarPathak | Project: Django-Bookworm | Lines: 21 | Source: summarize.py
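
As a follow-up, here is a stand-alone sketch of the same scoring idea; the frequency table and sentences are invented, not taken from Django-Bookworm:

import heapq

word_freq = {'python': 0.9, 'heap': 0.6, 'queue': 0.4}  # hypothetical weights
sentences = ['python uses a heap', 'a queue is simple', 'python is popular']

sentence_value = {}
for sentence in sentences:
    for word in sentence.lower().split():
        if word in word_freq:
            sentence_value[sentence] = sentence_value.get(sentence, 0) + word_freq[word]

# The two highest-scoring sentences form the summary.
print(heapq.nlargest(2, sentence_value, key=sentence_value.get))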

Example 2: largest_export_versions

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def largest_export_versions(n):
  """Creates a filter that keeps the largest n export versions.

  Args:
    n: number of versions to keep.

  Returns:
    A filter function that keeps the n largest paths.
  """
  def keep(paths):
    heap = []
    for idx, path in enumerate(paths):
      if path.export_version is not None:
        heapq.heappush(heap, (path.export_version, idx))
    keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
    return sorted(keepers)

  return keep 
Author: ryfeus | Project: lambda-packs | Lines: 20 | Source: gc.py
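
A hedged usage sketch for the function above; the Path namedtuple below is a stand-in for illustration, not necessarily the type the original gc module defines:

import collections

Path = collections.namedtuple('Path', ['path', 'export_version'])

paths = [Path('/tmp/export/%d' % v, v) for v in (3, 1, 7, 5)]
keep = largest_export_versions(2)
print(keep(paths))  # the two paths with the largest export versions, sorted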

Example 3: build_dictionary

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def build_dictionary(sentences, size):
    """
    Create dictionary containing most frequent words in the sentences
    :param sentences: sequence of sentence that contains words
        Caution: the sequence might be exhausted after calling this function!
    :param size: size of dictionary you want
    :return: dictionary that maps word to index (starting from 1)
    """
    dictionary = defaultdict(int)
    for sentence in sentences:
        for token in sentence:
            dictionary[token] += 1
    frequent_pairs = nlargest(size, dictionary.items(), itemgetter(1))
    words, frequencies = zip(*frequent_pairs)
    result = {word: index + 1 for index, word in enumerate(words)}
    return result 
Author: yohokuno | Project: dl4nlp | Lines: 18 | Source: preprocessing.py
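
A quick usage sketch, assuming build_dictionary from above is in scope (the sentences are made up). Note that nlargest accepts the key function positionally here; it behaves like sorted(..., key=..., reverse=True)[:n], so ties keep their first-seen order:

sentences = [['the', 'cat', 'sat'], ['the', 'dog', 'sat'], ['the', 'cat', 'ran']]
print(build_dictionary(sentences, 2))  # {'the': 1, 'cat': 2}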

Example 4: select_slaves

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def select_slaves(self, candidates, P_U_S, class_vector, X):
        R = []
        for candidate in candidates:
            aggregates = []
            for other in P_U_S:
                if not _tree_contains(candidate[1], other):
                    aggregates.extend([ Inner(a, [ candidate[1], other ]) for a in OPERATORS ])

            R.extend(_select_candidates(aggregates, self.num_slaves, class_vector, self.similarity_measure, X))

        R = sorted(R, key=lambda x: x[0])

        RR = []
        used_nodes = set()
        for candidate in R:
            inner_node = candidate[1]
            found = False
            for tree in inner_node.branches_:
                if tree in used_nodes:
                    found = True
            if not found:
                used_nodes.update(inner_node.branches_)
                RR.append(candidate)

        return heapq.nlargest(self.num_slaves, RR, key=lambda x: x[0]) 
Author: sorend | Project: fylearn | Lines: 27 | Source: fpt.py

Example 5: get_onehot_label_topk

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def get_onehot_label_topk(scores, top_num=1):
    """
    Get the predicted onehot labels based on the topK number.

    Args:
        scores: The predicted scores for all classes, provided by the network
        top_num: The max topK number (default: 1)
    Returns:
        predicted_onehot_labels: The predicted labels (onehot)
    """
    predicted_onehot_labels = []
    scores = np.ndarray.tolist(scores)
    for score in scores:
        onehot_labels_list = [0] * len(score)
        max_num_index_list = list(map(score.index, heapq.nlargest(top_num, score)))
        for i in max_num_index_list:
            onehot_labels_list[i] = 1
        predicted_onehot_labels.append(onehot_labels_list)
    return predicted_onehot_labels 
Author: RandolphVI | Project: Multi-Label-Text-Classification | Lines: 21 | Source: data_helpers.py
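
The index-recovery trick in isolation, with invented scores. Be aware that list.index returns the first match, so duplicate scores would all map to the same index, a known limitation of this pattern:

import heapq

score = [0.1, 0.7, 0.2, 0.9]
top_indices = list(map(score.index, heapq.nlargest(2, score)))
print(top_indices)  # [3, 1]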

Example 6: tag

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def tag(self, data):
        # Beam search: each entry is ((prev_tag2, prev_tag1), log_prob, tag_history).
        now = [(('BOS', 'BOS'), 0.0, [])]
        for w in data:
            stage = {}
            samples = self.status
            if w in self.word:
                samples = self.word[w]
            for s in samples:
                # Emission log-probability of word w given tag s.
                wd = log(self.wd.get((s, w))[1]) - log(self.uni.get(s)[1])
                for pre in now:
                    p = pre[1] + wd + self.trans[(pre[0][0], pre[0][1], s)]
                    if (pre[0][1], s) not in stage or p > stage[(pre[0][1], s)][0]:
                        stage[(pre[0][1], s)] = (p, pre[2] + [s])
            stage = list(map(lambda x: (x[0], x[1][0], x[1][1]), stage.items()))
            # Keep only the N best partial paths.
            now = heapq.nlargest(self.N, stage, key=lambda x: x[1])
        # Pick the single best path, factoring in the end-of-sentence score.
        now = heapq.nlargest(1, stage, key=lambda x: x[1]+self.geteos(x[0][1]))
        return zip(data, now[0][2]) 
Author: bighuang624 | Project: sentiment-analysis-webapp | Lines: 20 | Source: tnt.py

Example 7: eval_one_rating

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def eval_one_rating(idx):
    
    # obtain test items and users
    rating = _testRatings[idx]
    items = _testNegatives[idx]
    u = rating[0]
    gtItem = rating[1]
    items.append(gtItem)
    users = np.full(len(items), u, dtype='int32')
    
    # obtain prediction scores
    map_item_score = {}
    predictions = _model.predict([users, np.array(items)], batch_size=100, verbose=0)
    for i in range(len(items)):
        item = items[i]
        map_item_score[item] = predictions[i]
    items.pop()
    
    # evaluate topk list
    ranklist = heapq.nlargest(_K, map_item_score, key=map_item_score.get)
    hr = getHitRatio(ranklist, gtItem)
    ndcg = getNDCG(ranklist, gtItem)
    return (hr, ndcg) 
Author: wyl6 | Project: Recommender-Systems-Samples | Lines: 25 | Source: evaluate.py
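
The ranking pattern on its own: passing a dict and its .get method as the key ranks the keys by their values (item IDs and scores below are made up):

import heapq

map_item_score = {101: 0.3, 205: 0.9, 309: 0.5}
ranklist = heapq.nlargest(2, map_item_score, key=map_item_score.get)
print(ranklist)  # [205, 309]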

Example 8: estimate

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def estimate(self, u, i):

        if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
            raise PredictionImpossible('User and/or item is unknown.')

        x, y = self.switch(u, i)

        neighbors = [(self.sim[x, x2], r) for (x2, r) in self.yr[y]]
        k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[0])

        # compute weighted average
        sum_sim = sum_ratings = actual_k = 0
        for (sim, r) in k_neighbors:
            if sim > 0:
                sum_sim += sim
                sum_ratings += sim * r
                actual_k += 1

        if actual_k < self.min_k:
            raise PredictionImpossible('Not enough neighbors.')

        est = sum_ratings / sum_sim

        details = {'actual_k': actual_k}
        return est, details 
Author: NicolasHug | Project: Surprise | Lines: 27 | Source: knns.py
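
A reduced sketch of just the neighbor-selection step, with invented (similarity, rating) pairs:

import heapq

neighbors = [(0.9, 4.0), (0.1, 2.0), (0.7, 5.0), (-0.2, 1.0)]  # (sim, rating)
k_neighbors = heapq.nlargest(2, neighbors, key=lambda t: t[0])
print(k_neighbors)  # [(0.9, 4.0), (0.7, 5.0)]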

Example 9: _get_top_eigen_vectors

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def _get_top_eigen_vectors(data: ndarray, n_components: int) -> ndarray:
        """The eigen vectors according to top n_components large eigen values.

        Arguments:
            data {ndarray} -- Training data.
            n_components {int} -- Number of components to keep.

        Returns:
            ndarray -- Eigenvectors with shape (n_cols, n_components).
        """

        # Calculate eigenvalues and eigenvectors of the covariance matrix.
        eigen_values, eigen_vectors = eig(data)
        # Indexes of the n_components largest eigenvalues.
        _indexes = heapq.nlargest(n_components, enumerate(eigen_values),
                                  key=lambda x: x[1])
        indexes = [x[0] for x in _indexes]
        return eigen_vectors[:, indexes] 
Author: tushushu | Project: imylu | Lines: 20 | Source: pca.py
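
The same index selection on a toy matrix, assuming numpy is available; the diagonal matrix is invented so the expected answer is obvious:

import heapq
import numpy as np
from numpy.linalg import eig

data = np.array([[2.0, 0.0], [0.0, 5.0]])
eigen_values, eigen_vectors = eig(data)
top = heapq.nlargest(1, enumerate(eigen_values), key=lambda x: x[1])
indexes = [i for i, _ in top]
print(eigen_vectors[:, indexes])  # the eigenvector column for eigenvalue 5.0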

Example 10: top

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def top(self, num, key=None):
        """
        Get the top N elements from an RDD.

        .. note:: This method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.

        .. note:: It returns the list sorted in descending order.

        >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
        [12]
        >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
        [6, 5]
        >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
        [4, 3, 2]
        """
        def topIterator(iterator):
            yield heapq.nlargest(num, iterator, key=key)

        def merge(a, b):
            return heapq.nlargest(num, a + b, key=key)

        return self.mapPartitions(topIterator).reduce(merge) 
Author: runawayhorse001 | Project: LearningApacheSpark | Lines: 25 | Source: rdd.py
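
The merge logic also works outside Spark: each partition's local top-N list folds into a global top-N (the partitions here are hand-made lists):

import heapq

num = 3
partitions = [[10, 4, 2], [12, 3, 8], [7, 15, 1]]
local_tops = [heapq.nlargest(num, p) for p in partitions]
result = []
for t in local_tops:
    result = heapq.nlargest(num, result + t)
print(result)  # [15, 12, 10]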

Example 11: most_common

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least.  If n is None, then list all element counts.

        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]

        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.items(), key=_itemgetter(1), reverse=True)
        return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) 
Author: war-and-code | Project: jawfish | Lines: 14 | Source: __init__.py
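
The same call pattern outside of Counter (the input string is arbitrary):

import heapq
from collections import Counter
from operator import itemgetter

c = Counter('abcdeabcdabcaba')
print(heapq.nlargest(3, c.items(), key=itemgetter(1)))
# [('a', 5), ('b', 4), ('c', 3)]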

Example 12: most_common

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least.  If n is None, then list all element counts.

        >>> Counter('abracadabra').most_common(3)
        [('a', 5), ('r', 2), ('b', 2)]

        '''
        if n is None:
            return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
        return nlargest(n, self.iteritems(), key=itemgetter(1)) 
Author: rafasashi | Project: razzy-spinner | Lines: 13 | Source: compat.py

Example 13: nearest

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def nearest(self, v, n=10, exclude=None, candidates=None):
        """Return nearest n words and similarities for given word or vector,
        excluding given words.

        If v is a string, look up the corresponding word vector.
        If exclude is None and v is a string, exclude v.
        If candidates is not None, only consider (word, vector)
        values from iterable candidates.
        Return value is a list of (word, similarity) pairs.
        """

        if isinstance(v, StringTypes):
            v, w = self.word_to_unit_vector(v), v
        else:
            v, w = v/numpy.linalg.norm(v), None
        if exclude is None:
            exclude = [] if w is None else set([w])
        if not self._normalized:
            sim = partial(self._item_similarity, v=v)
        else:
            sim = partial(self._item_similarity_normalized, v=v)
        if candidates is None:
            candidates = self.word_to_vector_mapping().iteritems()
        nearest = heapq.nlargest(n+len(exclude), candidates, sim)
        wordsim = [(p[0], sim(p)) for p in nearest if p[0] not in exclude]
        return wordsim[:n] 
Author: cambridgeltl | Project: link-prediction_with_deep-learning | Lines: 28 | Source: wvlib.py

Example 14: most_common

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least.  If n is None, then list all element counts.

        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]

        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
        return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1)) 
Author: glmcdona | Project: meddle | Lines: 14 | Source: collections.py

Example 15: reg_get_last_modified

# Required module: import heapq [as alias]
# Or: from heapq import nlargest [as alias]
def reg_get_last_modified(self, hive_name, count = 1, user = None, start = None, end = None, reg = False):
        '''
        Wrapper function using reg_get_all_keys. These functions can take a while, since all
        subkeys have to be collected before last-write times can be compared.
        '''
        data = nlargest(count, self.reg_get_all_keys(hive_name, user, start, end, reg))
        if reg:
            for t, regname, name in data:
                yield (t, regname, name)
        else:
            for t, name in data: 
                yield (t, name) 
Author: virtualrealitysystems | Project: aumfor | Lines: 14 | Source: registryapi.py
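
The underlying call relies on plain tuple ordering: nlargest compares (lastwrite, name) tuples lexicographically, so the most recently written keys come first (the timestamps and key names below are invented):

from heapq import nlargest

keys = [(1588000000, 'Run'), (1600000000, 'Services'), (1594000000, 'Uninstall')]
print(nlargest(2, keys))  # [(1600000000, 'Services'), (1594000000, 'Uninstall')]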


Note: The heapq.nlargest examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.