當前位置: 首頁>>代碼示例>>Python>>正文


Python metrics.adjusted_mutual_info_score方法代碼示例

本文整理匯總了Python中sklearn.metrics.adjusted_mutual_info_score方法的典型用法代碼示例。如果您正苦於以下問題:Python metrics.adjusted_mutual_info_score方法的具體用法?Python metrics.adjusted_mutual_info_score怎麽用?Python metrics.adjusted_mutual_info_score使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在sklearn.metrics的用法示例。


在下文中一共展示了metrics.adjusted_mutual_info_score方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: test_metrics_from_list

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def test_metrics_from_list():
    """Verify that metric-name strings resolve to the expected sklearn callables."""
    # With no argument the builder falls back to its regression defaults.
    expected_defaults = [
        metrics.explained_variance_score,
        metrics.r2_score,
        metrics.mean_squared_error,
        metrics.mean_absolute_error,
    ]
    assert ModelBuilder.metrics_from_list() == expected_defaults

    # Explicit dotted paths are looked up one by one, order preserved.
    resolved = ModelBuilder.metrics_from_list(
        ["sklearn.metrics.adjusted_mutual_info_score", "sklearn.metrics.r2_score"]
    )
    assert resolved == [metrics.adjusted_mutual_info_score, metrics.r2_score]
開發者ID:equinor,項目名稱:gordo,代碼行數:18,代碼來源:test_builder.py

示例2: evaluate_groups

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def evaluate_groups(true_groups, predicted):
    """Compute per-sample AMI scores and mean confidences for soft group assignments.

    :param true_groups: (B, 1, W, H, 1) ground-truth group ids; 0 marks ignored pixels
    :param predicted: (B, K, W, H, 1) soft group assignments (gammas)
    :return: scores, confidences — two lists of length B
    """
    assert true_groups.ndim == predicted.ndim == 5, true_groups.shape
    n_samples, n_groups = predicted.shape[:2]
    flat_true = true_groups.reshape(n_samples, -1)
    flat_pred = predicted.reshape(n_samples, n_groups, -1)
    # Hard assignment is the argmax over the K groups; its max is the confidence.
    hard_assignment = flat_pred.argmax(1)
    assignment_conf = flat_pred.max(1)

    scores, confidences = [], []
    for sample in range(n_samples):
        # Evaluate only where the ground-truth group is nonzero.
        keep = np.where(flat_true[sample] != 0.0)[0]
        scores.append(adjusted_mutual_info_score(
            flat_true[sample][keep], hard_assignment[sample, keep]))
        confidences.append(np.mean(assignment_conf[sample, keep]))

    return scores, confidences
開發者ID:sjoerdvansteenkiste,項目名稱:Neural-EM,代碼行數:22,代碼來源:utils.py

示例3: bench_k_means

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def bench_k_means(estimator, name, data):
    """Fit *estimator* on *data* and print one row of clustering quality scores.

    Relies on the module-level ground-truth labels ``y``.

    Score meanings:
      homogeneity:          each cluster contains only members of a single class (0 - 1)
      completeness:         all members of a given class land in the same cluster (0 - 1)
      v_measure:            harmonic mean of homogeneity and completeness
      adjusted_rand:        agreement ignoring permutations, chance-normalized
                            (-1 bad, 0 random, 1 perfect)
      adjusted_mutual_info: agreement ignoring permutations
                            (0 random agreement, 1 perfect agreement)
      silhouette:           mean intra-class distance vs. mean distance to the
                            nearest cluster (-1 to 1; 0 indicates overlap)
    """
    estimator.fit(data)
    assigned = estimator.labels_
    row = (name,
           estimator.inertia_,
           metrics.homogeneity_score(y, assigned),
           metrics.completeness_score(y, assigned),
           metrics.v_measure_score(y, assigned),
           metrics.adjusted_rand_score(y, assigned),
           metrics.adjusted_mutual_info_score(y, assigned),
           metrics.silhouette_score(data, assigned, metric='euclidean'))
    print('%-9s \t%i \thomogeneity: %.3f \tcompleteness: %.3f \tv-measure: %.3f \tadjusted-rand: %.3f \t'
          'adjusted-mutual-info: %.3f \tsilhouette: %.3f' % row)
開發者ID:HoussemCharf,項目名稱:FunUtils,代碼行數:28,代碼來源:k_means_clustering.py

示例4: evalClusteringOnLabels

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def evalClusteringOnLabels(clusters, groupLabels, verbose=True):
    """
    Evaluates clustering against ground-truth labels.

    Alternative methodology to label prediction for testing.

    :param clusters: predicted cluster assignments
    :param groupLabels: ground-truth group labels
    :param verbose: if True, print the three scores
    :return: dict with keys 'MI' (adjusted mutual info),
             'RAND' (adjusted Rand index) and 'FM' (Fowlkes-Mallows)
    """
    results = [
        metrics.adjusted_mutual_info_score(clusters, groupLabels),
        metrics.adjusted_rand_score(clusters, groupLabels),
        metrics.fowlkes_mallows_score(clusters, groupLabels),
    ]
    if verbose:
        # BUG FIX: RAND previously printed results[2] (the FM score), so the
        # Rand index was never shown; it is results[1].
        print(f"MI: {results[0]:.2f}, RAND {results[1]:.2f}, FM: {results[2]:.2f}")
    return dict(zip(['MI', 'RAND', 'FM'], np.array(results)))
開發者ID:VHRanger,項目名稱:nodevectors,代碼行數:14,代碼來源:graph_eval.py

示例5: benchmarking

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def benchmarking(gtlabels, labels):
    """Score predicted cluster labels against ground truth.

    TODO: the AMI definition used in the paper differs from the one in the
    sklearn python package — modify accordingly.

    :return: (ari, ami, nmi, acc) tuple of clustering scores
    """
    n_eval = len(gtlabels)
    truth = gtlabels[:n_eval]
    pred = labels[:n_eval]  # truncate predictions to the evaluated range
    ari = metrics.adjusted_rand_score(truth, pred)
    ami = metrics.adjusted_mutual_info_score(truth, pred)
    nmi = metrics.normalized_mutual_info_score(truth, pred)
    acc = clustering_accuracy(truth, pred)

    return ari, ami, nmi, acc
開發者ID:shahsohil,項目名稱:DCC,代碼行數:12,代碼來源:DCCComputation.py

示例6: mutual_info_kmeans_scorer

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def mutual_info_kmeans_scorer(self, min_similarity):
    """Run the k-means scorer with adjusted mutual information as the metric."""
    return self.kmeans_scorer(metrics.adjusted_mutual_info_score, min_similarity)
開發者ID:EricSchles,項目名稱:drifter_ml,代碼行數:7,代碼來源:structural_tests.py

示例7: mutual_info_dbscan_scorer

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def mutual_info_dbscan_scorer(self, min_similarity):
    """Run the DBSCAN scorer with adjusted mutual information as the metric."""
    return self.dbscan_scorer(metrics.adjusted_mutual_info_score, min_similarity)
開發者ID:EricSchles,項目名稱:drifter_ml,代碼行數:7,代碼來源:structural_tests.py

示例8: _compute_ami_score

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def _compute_ami_score(labels, predictions):
  """Compute AMI via sklearn wrapped in a py_func, clamped to be non-negative."""
  raw_score = script_ops.py_func(
      metrics.adjusted_mutual_info_score,
      [labels, predictions],
      [dtypes.float64],
      name='ami')
  ami_score = math_ops.to_float(raw_score)
  # AMI can be slightly negative by chance; floor the result at zero.
  return math_ops.maximum(0.0, ami_score)
開發者ID:CongWeilin,項目名稱:cluster-loss-tensorflow,代碼行數:9,代碼來源:metric_loss_ops.py

示例9: adjusted_mutual_information

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def adjusted_mutual_information(x, tx, y, ty, ffactor=3, maxdev=3):
    """Adjusted mutual information between two discretized sequences."""
    disc_x, disc_y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
    try:
        return adjusted_mutual_info_score(disc_x, disc_y)
    except ValueError:
        # Column-vector inputs need squeezing down to 1-D before scoring.
        return adjusted_mutual_info_score(disc_x.squeeze(1), disc_y.squeeze(1))
開發者ID:FenTechSolutions,項目名稱:CausalDiscoveryToolbox,代碼行數:8,代碼來源:features.py

示例10: predict

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def predict(self, a, b, **kwargs):
    """Perform the independence test.

    :param a: input data
    :param b: input data
    :type a: array-like, numerical data
    :type b: array-like, numerical data
    :return: dependency statistic (1=Highly dependent, 0=Not dependent)
    :rtype: float
    """
    # Histogram binning rule; defaults to Freedman-Diaconis.
    binning_alg = kwargs.get('bins', 'fd')
    binned_a = bin_variable(a, bins=binning_alg)
    binned_b = bin_variable(b, bins=binning_alg)
    return metrics.adjusted_mutual_info_score(binned_a, binned_b)
開發者ID:FenTechSolutions,項目名稱:CausalDiscoveryToolbox,代碼行數:15,代碼來源:all_types.py

示例11: bench_k_means

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def bench_k_means(estimator, name, data):
    """Fit *estimator* on *data*, timing the fit, and print a row of scores.

    Depends on module-level ``labels`` (ground truth) and ``sample_size``
    (subsampling for the silhouette computation).
    """
    start = time()
    estimator.fit(data)
    fitted = estimator.labels_
    print('% 9s   %.2fs    %i   %.3f   %.3f   %.3f   %.3f   %.3f    %.3f'
          % (name,
             (time() - start),
             estimator.inertia_,
             metrics.homogeneity_score(labels, fitted),
             metrics.completeness_score(labels, fitted),
             metrics.v_measure_score(labels, fitted),
             metrics.adjusted_rand_score(labels, fitted),
             metrics.adjusted_mutual_info_score(labels, fitted),
             metrics.silhouette_score(data, fitted,
                                      metric='euclidean',
                                      sample_size=sample_size)))
開發者ID:PacktPublishing,項目名稱:Computer-Vision-with-Python-3,代碼行數:15,代碼來源:plot_kmeans_digits.py

示例12: test_adjusted_mutual_info_score

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def test_adjusted_mutual_info_score(self):
    """Accessor result must match sklearn applied to (target, pred) directly."""
    via_accessor = self.df.metrics.adjusted_mutual_info_score()
    via_sklearn = metrics.adjusted_mutual_info_score(self.target, self.pred)
    self.assertEqual(via_accessor, via_sklearn)
開發者ID:pandas-ml,項目名稱:pandas-ml,代碼行數:6,代碼來源:test_metrics.py

示例13: calculate_AMI

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def calculate_AMI(self, query_labels, cluster_labels, **kwargs):
    """Adjusted mutual information between query labels and cluster labels."""
    return adjusted_mutual_info_score(query_labels, cluster_labels)
開發者ID:KevinMusgrave,項目名稱:pytorch-metric-learning,代碼行數:4,代碼來源:accuracy_calculator.py

示例14: evaluate_clustering_performance

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def evaluate_clustering_performance(clusters, labels):
    """Print clustering quality metrics, grouped by subspace dimensionality.

    Clusters sharing the same set of dimensions are merged into one flat
    labeling (cluster i -> label i+1, unassigned points -> 0) and scored
    against the ground-truth *labels*.
    """
    # Collect the distinct dimension sets present among the clusters.
    unique_dims = set()
    for cluster in clusters:
        unique_dims.add(frozenset(cluster.dimensions))

    # Evaluating performance in each dimensionality separately.
    for dim in unique_dims:
        print("\nEvaluating clusters in dimension: ", list(dim))
        # Clusters living in exactly this dimension set.
        same_dim = [c for c in clusters if c.dimensions == dim]
        clustering_labels = np.zeros(np.shape(labels))
        for idx, c in enumerate(same_dim):
            clustering_labels[list(c.data_point_ids)] = idx + 1

        print("Number of clusters: ", len(same_dim))
        print("Adjusted Rand index: ", metrics.adjusted_rand_score(
            labels, clustering_labels))
        print("Mutual Information: ", metrics.adjusted_mutual_info_score(
            labels, clustering_labels))

        print("Homogeneity, completeness, V-measure: ",
              metrics.homogeneity_completeness_v_measure(labels, clustering_labels))

        print("Fowlkes-Mallows: ",
              metrics.fowlkes_mallows_score(labels, clustering_labels))
開發者ID:georgekatona,項目名稱:Clique,代碼行數:30,代碼來源:Clique.py

示例15: compute_adjusted_evaluations

# 需要導入模塊: from sklearn import metrics [as 別名]
# 或者: from sklearn.metrics import adjusted_mutual_info_score [as 別名]
def compute_adjusted_evaluations(self, labels_families,
                                     predicted_clusters):
    """Store adjusted Rand and adjusted mutual info scores on this instance.

    When no ground-truth families are available both scores are set to 0.
    """
    if labels_families is None:
        self.adjusted_rand_score = 0
        self.adjusted_mutual_info_score = 0
        return
    self.adjusted_rand_score = metrics.adjusted_rand_score(
        labels_families, predicted_clusters)
    # 'arithmetic' averaging matches sklearn's current default normalization.
    self.adjusted_mutual_info_score = metrics.adjusted_mutual_info_score(
        labels_families, predicted_clusters, average_method='arithmetic')
開發者ID:ANSSI-FR,項目名稱:SecuML,代碼行數:12,代碼來源:perf_indicators.py


注:本文中的sklearn.metrics.adjusted_mutual_info_score方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。