

Python cluster.adjusted_mutual_info_score method code examples

This article collects typical usage examples of the sklearn.metrics.cluster.adjusted_mutual_info_score method in Python. If you are wondering what cluster.adjusted_mutual_info_score does, how to call it, or what real-world uses look like, the selected code examples below may help. You can also explore further usage examples for the containing module, sklearn.metrics.cluster.


The following presents 8 code examples of the cluster.adjusted_mutual_info_score method, sorted by popularity by default.
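
Before the project examples, here is a minimal usage sketch of the method on toy data; the label values below are illustrative only and do not come from any of the projects shown later.

from sklearn.metrics.cluster import adjusted_mutual_info_score

# Two labelings of the same six samples: the cluster IDs are permuted,
# but the partition is identical, and AMI is invariant to relabeling.
labels_true = [0, 0, 1, 1, 2, 2]
labels_pred = [1, 1, 0, 0, 2, 2]

print(adjusted_mutual_info_score(labels_true, labels_pred))  # 1.0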

Example 1: test_perfect_matches

# Required import: from sklearn.metrics import cluster [as alias]
# Or: from sklearn.metrics.cluster import adjusted_mutual_info_score [as alias]
def test_perfect_matches():
    for score_func in score_funcs:
        assert_equal(score_func([], []), 1.0)
        assert_equal(score_func([0], [1]), 1.0)
        assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
        assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
        assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
        assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
        assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
    score_funcs_with_changing_means = [
        normalized_mutual_info_score,
        adjusted_mutual_info_score,
    ]
    means = {"min", "geometric", "arithmetic", "max"}
    for score_func in score_funcs_with_changing_means:
        for mean in means:
            assert score_func([], [], mean) == 1.0
            assert score_func([0], [1], mean) == 1.0
            assert score_func([0, 0, 0], [0, 0, 0], mean) == 1.0
            assert score_func([0, 1, 0], [42, 7, 42], mean) == 1.0
            assert score_func([0., 1., 0.], [42., 7., 42.], mean) == 1.0
            assert score_func([0., 1., 2.], [42., 7., 2.], mean) == 1.0
            assert score_func([0, 1, 2], [42, 7, 2], mean) == 1.0 
Author: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 25, Source file: test_supervised.py

Example 2: test_future_warning

# Required import: from sklearn.metrics import cluster [as alias]
# Or: from sklearn.metrics.cluster import adjusted_mutual_info_score [as alias]
def test_future_warning():
    score_funcs_with_changing_means = [
        normalized_mutual_info_score,
        adjusted_mutual_info_score,
    ]
    warning_msg = "The behavior of "
    args = [0, 0, 0], [0, 0, 0]
    for score_func in score_funcs_with_changing_means:
        assert_warns_message(FutureWarning, warning_msg, score_func, *args) 
Author: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 11, Source file: test_supervised.py

Example 3: test_adjusted_mutual_info_score

# Required import: from sklearn.metrics import cluster [as alias]
# Or: from sklearn.metrics.cluster import adjusted_mutual_info_score [as alias]
def test_adjusted_mutual_info_score():
    # Compute the Adjusted Mutual Information and test against known values
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Mutual information
    mi = mutual_info_score(labels_a, labels_b)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided sparse contingency
    C = contingency_matrix(labels_a, labels_b, sparse=True)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided dense contingency
    C = contingency_matrix(labels_a, labels_b)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # Expected mutual information
    n_samples = C.sum()
    emi = expected_mutual_information(C, n_samples)
    assert_almost_equal(emi, 0.15042, 5)
    # Adjusted mutual information
    ami = adjusted_mutual_info_score(labels_a, labels_b)
    assert_almost_equal(ami, 0.27502, 5)
    ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
    assert_equal(ami, 1.0)
    # Test with a very large array
    a110 = np.array([list(labels_a) * 110]).flatten()
    b110 = np.array([list(labels_b) * 110]).flatten()
    ami = adjusted_mutual_info_score(a110, b110)
    # This is not accurate to more than 2 places
    assert_almost_equal(ami, 0.37, 2) 
Author: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 32, Source file: test_supervised.py
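
The test above checks MI, expected MI, and AMI separately. As a hedged sketch of how those quantities combine, AMI can be reconstructed by hand as (MI - E[MI]) / (mean(H(A), H(B)) - E[MI]). This assumes expected_mutual_information is importable from sklearn.metrics.cluster, as in the test, and uses arithmetic-mean normalization (older scikit-learn releases defaulted to "max"), so the manual value only matches adjusted_mutual_info_score for the matching average_method.

import numpy as np
from sklearn.metrics.cluster import (adjusted_mutual_info_score,
                                     contingency_matrix,
                                     expected_mutual_information,
                                     mutual_info_score)

labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])

def label_entropy(labels):
    # Shannon entropy (in nats) of the empirical label distribution.
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log(p))

mi = mutual_info_score(labels_a, labels_b)
C = contingency_matrix(labels_a, labels_b)
emi = expected_mutual_information(C, C.sum())

# AMI = (MI - E[MI]) / (mean(H(A), H(B)) - E[MI]), here with the arithmetic mean.
normalizer = (label_entropy(labels_a) + label_entropy(labels_b)) / 2
manual_ami = (mi - emi) / (normalizer - emi)

print(manual_ami)
print(adjusted_mutual_info_score(labels_a, labels_b, average_method="arithmetic"))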

Example 4: test_exactly_zero_info_score

# Required import: from sklearn.metrics import cluster [as alias]
# Or: from sklearn.metrics.cluster import adjusted_mutual_info_score [as alias]
def test_exactly_zero_info_score():
    # Check numerical stability when information is exactly zero
    for i in np.logspace(1, 4, 4).astype(np.int):
        labels_a, labels_b = (np.ones(i, dtype=np.int),
                              np.arange(i, dtype=np.int))
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(v_measure_score(labels_a, labels_b), 0.0)
        assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
        for method in ["min", "geometric", "arithmetic", "max"]:
            assert adjusted_mutual_info_score(labels_a, labels_b,
                                              method) == 0.0
            assert normalized_mutual_info_score(labels_a, labels_b,
                                                method) == 0.0 
Author: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 16, Source file: test_supervised.py

Example 5: validate

# Required import: from sklearn.metrics import cluster [as alias]
# Or: from sklearn.metrics.cluster import adjusted_mutual_info_score [as alias]
def validate( measure, classes, clustering ):
	if measure == "nmi":
		return normalized_mutual_info_score( classes, clustering )
	elif measure == "ami":
		return adjusted_mutual_info_score( classes, clustering )
	elif measure == "ari":
		return adjusted_rand_score( classes, clustering )
	log.error("Unknown validation measure: %s" % measure )
	return None

# -------------------------------------------------------------- 
Author: derekgreene, Project: topic-ensemble, Lines of code: 13, Source file: eval-partition-accuracy.py
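
For context, a hedged usage sketch of the three measures that validate() dispatches on, using toy class and clustering labels (illustrative values only):

from sklearn.metrics.cluster import (adjusted_mutual_info_score,
                                     adjusted_rand_score,
                                     normalized_mutual_info_score)

classes    = [0, 0, 0, 1, 1, 1]    # ground-truth classes
clustering = [0, 0, 1, 1, 1, 2]    # an imperfect clustering of the same items

# These are the values validate() would return for "nmi", "ami" and "ari".
print("nmi:", normalized_mutual_info_score(classes, clustering))
print("ami:", adjusted_mutual_info_score(classes, clustering))
print("ari:", adjusted_rand_score(classes, clustering))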

Example 6: clusterscores

# Required import: from sklearn.metrics import cluster [as alias]
# Or: from sklearn.metrics.cluster import adjusted_mutual_info_score [as alias]
def clusterscores(self):
        target,pred = self.conf2label()
        NMI = normalized_mutual_info_score(target,pred)
        ARI = adjusted_rand_score(target,pred)
        AMI = adjusted_mutual_info_score(target,pred)
        return {'NMI':NMI,'ARI':ARI,'AMI':AMI} 
Author: GT-RIPL, Project: L2C, Lines of code: 8, Source file: metric.py

Example 7: test_exactly_zero_info_score

# Required import: from sklearn.metrics import cluster [as alias]
# Or: from sklearn.metrics.cluster import adjusted_mutual_info_score [as alias]
def test_exactly_zero_info_score():
    # Check numerical stability when information is exactly zero
    for i in np.logspace(1, 4, 4).astype(np.int):
        labels_a, labels_b = (np.ones(i, dtype=np.int),
                              np.arange(i, dtype=np.int))
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(v_measure_score(labels_a, labels_b), 0.0)
        assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0) 
Author: alvarobartt, Project: twitter-stock-recommendation, Lines of code: 11, Source file: test_supervised.py

Example 8: evaluate

# Required import: from sklearn.metrics import cluster [as alias]
# Or: from sklearn.metrics.cluster import adjusted_mutual_info_score [as alias]
def evaluate( self, partition, clustered_ids ):
		# no class info?
		if not self.has_class_info():
			return {}
		# get two clusterings that we can compare
		n = len(clustered_ids)
		classes_subset = np.zeros( n )
		for row in range(n):
			classes_subset[row] = self.class_map[clustered_ids[row]]		
		scores = {}
		scores["external-nmi"] = normalized_mutual_info_score( classes_subset, partition )
		scores["external-ami"] = adjusted_mutual_info_score( classes_subset, partition )
		scores["external-ari"] = adjusted_rand_score( classes_subset, partition )
		return scores 
Author: derekgreene, Project: topic-stability, Lines of code: 16, Source file: validation.py


Note: The sklearn.metrics.cluster.adjusted_mutual_info_score method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and the source code copyright remains with the original authors; please refer to each project's License before distributing or using the code, and do not reproduce this article without permission.