

Python cluster.v_measure_score Function Code Examples

This article collects typical usage examples of the sklearn.metrics.cluster.v_measure_score function in Python. If you have been wondering what exactly v_measure_score does, how to call it, or what it looks like in real code, the curated examples below should help.


The following presents 15 code examples of the v_measure_score function, sorted by popularity by default.
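Before diving into the examples, here is a minimal, self-contained sketch (our own illustration, not taken from any of the projects below) of what v_measure_score does: it takes a ground-truth labeling and a predicted clustering and returns a score between 0.0 and 1.0, where 1.0 means the two partitions agree perfectly up to a renaming of the cluster ids.

from sklearn.metrics.cluster import v_measure_score

true_labels = [0, 0, 1, 1, 2, 2]

# The same partition with the cluster ids renamed scores 1.0,
# because the V-measure is invariant to label permutations.
print(v_measure_score(true_labels, [2, 2, 0, 0, 1, 1]))  # 1.0

# An imperfect clustering (here one true class is split and partly mixed
# into another cluster) scores below 1.0, roughly 0.74 in this case.
print(v_measure_score(true_labels, [0, 0, 1, 1, 1, 2]))

The score is also symmetric in its two arguments, which is why some of the examples below pass the predicted labels first without affecting the result.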

Example 1: test_v_measure_and_mutual_information

def test_v_measure_and_mutual_information(seed=36):
    # Check relation between v_measure, entropy and mutual information
    for i in np.logspace(1, 4, 4).astype(int):
        random_state = np.random.RandomState(seed)
        labels_a, labels_b = (random_state.randint(0, 10, i),
                              random_state.randint(0, 10, i))
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            2.0 * mutual_info_score(labels_a, labels_b) /
                            (entropy(labels_a) + entropy(labels_b)), 0)
        avg = 'arithmetic'
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            normalized_mutual_info_score(labels_a, labels_b,
                                                         average_method=avg)
                            )
Developer: MartinThoma, Project: scikit-learn, Lines of code: 14, Source: test_supervised.py

Example 2: test_k_means_function

def test_k_means_function():
    # test calling the k_means function directly
    # catch output
    from io import StringIO
    import sys
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
                                               verbose=True)
    sys.stdout = old_stdout
    centers = cluster_centers
    assert_equal(centers.shape, (n_clusters, n_features))

    labels = labels
    assert_equal(np.unique(labels).shape[0], n_clusters)

    # check that the label assignments are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(inertia, 0.0)

    # check warning when centers are passed
    with warnings.catch_warnings(record=True) as w:
        k_means(X, n_clusters=n_clusters, init=centers)
        assert_equal(len(w), 1)

    # too many clusters desired
    assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
Developer: Jetafull, Project: scikit-learn, Lines of code: 27, Source: test_k_means.py

Example 3: test_k_means_function

def test_k_means_function():
    # test calling the k_means function directly
    # catch output
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
                                                   sample_weight=None,
                                                   verbose=True)
    finally:
        sys.stdout = old_stdout
    centers = cluster_centers
    assert_equal(centers.shape, (n_clusters, n_features))

    labels = labels
    assert_equal(np.unique(labels).shape[0], n_clusters)

    # check that the label assignments are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(inertia, 0.0)

    # check warning when centers are passed
    assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
                 sample_weight=None, init=centers)

    # too many clusters desired
    assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1,
                  sample_weight=None)

    # kmeans for algorithm='elkan' raises TypeError on sparse matrix
    assert_raise_message(TypeError, "algorithm='elkan' not supported for "
                         "sparse input X", k_means, X=X_csr, n_clusters=2,
                         sample_weight=None, algorithm="elkan")
Developer: daniel-perry, Project: scikit-learn, Lines of code: 33, Source: test_k_means.py

Example 4: test_fitted_model

    def test_fitted_model(self):

        # non centered, sparse centers to check the
        centers = np.array([
            [0.0, 5.0, 0.0, 0.0, 0.0],
            [1.0, 1.0, 4.0, 0.0, 0.0],
            [1.0, 0.0, 0.0, 5.0, 1.0],
            ])
        n_samples = 100
        n_clusters, n_features = centers.shape
        X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                                    cluster_std=1., random_state=42)

        cbook = CoodeBook(n_words=3)
        cbook = cbook.fit(X)  # TODO: Is it needed to reassign, or can it just be cbook.fit(X)?

        # check that the number of clusters centers and distinct labels match
        # the expectation
        centers = cbook.get_dictionary()
        assert_equal(centers.shape, (n_clusters, n_features))

        labels = cbook.predict(X)
        assert_equal(np.unique(labels).shape[0], n_clusters)

        # check that the label assignments are perfect (up to a permutation)
        assert_equal(v_measure_score(true_labels, labels), 1.0)
        assert_greater(cbook.cluster_core.inertia_, 0.0)

        # check that the descriptor looks like the homogeneous PDF used
        # to create the original samples
        cbook_hist = cbook.get_BoF_descriptor(X)
        expected_value = float(1)/cbook.n_words
        for bin_value in cbook_hist[0]:
            assert_less(round(bin_value-expected_value,3), 0.01)
Developer: massich, Project: oct_image_classif, Lines of code: 34, Source: test_codebook.py

Example 5: test_int_input

def test_int_input():
    X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
    for dtype in [np.int32, np.int64]:
        X_int = np.array(X_list, dtype=dtype)
        X_int_csr = sp.csr_matrix(X_int)
        init_int = X_int[:2]

        fitted_models = [
            KMeans(n_clusters=2).fit(X_int),
            KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
            # mini batch kmeans is very unstable on such a small dataset hence
            # we use many inits
            MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
            MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
            MiniBatchKMeans(n_clusters=2, batch_size=2,
                            init=init_int, n_init=1).fit(X_int),
            MiniBatchKMeans(n_clusters=2, batch_size=2,
                            init=init_int, n_init=1).fit(X_int_csr),
        ]

        for km in fitted_models:
            assert_equal(km.cluster_centers_.dtype, np.float64)

        expected_labels = [0, 1, 1, 0, 0, 1]
        scores = np.array([v_measure_score(expected_labels, km.labels_)
                           for km in fitted_models])
        assert_array_equal(scores, np.ones(scores.shape[0]))
Developer: Lavanya-Basavaraju, Project: scikit-learn, Lines of code: 27, Source: test_k_means.py

Example 6: test_k_means_function

def test_k_means_function():
    # test calling the k_means function directly
    # catch output
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
                                                   verbose=True)
    finally:
        sys.stdout = old_stdout
    centers = cluster_centers
    assert_equal(centers.shape, (n_clusters, n_features))

    labels = labels
    assert_equal(np.unique(labels).shape[0], n_clusters)

    # check that the label assignments are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(inertia, 0.0)

    # check warning when centers are passed
    assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
                 init=centers)

    # too many clusters desired
    assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
Developer: Lavanya-Basavaraju, Project: scikit-learn, Lines of code: 26, Source: test_k_means.py

Example 7: test_k_means_perfect_init

def test_k_means_perfect_init():
    try:
        p_suite = []  # PY_suite(suite_name=u'perfect_init')
        for i in range(10):
            X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                                        cluster_std=1., random_state=42)
            km = KMeans(init=centers.copy(), n_clusters=n_clusters,
                        random_state=42, n_init=1).fit(X)
            p_suite += [PY_raises(ValueError, km.fit, [[0., 1.]]),
                        PY_equals(v_measure_score(true_labels, km.labels_), 1.0),
                        PY_equals(km.cluster_centers_.shape, (n_clusters, n_features)),
                        PY_equals(v_measure_score(true_labels, km.labels_), 1.0),
                        PY_greater(km.inertia_, 0.0)
                        ]
        return p_suite
    except Exception:
        return 50
Developer: saemundo, Project: scikit-learn, Lines of code: 17, Source: test_k_means.py

Example 8: test_k_means_plus_plus_init_not_precomputed

def test_k_means_plus_plus_init_not_precomputed():
    try:
        p_suite = []  # PY_suite(suite_name=u'plus_plus_init_not_precomputed')
        for i in range(10):
            X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                                        cluster_std=1., random_state=42)
            km = KMeans(init="k-means++", n_clusters=n_clusters,
                        random_state=42, precompute_distances=False).fit(X)
            p_suite += [PY_raises(ValueError, km.fit, [[0., 1.]]),
                        PY_equals(v_measure_score(true_labels, km.labels_), 1.0),
                        PY_equals(km.cluster_centers_.shape, (n_clusters, n_features)),
                        PY_equals(v_measure_score(true_labels, km.labels_), 1.0),
                        PY_greater(km.inertia_, 0.0)
                        ]
        return p_suite
    except Exception:
        return 50
Developer: saemundo, Project: scikit-learn, Lines of code: 17, Source: test_k_means.py

Example 9: calculate_scores

    def calculate_scores(self):
        x, c, labels = self.x, self.c, self.labels
        self.v_measure = v_measure_score(c, labels)
        self.complete = completeness_score(c, labels)
        self.adjusted_mutual = adjusted_mutual_info_score(c, labels)
        self.adjusted_rand = adjusted_rand_score(c, labels)
        self.silhouette = silhouette_score(x, c)
        self.purity, self.partial_purity = self.__purity__()
Developer: jakobjoachim, Project: text-mining-haw-bachelor, Lines of code: 8, Source: cluster_eval.py

Example 10: test_exactly_zero_info_score

def test_exactly_zero_info_score():
    """Check numerical stability when information is exactly zero"""
    for i in np.logspace(1, 4, 4).astype(int):
        labels_a, labels_b = np.ones(i, dtype=int), np.arange(i, dtype=int)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(v_measure_score(labels_a, labels_b), 0.0)
        assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
Developer: JinguoGao, Project: scikit-learn, Lines of code: 8, Source: test_supervised.py

Example 11: test_k_means_random_init_sparse

def test_k_means_random_init_sparse():
    try:
        p_suite = []  # PY_suite(suite_name=u'init_random_sparse')
        for i in range(10):
            X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                                        cluster_std=1., random_state=42)
            X_csr = sp.csr_matrix(X)
            km = KMeans(init="random", n_clusters=n_clusters,
                        random_state=42).fit(X_csr)
            p_suite += [PY_raises(ValueError, km.fit, [[0., 1.]]),
                        PY_equals(v_measure_score(true_labels, km.labels_), 1.0),
                        PY_equals(km.cluster_centers_.shape, (n_clusters, n_features)),
                        PY_equals(v_measure_score(true_labels, km.labels_), 1.0),
                        PY_greater(km.inertia_, 0.0)
                        ]
        return p_suite
    except Exception:
        return 50
Developer: saemundo, Project: scikit-learn, Lines of code: 18, Source: test_k_means.py

Example 12: test_v_measure_and_mutual_information

def test_v_measure_and_mutual_information(seed=36):
    """Check relation between v_measure, entropy and mutual information"""
    for i in np.logspace(1, 4, 4):
        random_state = np.random.RandomState(seed)
        labels_a, labels_b = random_state.random_integers(0, 10, i),\
            random_state.random_integers(0, 10, i)
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            2.0 * mutual_info_score(labels_a, labels_b) /
                            (entropy(labels_a) + entropy(labels_b)), 0)
Developer: Jetafull, Project: scikit-learn, Lines of code: 9, Source: test_supervised.py

Example 13: test_accuracy

    def test_accuracy(self):
        from sklearn.cluster import KMeans as skKMeans
        n_samples = 100000
        centers = 10
        X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                                    cluster_std=1., random_state=42)

        kmeans_h2o = KMeans(n_gpus=1, n_clusters=centers, random_state=42)
        kmeans_h2o.fit(X)
        kmeans_sk = skKMeans(n_init=1, n_clusters=centers, init='random',
                             random_state=42)
        kmeans_sk.fit(X)

        accuracy_h2o = v_measure_score(kmeans_h2o.labels_, true_labels)
        accuracy_sk = v_measure_score(kmeans_sk.labels_, true_labels)
        # We also want to be either better or at most 10% worse than SKLearn
        # Everything else is horrible and we probably should fix something
        assert accuracy_h2o - accuracy_sk >= -0.1
Developer: wamsiv, Project: h2o4gpu, Lines of code: 18, Source: test_kmeans.py

Example 14: test_mini_batch_k_means_random_init_partial_fit

def test_mini_batch_k_means_random_init_partial_fit():
    km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)

    # use the partial_fit API for online learning
    for X_minibatch in np.array_split(X, 10):
        km.partial_fit(X_minibatch)

    # compute the labeling on the complete dataset
    labels = km.predict(X)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
Developer: Lavanya-Basavaraju, Project: scikit-learn, Lines of code: 10, Source: test_k_means.py

Example 15: test_scaled_weights

def test_scaled_weights():
    # scaling all sample weights by a common factor
    # shouldn't change the result
    sample_weight = np.ones(n_samples)
    for estimator in [KMeans(n_clusters=n_clusters, random_state=42),
                      MiniBatchKMeans(n_clusters=n_clusters, random_state=42)]:
        est_1 = clone(estimator).fit(X)
        est_2 = clone(estimator).fit(X, sample_weight=0.5*sample_weight)
        assert_almost_equal(v_measure_score(est_1.labels_, est_2.labels_), 1.0)
        assert_almost_equal(_sort_centers(est_1.cluster_centers_),
                            _sort_centers(est_2.cluster_centers_))
Developer: daniel-perry, Project: scikit-learn, Lines of code: 11, Source: test_k_means.py


Note: The sklearn.metrics.cluster.v_measure_score examples in this article were compiled from open-source projects hosted on GitHub and other code and documentation platforms. The snippets were selected from projects contributed by their respective developers; copyright of the source code remains with the original authors, and any use or redistribution must follow the corresponding project licenses. Please do not repost without permission.