

Python TSNE.fit_transform Method Code Examples

This article compiles typical usage examples of the Python method sklearn.manifold.t_sne.TSNE.fit_transform. If you are wondering how to use TSNE.fit_transform, or are looking for concrete examples of it in practice, the curated code samples below may help. You can also explore further usage examples of its containing class, sklearn.manifold.t_sne.TSNE.


The following presents 15 code examples of the TSNE.fit_transform method, sorted by popularity by default.
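Before diving into the examples, here is a minimal, hedged usage sketch of TSNE.fit_transform with illustrative parameter values that are not taken from any of the examples below. The examples import TSNE from the private module sklearn.manifold.t_sne, as in older scikit-learn test code; in recent releases the public import path from sklearn.manifold import TSNE is the recommended one. Note also that the snippets below are excerpted from scikit-learn's test suite and rely on module-level imports (such as sys, io.StringIO, numpy as np, scipy, and sklearn testing utilities like check_random_state) that are not repeated in each snippet.

# Minimal usage sketch (illustrative values; assumes a recent scikit-learn release).
import numpy as np
from sklearn.manifold import TSNE  # public import path; the examples below use sklearn.manifold.t_sne

X = np.random.RandomState(0).randn(100, 10)                    # 100 samples, 10 features
tsne = TSNE(n_components=2, perplexity=30.0, random_state=0)   # perplexity must be smaller than n_samples
X_embedded = tsne.fit_transform(X)                             # embedded coordinates, shape (100, 2)
print(X_embedded.shape, tsne.kl_divergence_)                   # final KL divergence of the optimization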

Example 1: test_accessible_kl_divergence

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_accessible_kl_divergence():
    # Ensures that the accessible kl_divergence matches the computed value
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    tsne = TSNE(n_iter_without_progress=2, verbose=2,
                random_state=0, method='exact')

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    # The output needs to contain the accessible kl_divergence as the error at
    # the last iteration
    for line in out.split('\n')[::-1]:
        if 'Iteration' in line:
            _, _, error = line.partition('error = ')
            if error:
                error, _, _ = error.partition(',')
                break
    assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
Developer ID: BasilBeirouti, Project: scikit-learn, Lines of code: 27, Source: test_t_sne.py

Example 2: check_uniform_grid

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def check_uniform_grid(method, seeds=[0, 1, 2], n_iter=1000):
    """Make sure that TSNE can approximately recover a uniform 2D grid

    Due to ties in distances between points in X_2d_grid, this test is platform
    dependent for ``method='barnes_hut'`` due to numerical imprecision.

    Also, t-SNE is not guaranteed to converge to the right solution because a bad
    initialization can lead to convergence to a bad local minimum (the
    optimization problem is non-convex). To avoid breaking the test too often,
    we re-run t-SNE from the final point when the convergence is not good
    enough.
    """
    for seed in seeds:
        tsne = TSNE(n_components=2, init='random', random_state=seed,
                    perplexity=20, n_iter=n_iter, method=method)
        Y = tsne.fit_transform(X_2d_grid)

        try_name = "{}_{}".format(method, seed)
        try:
            assert_uniform_grid(Y, try_name)
        except AssertionError:
            # If the test fails a first time, re-run with init=Y to see if
            # this was caused by a bad initialization. Note that this will
            # also run an early_exaggeration step.
            try_name += ":rerun"
            tsne.init = Y
            Y = tsne.fit_transform(X_2d_grid)
            assert_uniform_grid(Y, try_name)
Developer ID: BasilBeirouti, Project: scikit-learn, Lines of code: 30, Source: test_t_sne.py

Example 3: test_64bit

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_64bit():
    # Ensure 64bit arrays are handled correctly.
    random_state = check_random_state(0)
    methods = ["barnes_hut", "exact"]
    for method in methods:
        for dt in [np.float32, np.float64]:
            X = random_state.randn(100, 2).astype(dt)
            tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0, random_state=0, method=method)
            tsne.fit_transform(X)
Developer ID: sofianehaddad, Project: scikit-learn, Lines of code: 11, Source: test_t_sne.py

Example 4: test_optimization_minimizes_kl_divergence

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_optimization_minimizes_kl_divergence():
    """t-SNE should give a lower KL divergence with more iterations."""
    random_state = check_random_state(0)
    X, _ = make_blobs(n_features=3, random_state=random_state)
    kl_divergences = []
    for n_iter in [200, 250, 300]:
        tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, n_iter=n_iter, random_state=0)
        tsne.fit_transform(X)
        kl_divergences.append(tsne.kl_divergence_)
    assert_less_equal(kl_divergences[1], kl_divergences[0])
    assert_less_equal(kl_divergences[2], kl_divergences[1])
Developer ID: sofianehaddad, Project: scikit-learn, Lines of code: 13, Source: test_t_sne.py

Example 5: test_kl_divergence_not_nan

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_kl_divergence_not_nan(method):
    # Ensure kl_divergence_ is computed at last iteration
    # even though n_iter % n_iter_check != 0, i.e. 1003 % 50 != 0
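    # Note: `method` is provided by pytest parametrization in the original
    # scikit-learn test file (presumably 'barnes_hut' and 'exact').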
    random_state = check_random_state(0)

    X = random_state.randn(50, 2)
    tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                random_state=0, method=method, verbose=0, n_iter=1003)
    tsne.fit_transform(X)

    assert not np.isnan(tsne.kl_divergence_)
Developer ID: amueller, Project: scikit-learn, Lines of code: 13, Source: test_t_sne.py

Example 6: test_n_iter_used

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_n_iter_used():
    # check that the ``n_iter`` parameter has an effect
    random_state = check_random_state(0)
    n_components = 2
    methods = ['exact', 'barnes_hut']
    X = random_state.randn(25, n_components).astype(np.float32)
    for method in methods:
        for n_iter in [251, 500]:
            tsne = TSNE(n_components=n_components, perplexity=1,
                        learning_rate=0.5, init="random", random_state=0,
                        method=method, early_exaggeration=1.0, n_iter=n_iter)
            tsne.fit_transform(X)

            assert tsne.n_iter_ == n_iter - 1
Developer ID: BasilBeirouti, Project: scikit-learn, Lines of code: 16, Source: test_t_sne.py

Example 7: test_reduction_to_one_component

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_reduction_to_one_component():
    """t-SNE should allow reduction to one component (issue #4154)."""
    random_state = check_random_state(0)
    tsne = TSNE(n_components=1)
    X = random_state.randn(5, 2)
    X_embedded = tsne.fit_transform(X)
    assert(np.all(np.isfinite(X_embedded)))
Developer ID: HapeMask, Project: scikit-learn, Lines of code: 9, Source: test_t_sne.py

Example 8: test_preserve_trustworthiness_approximately_with_precomputed_distances

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
    # Nearest neighbors should be preserved approximately.
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    D = squareform(pdist(X), "sqeuclidean")
    tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0, metric="precomputed", random_state=0, verbose=0)
    X_embedded = tsne.fit_transform(D)
    assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1, precomputed=True), 1.0, decimal=1)
Developer ID: sofianehaddad, Project: scikit-learn, Lines of code: 10, Source: test_t_sne.py

Example 9: test_early_exaggeration_used

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_early_exaggeration_used():
    # check that the ``early_exaggeration`` parameter has an effect
    random_state = check_random_state(0)
    n_components = 2
    methods = ['exact', 'barnes_hut']
    X = random_state.randn(25, n_components).astype(np.float32)
    for method in methods:
        tsne = TSNE(n_components=n_components, perplexity=1,
                    learning_rate=100.0, init="pca", random_state=0,
                    method=method, early_exaggeration=1.0)
        X_embedded1 = tsne.fit_transform(X)
        tsne = TSNE(n_components=n_components, perplexity=1,
                    learning_rate=100.0, init="pca", random_state=0,
                    method=method, early_exaggeration=10.0)
        X_embedded2 = tsne.fit_transform(X)

        assert not np.allclose(X_embedded1, X_embedded2)
Developer ID: BasilBeirouti, Project: scikit-learn, Lines of code: 19, Source: test_t_sne.py

Example 10: test_fit_csr_matrix

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_fit_csr_matrix():
    # X can be a sparse matrix.
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
    X_csr = sp.csr_matrix(X)
    tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, random_state=0, method="exact")
    X_embedded = tsne.fit_transform(X_csr)
    assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, decimal=1)
Developer ID: sofianehaddad, Project: scikit-learn, Lines of code: 11, Source: test_t_sne.py

Example 11: test_preserve_trustworthiness_approximately

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_preserve_trustworthiness_approximately():
    """Nearest neighbors should be preserved approximately."""
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    for init in ('random', 'pca'):
        tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
                    init=init, random_state=0)
        X_embedded = tsne.fit_transform(X)
        assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
                            decimal=1)
Developer ID: HapeMask, Project: scikit-learn, Lines of code: 12, Source: test_t_sne.py

Example 12: test_n_iter_without_progress

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_n_iter_without_progress():
    # Use a dummy negative n_iter_without_progress and check output on stdout
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    tsne = TSNE(n_iter_without_progress=-1, verbose=2,
                random_state=1, method='exact')

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    # The output needs to contain the value of n_iter_without_progress
    assert_in("did not make any progress during the "
              "last -1 episodes. Finished.", out)
Developer ID: AlexandreAbraham, Project: scikit-learn, Lines of code: 21, Source: test_t_sne.py

Example 13: test_n_iter_without_progress

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_n_iter_without_progress():
    # Make sure that the parameter n_iter_without_progress is used correctly
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    tsne = TSNE(n_iter_without_progress=2, verbose=2,
                random_state=0, method='exact')

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    # The output needs to contain the value of n_iter_without_progress
    assert_in("did not make any progress during the "
              "last 2 episodes. Finished.", out)
Developer ID: ManrajGrover, Project: scikit-learn, Lines of code: 21, Source: test_t_sne.py

Example 14: test_verbose

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_verbose():
    # Verbose options write to stdout.
    random_state = check_random_state(0)
    tsne = TSNE(verbose=2)
    X = random_state.randn(5, 2)

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    assert("[t-SNE]" in out)
    assert("nearest neighbors..." in out)
    assert("Computed conditional probabilities" in out)
    assert("Mean sigma" in out)
    assert("early exaggeration" in out)
Developer ID: BasilBeirouti, Project: scikit-learn, Lines of code: 22, Source: test_t_sne.py

Example 15: test_min_grad_norm

# Required import: from sklearn.manifold.t_sne import TSNE [as alias]
# Or: from sklearn.manifold.t_sne.TSNE import fit_transform [as alias]
def test_min_grad_norm():
    # Make sure that the parameter min_grad_norm is used correctly
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    min_grad_norm = 0.002
    tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
                random_state=0, method='exact')

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    lines_out = out.split('\n')

    # extract the gradient norm from the verbose output
    gradient_norm_values = []
    for line in lines_out:
        # When the computation is Finished just an old gradient norm value
        # is repeated that we do not need to store
        if 'Finished' in line:
            break

        start_grad_norm = line.find('gradient norm')
        if start_grad_norm >= 0:
            line = line[start_grad_norm:]
            line = line.replace('gradient norm = ', '').split(' ')[0]
            gradient_norm_values.append(float(line))

    # Compute how often the gradient norm is smaller than min_grad_norm
    gradient_norm_values = np.array(gradient_norm_values)
    n_smaller_gradient_norms = \
        len(gradient_norm_values[gradient_norm_values <= min_grad_norm])

    # The gradient norm can be smaller than min_grad_norm at most once,
    # because in the moment it becomes smaller the optimization stops
    assert_less_equal(n_smaller_gradient_norms, 1)
Developer ID: BasilBeirouti, Project: scikit-learn, Lines of code: 43, Source: test_t_sne.py


Note: The sklearn.manifold.t_sne.TSNE.fit_transform examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and redistribution and use should follow the corresponding project licenses. Do not reproduce without permission.