

Python decomposition.SparsePCA code examples

This article collects typical usage examples of the Python class sklearn.decomposition.SparsePCA. If you are wondering how decomposition.SparsePCA is used in practice, or are looking for concrete examples, the curated code examples below may help. You can also explore further usage examples from the containing module, sklearn.decomposition.


The following shows 15 code examples of decomposition.SparsePCA, sorted by popularity by default.
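Before diving into the examples, here is a minimal, self-contained sketch of typical SparsePCA usage; the data is random and purely illustrative:

import numpy as np
from sklearn.decomposition import SparsePCA

# Purely illustrative data: 50 samples with 30 features.
X = np.random.RandomState(0).randn(50, 30)

# Fit a sparse PCA model and project the data onto its components.
spca = SparsePCA(n_components=5, alpha=1, random_state=0)
X_reduced = spca.fit_transform(X)   # shape (50, 5)
print(spca.components_.shape)       # (5, 30); many entries are exactly zero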

Example 1: generate_toy_data

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
    n_features = image_size[0] * image_size[1]

    rng = check_random_state(random_state)
    U = rng.randn(n_samples, n_components)
    V = rng.randn(n_components, n_features)

    centers = [(3, 3), (6, 7), (8, 1)]
    sz = [1, 2, 1]
    for k in range(n_components):
        img = np.zeros(image_size)
        xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
        ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
        img[xmin:xmax][:, ymin:ymax] = 1.0
        V[k, :] = img.ravel()

    # Y is defined by : Y = UV + noise
    Y = np.dot(U, V)
    Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])  # Add noise
    return Y, U, V

# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 25, Source file: test_sparse_pca.py

Example 2: test_pca_vs_spca

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_pca_vs_spca():
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
    Z, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)
    spca = SparsePCA(alpha=0, ridge_alpha=0, n_components=2,
                     normalize_components=True)
    pca = PCA(n_components=2)
    pca.fit(Y)
    spca.fit(Y)
    results_test_pca = pca.transform(Z)
    results_test_spca = spca.transform(Z)
    assert_allclose(np.abs(spca.components_.dot(pca.components_.T)),
                    np.eye(2), atol=1e-5)
    results_test_pca *= np.sign(results_test_pca[0, :])
    results_test_spca *= np.sign(results_test_spca[0, :])
    assert_allclose(results_test_pca, results_test_spca) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 18, Source file: test_sparse_pca.py

Example 3: dim_reduction_method

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def dim_reduction_method(self):
        """
        select dimensionality reduction method
        """
        if self.dim_reduction=='pca':
            return PCA()
        elif self.dim_reduction=='factor-analysis':
            return FactorAnalysis()
        elif self.dim_reduction=='fast-ica':
            return FastICA()
        elif self.dim_reduction=='kernel-pca':
            return KernelPCA()
        elif self.dim_reduction=='sparse-pca':
            return SparsePCA()
        elif self.dim_reduction=='truncated-svd':
            return TruncatedSVD()
        elif self.dim_reduction!=None:
            raise ValueError('%s is not a supported dimensionality reduction method. Valid inputs are: \
                             "pca","factor-analysis","fast-ica","kernel-pca","sparse-pca","truncated-svd".'
                             %(self.dim_reduction)) 
Developer: arnaudvl, Project: ml-parameter-optimization, Lines: 22, Source file: ml_tune.py
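For orientation, a hypothetical way to drive this selector pattern could look like the following; the Tuner stub and its dim_reduction attribute are placeholders invented for illustration, not part of the original project:

from sklearn.decomposition import PCA, SparsePCA

class Tuner:
    """Hypothetical stub holding only the dim_reduction attribute."""
    def __init__(self, dim_reduction=None):
        self.dim_reduction = dim_reduction

    def dim_reduction_method(self):
        # Abbreviated version of the dispatch shown above.
        if self.dim_reduction == 'pca':
            return PCA()
        elif self.dim_reduction == 'sparse-pca':
            return SparsePCA()
        raise ValueError('%s is not a supported dimensionality reduction method.'
                         % self.dim_reduction)

reducer = Tuner('sparse-pca').dim_reduction_method()
print(type(reducer).__name__)  # prints: SparsePCA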

Example 4: test_objectmapper

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_objectmapper(self):
        df = pdml.ModelFrame([])
        self.assertIs(df.decomposition.PCA, decomposition.PCA)
        self.assertIs(df.decomposition.IncrementalPCA,
                      decomposition.IncrementalPCA)
        self.assertIs(df.decomposition.KernelPCA, decomposition.KernelPCA)
        self.assertIs(df.decomposition.FactorAnalysis,
                      decomposition.FactorAnalysis)
        self.assertIs(df.decomposition.FastICA, decomposition.FastICA)
        self.assertIs(df.decomposition.TruncatedSVD, decomposition.TruncatedSVD)
        self.assertIs(df.decomposition.NMF, decomposition.NMF)
        self.assertIs(df.decomposition.SparsePCA, decomposition.SparsePCA)
        self.assertIs(df.decomposition.MiniBatchSparsePCA,
                      decomposition.MiniBatchSparsePCA)
        self.assertIs(df.decomposition.SparseCoder, decomposition.SparseCoder)
        self.assertIs(df.decomposition.DictionaryLearning,
                      decomposition.DictionaryLearning)
        self.assertIs(df.decomposition.MiniBatchDictionaryLearning,
                      decomposition.MiniBatchDictionaryLearning)

        self.assertIs(df.decomposition.LatentDirichletAllocation,
                      decomposition.LatentDirichletAllocation) 
Developer: pandas-ml, Project: pandas-ml, Lines: 24, Source file: test_decomposition.py

Example 5: test_fit_transform

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_fit_transform():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0)
    spca_lars.fit(Y)

    # Test that CD gives similar results
    spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
                           alpha=alpha)
    spca_lasso.fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)

    # Test that deprecated ridge_alpha parameter throws warning
    warning_msg = "The ridge_alpha parameter on transform()"
    assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform,
                         Y, ridge_alpha=0.01)
    assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform,
                         Y, ridge_alpha=None) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 22, Source file: test_sparse_pca.py

Example 6: test_correct_shapes

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_correct_shapes(norm_comp):
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    spca = SparsePCA(n_components=8, random_state=rng,
                     normalize_components=norm_comp)
    U = spca.fit_transform(X)
    assert_equal(spca.components_.shape, (8, 10))
    assert_equal(U.shape, (12, 8))
    # test overcomplete decomposition
    spca = SparsePCA(n_components=13, random_state=rng,
                     normalize_components=norm_comp)
    U = spca.fit_transform(X)
    assert_equal(spca.components_.shape, (13, 10))
    assert_equal(U.shape, (12, 13)) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 16, Source file: test_sparse_pca.py

Example 7: test_fit_transform

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_fit_transform(norm_comp):
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0, normalize_components=norm_comp)
    spca_lars.fit(Y)

    # Test that CD gives similar results
    spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
                           alpha=alpha, normalize_components=norm_comp)
    spca_lasso.fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 15, Source file: test_sparse_pca.py

Example 8: test_fit_transform_parallel

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_fit_transform_parallel(norm_comp):
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0, normalize_components=norm_comp)
    spca_lars.fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
                     random_state=0, normalize_components=norm_comp).fit(Y)
    U2 = spca.transform(Y)
    assert not np.all(spca_lars.components_ == 0)
    assert_array_almost_equal(U1, U2) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 16, Source file: test_sparse_pca.py

Example 9: test_transform_nan

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_transform_nan(norm_comp):
    # Test that SparsePCA won't return NaN when there is 0 feature in all
    # samples.
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    Y[:, 0] = 0
    estimator = SparsePCA(n_components=8, normalize_components=norm_comp)
    assert not np.any(np.isnan(estimator.fit_transform(Y))) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 10, Source file: test_sparse_pca.py

Example 10: test_initialization

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_initialization(norm_comp):
    rng = np.random.RandomState(0)
    U_init = rng.randn(5, 3)
    V_init = rng.randn(3, 4)
    model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
                      random_state=rng, normalize_components=norm_comp)
    model.fit(rng.randn(5, 4))
    if norm_comp:
        assert_allclose(model.components_,
                        V_init / np.linalg.norm(V_init, axis=1)[:, None])
    else:
        assert_allclose(model.components_, V_init) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 14, Source file: test_sparse_pca.py

Example 11: test_scaling_fit_transform

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_scaling_fit_transform():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=rng, normalize_components=True)
    results_train = spca_lars.fit_transform(Y)
    results_test = spca_lars.transform(Y[:10])
    assert_allclose(results_train[0], results_test[0]) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 11, Source file: test_sparse_pca.py

Example 12: spca

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def spca(components, train_matrix, test_matrix):
    """Sparse principal component analysis routine.

    Parameters
    ----------
    components : int
        The number of components to be returned.
    train_matrix : array
        The training features.
    test_matrix : array
        The test features.

    Returns
    -------
    new_train : array
        Extracted training features.
    new_test : array
        Extracted test features.
    """
    msg = 'The number of components must be a positive int greater than 0.'
    assert components > 0, msg

    pca = SparsePCA(n_components=components)
    model = pca.fit(X=train_matrix)
    new_train = model.transform(train_matrix)
    new_test = model.transform(test_matrix)

    return new_train, new_test 
Developer: SUNCAT-Center, Project: CatLearn, Lines: 30, Source file: feature_extraction.py
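For context, a hypothetical call to this helper could look like the following; the matrices are random placeholders rather than CatLearn feature data, and the spca function above is assumed to be in scope:

import numpy as np

# Hypothetical feature matrices: 40 training and 10 test samples, 16 features each.
train_matrix = np.random.RandomState(0).randn(40, 16)
test_matrix = np.random.RandomState(1).randn(10, 16)

new_train, new_test = spca(components=4, train_matrix=train_matrix,
                           test_matrix=test_matrix)
print(new_train.shape, new_test.shape)  # (40, 4) (10, 4)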

Example 13: test_correct_shapes

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_correct_shapes():
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    spca = SparsePCA(n_components=8, random_state=rng)
    U = spca.fit_transform(X)
    assert_equal(spca.components_.shape, (8, 10))
    assert_equal(U.shape, (12, 8))
    # test overcomplete decomposition
    spca = SparsePCA(n_components=13, random_state=rng)
    U = spca.fit_transform(X)
    assert_equal(spca.components_.shape, (13, 10))
    assert_equal(U.shape, (12, 13)) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 14, Source file: test_sparse_pca.py

Example 14: test_fit_transform_parallel

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_fit_transform_parallel():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0)
    spca_lars.fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
                     random_state=0).fit(Y)
    U2 = spca.transform(Y)
    assert_true(not np.all(spca_lars.components_ == 0))
    assert_array_almost_equal(U1, U2) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 16, Source file: test_sparse_pca.py

Example 15: test_transform_nan

# Required import: from sklearn import decomposition [as an alias]
# Or: from sklearn.decomposition import SparsePCA [as an alias]
def test_transform_nan():
    # Test that SparsePCA won't return NaN when there is 0 feature in all
    # samples.
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    Y[:, 0] = 0
    estimator = SparsePCA(n_components=8)
    assert_false(np.any(np.isnan(estimator.fit_transform(Y)))) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 10, Source file: test_sparse_pca.py


Note: The sklearn.decomposition.SparsePCA examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by various developers, and the source code copyright belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.