

Python decomposition.fastica Method Code Examples

This article collects typical usage examples of the Python method sklearn.decomposition.fastica. If you are wondering how decomposition.fastica is used in practice, or are looking for worked examples of it, the curated code samples below may help. You can also explore further usage examples from the sklearn.decomposition module it belongs to.


The following presents four code examples of the decomposition.fastica method, ordered by default according to popularity.
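Before the examples, here is a minimal usage sketch (not taken from the examples below, but consistent with them) showing the call shape: fastica takes a data matrix of shape (n_samples, n_features) and by default returns a whitening matrix K, an unmixing matrix W, and the estimated sources S.

import numpy as np
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
X = rng.random_sample((1000, 5))          # (n_samples, n_features)

# K: (n_components, n_features), W: (n_components, n_components),
# S: (n_samples, n_components)
K, W, S = fastica(X, n_components=2, random_state=0)
print(K.shape, W.shape, S.shape)          # (2, 5) (2, 2) (1000, 2)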

Example 1: test_fastica_errors

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import fastica [as alias]
# (the test below also relies on numpy as np, sklearn.decomposition.FastICA,
#  and the assert_raises_regex helper from sklearn's test utilities)
def test_fastica_errors():
    n_features = 3
    n_samples = 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    w_init = rng.randn(n_features + 1, n_features + 1)
    assert_raises_regex(ValueError, 'max_iter should be greater than 1',
                        FastICA, max_iter=0)
    assert_raises_regex(ValueError, r'alpha must be in \[1,2\]',
                        fastica, X, fun_args={'alpha': 0})
    assert_raises_regex(ValueError, 'w_init has invalid shape.+'
                        r'should be \(3L?, 3L?\)',
                        fastica, X, w_init=w_init)
    assert_raises_regex(ValueError,
                        'Invalid algorithm.+must be.+parallel.+or.+deflation',
                        fastica, X, algorithm='pizza') 
Author: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines of code: 18 | Source: test_fastica.py
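To see one of the validations above in isolation, a small sketch (my own, not part of the original test) that triggers the invalid-algorithm error:

import numpy as np
from sklearn.decomposition import fastica

X = np.random.RandomState(0).random_sample((10, 3))
try:
    fastica(X, algorithm='pizza')   # only 'parallel' and 'deflation' are valid
except ValueError as exc:
    print(exc)                      # message names the accepted algorithms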

Example 2: test_non_square_fastica

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import fastica [as alias]
# (the test below also relies on numpy as np and on the center_and_norm and
#  assert_almost_equal helpers used throughout sklearn's test suite)
def test_non_square_fastica(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)

    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)

    center_and_norm(m)

    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T

    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))

    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3) 
Author: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines of code: 43 | Source: test_fastica.py
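The same idea can be checked without the test scaffolding. The brief sketch below (mine, not from the test) mixes two known sources through a non-square matrix, runs fastica, and verifies that the recovered components correlate with the originals up to sign and ordering:

import numpy as np
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
t = np.linspace(0, 100, 1000)
S_true = np.c_[np.sin(t), np.sign(np.sin(3 * t))]   # two sources, shape (1000, 2)
X = S_true @ rng.randn(2, 6)                        # non-square mixing -> (1000, 6)

K, W, S = fastica(X, n_components=2, random_state=0)

# ICA recovers sources only up to sign, order and scale, so compare by
# absolute correlation: each true source should match one estimate closely.
corr = np.abs(np.corrcoef(S_true.T, S.T))[:2, 2:]
print(corr.max(axis=1))                             # both values close to 1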

Example 3: test_fastica

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import fastica [as alias]
# (the test below also relies on sklearn.datasets, pandas_ml imported as pdml,
#  and pandas' testing utilities imported as tm)
def test_fastica(self):
        iris = datasets.load_iris()
        df = pdml.ModelFrame(iris)

        result = df.decomposition.fastica(random_state=self.random_state)
        expected = decomposition.fastica(iris.data,
                                         random_state=self.random_state)

        self.assertEqual(len(result), 3)
        self.assertIsInstance(result[0], pdml.ModelFrame)
        tm.assert_index_equal(result[0].index, df.data.columns)
        self.assert_numpy_array_almost_equal(result[0].values, expected[0])

        self.assertIsInstance(result[1], pdml.ModelFrame)
        self.assert_numpy_array_almost_equal(result[1].values, expected[1])

        self.assertIsInstance(result[2], pdml.ModelFrame)
        tm.assert_index_equal(result[2].index, df.index)
        self.assert_numpy_array_almost_equal(result[2].values, expected[2])

        result = df.decomposition.fastica(return_X_mean=True,
                                          random_state=self.random_state)
        expected = decomposition.fastica(iris.data, return_X_mean=True,
                                         random_state=self.random_state)

        self.assertEqual(len(result), 4)
        self.assertIsInstance(result[0], pdml.ModelFrame)
        tm.assert_index_equal(result[0].index, df.data.columns)
        self.assert_numpy_array_almost_equal(result[0].values, expected[0])

        self.assertIsInstance(result[1], pdml.ModelFrame)
        self.assert_numpy_array_almost_equal(result[1].values, expected[1])

        self.assertIsInstance(result[2], pdml.ModelFrame)
        tm.assert_index_equal(result[2].index, df.index)
        self.assert_numpy_array_almost_equal(result[2].values, expected[2])

        self.assert_numpy_array_almost_equal(result[3], expected[3]) 
Author: pandas-ml | Project: pandas-ml | Lines of code: 40 | Source: test_decomposition.py
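For reference, the plain-sklearn calls the ModelFrame wrapper above delegates to look roughly like this (a sketch of mine, not from pandas-ml), illustrating the three- versus four-value return the test compares:

from sklearn import datasets
from sklearn.decomposition import fastica

iris = datasets.load_iris()                                   # data shape (150, 4)

K, W, S = fastica(iris.data, random_state=0)                  # 3 return values
K, W, S, X_mean = fastica(iris.data, return_X_mean=True,      # 4 values with the mean
                          random_state=0)
print(K.shape, W.shape, S.shape, X_mean.shape)                # (4, 4) (4, 4) (150, 4) (4,)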

Example 4: _discover_structure

# Required import: from sklearn import decomposition [as alias]
# Or: from sklearn.decomposition import fastica [as alias]
# (the snippet below also relies on numpy as np, a module-level logger, and a
#  few LiNGAM helper methods; see the sketch after the snippet)
def _discover_structure(data):

        # Add a random noise uniformly distributed to avoid singularity
        # when performing the ICA
        data += np.random.random_sample(data.shape)

        # Create the ICA node to get the inverse of the mixing matrix
        k, w, _ = decomposition.fastica(data)

        w = np.dot(w, k)
        n = w.shape[0]
        best_nzd = float("inf")
        best_slt = float("inf")
        best_w_permuted = w
        causality_matrix = None
        causal_perm = None

        if n < 9:
            perm = LiNGAM._perms(n)

            for i in range(perm.shape[1]):
                perm_matrix = np.eye(n)
                perm_matrix = perm_matrix[:, perm[:, i]]
                w_permuted = perm_matrix.dot(w)
                cost = LiNGAM._cost_non_zero_diag(w_permuted)
                if cost < best_nzd:
                    best_nzd = cost
                    best_w_permuted = w_permuted

            w_opt = best_w_permuted

            w_opt = w_opt / np.diag(w_opt).reshape((n, 1))
            b_matrix = np.eye(n) - w_opt
            best_b_permuted = b_matrix
            best_i = 0

            for i in range(perm.shape[1]):
                b_permuted = b_matrix[:, perm[:, i]][perm[:, i], :]
                cost = LiNGAM._cost_strictly_lower_triangular(
                    b_permuted)
                if cost < best_slt:
                    best_slt = cost
                    best_i = i
                    best_b_permuted = b_permuted

            causal_perm = perm[:, best_i]
            causality_matrix = b_matrix

            percent_upper = best_slt / np.sum(best_b_permuted ** 2)

            if percent_upper > 0.2:
                # TODO(David): Change that code to raise an exception instead
                logger.error("LiNGAM failed to run on the data set")
                logger.error(
                    "--> B permuted matrix is at best {}% lower triangular"
                    .format(percent_upper))

        return causality_matrix, causal_perm 
Author: openstack | Project: monasca-analytics | Lines of code: 60 | Source: lingam.py
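The snippet relies on a few LiNGAM helpers that are not shown on this page. Purely as hypothetical sketches of what they might look like (the actual monasca-analytics implementations may differ; in the project these are static methods on the LiNGAM class):

import itertools
import numpy as np

def _perms(n):
    # All permutations of range(n), one permutation per column -> shape (n, n!).
    return np.array(list(itertools.permutations(range(n)))).T

def _cost_non_zero_diag(w):
    # Hypothetical cost favouring permutations that keep the diagonal far from zero.
    return np.sum(1.0 / np.abs(np.diag(w)))

def _cost_strictly_lower_triangular(b):
    # Hypothetical cost: squared mass on and above the diagonal (the diagonal of
    # B is ~0 here), i.e. how far B is from being strictly lower triangular.
    return np.sum(np.triu(b) ** 2)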


Note: the sklearn.decomposition.fastica examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.