

Python decomposition.fastica Code Examples

This article collects and summarizes typical usage examples of the sklearn.decomposition.fastica function in Python. If you are unsure what decomposition.fastica does, how to call it, or what working code looks like, the hand-picked examples below should help. You can also browse the other usage examples available for sklearn.decomposition.


Four code examples of decomposition.fastica are shown below, ordered by popularity by default.
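
Before the project-sourced examples, here is a minimal usage sketch (not taken from any of the projects below; the variable names are our own) showing the three values fastica returns: the whitening matrix K, the un-mixing matrix W, and the estimated sources S.

import numpy as np
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
t = np.linspace(0, 8, 2000)
# Two independent, non-Gaussian sources mixed by a random 2x2 matrix.
sources = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]
X = sources @ rng.randn(2, 2)

# fastica returns (K, W, S): K whitens the data, W un-mixes it, and S holds
# the estimated sources, so that S.T ~= W @ K @ (X - X.mean(axis=0)).T.
K, W, S = fastica(X, n_components=2, random_state=0)
print(K.shape, W.shape, S.shape)  # (2, 2) (2, 2) (2000, 2)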

Example 1: test_fastica_errors

# Required import: from sklearn import decomposition
# Or: from sklearn.decomposition import fastica
# (This test also assumes: import numpy as np; FastICA and fastica from
#  sklearn.decomposition; and assert_raises_regex from sklearn's testing
#  utilities -- sklearn.utils.testing in older releases, sklearn.utils._testing later.)
def test_fastica_errors():
    n_features = 3
    n_samples = 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    w_init = rng.randn(n_features + 1, n_features + 1)
    assert_raises_regex(ValueError, 'max_iter should be greater than 1',
                        FastICA, max_iter=0)
    assert_raises_regex(ValueError, r'alpha must be in \[1,2\]',
                        fastica, X, fun_args={'alpha': 0})
    assert_raises_regex(ValueError, 'w_init has invalid shape.+'
                        r'should be \(3L?, 3L?\)',
                        fastica, X, w_init=w_init)
    assert_raises_regex(ValueError,
                        'Invalid algorithm.+must be.+parallel.+or.+deflation',
                        fastica, X, algorithm='pizza') 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 18, Source file: test_fastica.py

Example 2: test_non_square_fastica

# Required import: from sklearn import decomposition
# Or: from sklearn.decomposition import fastica
# (This test also assumes: import numpy as np; assert_almost_equal from
#  numpy.testing; and the center_and_norm helper defined in test_fastica.py.)
def test_non_square_fastica(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)

    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)

    center_and_norm(m)

    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T

    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))

    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 43, Source file: test_fastica.py
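
A quick sketch of the same non-square setting outside the test harness (our own variable names; only numpy and sklearn are assumed). With six observed channels mixed from two sources, the whitening matrix K is no longer square because it also reduces the dimensionality.

import numpy as np
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
t = np.linspace(0, 100, 1000)
S_true = np.c_[np.sin(t), np.sign(np.sin(3 * t))]   # (1000, 2) sources
X = S_true @ rng.randn(2, 6)                        # (1000, 6) observed mixtures

K, W, S_est = fastica(X, n_components=2, random_state=0)
print(K.shape, W.shape, S_est.shape)                # (2, 6) (2, 2) (1000, 2)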

Example 3: test_fastica

# Required import: from sklearn import decomposition
# Or: from sklearn.decomposition import fastica
# (This test method also assumes: import pandas_ml as pdml; datasets and
#  decomposition from sklearn; and pandas' testing helpers imported as tm.)
def test_fastica(self):
        iris = datasets.load_iris()
        df = pdml.ModelFrame(iris)

        result = df.decomposition.fastica(random_state=self.random_state)
        expected = decomposition.fastica(iris.data,
                                         random_state=self.random_state)

        self.assertEqual(len(result), 3)
        self.assertIsInstance(result[0], pdml.ModelFrame)
        tm.assert_index_equal(result[0].index, df.data.columns)
        self.assert_numpy_array_almost_equal(result[0].values, expected[0])

        self.assertIsInstance(result[1], pdml.ModelFrame)
        self.assert_numpy_array_almost_equal(result[1].values, expected[1])

        self.assertIsInstance(result[2], pdml.ModelFrame)
        tm.assert_index_equal(result[2].index, df.index)
        self.assert_numpy_array_almost_equal(result[2].values, expected[2])

        result = df.decomposition.fastica(return_X_mean=True,
                                          random_state=self.random_state)
        expected = decomposition.fastica(iris.data, return_X_mean=True,
                                         random_state=self.random_state)

        self.assertEqual(len(result), 4)
        self.assertIsInstance(result[0], pdml.ModelFrame)
        tm.assert_index_equal(result[0].index, df.data.columns)
        self.assert_numpy_array_almost_equal(result[0].values, expected[0])

        self.assertIsInstance(result[1], pdml.ModelFrame)
        self.assert_numpy_array_almost_equal(result[1].values, expected[1])

        self.assertIsInstance(result[2], pdml.ModelFrame)
        tm.assert_index_equal(result[2].index, df.index)
        self.assert_numpy_array_almost_equal(result[2].values, expected[2])

        self.assert_numpy_array_almost_equal(result[3], expected[3]) 
Developer: pandas-ml, Project: pandas-ml, Lines of code: 40, Source file: test_decomposition.py
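
For readers who only need the pandas-ml call itself, here is a stripped-down sketch of what the test above exercises (assuming pandas_ml is installed; the return values mirror sklearn.decomposition.fastica, wrapped as ModelFrames).

import pandas_ml as pdml
from sklearn import datasets

df = pdml.ModelFrame(datasets.load_iris())
K, W, S = df.decomposition.fastica(random_state=0)
# Per the assertions in the test above, K is indexed by the feature columns
# and S by the original row index; passing return_X_mean=True adds the data
# mean as a fourth return value.
print(type(K), type(W), type(S))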

Example 4: _discover_structure

# Required import: from sklearn import decomposition
# Or: from sklearn.decomposition import fastica
# (This method also assumes: import numpy as np; a module-level logger; and
#  the LiNGAM class whose _perms and cost helpers it calls -- it is an
#  excerpt from monasca-analytics' lingam.py.)
def _discover_structure(data):

        # Add a random noise uniformly distributed to avoid singularity
        # when performing the ICA
        data += np.random.random_sample(data.shape)

        # Create the ICA node to get the inverse of the mixing matrix
        k, w, _ = decomposition.fastica(data)

        w = np.dot(w, k)
        n = w.shape[0]
        best_nzd = float("inf")
        best_slt = float("inf")
        best_w_permuted = w
        causality_matrix = None
        causal_perm = None

        if n < 9:
            perm = LiNGAM._perms(n)

            for i in range(perm.shape[1]):
                perm_matrix = np.eye(n)
                perm_matrix = perm_matrix[:, perm[:, i]]
                w_permuted = perm_matrix.dot(w)
                cost = LiNGAM._cost_non_zero_diag(w_permuted)
                if cost < best_nzd:
                    best_nzd = cost
                    best_w_permuted = w_permuted

            w_opt = best_w_permuted

            w_opt = w_opt / np.diag(w_opt).reshape((n, 1))
            b_matrix = np.eye(n) - w_opt
            best_b_permuted = b_matrix
            best_i = 0

            for i in range(perm.shape[1]):
                b_permuted = b_matrix[:, perm[:, i]][perm[:, i], :]
                cost = LiNGAM._cost_strictly_lower_triangular(
                    b_permuted)
                if cost < best_slt:
                    best_slt = cost
                    best_i = i
                    best_b_permuted = b_permuted

            causal_perm = perm[:, best_i]
            causality_matrix = b_matrix

            percent_upper = best_slt / np.sum(best_b_permuted ** 2)

            if percent_upper > 0.2:
                # TODO(David): Change that code to raise an exception instead
                logger.error("LiNGAM failed to run on the data set")
                logger.error(
                    "--> B permuted matrix is at best {}% lower triangular"
                    .format(percent_upper))

        return causality_matrix, causal_perm 
Developer: openstack, Project: monasca-analytics, Lines of code: 60, Source file: lingam.py
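
The permutation search in this example rests on the LiNGAM identity: for x = Bx + e with an acyclic B (permutable to strictly lower-triangular) and non-Gaussian e, the ICA un-mixing matrix w @ k equals (I - B) up to a row permutation and a row scaling, which is why the code normalizes by the diagonal and then reads off B as I - W. A tiny numerical sketch under those assumptions (the names below are ours, not from monasca-analytics):

import numpy as np
from itertools import permutations
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
# Ground-truth LiNGAM model x = B x + e with strictly lower-triangular B.
B = np.array([[0.0, 0.0],
              [0.8, 0.0]])
e = rng.laplace(size=(5000, 2))                 # non-Gaussian noise is essential
X = e @ np.linalg.inv(np.eye(2) - B).T          # x = (I - B)^{-1} e, row-wise

k, w, _ = fastica(X, random_state=0)
w_ica = w @ k                                   # ~ P D (I - B): rows permuted and rescaled

# Undo the permutation (pick the row order whose diagonal is farthest from
# zero) and the scaling (normalize each row by its diagonal entry), as
# _discover_structure does above.
order = max(permutations(range(2)),
            key=lambda p: np.abs(np.diag(w_ica[list(p)])).min())
w_fixed = w_ica[list(order)]
w_fixed = w_fixed / np.diag(w_fixed)[:, None]
print(np.round(np.eye(2) - w_fixed, 2))         # should be close to B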


Note: the sklearn.decomposition.fastica examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.