当前位置: 首页>>代码示例>>Python>>正文


Python RandomizedPCA.mean_方法代码示例

本文整理汇总了Python中sklearn.decomposition.RandomizedPCA.mean_方法的典型用法代码示例。如果您正苦于以下问题:Python RandomizedPCA.mean_方法的具体用法?Python RandomizedPCA.mean_怎么用?Python RandomizedPCA.mean_使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在sklearn.decomposition.RandomizedPCA的用法示例。


在下文中一共展示了RandomizedPCA.mean_方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: whitening

# 需要导入模块: from sklearn.decomposition import RandomizedPCA [as 别名]
# 或者: from sklearn.decomposition.RandomizedPCA import mean_ [as 别名]
def whitening(data, npc=None, explainedVar=None):

    """
    Perform whitening prior to Infomax ICA application.

    Whitening is based on Principal Component Analysis using the
    RandomizedPCA class from sklearn.decomposition. The data are
    first z-scored per channel (mean removed, divided by standard
    deviation), then projected onto the principal components.

        Parameters
        ----------
        data : data array [ntsl, nchan] for decomposition
            (ntsl = number of time slices, nchan = number of channels).
        npc : int | None
            The number of components used for PCA decomposition. If None, no
            dimension reduction will be applied and the number of retained
            components will equal the number of channels supplied on
            decomposing data.
            default: npc = None
        explainedVar : float | None
            Must be between 0 and 1. If float, the number of components
            selected matches the number of components with a cumulative
            explained variance of 'explainedVar'. Takes precedence over
            'npc' when set.
            default: explainedVar = None

        Returns
        -------
        whitened_data : data array [ntsl, n_sel] of decomposed sources,
            where n_sel is derived from 'npc' or 'explainedVar'.
        pca : instance of RandomizedPCA
            The fitted PCA object, with its 'mean_' attribute replaced by
            the channel means of the raw data and an additional 'stddev_'
            attribute holding the channel standard deviations, so the
            original scaling can be undone later.
    """

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    # NOTE(review): RandomizedPCA was deprecated in scikit-learn 0.18 and
    # removed in 0.20; modern code should use PCA(svd_solver='randomized').
    from sklearn.decomposition import RandomizedPCA


    # -------------------------------------------
    # check input data
    # -------------------------------------------
    ntsl, nchan = data.shape

    if (nchan < 2) or (ntsl < nchan):
        raise ValueError('Data size too small!')


    # -------------------------------------------
    # perform PCA decomposition
    # -------------------------------------------
    X = data.copy()
    whiten = False
    n_components = npc
    dmean = X.mean(axis=0)
    stddev = np.std(X, axis=0)
    # z-score each channel; assumes no channel is constant (stddev > 0)
    X = (X - dmean[np.newaxis, :]) / stddev[np.newaxis, :]

    pca = RandomizedPCA(n_components=n_components, whiten=whiten,
                        copy=True)

    # -------------------------------------------
    # perform whitening
    # -------------------------------------------
    whitened_data = pca.fit_transform(X)


    # -------------------------------------------
    # update PCA structure
    # -------------------------------------------
    # store the mean/std of the *raw* data (pca.mean_ would otherwise hold
    # the mean of the z-scored data, which is ~0 and useless for un-scaling)
    pca.mean_ = dmean
    pca.stddev_ = stddev

    # -------------------------------------------
    # check explained variance
    # -------------------------------------------
    if explainedVar:
        # compute the explained variance ratio manually; divide into a new
        # array so pca.explained_variance_ is NOT modified in place
        # (the original code used '/=' on an alias and corrupted it)
        explained_variance_ratio_ = (pca.explained_variance_ /
                                     pca.explained_variance_.sum())
        # number of components whose cumulative variance stays <= threshold
        npc = np.sum(explained_variance_ratio_.cumsum() <= explainedVar)
    elif npc is None:
        npc = nchan

    # return results
    # NOTE(review): npc+1 retains one extra component — on the explainedVar
    # path this includes the component that crosses the threshold; kept
    # as-is to preserve the original behavior for all callers
    return whitened_data[:, :(npc+1)], pca
开发者ID:VolkanChen,项目名称:jumeg,代码行数:86,代码来源:ica.py

示例2: whitening

# 需要导入模块: from sklearn.decomposition import RandomizedPCA [as 别名]
# 或者: from sklearn.decomposition.RandomizedPCA import mean_ [as 别名]
def whitening(data, dim_reduction='',
              npc=None, explainedVar=1.0):

    """
    routine to perform whitening prior to Infomax ICA application
    (whitening is based on Principal Component Analysis from the
    RandomizedPCA package from sklearn.decomposition)

        Parameters
        ----------
        X : data array [ntsl, nchan] for decomposition.
        dim_reduction : {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
            Method for dimension selection. For further information about
            the methods please check the script 'dimension_selection.py'.
            default: dim_reduction='' --> no dimension reduction is performed as
                                          long as not the parameter 'npc' is set.
        npc : int | None
            The number of components used for PCA decomposition. If None, no
            dimension reduction will be applied and max_pca_components will equal
            the number of channels supplied on decomposing data. Only of interest
            when dim_reduction=''
            default: npc = None
        explainedVar : float | None
            Must be between 0 and 1. If float, the number of components
            selected matches the number of components with a cumulative
            explained variance of 'explainedVar'
            default: explainedVar = None

        Returns
        -------
        whitened_data : data array [nchan, ntsl] of decomposed sources
        ica : instance of ICA
            Returns the instance of ICA where all information about the
            PCA decomposition are updated.
        sel : array containing the indices of the selected ICs
            (depends on the variable npc)
    """

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    from sklearn.decomposition import RandomizedPCA
    import dimension_selection as dim_sel


    # -------------------------------------------
    # check input data
    # -------------------------------------------
    ntsl, nchan = data.shape

    if (nchan < 2) or (ntsl < nchan):
        raise ValueError('Data size too small!')


    # -------------------------------------------
    # perform PCA decomposition
    # -------------------------------------------
    X = data.copy()
    whiten = False
    dmean = X.mean(axis=0)
    stddev = np.std(X, axis=0)
    X = (X - dmean[np.newaxis, :]) / stddev[np.newaxis, :]

    pca = RandomizedPCA(n_components=None, whiten=whiten,
                        copy=True)

    # -------------------------------------------
    # perform whitening
    # -------------------------------------------
    whitened_data = pca.fit_transform(X)


    # -------------------------------------------
    # update PCA structure
    # -------------------------------------------
    pca.mean_ = dmean
    pca.stddev_ = stddev

    # -------------------------------------------
    # check dimension selection
    # -------------------------------------------
    if dim_reduction == 'AIC':
        npc, _ = dim_sel.aic_mdl(pca.explained_variance_)
    elif dim_reduction == 'BIC':
        npc = dim_sel.mibs(pca.explained_variance_, ntsl,
                           use_bic=True)
    elif dim_reduction == 'GAP':
        npc = dim_sel.gap(pca.explained_variance_)
    elif dim_reduction == 'MDL':
        _, npc = dim_sel.aic_mdl(pca.explained_variance_)
    elif dim_reduction == 'MIBS':
        npc = dim_sel.mibs(pca.explained_variance_, ntsl,
                           use_bic=False)
    elif dim_reduction == 'explVar':
        # compute explained variance manually
        explained_variance_ratio_ = pca.explained_variance_
        explained_variance_ratio_ /= explained_variance_ratio_.sum()
        npc = np.sum(explained_variance_ratio_.cumsum() <= explainedVar)
    elif npc is None:
        npc = nchan
#.........这里部分代码省略.........
开发者ID:d-van-de-velden,项目名称:jumeg,代码行数:103,代码来源:ica.py


注:本文中的sklearn.decomposition.RandomizedPCA.mean_方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。