本文整理汇总了Python中sklearn.decomposition.RandomizedPCA.stddev_方法的典型用法代码示例。如果您正苦于以下问题:Python RandomizedPCA.stddev_方法的具体用法?Python RandomizedPCA.stddev_怎么用?Python RandomizedPCA.stddev_使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.decomposition.RandomizedPCA
的用法示例。
在下文中一共展示了RandomizedPCA.stddev_方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: whitening
# 需要导入模块: from sklearn.decomposition import RandomizedPCA [as 别名]
# 或者: from sklearn.decomposition.RandomizedPCA import stddev_ [as 别名]
def whitening(data, npc=None, explainedVar=None):
    """
    Perform whitening prior to Infomax ICA application.

    Whitening is based on Principal Component Analysis using
    RandomizedPCA from sklearn.decomposition. The data are
    standardized (zero mean, unit standard deviation per channel)
    before the PCA fit; the channel-wise mean and standard deviation
    are stored on the PCA object for later back-transformation.

    Parameters
    ----------
    data : array, shape [ntsl, nchan]
        Data array for decomposition (time slices x channels).
    npc : int | None
        The number of components used for PCA decomposition. If None,
        no dimension reduction is applied and the number of returned
        components equals the number of channels.
        default: npc = None
    explainedVar : float | None
        Must be between 0 and 1. If float, the number of components
        selected matches the number of components with a cumulative
        explained variance of 'explainedVar' (this overrides 'npc').
        default: explainedVar = None

    Returns
    -------
    whitened_data : array, shape [ntsl, ncomp]
        The decomposed (whitened) sources; ncomp depends on
        'npc' / 'explainedVar'.
    pca : instance of RandomizedPCA
        The fitted PCA object, with the extra attributes 'mean_' and
        'stddev_' set to the channel-wise statistics of the raw data.
    """
    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    import numpy as np
    # NOTE(review): RandomizedPCA was removed in scikit-learn 0.20;
    # modern code should use PCA(svd_solver='randomized') instead.
    from sklearn.decomposition import RandomizedPCA

    # -------------------------------------------
    # check input data
    # -------------------------------------------
    ntsl, nchan = data.shape
    if (nchan < 2) or (ntsl < nchan):
        raise ValueError('Data size too small!')

    # -------------------------------------------
    # perform PCA decomposition on standardized data
    # -------------------------------------------
    X = data.copy()
    whiten = False
    n_components = npc
    dmean = X.mean(axis=0)
    # NOTE(review): a channel with zero variance would cause a
    # division by zero here — presumably the caller guarantees
    # non-constant channels; verify upstream.
    stddev = np.std(X, axis=0)
    X = (X - dmean[np.newaxis, :]) / stddev[np.newaxis, :]
    pca = RandomizedPCA(n_components=n_components, whiten=whiten,
                        copy=True)

    # -------------------------------------------
    # perform whitening
    # -------------------------------------------
    whitened_data = pca.fit_transform(X)

    # -------------------------------------------
    # update PCA structure so the original scaling can be undone later
    # -------------------------------------------
    pca.mean_ = dmean
    pca.stddev_ = stddev

    # -------------------------------------------
    # check explained variance
    # -------------------------------------------
    if explainedVar:
        # compute the explained variance ratio manually; copy first so
        # the in-place normalization does not clobber
        # pca.explained_variance_ (the original aliased it)
        explained_variance_ratio_ = pca.explained_variance_.copy()
        explained_variance_ratio_ /= explained_variance_ratio_.sum()
        npc = np.sum(explained_variance_ratio_.cumsum() <= explainedVar)
    elif npc is None:
        npc = nchan

    # return results (npc+1 columns: one extra component so the
    # cumulative explained variance reaches/exceeds the threshold)
    return whitened_data[:, :(npc+1)], pca
示例2: whitening
# 需要导入模块: from sklearn.decomposition import RandomizedPCA [as 别名]
# 或者: from sklearn.decomposition.RandomizedPCA import stddev_ [as 别名]
def whitening(data, dim_reduction='',
npc=None, explainedVar=1.0):
"""
routine to perform whitening prior to Infomax ICA application
(whitening is based on Principal Component Analysis from the
RandomizedPCA package from sklearn.decomposition)
Parameters
----------
X : data array [ntsl, nchan] for decomposition.
dim_reduction : {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
Method for dimension selection. For further information about
the methods please check the script 'dimension_selection.py'.
default: dim_reduction='' --> no dimension reduction is performed as
long as not the parameter 'npc' is set.
npc : int | None
The number of components used for PCA decomposition. If None, no
dimension reduction will be applied and max_pca_components will equal
the number of channels supplied on decomposing data. Only of interest
when dim_reduction=''
default: npc = None
explainedVar : float | None
Must be between 0 and 1. If float, the number of components
selected matches the number of components with a cumulative
explained variance of 'explainedVar'
default: explainedVar = None
Returns
-------
whitened_data : data array [nchan, ntsl] of decomposed sources
ica : instance of ICA
Returns the instance of ICA where all information about the
PCA decomposition are updated.
sel : array containing the indices of the selected ICs
(depends on the variable npc)
"""
# -------------------------------------------
# import necessary modules
# -------------------------------------------
from sklearn.decomposition import RandomizedPCA
import dimension_selection as dim_sel
# -------------------------------------------
# check input data
# -------------------------------------------
ntsl, nchan = data.shape
if (nchan < 2) or (ntsl < nchan):
raise ValueError('Data size too small!')
# -------------------------------------------
# perform PCA decomposition
# -------------------------------------------
X = data.copy()
whiten = False
dmean = X.mean(axis=0)
stddev = np.std(X, axis=0)
X = (X - dmean[np.newaxis, :]) / stddev[np.newaxis, :]
pca = RandomizedPCA(n_components=None, whiten=whiten,
copy=True)
# -------------------------------------------
# perform whitening
# -------------------------------------------
whitened_data = pca.fit_transform(X)
# -------------------------------------------
# update PCA structure
# -------------------------------------------
pca.mean_ = dmean
pca.stddev_ = stddev
# -------------------------------------------
# check dimension selection
# -------------------------------------------
if dim_reduction == 'AIC':
npc, _ = dim_sel.aic_mdl(pca.explained_variance_)
elif dim_reduction == 'BIC':
npc = dim_sel.mibs(pca.explained_variance_, ntsl,
use_bic=True)
elif dim_reduction == 'GAP':
npc = dim_sel.gap(pca.explained_variance_)
elif dim_reduction == 'MDL':
_, npc = dim_sel.aic_mdl(pca.explained_variance_)
elif dim_reduction == 'MIBS':
npc = dim_sel.mibs(pca.explained_variance_, ntsl,
use_bic=False)
elif dim_reduction == 'explVar':
# compute explained variance manually
explained_variance_ratio_ = pca.explained_variance_
explained_variance_ratio_ /= explained_variance_ratio_.sum()
npc = np.sum(explained_variance_ratio_.cumsum() <= explainedVar)
elif npc is None:
npc = nchan
#.........这里部分代码省略.........