This article collects typical usage examples of the Python method sklearn.decomposition.IncrementalPCA.R: what IncrementalPCA.R does, how it is used, and code that exercises it. The curated example code below may help; you can also explore the containing class, sklearn.decomposition.IncrementalPCA, for more context.
Two code examples of the IncrementalPCA.R method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
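Before the full examples, here is a minimal standalone sketch of the incremental-fitting workflow both examples build on: an IncrementalPCA model is fed successive blocks of data via partial_fit. The data, shapes, and variable names are illustrative assumptions, not taken from the examples.

import numpy as np
from sklearn.decomposition import IncrementalPCA

# Illustrative data: 1000 samples with 50 features each (assumed shapes).
rng = np.random.RandomState(0)
data = rng.rand(1000, 50)

# Fit block by block, as the examples below do for dask chunks.
ipca = IncrementalPCA(n_components=5)
for block in np.array_split(data, 10):
    ipca.partial_fit(block)

factors = ipca.components_.T     # (n_features, n_components)
loadings = ipca.transform(data)  # project the data onto the learnt components
print(loadings.shape)            # (1000, 5)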
Example 1: decomposition
# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import R [as alias]
def decomposition(self,
output_dimension,
normalize_poissonian_noise=False,
algorithm='PCA',
signal_mask=None,
navigation_mask=None,
get=threaded.get,
num_chunks=None,
reproject=True,
bounds=True,
**kwargs):
"""Perform Incremental (Batch) decomposition on the data, keeping n
significant components.
Parameters
----------
output_dimension : int
the number of significant components to keep
normalize_poissonian_noise : bool
If True, scale the SI to normalize Poissonian noise
algorithm : str
        One of ('PCA', 'ORPCA', 'ONMF'). By default ('PCA'),
        IncrementalPCA from scikit-learn is run.
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
num_chunks : int
the number of dask chunks to pass to the decomposition model.
        More chunks require more memory, but should run faster. Will be
        increased to contain at least output_dimension signals.
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not used in the
        decomposition.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not used in the
decomposition.
reproject : bool
Reproject data on the learnt components (factors) after learning.
bounds : {tuple, bool}
The (min, max) values of the data to normalize before learning.
        If a tuple (min, max), those values are used for normalization.
        If True (the default), the extremes are looked up (expensive).
        If False, no normalization is done (learning may be very slow).
        Cannot be True if normalize_poissonian_noise is True.
**kwargs
passed to the partial_fit/fit functions.
Notes
-----
Various algorithm parameters and their default values:
ONMF:
lambda1=1,
kappa=1,
robust=False,
            store_r=False,
batch_size=None
ORPCA:
fast=True,
lambda1=None,
lambda2=None,
method=None,
learning_rate=None,
init=None,
training_samples=None,
momentum=None
PCA:
batch_size=None,
copy=True,
            whiten=False
"""
explained_variance = None
explained_variance_ratio = None
_al_data = self._data_aligned_with_axes
nav_chunks = _al_data.chunks[:self.axes_manager.navigation_dimension]
sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension:]
    num_chunks = 1 if num_chunks is None else num_chunks
    # Smallest navigation chunk size and total number of chunks; each
    # learning block must hold at least output_dimension signals, so grow
    # num_chunks (and hence the blocksize) when the chunks are too small.
    blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
    nblocks = multiply([len(c) for c in nav_chunks])
    if blocksize / output_dimension < num_chunks:
        num_chunks = np.ceil(blocksize / output_dimension)
    blocksize *= num_chunks
    # LEARN
if algorithm == 'PCA':
from sklearn.decomposition import IncrementalPCA
obj = IncrementalPCA(n_components=output_dimension)
method = partial(obj.partial_fit, **kwargs)
reproject = True
elif algorithm == 'ORPCA':
from hyperspy.learn.rpca import ORPCA
kwg = {'fast': True}
kwg.update(kwargs)
obj = ORPCA(output_dimension, **kwg)
method = partial(obj.fit, iterating=True)
elif algorithm == 'ONMF':
#......... part of the code is omitted here .........
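A hypothetical call of the method above, assuming s is a lazy HyperSpy signal; the file name is an illustrative assumption, not from the source:

import hyperspy.api as hs

s = hs.load("spectrum_image.hspy", lazy=True)  # assumed file name
s.decomposition(output_dimension=10,
                algorithm='PCA',
                normalize_poissonian_noise=True)
# Retrieve the learnt components and their loadings.
factors = s.get_decomposition_factors()
loadings = s.get_decomposition_loadings()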
Example 2: decomposition
# Required import: from sklearn.decomposition import IncrementalPCA [as alias]
# Or: from sklearn.decomposition.IncrementalPCA import R [as alias]
def decomposition(self,
normalize_poissonian_noise=False,
algorithm='svd',
output_dimension=None,
signal_mask=None,
navigation_mask=None,
get=threaded.get,
num_chunks=None,
reproject=True,
bounds=False,
**kwargs):
"""Perform Incremental (Batch) decomposition on the data, keeping n
significant components.
Parameters
----------
normalize_poissonian_noise : bool
If True, scale the SI to normalize Poissonian noise
algorithm : str
        One of ('svd', 'PCA', 'ORPCA', 'ONMF'). By default ('svd'),
        dask's lazy SVD decomposition is run.
output_dimension : int
        the number of significant components to keep. If None, keep all
        (only valid for the 'svd' algorithm).
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
num_chunks : int
the number of dask chunks to pass to the decomposition model.
        More chunks require more memory, but should run faster. Will be
        increased to contain at least output_dimension signals.
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not used in the
        decomposition.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not used in the
decomposition.
reproject : bool
Reproject data on the learnt components (factors) after learning.
**kwargs
passed to the partial_fit/fit functions.
Notes
-----
Various algorithm parameters and their default values:
ONMF:
lambda1=1,
kappa=1,
robust=False,
            store_r=False,
batch_size=None
ORPCA:
fast=True,
lambda1=None,
lambda2=None,
method=None,
learning_rate=None,
init=None,
training_samples=None,
momentum=None
PCA:
batch_size=None,
copy=True,
            whiten=False
"""
if bounds:
msg = (
"The `bounds` keyword is deprecated and will be removed "
"in v2.0. Since version > 1.3 this has no effect.")
warnings.warn(msg, VisibleDeprecationWarning)
explained_variance = None
explained_variance_ratio = None
_al_data = self._data_aligned_with_axes
nav_chunks = _al_data.chunks[:self.axes_manager.navigation_dimension]
sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension:]
num_chunks = 1 if num_chunks is None else num_chunks
blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
nblocks = multiply([len(c) for c in nav_chunks])
if algorithm != "svd" and output_dimension is None:
        raise ValueError("With the %s algorithm, output_dimension "
                         "must be specified" % algorithm)
if output_dimension and blocksize / output_dimension < num_chunks:
num_chunks = np.ceil(blocksize / output_dimension)
blocksize *= num_chunks
# LEARN
if algorithm == 'PCA':
from sklearn.decomposition import IncrementalPCA
obj = IncrementalPCA(n_components=output_dimension)
method = partial(obj.partial_fit, **kwargs)
reproject = True
elif algorithm == 'ORPCA':
from hyperspy.learn.rpca import ORPCA
kwg = {'fast': True}
kwg.update(kwargs)
obj = ORPCA(output_dimension, **kwg)
method = partial(obj.fit, iterating=True)
#......... part of the code is omitted here .........
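As above, a hypothetical call of this newer signature, with an assumed file name. With the default 'svd' algorithm, output_dimension may be omitted; every other algorithm requires it:

import hyperspy.api as hs

s = hs.load("spectrum_image.hspy", lazy=True)  # assumed file name
s.decomposition()                     # default: lazy dask SVD, keeps all components
s.decomposition(algorithm='ORPCA',    # other algorithms need an explicit
                output_dimension=10,  # output_dimension
                num_chunks=2)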