This article collects typical usage examples of the Python method sklearn.decomposition.DictionaryLearning.fit_transform. If you are wondering what exactly DictionaryLearning.fit_transform does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also read further about its containing class, sklearn.decomposition.DictionaryLearning.
The following presents 3 code examples of DictionaryLearning.fit_transform, sorted by popularity by default.
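Before the extracted examples, a minimal self-contained sketch of the basic call pattern may be useful; the data shape and parameter values below are purely illustrative and are not taken from any of the examples that follow.

import numpy as np
from sklearn.decomposition import DictionaryLearning

# toy data: 200 samples with 64 features each
X = np.random.RandomState(0).randn(200, 64)

# learn a 10-atom dictionary and return the sparse codes in a single step
dl = DictionaryLearning(n_components=10, alpha=1.0, max_iter=100, random_state=0)
codes = dl.fit_transform(X)    # shape (200, 10): one sparse code per sample
atoms = dl.components_         # shape (10, 64): the learned dictionary atoms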
Example 1: enumerate
# Required import: from sklearn.decomposition import DictionaryLearning [as alias]
# or: from sklearn.decomposition.DictionaryLearning import fit_transform [as alias]
# Assumed to be defined earlier in the original script (not shown in this
# extract): `mov` (movie frames), `Yr` (pixel x time matrix), `n_comps`,
# and `pca` (an sklearn PCA instance).
import numpy as np
import pylab as pl
from sklearn.decomposition import FastICA, DictionaryLearning

pca.fit(mov)
#%%
import cv2
comps = np.reshape(pca.components_, [n_comps, 30, 30])
for count, comp in enumerate(comps):
    pl.subplot(4, 4, count + 1)
    # smooth each PCA component and Otsu-threshold it into a binary mask
    blur = cv2.GaussianBlur(comp.astype(np.float32), (5, 5), 0)
    blur = np.array(blur / np.max(blur) * 255, dtype=np.uint8)
    ret3, th3 = cv2.threshold(
        blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    pl.imshow((th3 * comp).T)
#%%
# sparse dictionary learning directly on the pixel x time matrix
n_comps = 3
dl = DictionaryLearning(n_comps, alpha=1, verbose=True)
comps = dl.fit_transform(Yr.T)
comps = np.reshape(comps, [30, 30, n_comps]).transpose([2, 0, 1])
for count, comp in enumerate(comps):
    pl.subplot(4, 4, count + 1)
    pl.imshow(comp)
#%%
# independent component analysis on the PCA components
N_ICA_COMPS = 8
ica = FastICA(N_ICA_COMPS, max_iter=10000, tol=10e-8)
ica.fit(pca.components_)
#%%
comps = np.reshape(ica.components_, [N_ICA_COMPS, 30, 30])
for count, comp in enumerate(comps):
    # flip the sign so each component's dominant pixel is positive
    idx = np.argmax(np.abs(comp))
    comp = comp * np.sign(comp.flatten()[idx])
    pl.subplot(4, 4, count + 1)
    pl.imshow(comp.T)
Example 2: SC
# Required import: from sklearn.decomposition import DictionaryLearning [as alias]
# or: from sklearn.decomposition.DictionaryLearning import fit_transform [as alias]
class SC(object):
    r"""
    Wrapper for the sklearn DictionaryLearning estimator; performs sparse coding.
    Sparse Coding (Dictionary Learning) has 5 methods:
        - fit(waveforms)
              update the class instance with the Sparse Coding fit
        - fit_transform(waveforms)
              do what fit() does, but additionally return the projection onto the new basis space
        - inverse_transform(A)
              invert the decomposition; returns waveforms for an input A, using Z^\dagger
        - get_basis()
              returns the basis vectors Z^\dagger
        - get_params()
              returns metadata used for the fits
    """

    def __init__(self, num_components=10,
                 catalog_name='unknown',
                 alpha=0.001,
                 transform_alpha=0.01,
                 max_iter=2000,
                 tol=1e-9,
                 n_jobs=1,
                 verbose=True,
                 random_state=None):
        self._decomposition = 'Sparse Coding'
        self._num_components = num_components
        self._catalog_name = catalog_name
        self._alpha = alpha
        self._transform_alpha = transform_alpha
        self._n_jobs = n_jobs
        self._random_state = random_state
        self._DL = DictionaryLearning(n_components=self._num_components,
                                      alpha=self._alpha,
                                      transform_alpha=self._transform_alpha,
                                      max_iter=max_iter,
                                      tol=tol,
                                      n_jobs=self._n_jobs,
                                      verbose=verbose,
                                      random_state=self._random_state)
    def fit(self, waveforms):
        # TODO make sure there are more columns than rows (transpose if not)
        # normalize waveforms
        self._waveforms = waveforms
        self._DL.fit(self._waveforms)

    def fit_transform(self, waveforms):
        # TODO make sure there are more columns than rows (transpose if not)
        # normalize waveforms
        self._waveforms = waveforms
        self._A = self._DL.fit_transform(self._waveforms)
        return self._A
    def inverse_transform(self, A):
        # convert the codes back to waveforms; sklearn's DictionaryLearning has no
        # inverse_transform of its own, so reconstruct via the learned dictionary
        new_waveforms = A @ self._DL.components_
        return new_waveforms
    def get_params(self):
        # TODO know what catalog was used! (include waveform metadata)
        params = self._DL.get_params()
        params['num_components'] = params.pop('n_components')
        params['Decomposition'] = self._decomposition
        return params

    def get_basis(self):
        r"""Return the sparse coding basis vectors (Z^\dagger)."""
        return self._DL.components_
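A brief usage sketch of this wrapper; the waveform array, its shape, and the catalog name below are illustrative assumptions rather than values from the original code.

import numpy as np

waveforms = np.random.RandomState(0).randn(50, 128)  # 50 example waveforms of length 128
sc = SC(num_components=10, catalog_name='demo')
A = sc.fit_transform(waveforms)          # sparse codes, shape (50, 10)
basis = sc.get_basis()                   # dictionary atoms, shape (10, 128)
reconstructed = sc.inverse_transform(A)  # approximate waveforms, shape (50, 128)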
Example 3: TruncatedSVD
# Required import: from sklearn.decomposition import DictionaryLearning [as alias]
# or: from sklearn.decomposition.DictionaryLearning import fit_transform [as alias]
# Assumed context (not shown in this extract): the iris feature matrix.
from sklearn.datasets import load_iris
from sklearn.decomposition import TruncatedSVD
iris_data = load_iris().data

tsvd = TruncatedSVD(2)
tsvd.fit(iris_data)
tsvd.transform(iris_data)
# One advantage of TruncatedSVD over PCA is that TruncatedSVD can operate on
# sparse matrices while PCA cannot.
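# Minimal illustrative sketch of that point (not part of the original example):
# the same data wrapped in a SciPy sparse matrix still works with TruncatedSVD.
from scipy.sparse import csr_matrix
sparse_iris = csr_matrix(iris_data)
TruncatedSVD(2).fit_transform(sparse_iris)  # fine on sparse input
# PCA(2).fit(sparse_iris) would typically reject the sparse matrix instead.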
# Decomposition to classify with DictionaryLearning
from sklearn.decomposition import DictionaryLearning
dl = DictionaryLearning(3)
transformed = dl.fit_transform(iris_data[::2])
transformed[:5]
# array([[ 0.        ,  6.34476574,  0.        ],
#        [ 0.        ,  5.83576461,  0.        ],
#        [ 0.        ,  6.32038375,  0.        ],
#        [ 0.        ,  5.89318572,  0.        ],
#        [ 0.        ,  5.45222715,  0.        ]])
# Next, let's transform (not fit_transform) the testing set:
transformed = dl.transform(iris_data[1::2])
#Putting it all together with Pipelines
#Let's briefly load the iris dataset and seed it with some missing values:
from sklearn.datasets import load_iris