本文整理汇总了Python中sklearn.decomposition.MiniBatchDictionaryLearning.partial_fit方法的典型用法代码示例。如果您正苦于以下问题:Python MiniBatchDictionaryLearning.partial_fit方法的具体用法?Python MiniBatchDictionaryLearning.partial_fit怎么用?Python MiniBatchDictionaryLearning.partial_fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.decomposition.MiniBatchDictionaryLearning
的用法示例。
在下文中一共展示了MiniBatchDictionaryLearning.partial_fit方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_dict_learning_online_partial_fit
# 需要导入模块: from sklearn.decomposition import MiniBatchDictionaryLearning [as 别名]
# 或者: from sklearn.decomposition.MiniBatchDictionaryLearning import partial_fit [as 别名]
def test_dict_learning_online_partial_fit():
    """Check that per-sample ``partial_fit`` reproduces a one-shot batch fit.

    Skipped unconditionally: this test was not actually passing before.
    """
    raise SkipTest
    # ---- everything below is unreachable because of the skip above ----
    n_atoms = 12
    rng = np.random.RandomState(0)
    # Random Gaussian init, rows normalized by their squared L2 norm.
    dict_init = rng.randn(n_atoms, n_features)
    dict_init /= np.sum(dict_init ** 2, axis=1)[:, np.newaxis]
    batch_est = MiniBatchDictionaryLearning(n_atoms, n_iter=10, batch_size=1,
                                            shuffle=False,
                                            dict_init=dict_init,
                                            random_state=0).fit(X)
    online_est = MiniBatchDictionaryLearning(n_atoms, n_iter=1,
                                             dict_init=dict_init,
                                             random_state=0)
    for idx, sample in enumerate(X):
        online_est.partial_fit(sample, iter_offset=idx * online_est.n_iter)
    # Sanity check: the batch dictionary must produce a non-trivial encoding.
    assert_true(not np.all(
        sparse_encode(X, batch_est.components_, alpha=100) == 0))
    assert_array_equal(batch_est.components_, online_est.components_)
示例2: test_dict_learning_online_partial_fit
# 需要导入模块: from sklearn.decomposition import MiniBatchDictionaryLearning [as 别名]
# 或者: from sklearn.decomposition.MiniBatchDictionaryLearning import partial_fit [as 别名]
def test_dict_learning_online_partial_fit():
    """Ten epochs of per-sample ``partial_fit`` should approximately match a
    single ``fit`` run for ``n_iter=10 * len(X)`` iterations."""
    n_atoms = 12
    rng = np.random.RandomState(0)
    # Random Gaussian init, rows normalized by their squared L2 norm.
    dict_init = rng.randn(n_atoms, n_features)
    dict_init /= np.sum(dict_init ** 2, axis=1)[:, np.newaxis]
    batch_est = MiniBatchDictionaryLearning(n_atoms,
                                            n_iter=10 * len(X),
                                            batch_size=1,
                                            alpha=1, shuffle=False,
                                            dict_init=dict_init,
                                            random_state=0).fit(X)
    online_est = MiniBatchDictionaryLearning(n_atoms, alpha=1,
                                             n_iter=1, dict_init=dict_init,
                                             random_state=0)
    # Replay the data ten times, one sample (as a 1-row 2-D array) at a time.
    for _epoch in range(10):
        for row in X:
            online_est.partial_fit(row[np.newaxis, :])
    # The batch dictionary must yield a non-trivial sparse code, and both
    # training schemes should converge to (nearly) the same dictionary.
    assert not np.all(sparse_encode(X, batch_est.components_, alpha=1) == 0)
    assert_array_almost_equal(batch_est.components_, online_est.components_,
                              decimal=2)
示例3: fit
# 需要导入模块: from sklearn.decomposition import MiniBatchDictionaryLearning [as 别名]
# 或者: from sklearn.decomposition.MiniBatchDictionaryLearning import partial_fit [as 别名]
def fit(self, X, y, **dump_kwargs):
    """Fit an online dictionary-learning model on the (X, y) feature matrix.

    NOTE(review): the source was scraped with its indentation flattened; the
    block structure below was reconstructed from syntax — confirm against the
    original project before relying on exact loop/assignment placement.

    Parameters
    ----------
    X, y : passed to ``self.fm_decoder.fm_to_csr`` to build a CSR matrix.
    **dump_kwargs : forwarded to ``self.dump_inter`` in debug mode.

    Returns
    -------
    self : the fitted estimator, with ``dictionary_``, ``code_`` and
        ``n_iter_`` attributes set.
    """
    if self.debug_folder is not None:
        self.dump_init()
    # Decode (X, y) into a sparse CSR matrix of samples x features.
    X_ref = self.fm_decoder.fm_to_csr(X, y)
    # Total number of mini-batch iterations across all epochs.
    n_iter = X_ref.shape[0] * self.n_epochs // self.batch_size
    random_state = check_random_state(self.random_state)
    # Random Gaussian initial dictionary (n_components x n_features).
    dict_init = random_state.randn(self.n_components, X_ref.shape[1])
    # NOTE(review): several keywords here (missing_values, learning_rate,
    # learning_rate_offset, fit_intercept, l1_ratio, debug_info, and the
    # ``deprecated=`` flag on partial_fit below) do not exist on stock
    # scikit-learn's MiniBatchDictionaryLearning — this presumably targets a
    # patched/forked implementation; verify against the class actually used.
    dict_learning = MiniBatchDictionaryLearning(
        n_components=self.n_components,
        alpha=self.alpha,
        transform_alpha=self.alpha,
        fit_algorithm=self.algorithm,
        transform_algorithm=self.algorithm,
        dict_init=dict_init,
        l1_ratio=self.l1_ratio,
        batch_size=self.batch_size,
        shuffle=True,
        fit_intercept=self.fit_intercept,
        n_iter=n_iter,
        missing_values=0,
        learning_rate=self.learning_rate,
        learning_rate_offset=self.learning_rate_offset,
        verbose=3,
        debug_info=self.debug_folder is not None,
        random_state=self.random_state)
    if self.fit_intercept:
        # With an intercept, prepend a constant all-ones atom and widen the
        # code matrix by one column accordingly.
        self.dictionary_ = np.r_[np.ones((1, dict_init.shape[1])),
                                 dict_init]
        self.code_ = np.zeros((X.shape[0], self.n_components + 1))
    else:
        self.dictionary_ = dict_init
        self.code_ = np.zeros((X.shape[0], self.n_components))
    if self.debug_folder is None:
        # Fast path: center the data once, then feed the full CSR matrix to
        # partial_fit once per epoch.
        (X_csr, self.global_mean_,
         self.sample_mean_, self.feature_mean_) = csr_center_data(X_ref)
        for i in range(self.n_epochs):
            dict_learning.partial_fit(X_csr, deprecated=False)
            if self.decreasing_batch_size:
                # Halve the batch size after each epoch.
                dict_learning.set_params(batch_size=
                                         dict_learning.batch_size // 2)
        self.n_iter_ = dict_learning.n_iter_
        self.dictionary_ = dict_learning.components_
        self.code_ = dict_learning.transform(X_csr)
    if self.debug_folder is not None:
        # Debug path: shuffle into ~5 batches per epoch and dump intermediate
        # state (dictionary, partial codes, debug info) after every batch.
        (X_csr, self.global_mean_,
         self.sample_mean_, self.feature_mean_) = csr_center_data(X_ref)
        self.dump_inter(**dump_kwargs)
        for i in range(self.n_epochs):
            permutation = random_state.permutation(X_csr.shape[0])
            batches = gen_batches(X_csr.shape[0],
                                  X_csr.shape[0] // 5 + 1)
            last_seen = 0
            for batch in batches:
                # Track how many permuted rows have been consumed so far, so
                # only the already-seen part of code_ is refreshed below.
                last_seen = max(batch.stop, last_seen)
                dict_learning.partial_fit(X_csr[permutation[batch]],
                                          deprecated=False)
                self.dictionary_ = dict_learning.components_
                self.code_[permutation[:last_seen]] = dict_learning.\
                    transform(X_csr[permutation[:last_seen]])
                self.n_iter_ = dict_learning.n_iter_
                self.dump_inter(debug_dict=dict_learning.debug_info_,
                                **dump_kwargs)
            if self.decreasing_batch_size:
                # Halve the batch size after each epoch.
                dict_learning.set_params(batch_size=
                                         dict_learning.batch_size // 2)
        # Final refresh of the learned dictionary and full code matrix.
        self.dictionary_ = dict_learning.components_
        self.code_ = dict_learning.transform(X_csr)
    return self