This article collects typical usage examples of the sparse_encode function from Python's sklearn.decomposition module. If you have been wondering what exactly sparse_encode does, how to call it, or what real usage looks like, the hand-picked code examples below should help.
The following shows 15 code examples of the sparse_encode function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
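Before the examples, here is a minimal, self-contained sketch of the basic call; the array shapes and the alpha value are illustrative assumptions, not taken from any example below:
import numpy as np
from sklearn.decomposition import sparse_encode

rng = np.random.RandomState(0)
X = rng.randn(8, 64)            # 8 samples, 64 features each
D = rng.randn(12, 64)           # 12 dictionary atoms with the same feature dimension
D /= np.linalg.norm(D, axis=1, keepdims=True)

# code has shape (n_samples, n_components); X is approximated by np.dot(code, D)
code = sparse_encode(X, D, algorithm='lasso_lars', alpha=0.1)
print(code.shape)               # (8, 12)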
Example 1: test_sparse_encode_input
def test_sparse_encode_input():
    # X, n_features, np, check_array, and the assert helpers come from the
    # test module's setup.
    n_components = 100
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    Xf = check_array(X, order='F')
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        a = sparse_encode(X, V, algorithm=algo)
        b = sparse_encode(Xf, V, algorithm=algo)
        assert_array_almost_equal(a, b)
Example 2: test_sparse_encode_shapes
def test_sparse_encode_shapes():
    n_components = 12
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        code = sparse_encode(X, V, algorithm=algo)
        assert_equal(code.shape, (n_samples, n_components))
Example 3: test_sparse_encode_error
def test_sparse_encode_error():
    n_components = 12
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = sparse_encode(X, V, alpha=0.001)
    assert_true(not np.all(code == 0))
    # the reconstruction np.dot(code, V) should stay close to X
    assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
Example 4: run
def run(dimension, raw_data_dir, out_dir):
    with open('{}/filename.list'.format(raw_data_dir), 'r') as fp:
        filenames = fp.read().splitlines()
    sensor_data = list()
    for filename in filenames:
        path = '{}/{}'.format(raw_data_dir, filename)
        with Timer('open {} with ALL sensors'.format(filename)):
            #data = np.genfromtxt(path, usecols=range(1,49)
            data = np.genfromtxt(path, usecols=[1, 4, 13, 16, 18, 26, 31, 32, 37, 38, 39, 40, 9, 11, 22, 23, 41, 10, 12, 24, 25, 29, 30, 42, 43, 44],
                                 delimiter=',').tolist()
        print("# of data:", len(data))
        sensor_data.extend(data)
    with Timer('Sparse Coding...'):
        print("# of ALL data as a whole:", len(sensor_data))
        # sparse_coding is a project-specific helper (Example 12 shows a variant).
        dl = sparse_coding(dimension, sensor_data, out_dir, 1, 10000, 0.00001)
    with open('{}/atoms'.format(out_dir), "w") as op:
        for component in dl.components_:
            line = ', '.join(str(e) for e in component)
            op.write(line + '\n')
    code = sparse_encode(sensor_data, dl.components_)  # the original referenced an undefined `input_x`
    with open('{}/codes'.format(out_dir), "w") as op:
        for coefficient in code:
            line = ', '.join(str(e) for e in coefficient)
            op.write(line + '\n')
Example 5: test_with_sparse_code
def test_with_sparse_code(components=np.loadtxt('components_of_convfeat.txt')):
    # Note: the default argument is loaded once, at function-definition time.
    (X_train, y_train), (X_test, y_test) = util.load_feat_vec()
    X_train_codes = np.loadtxt('sparse_codes_of_convfeat.txt')
    clf = LogisticRegression(penalty='l1', multi_class='ovr')
    clf.fit(X_train_codes, y_train)
    X_test_codes = sparse_encode(X_test, components)
    print("mean accuracy", clf.score(X_test_codes, y_test))
Example 6: test_sparse_encode_positivity
def test_sparse_encode_positivity(positive):
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'threshold'):
        code = sparse_encode(X, V, algorithm=algo, positive=positive)
        if positive:
            assert_true((code >= 0).all())
        else:
            assert_true((code < 0).any())
    # 'omp' does not support the positivity constraint and raises ValueError
    try:
        sparse_encode(X, V, algorithm='omp', positive=positive)
    except ValueError:
        if not positive:
            raise
Example 7: to_sparse
def to_sparse(X, dim):
    sparse_dict = MiniBatchDictionaryLearning(dim)
    sparse_dict.fit(X)
    sparse_vectors = sparse_encode(X, sparse_dict.components_)
    for v in sparse_vectors:
        print(v)
    return sparse_vectors
Example 8: test_sparse_encode_shapes_omp
def test_sparse_encode_shapes_omp():
    rng = np.random.RandomState(0)
    algorithms = ['omp', 'lasso_lars', 'lasso_cd', 'lars', 'threshold']
    for n_components, n_samples in itertools.product([1, 5], [1, 9]):
        X_ = rng.randn(n_samples, n_features)
        dictionary = rng.randn(n_components, n_features)
        for algorithm, n_jobs in itertools.product(algorithms, [1, 3]):
            code = sparse_encode(X_, dictionary, algorithm=algorithm,
                                 n_jobs=n_jobs)
            assert_equal(code.shape, (n_samples, n_components))
Example 9: predict
def predict(self, imgs, neuron_idx=None, penalty_lambda=None, algorithm=None):
    """Get neuron responses to images.

    Parameters
    ----------
    imgs
        Input images; converted to a 2-D (n_samples, n_pixels) matrix.

    Returns
    -------
    response
        Sparse coefficients, one row per image.
    """
    imgs_array = make_2d_input_matrix(imgs)
    if neuron_idx is None:
        dict_to_use = self.w
    else:
        dict_to_use = self.w[neuron_idx:(neuron_idx + 1), :]

    if penalty_lambda is None:
        _lambda = self._lambda
    else:
        _lambda = penalty_lambda
    assert np.isscalar(_lambda)

    if algorithm is None:
        _algorithm = self.algorithm
    else:
        _algorithm = algorithm

    # Let the sparse encoder do the work, with no extra scaling: having
    # /n_samples in the objective is exactly the same as solving each problem
    # separately. The underlying estimator is ElasticNet/Lasso, which fits each
    # column of y separately; each column of y is one stimulus, because
    # imgs_array and dict_to_use are transposed before being passed on
    # (y = imgs_array.T).
    #
    # One subtle detail: internally, alpha is divided by the number of pixels
    # per stimulus (n_features). This cancels the 1/n_samples factor that
    # ElasticNet/Lasso builds into its objective, so the penalty behaves as in
    # the unscaled lasso formulation.
    if _algorithm in ['lasso_lars', 'lasso_cd']:
        response = sparse_encode(imgs_array, dict_to_use, alpha=_lambda,
                                 algorithm=_algorithm, max_iter=10000)
    else:
        assert _algorithm == 'spams'
        # print(imgs_array.dtype, dict_to_use.dtype, _lambda.shape)
        response = lasso(np.asfortranarray(imgs_array.T), D=np.asfortranarray(dict_to_use.T),
                         lambda1=_lambda, mode=2)
        response = response.T.toarray()  # because lasso returns a sparse matrix

    # These can be used for debugging, e.g. for comparison with SPAMS.
    # Note these are per-sample costs.
    self.last_cost_recon = 0.5 * np.sum((imgs_array - np.dot(response, dict_to_use)) ** 2, axis=1)
    self.last_cost_sparsity = _lambda * np.abs(response).sum(axis=1)
    assert self.last_cost_sparsity.shape == (imgs_array.shape[0],)
    assert self.last_cost_recon.shape == (imgs_array.shape[0],)
    self.last_cost = np.mean(self.last_cost_recon + self.last_cost_sparsity)
    return response
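To make the alpha-scaling remark in the comments above concrete, here is a small sketch (shapes and values are illustrative assumptions) showing that sparse_encode with 'lasso_cd' matches a per-sample Lasso fit once alpha is divided by the feature count:
import numpy as np
from sklearn.decomposition import sparse_encode
from sklearn.linear_model import Lasso

rng = np.random.RandomState(0)
D = rng.randn(10, 64)      # 10 atoms over 64 "pixels"
imgs = rng.randn(5, 64)    # 5 stimuli
alpha = 0.5

codes = sparse_encode(imgs, D, algorithm='lasso_cd', alpha=alpha)

# Lasso minimizes (1/(2*n_samples)) * ||y - Xw||^2 + alpha * ||w||_1;
# dividing alpha by n_features (= 64 here) cancels that scaling factor.
lasso = Lasso(alpha=alpha / D.shape[1], fit_intercept=False)
codes_ref = np.vstack([lasso.fit(D.T, img).coef_ for img in imgs])
print(np.allclose(codes, codes_ref, atol=1e-5))  # expected: True, up to solver tolerance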
Example 10: test_sparse_encode
def test_sparse_encode(self):
    iris = datasets.load_iris()
    df = pdml.ModelFrame(iris)
    _, dictionary, _ = decomposition.dict_learning(iris.data, 2, 1,
                                                   random_state=self.random_state)
    result = df.decomposition.sparse_encode(dictionary)
    expected = decomposition.sparse_encode(iris.data, dictionary)
    self.assertIsInstance(result, pdml.ModelFrame)
    tm.assert_index_equal(result.index, df.data.index)
    self.assert_numpy_array_almost_equal(result.values, expected)
Example 11: learning_sparse_coding
def learning_sparse_coding(X, components=None):
    """
    http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.DictionaryLearning.html
    http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.sparse_encode.html
    """
    if components is None:
        print('Learning the dictionary...')
        t0 = time()
        diclearner = MiniBatchDictionaryLearning(n_components=100, verbose=True)
        components = diclearner.fit(X).components_
        np.savetxt('components_of_convfeat.txt', components)
        dt = time() - t0
        print('done in %.2fs.' % dt)
    codes = sparse_encode(X, components)
    np.savetxt('sparse_codes_of_convfeat.txt', codes)
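A hedged sketch of reusing the dictionary saved above (the file name matches the example; new_X is a hypothetical placeholder for fresh feature vectors):
import numpy as np
from sklearn.decomposition import sparse_encode

components = np.loadtxt('components_of_convfeat.txt')
new_X = np.random.randn(4, components.shape[1])  # assumed feature dimension
new_codes = sparse_encode(new_X, components)
print(new_codes.shape)                           # (4, n_components)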
Example 12: sparse_coding
def sparse_coding(n_atom, input_x, out_dir):
    # get_dictionary is a project-specific helper that learns n_atom atoms.
    dictionary = get_dictionary(n_atom, input_x)
    code = sparse_encode(input_x, dictionary)
    np.set_printoptions(precision=3, suppress=True)
    # print(code)
    # print(dictionary)
    with open('{}/atoms'.format(out_dir), "w") as op:
        for component in dictionary:
            line = ', '.join(str(round(e, 3)) for e in component)
            op.write(line + '\n')
    with open('{}/codes'.format(out_dir), "w") as op:
        for coefficient in code:
            line = ', '.join(str(round(e, 3)) for e in coefficient)
            op.write(line + '\n')
    return code
Example 13: test_dict_learning_online_partial_fit
def test_dict_learning_online_partial_fit():
    # this test was not actually passing before!
    raise SkipTest
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dico1 = MiniBatchDictionaryLearning(n_components, n_iter=10, batch_size=1,
                                        shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dico2 = MiniBatchDictionaryLearning(n_components, n_iter=1, dict_init=V,
                                        random_state=0)
    for ii, sample in enumerate(X):
        dico2.partial_fit(sample, iter_offset=ii * dico2.n_iter)
        # if ii == 1: break
    assert_true(not np.all(sparse_encode(X, dico1.components_, alpha=100) == 0))
    assert_array_equal(dico1.components_, dico2.components_)
Example 14: test_dict_learning_online_partial_fit
def test_dict_learning_online_partial_fit():
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
                                        batch_size=1,
                                        alpha=1, shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
                                        n_iter=1, dict_init=V,
                                        random_state=0)
    for i in range(10):
        for sample in X:
            dict2.partial_fit(sample[np.newaxis, :])
    assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)
    assert_array_almost_equal(dict1.components_, dict2.components_,
                              decimal=2)
Example 15: gabor_encode
def gabor_encode(self):
    patches = extract_patches_2d(
        self.img, (self.patch_size, self.patch_size)
    )
    patches = patches.reshape(patches.shape[0], -1)
    # code = sparse_encode(patches, self.kernels, algorithm='threshold', alpha=1)
    code = sparse_encode(
        patches, self.kernels, algorithm='lars', n_nonzero_coefs=2)
    idx = np.std(code, axis=1) > 0.3
    selected_patches = patches  # [idx]
    selected_code = code  # [idx]
    min_code, max_code = np.min(selected_code), np.max(selected_code)
    # print(selected_patches)
    c = 1  # matplotlib subplot indices are 1-based (the original started at 0)
    s = 21
    for i in range(selected_code.shape[0]):
        print(i)
        plt.subplot(s, s * 2, c)
        plt.xticks(())
        plt.gca().set_ylim([min_code, max_code])
        plt.yticks(())
        plt.plot(selected_code[i])
        c += 1
        plt.subplot(s, s * 2, c)
        plt.xticks(())
        plt.yticks(())
        plt.imshow(selected_patches[i].reshape(
            self.patch_size, self.patch_size), cmap='gray', interpolation='none')
        c += 1
    plt.show()
    orientations = np.argmax(code, axis=1)
    activations = np.std(code, axis=1)
    orientations[activations < self.activation_threshold] = self.zero_value
    # blank_batches = np.ones((patches.shape[0], self.patch_size, self.patch_size)) * orientations[:, None, None]
    # recon = reconstruct_from_patches_2d(blank_batches, (self.img_height, self.img_width))
    # return recon
    return orientations.reshape(self.map_height, self.map_width)