This article collects typical usage examples of the Python method sklearn.utils.extmath.safe_sparse_dot. If you have been wondering exactly how extmath.safe_sparse_dot works or how to use it, the curated code examples below may help. You can also read further about the enclosing module, sklearn.utils.extmath.
The following presents code examples of extmath.safe_sparse_dot, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
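Before the project examples, here is a minimal self-contained sketch of what safe_sparse_dot does: it picks the right dot product for any mix of NumPy arrays and SciPy sparse matrices, and dense_output=True converts a sparse result back to a dense ndarray. The data below is made up purely for illustration.

import numpy as np
from scipy import sparse
from sklearn.utils.extmath import safe_sparse_dot

X = sparse.random(5, 3, density=0.5, format='csr', random_state=0)
W = np.ones((3, 2))

# sparse @ dense: works transparently; the result is a dense (5, 2) array
out = safe_sparse_dot(X, W)

# sparse @ sparse: would stay sparse, unless dense_output=True
G = safe_sparse_dot(X, X.T, dense_output=True)
print(out.shape, type(G))  # (5, 2) <class 'numpy.ndarray'>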
Example 1: _fit_owl_fista
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def _fit_owl_fista(X, y, w, loss, max_iter=500, max_linesearch=20, eta=2.0,
                   tol=1e-3, verbose=0):

    # smooth part: evaluate the data loss (and its gradient) at coef
    def sfunc(coef, grad=False):
        y_scores = safe_sparse_dot(X, coef)
        if grad:
            obj, lp = loss(y, y_scores, return_derivative=True)
            grad = safe_sparse_dot(X.T, lp)
            return obj, grad
        else:
            return loss(y, y_scores)

    # non-smooth part: proximal operator of the OWL penalty
    def nsfunc(coef, L):
        return prox_owl(coef, w / L)

    coef = np.zeros(X.shape[1])
    return fista(sfunc, nsfunc, coef, max_iter, max_linesearch,
                 eta, tol, verbose)
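For context, _fit_owl_fista expects loss to be a callable of the form loss(y, y_scores, return_derivative=False) that returns the objective, plus its derivative with respect to y_scores when requested. A hypothetical squared-error loss with that interface might look like the following; the name squared_loss is illustrative and not part of the original project.

import numpy as np

def squared_loss(y, y_scores, return_derivative=False):
    residual = y_scores - y
    obj = 0.5 * np.mean(residual ** 2)
    if return_derivative:
        # derivative of the mean squared objective w.r.t. y_scores
        return obj, residual / len(y)
    return obj

With such a helper, _fit_owl_fista(X, y, w, squared_loss) runs FISTA with prox_owl handling the ordered weighted L1 penalty.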
Example 2: test_sparse_decision_function
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def test_sparse_decision_function():
    # Test decision_function.
    # Sanity check: the decision function implemented in Python returns
    # the same values as the one in libsvm.

    # multi-class:
    svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
    clf = svc.fit(iris.data, iris.target)

    dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
    assert_array_almost_equal(dec, clf.decision_function(iris.data))

    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(int).ravel()])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)
Example 3: score
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def score(self, user, candidates, context):
    # i_mat is (n_item_context, n_item) for all possible items;
    # extract only the target items
    i_mat = self.i_mat[:, candidates]

    n_target = len(candidates)

    # u_mat will be (n_user_context, n_item) for the target user
    u_vec = np.concatenate((user.feature, context))
    u_vec = np.array([u_vec]).T
    u_mat = sp.csr_matrix(np.repeat(u_vec, n_target, axis=1))

    # stack them into a (p, n_item) matrix
    Y = sp.vstack((u_mat, i_mat))
    Y = self.proj.reduce(Y)
    Y = sp.csr_matrix(preprocessing.normalize(Y, norm='l2', axis=0))

    X = np.identity(self.k) - np.dot(self.U_r, self.U_r.T)
    A = safe_sparse_dot(X, Y, dense_output=True)

    return ln.norm(A, axis=0, ord=2)
Example 4: _compute_input_activations
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def _compute_input_activations(self, X):
    """Compute input activations given X"""
    n_samples = X.shape[0]

    mlp_acts = np.zeros((n_samples, self.n_hidden))
    if self._use_mlp_input:
        b = self.components_['biases']
        w = self.components_['weights']
        mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)

    rbf_acts = np.zeros((n_samples, self.n_hidden))
    if self._use_rbf_input:
        radii = self.components_['radii']
        centers = self.components_['centers']
        scale = self.rbf_width * (1.0 - self.alpha)
        rbf_acts = scale * cdist(X, centers) / radii

    self.input_activations_ = mlp_acts + rbf_acts
Example 5: _decision_function
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def _decision_function(self, X):
    return safe_sparse_dot(X, self.coef_)
Example 6: test_svc_with_custom_kernel
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def test_svc_with_custom_kernel():
    def kfunc(x, y):
        return safe_sparse_dot(x, y.T)

    clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
    clf_mylin = svm.SVC(gamma='scale', kernel=kfunc).fit(X_sp, Y)
    assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
Example 7: _rescale_data
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def _rescale_data(X, y, sample_weight):
    """Rescale data so as to support sample_weight."""
    n_samples = X.shape[0]
    sample_weight = sample_weight * np.ones(n_samples)
    sample_weight = np.sqrt(sample_weight)
    sw_matrix = sparse.dia_matrix((sample_weight, 0),
                                  shape=(n_samples, n_samples))
    X = safe_sparse_dot(sw_matrix, X)
    y = safe_sparse_dot(sw_matrix, y)
    return X, y
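The square root is the key detail here: scaling both X and y by sqrt(w_i) turns an ordinary least-squares fit into a weighted one, because ||sqrt(W)(y - Xb)||^2 = sum_i w_i (y_i - x_i·b)^2. A quick self-contained check of that equivalence on made-up data (this snippet is illustrative, not from the original project):

import numpy as np
from scipy import sparse
from sklearn.linear_model import LinearRegression
from sklearn.utils.extmath import safe_sparse_dot

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
y = rng.randn(20)
w = rng.uniform(0.5, 2.0, size=20)

# direct weighted fit
ref = LinearRegression(fit_intercept=False).fit(X, y, sample_weight=w)

# unweighted fit on rescaled data, exactly as _rescale_data does it
sw_matrix = sparse.dia_matrix((np.sqrt(w), 0), shape=(20, 20))
Xs = safe_sparse_dot(sw_matrix, X)
ys = safe_sparse_dot(sw_matrix, y)
alt = LinearRegression(fit_intercept=False).fit(Xs, ys)

print(np.allclose(ref.coef_, alt.coef_))  # True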
Example 8: sparse_johnson_lindenstrauss
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def sparse_johnson_lindenstrauss(A, l, density=None, axis=1, random_state=None):
    """
    Given an m x n matrix A and an integer l, compute a sketch of A by
    projecting it onto a sparse random test matrix (a sparse
    Johnson-Lindenstrauss transform).

    Parameters
    ----------
    density : sparse matrix density
    """
    random_state = check_random_state(random_state)

    A = np.asarray(A)
    if A.ndim != 2:
        raise ValueError('A must be a 2D array, not %dD' % A.ndim)

    if axis not in (0, 1):
        raise ValueError('If supplied, axis must be in (0, 1)')

    if density is None:
        density = log(A.shape[0]) / A.shape[0]

    # construct the sparse sketching matrix
    Omega = _sketches.sparse_random_map(A, l, axis, density, random_state)

    # project A onto Omega
    if axis == 0:
        return safe_sparse_dot(Omega.T, A)
    return safe_sparse_dot(A, Omega)
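The helper _sketches.sparse_random_map is internal to the project this example comes from. As a rough, hypothetical stand-in with the same role, a sparse test matrix whose nonzeros are random signs could be built like this (a simplified sketch, not the project's actual implementation):

import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state

def sparse_random_map_sketch(A, l, density, random_state):
    """Simplified sparse JL test matrix of shape (A.shape[1], l)."""
    rng = check_random_state(random_state)
    # nonzero entries are +1/-1 with equal probability; the 1/sqrt(density*l)
    # factor keeps squared norms roughly preserved in expectation
    Omega = sparse.random(A.shape[1], l, density=density, random_state=rng,
                          data_rvs=lambda n: rng.choice([-1.0, 1.0], size=n))
    return Omega / np.sqrt(density * l)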
Example 9: _mean_hiddens
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def _mean_hiddens(self, v, temperature=1.0):
    """Computes the probabilities P(h=1|v).

    v : array-like, shape (n_samples, n_features)
        Values of the visible layer.

    Returns
    -------
    h : array-like, shape (n_samples, n_components)
        Corresponding mean-field values for the hidden layer.
    """
    p = safe_sparse_dot(v, self.components_.T / temperature)
    p += self.intercept_hidden_ / (min(1.0, temperature) if BIASED_PRIOR
                                   else temperature)
    return expit(p, out=p)
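In standard RBM notation this evaluates P(h_j = 1 | v) = sigmoid((v · W_j + c_j) / T) for every hidden unit j, where W is components_, c is intercept_hidden_, T is the sampling temperature, and expit is the logistic sigmoid; the min(1.0, temperature) branch only changes how the bias is tempered when the project-specific BIASED_PRIOR flag is set.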
Example 10: _free_energy
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def _free_energy(self, v):
    """Computes the free energy F(v) = -log(sum_h exp(-E(v, h))).

    v : array-like, shape (n_samples, n_features)
        Values of the visible layer.

    Returns
    -------
    free_energy : array-like, shape (n_samples,)
        The value of the free energy.
    """
    return (-safe_sparse_dot(v, self.intercept_visible_)
            - np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
                           + self.intercept_hidden_).sum(axis=1))
Example 11: _fit
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def _fit(self, v_pos):
    """Inner fit for one mini-batch.

    Adjust the parameters to maximize the likelihood of v using
    Stochastic Maximum Likelihood (SML).

    v_pos : array-like, shape (n_samples, n_features)
        The data to use for training.
    """
    h_pos = self._mean_hiddens(v_pos)
    # TODO: worth trying with visible probabilities rather than binary states.
    # PG (Hinton's practical guide): 'it is common to use p_i instead of
    # sampling a binary value' ... 'it reduces sampling noise, thus allowing
    # faster learning. There is some evidence that it leads to slightly worse
    # density models.'
    # I'm confounded by the fact that we seem to get more effective models
    # WITHOUT softmax visible units. The only explanation I can think of is
    # that it's like a pseudo-version of using visible probabilities. Without
    # softmax, v_neg can have multiple 1s per one-hot vector, which maybe
    # somehow accelerates learning? Need to think about this some more.
    v_neg = self._sample_visibles(self.h_samples_)
    h_neg = self._mean_hiddens(v_neg)

    lr = float(self.learning_rate) / v_pos.shape[0]
    update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
    update -= np.dot(h_neg.T, v_neg) / self.fantasy_to_batch
    # L2 weight penalty
    update -= self.components_ * self.weight_cost
    self.components_ += lr * update
    self.intercept_hidden_ += lr * (h_pos.sum(axis=0)
                                    - h_neg.sum(axis=0) / self.fantasy_to_batch)
    self.intercept_visible_ += lr * (np.asarray(v_pos.sum(axis=0)).squeeze()
                                     - v_neg.sum(axis=0) / self.fantasy_to_batch)

    h_neg[self.rng_.uniform(size=h_neg.shape) < h_neg] = 1.0  # sample binomial
    self.h_samples_ = np.floor(h_neg, h_neg)
Example 12: reduce
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def reduce(self, Y):
    return safe_sparse_dot(self.E, Y)
Example 13: score
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def score(self, user, candidates, context):
    # i_mat is (n_item_context, n_item) for all possible items;
    # extract only the target items
    i_mat = self.i_mat[:, candidates]

    n_target = len(candidates)

    u_vec = user.encode(dim=self.n_user,
                        index=self.use_index,
                        feature=True,
                        vertical=True)
    u_vec = np.concatenate((u_vec, np.array([context]).T))
    u_mat = sp.csr_matrix(np.repeat(u_vec, n_target, axis=1))

    mat = sp.vstack((u_mat, i_mat))

    # Convert V to CSR so that A and B stay sparse; if V were left dense,
    # both products would come out dense (numpy arrays rather than scipy
    # CSR matrices) and the .data trick below would not work.
    V = sp.csr_matrix(self.V)

    A = safe_sparse_dot(V.T, mat)
    A.data[:] = A.data ** 2

    sq_mat = mat.copy()
    sq_mat.data[:] = sq_mat.data ** 2

    sq_V = V.copy()
    sq_V.data[:] = sq_V.data ** 2

    B = safe_sparse_dot(sq_V.T, sq_mat)

    interaction = (A - B).sum(axis=0)
    interaction /= 2.  # (1, n_item); numpy matrix form

    pred = self.w0 + safe_sparse_dot(self.w, mat, dense_output=True) + interaction

    return np.abs(1. - np.ravel(pred))
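The A - B computation above is Rendle's O(p·k) rewriting of the factorization machine's pairwise interaction term: sum_{i<j} <v_i, v_j> x_i x_j = 1/2 * sum_f [ (sum_i v_{i,f} x_i)^2 - sum_i v_{i,f}^2 x_i^2 ]. A small dense check of that identity on made-up data (illustrative only):

import numpy as np

rng = np.random.RandomState(0)
p, k = 6, 3
x = rng.randn(p)
V = rng.randn(p, k)

# naive pairwise sum over i < j
naive = sum(V[i] @ V[j] * x[i] * x[j]
            for i in range(p) for j in range(i + 1, p))

# Rendle's identity, the same algebra score() vectorizes over all items
fast = 0.5 * np.sum((V.T @ x) ** 2 - (V.T ** 2) @ (x ** 2))

print(np.allclose(naive, fast))  # True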
Example 14: joint_feature
# Required import: from sklearn.utils import extmath [as alias]
# Or: from sklearn.utils.extmath import safe_sparse_dot [as alias]
def joint_feature(self, x, y):
    if isinstance(y, DocLabel):
        Y_prop, Y_link, compat, second_order = self._marg_rounded(x, y)
    else:
        Y_prop, Y_link, compat, second_order = self._marg_fractional(x, y)

    prop_acc = safe_sparse_dot(Y_prop.T, x.X_prop)  # node_cls * node_feats
    link_acc = safe_sparse_dot(Y_link.T, x.X_link)  # link_cls * link_feats

    f_sec_ord = []
    if len(second_order):
        second_order = second_order.reshape(-1, len(x.second_order))
        if self.coparents:
            f_sec_ord.append(safe_sparse_dot(second_order[0], x.X_sec_ord))
            second_order = second_order[1:]
        if self.grandparents:
            f_sec_ord.append(safe_sparse_dot(second_order[0], x.X_sec_ord))
            second_order = second_order[1:]
        if self.siblings:
            f_sec_ord.append(safe_sparse_dot(second_order[0], x.X_sec_ord))
    elif self.n_second_order_factors_:
        # the document has no second-order factors, so this part of the
        # joint feature must be filled with zeros manually
        f_sec_ord = [np.zeros(self.n_second_order_features_)
                     for _ in range(self.n_second_order_factors_)]

    jf = np.concatenate([prop_acc.ravel(), link_acc.ravel(),
                         compat.ravel()] + f_sec_ord)

    return jf