This page collects and summarizes typical usage examples of the cudamat.dot method in Python. If you are wondering what exactly cudamat.dot does, or how to use it, the curated code samples below may help. You can also explore further usage examples from the cudamat module.
The following presents 15 code examples of cudamat.dot, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: dot
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def dot(a1, a2):
    # internally: for matrix-matrix multiplies only; vectors are treated as special cases.
    a1 = as_garray(a1); a2 = as_garray(a2)
    if a1.ndim==0 or a2.ndim==0: return a1*a2
    if a1.ndim==a2.ndim==1:
        if a1 is a2: return sum(a1**2)
        else: return dot(a1.reshape(1, a1.size), a2.reshape(a2.size, 1)).item()
    if a1.ndim==2 and a2.ndim==1: return dot(a1, a2.reshape(a2.size, 1)).ravel()  # treat a2 like a column vector
    if a1.ndim==1 and a2.ndim==2: return dot(a1._add_axes(2), a2)[0]  # treat a1 like a row vector
    if a1.shape[-1] != a2.shape[-2]: raise ValueError('arrays not aligned for dot product. a dot product was requested of arrays with shapes %s and %s' % (a1.shape, a2.shape))
    if a1.ndim==a2.ndim==2:
        retShape = (a1.shape[0], a2.shape[1])
        if a1.shape[1]==0: return zeros(retShape)  # cudamat bug workaround
        ret = empty(retShape)
        if ret.size!=0: _cudamat.dot(a2._base_as_2d(), a1._base_as_2d(), ret._base_as_2d())
        return ret
    if a1.ndim >= 2 and a2.ndim >= 2:
        # this is not necessarily fast, because if a2.ndim>=3 then it involves a transpose
        a12 = (a1.reshape_2d(-1) if a1.ndim!=2 else a1)
        a22 = (a2.transpose((a2.ndim-2,) + tuple(xrange(a2.ndim-2)) + (a2.ndim-1,)).reshape_2d(1)
               if a2.ndim!=2 else
               a2)
        retShape = _deleteT2(a1.shape, -1) + _deleteT2(a2.shape, -2)
        return dot(a12, a22).reshape(retShape)
    raise NotImplementedError('dot with arguments of shapes %s and %s' % (a1.shape, a2.shape))
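For orientation, here is a minimal usage sketch of this wrapper. It assumes gnumpy (the library this dot comes from) and cudamat are installed and that garray/as_numpy_array behave as in stock gnumpy; the values are made up for illustration.

import numpy as np
import gnumpy as gpu

a = gpu.garray(np.random.randn(3, 4))
b = gpu.garray(np.random.randn(4, 2))
c = gpu.dot(a, b)                       # 2-D x 2-D: plain matrix multiply, shape (3, 2)
v = gpu.dot(a, gpu.garray(np.ones(4)))  # 2-D x 1-D: a2 treated as a column vector, shape (3,)
s = gpu.dot(gpu.garray([1., 2.]), gpu.garray([3., 4.]))  # 1-D x 1-D: scalar inner product

# should agree with numpy up to float32 precision
assert np.allclose(c.as_numpy_array(), a.as_numpy_array().dot(b.as_numpy_array()), atol=1e-4)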
Example 2: _sig
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def _sig(self, x, u):
    """Multiply the matrix u by the covariance matrix of x. We are interested in situations where
    n_variables >> n_samples, so we do this without explicitly constructing the covariance matrix."""
    if self.gpu:
        y = cm.empty((self.n_samples, self.m))
        uc = cm.CUDAMatrix(u)
        cm.dot(x, uc.T, target=y)
        del uc
        tmp = cm.empty((self.nv, self.m))
        cm.dot(x.T, y, target=tmp)
        tmp_dot = tmp.asarray()
        del y
        del tmp
    else:
        y = x.dot(u.T)
        tmp_dot = x.T.dot(y)
    prod = (1 - self.eps**2) * tmp_dot.T / self.n_samples + self.eps**2 * u  # nv by m, <X_i Y_j> / std Y_j
    return prod
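The point of the GPU branch is the same as the CPU branch: with x of shape (n_samples, nv), computing x.T.dot(x.dot(u.T)) applies the covariance at O(n_samples * nv * m) cost without ever materializing the nv-by-nv covariance matrix. A CPU-only sketch of the identical computation (the function name is illustrative):

import numpy as np

def sig_cpu(x, u, eps):
    # x: (n_samples, nv) data; u: (m, nv) weights
    n_samples = x.shape[0]
    y = x.dot(u.T)        # (n_samples, m); never forms the (nv, nv) covariance
    tmp_dot = x.T.dot(y)  # (nv, m)
    return (1 - eps**2) * tmp_dot.T / n_samples + eps**2 * u  # (m, nv)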
Example 3: outer
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def outer(vec1, vec2): return dot(vec1.ravel()[:, newaxis], vec2.ravel()[newaxis, :])
Example 4: tensordot
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def tensordot(a, b, axes=2):
    if type(axes) in _numberTypes: return dot(a.reshape_2d(a.ndim-axes), b.reshape_2d(axes)).reshape(a.shape[:a.ndim-axes] + b.shape[axes:])
    assert len(axes)==2 and len(axes[0])==len(axes[1]), 'the axes parameter to gnumpy.tensordot looks bad'
    aRemove, bRemove = (tuple(axes[0]), tuple(axes[1]))
    return tensordot(a.transpose(filter(lambda x: x not in aRemove, tuple(xrange(a.ndim))) + aRemove),
                     b.transpose(bRemove + filter(lambda x: x not in bRemove, tuple(xrange(b.ndim)))),
                     len(aRemove))
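Note that this example is Python 2 code: it relies on xrange and on filter returning a tuple when its input is a tuple. Its semantics track numpy.tensordot; a quick NumPy check of the integer-axes case (shapes chosen for illustration):

import numpy as np

a = np.random.randn(2, 3, 4)
b = np.random.randn(3, 4, 5)
# axes=2 contracts the last two axes of a against the first two axes of b
assert np.tensordot(a, b, axes=2).shape == (2, 5)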
# ------------------------------------------------------------------------------- reductors
Example 5: _covar_mstep_diag
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def _covar_mstep_diag(gmm, X, posteriors, weighted_X_sum, inv_weights,
                      min_covar, temp_gpu_mem):
    '''Perform the covariance M-step for the diagonal case.'''
    X2 = temp_gpu_mem['temp_NxD']
    X.mult(X, target=X2)  # X2 = X*X
    # avg_X2 = np.dot(posteriors.T, X2) * inv_weights  # ([KxN]x[NxD]) * [Kx1]
    avg_X2 = temp_gpu_mem['temp_KxD']
    cm.dot(posteriors.T, X2, target=avg_X2)  # [KxN]x[NxD] -> [KxD]
    avg_X2.mult_by_col(inv_weights)
    # avg_means2 = gmm.means_ ** 2
    temp_KxD_2 = temp_gpu_mem['temp_KxD_2']
    gmm.means.mult(gmm.means, target=temp_KxD_2)
    # avg_X2 += avg_means2
    avg_X2.add(temp_KxD_2)
    # avg_X_means = gmm.means_ * weighted_X_sum * inv_weights
    # [KxD]*[KxD]*[Kx1] -> [KxD]
    gmm.means.mult(weighted_X_sum, target=temp_KxD_2)
    temp_KxD_2.mult_by_col(inv_weights)
    # avg_X2 -= 2*avg_X_means
    avg_X2.add_mult(temp_KxD_2, alpha=-2.0)
    # avg_X2 += min_covar
    avg_X2.add(min_covar)
    # return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
    # [KxD] - 2*[KxD] + [KxD] + [1]
    return avg_X2
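The commented lines record the NumPy formula that the cudamat calls reproduce in place. Putting them together gives a compact CPU reference, handy for checking the GPU version (the function name is illustrative):

import numpy as np

def covar_mstep_diag_cpu(X, posteriors, means, weighted_X_sum, inv_weights, min_covar):
    # X: (N, D); posteriors: (N, K); means, weighted_X_sum: (K, D); inv_weights: (K, 1)
    avg_X2 = np.dot(posteriors.T, X * X) * inv_weights  # E[x^2] per component
    avg_means2 = means ** 2                             # mu^2
    avg_X_means = means * weighted_X_sum * inv_weights  # mu * E[x]
    # Var[x] = E[x^2] - 2*mu*E[x] + mu^2, floored by min_covar
    return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar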
Example 6: _norm
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def _norm(self, x, ws):
    """Calculate uj so that we can normalize it."""
    if self.gpu:
        y = cm.empty((self.n_samples, self.m))
        wc = cm.CUDAMatrix(ws)
        cm.dot(x, wc.T, target=y)  # + noise, but it is included analytically
        y_local = y.asarray()
        del y
        del wc
        tmp_sum = np.einsum('lj,lj->j', y_local, y_local)  # TODO: Should be able to do on gpu...
    else:
        y = x.dot(ws.T)  # + noise / std Y_j^2, but it is included analytically
        tmp_sum = np.einsum('lj,lj->j', y, y)
    return np.sqrt((1 - self.eps**2) * tmp_sum / self.n_samples + self.eps**2 * np.sum(ws**2, axis=1))
Example 7: _calculate_moments_syn
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def _calculate_moments_syn(self, x, ws, quick=False):
    """Calculate moments based on the weights and samples. We also calculate and save MI, TC, additivity, and
    the value of the objective. Note it is assumed that <X_i^2> = 1!"""
    m = {}  # Dictionary of moments
    if self.gpu:
        y = cm.empty((self.n_samples, self.m))
        wc = cm.CUDAMatrix(ws)
        cm.dot(x, wc.T, target=y)  # + noise, but it is included analytically
        del wc
    else:
        y = x.dot(ws.T)  # + noise, but it is included analytically
    if self.gpu:
        tmp_dot = cm.empty((self.nv, self.m))
        cm.dot(x.T, y, target=tmp_dot)
        m["X_i Y_j"] = tmp_dot.asarray() / self.n_samples  # nv by m, <X_i Y_j>
        del y
        del tmp_dot
    else:
        m["X_i Y_j"] = x.T.dot(y) / self.n_samples
    m["cy"] = ws.dot(m["X_i Y_j"]) + self.yscale ** 2 * np.eye(self.m)  # cov(y.T), m by m
    m["Y_j^2"] = np.diag(m["cy"]).copy()
    m["ry"] = m["cy"] / (np.sqrt(m["Y_j^2"]) * np.sqrt(m["Y_j^2"][:, np.newaxis]))
    m["rho"] = (m["X_i Y_j"] / np.sqrt(m["Y_j^2"])).T
    m["invrho"] = 1. / (1. - m["rho"]**2)
    m["rhoinvrho"] = m["rho"] * m["invrho"]
    m["Qij"] = np.dot(m['ry'], m["rhoinvrho"])
    m["Qi"] = np.einsum('ki,ki->i', m["rhoinvrho"], m["Qij"])
    m["Si"] = np.sum(m["rho"] * m["rhoinvrho"], axis=0)
    m["MI"] = - 0.5 * np.log1p(-m["rho"]**2)
    m["X_i Z_j"] = np.linalg.solve(m["cy"], m["X_i Y_j"].T).T
    m["X_i^2 | Y"] = (1. - np.einsum('ij,ij->i', m["X_i Z_j"], m["X_i Y_j"])).clip(1e-6)
    mi_yj_x = 0.5 * np.log(m["Y_j^2"]) - 0.5 * np.log(self.yscale ** 2)
    mi_xi_y = - 0.5 * np.log(m["X_i^2 | Y"])
    m["TCs"] = m["MI"].sum(axis=1) - mi_yj_x
    m["additivity"] = (m["MI"].sum(axis=0) - mi_xi_y).sum()
    m["TC"] = np.sum(mi_xi_y) - np.sum(mi_yj_x)
    return m
Example 8: transform
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def transform(self, x, details=False):
    """Transform an array of inputs, x, into an array of k latent factors, Y.
    Optionally, you can get the remainder information and/or stop at a specified level."""
    x = self.preprocess(x)
    ns, nv = x.shape
    assert self.nv == nv, "Incorrect number of variables in input, %d instead of %d" % (nv, self.nv)
    if details:
        moments = self._calculate_moments(x, self.ws)
        return x.dot(self.ws.T), moments
    return x.dot(self.ws.T)
Example 9: predict
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def predict(self, y):
    return self.invert(np.dot(self.moments["X_i Z_j"], y.T).T)
Example 10: get_covariance
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def get_covariance(self):
    # This uses the E(Xi|Y) formula for non-synergistic relationships
    m = self.moments
    if self.discourage_overlap:
        z = m['rhoinvrho'] / (1 + m['Si'])
        cov = np.dot(z.T, z)
        cov /= (1. - self.eps**2)
        np.fill_diagonal(cov, 1)
        return self.theta[1][:, np.newaxis] * self.theta[1] * cov
    else:
        cov = np.einsum('ij,kj->ik', m["X_i Z_j"], m["X_i Y_j"])
        np.fill_diagonal(cov, 1)
        return self.theta[1][:, np.newaxis] * self.theta[1] * cov
Example 11: pinvh
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def pinvh(a, cond=None, rcond=None, lower=True):
    '''Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.

    Calculate a generalized inverse of a symmetric matrix using its
    eigenvalue decomposition and including all 'large' eigenvalues.

    Parameters
    ----------
    a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
    cond, rcond : float or None
        Cutoff for 'small' eigenvalues.
        Eigenvalues smaller than rcond * largest_eigenvalue are considered
        zero.
        If None or -1, suitable machine precision is used.
    lower : boolean
        Whether the pertinent array data is taken from the lower or upper
        triangle of a. (Default: lower)

    Returns
    -------
    B : array, shape (N, N)

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge

    Examples
    --------
    >>> import numpy as np
    >>> a = np.random.randn(9, 6)
    >>> a = np.dot(a, a.T)
    >>> B = pinvh(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    '''
    a = np.asarray_chkfinite(a)
    s, u = linalg.eigh(a, lower=lower)
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        t = u.dtype.char.lower()
        factor = {'f': 1E3, 'd': 1E6}
        cond = factor[t] * np.finfo(t).eps
    # unlike the svd case, eigh can lead to negative eigenvalues
    above_cutoff = (abs(s) > cond * np.max(abs(s)))
    psigma_diag = np.zeros_like(s)
    psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
    return np.dot(u * psigma_diag, np.conjugate(u).T)
Example 12: _do_mstep
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def _do_mstep(
        self, X, posteriors,
        update_params, min_covar=0,
        temp_gpu_mem=None):
    '''Perform the M-step of the EM algorithm and return the class weights.
    '''
    N = X.shape[0]
    K, D = self.n_components, self.n_dimensions
    X = return_CUDAMatrix(X)
    if temp_gpu_mem is None:
        temp_gpu_mem = TempGPUMem()
    temp_gpu_mem.alloc(N, K, D)
    weights = temp_gpu_mem['temp_Kx1']
    weights.reshape((1, K))
    posteriors.sum(axis=0, target=weights)
    weights.reshape((K, 1))
    weighted_X_sum = temp_gpu_mem['weighted_X_sum_KxD']
    cm.dot(posteriors.T, X, target=weighted_X_sum)  # [KxN]x[NxD] -> [KxD]
    # inv_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
    inv_weights = temp_gpu_mem['inv_weights_Kx1']
    denom = temp_gpu_mem['temp_Kx1_2']
    weights.add(10*EPS, target=denom)
    inv_weights.assign(1.0)
    inv_weights.divide(denom)
    if 'w' in update_params:
        # self.weights = (weights / (weights.sum() + 10 * EPS) + EPS)
        weights.div_by_row(
            weights.sum(axis=0).add(10*EPS),
            target=self.weights)
        weights.add(EPS)
    if 'm' in update_params:
        # self.means = weighted_X_sum * inv_weights
        # [KxD].*[Kx1]
        weighted_X_sum.mult_by_col(inv_weights, target=self.means)
    if 'c' in update_params:
        covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
        temp_result = covar_mstep_func(
            self, X, posteriors, weighted_X_sum, inv_weights,
            min_covar, temp_gpu_mem)
        self.covars.assign(temp_result)
    return weights
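Again the commented numpy lines give the intent of the update. A minimal CPU sketch of just the class-weight computation (the function name is illustrative, and EPS here is numpy's float epsilon standing in for the module-level EPS constant):

import numpy as np
EPS = np.finfo(float).eps  # stand-in for the module-level EPS constant

def mstep_weights_cpu(posteriors):
    # posteriors: (N, K) responsibilities; returns normalized class weights
    weights = posteriors.sum(axis=0)  # (K,)
    return weights / (weights.sum() + 10 * EPS) + EPS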
Example 13: _log_multivariate_normal_density_diag
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def _log_multivariate_normal_density_diag(X, means, covars, temp_gpu_mem):
    '''Compute Gaussian log-density at X for a diagonal model'''
    N, D = X.shape
    K = means.shape[0]
    lpr_NxK = temp_gpu_mem['posteriors_NxK']
    inv_covars_KxD = temp_gpu_mem['temp_KxD_2']
    temp_NxD = temp_gpu_mem['temp_NxD']
    temp_KxD = temp_gpu_mem['temp_KxD']
    temp_Kx1 = temp_gpu_mem['temp_Kx1']
    # compute inverse variances
    inv_covars_KxD.assign(1.0)
    inv_covars_KxD.divide(covars)
    # lpr = D * np.log(2*np.pi)
    lpr_NxK.assign(D * np.log(2*np.pi))
    # temp_Kx1 = np.sum(np.log(covars), 1)
    cm.log(covars, target=temp_KxD)
    temp_KxD.sum(axis=1, target=temp_Kx1)
    # temp_Kx1 += np.sum((means**2)/covars, 1)
    means.mult(means, target=temp_KxD)
    temp_KxD.mult(inv_covars_KxD)
    temp_Kx1.add_sums(temp_KxD, axis=1)
    # lpr += temp_Kx1
    temp_Kx1.reshape((1, K))  # transpose
    lpr_NxK.add_row_vec(temp_Kx1)
    temp_Kx1.reshape((K, 1))  # return to original shape
    # lpr += -2*np.dot(X, (means / covars).T)
    temp_KxD.assign(means)
    temp_KxD.mult(inv_covars_KxD)
    lpr_NxK.add_dot(X, temp_KxD.T, mult=-2.)
    # lpr += np.dot(X**2, (1.0 / covars).T)
    temp_NxD.assign(X)
    temp_NxD.mult(temp_NxD)
    lpr_NxK.add_dot(temp_NxD, inv_covars_KxD.T)
    # lpr *= -0.5
    lpr_NxK.mult(-0.5)
    # lpr_NxK still in use
    # lpr = -0.5 * (D * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
    #               + np.sum((means ** 2) / covars, 1)
    #               - 2 * np.dot(X, (means / covars).T)
    #               + np.dot(X ** 2, (1.0 / covars).T))
    return lpr_NxK
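The closing comment is the whole function in one NumPy expression; as a sanity check, it can be run directly against the GPU result (the function name is illustrative):

import numpy as np

def log_density_diag_cpu(X, means, covars):
    # X: (N, D); means, covars: (K, D); returns (N, K) per-component log-densities
    N, D = X.shape
    return -0.5 * (D * np.log(2 * np.pi)
                   + np.sum(np.log(covars), 1)
                   + np.sum((means ** 2) / covars, 1)
                   - 2 * np.dot(X, (means / covars).T)
                   + np.dot(X ** 2, (1.0 / covars).T))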
Example 14: costAndGrad
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def costAndGrad(self, data, labels):
    batchSize = data.shape[1]
    self.setViews(batchSize)
    # forward prop
    self.hActs[0].assign(cm.CUDAMatrix(data))
    i = 1
    for w, b in self.stack:
        cm.dot(w, self.hActs[i-1], self.hActs[i])
        self.hActs[i].add_col_vec(b)
        if i <= len(self.layerSizes):
            # hard relu
            self.hActs[i].maximum(0.0)
        i += 1
    # Subtract max activation
    self.hActs[-1].max(axis=0, target=self.rowVec)
    self.hActs[-1].add_row_mult(self.rowVec, -1.0, target=self.probs)
    # Softmax
    cm.exp(self.probs)
    self.probs.sum(axis=0, target=self.rowVec)
    cm.pow(self.rowVec, -1.0, target=self.rowVec)
    self.probs.mult_by_row(self.rowVec)
    self.probs.copy_to_host()
    cost, deltas, skip = ctc.ctc_loss(self.probs.numpy_array.astype(np.float64),
                                      labels, blank=0)
    self.deltasC.assign(cm.CUDAMatrix(deltas))
    if skip:
        return cost, self.grad, skip
    # back prop
    nl = len(self.layerSizes)
    i = nl
    deltasIn, deltasOut = self.deltasC, self.deltasOut
    for w, b in reversed(self.stack):
        # compute gradient
        cm.dot(deltasIn, self.hActs[i].T, target=self.grad[i][0])
        deltasIn.sum(axis=1, target=self.grad[i][1])
        # compute next layer deltas
        if i > 0:
            self.hActs[i].sign(target=self.tmpGrad)
            cm.dot(w.T, deltasIn, target=deltasOut)
            deltasOut.mult(self.tmpGrad)
        if i == nl:
            deltasIn = self.deltasIn
        deltasIn, deltasOut = deltasOut, deltasIn
        i -= 1
    return cost, self.grad, skip
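The max-subtraction before exponentiating is the standard numerically stable softmax. A NumPy sketch of just that step, column-wise to match the code's (units, batch) layout (the function name is illustrative):

import numpy as np

def stable_softmax_cols(acts):
    # acts: (n_classes, batch); subtracting the per-column max avoids overflow in exp
    shifted = acts - acts.max(axis=0, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=0, keepdims=True)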
Example 15: _calculate_moments_ns
# Required module import: import cudamat [as alias]
# Or: from cudamat import dot [as alias]
def _calculate_moments_ns(self, x, ws, quick=False):
    """Calculate moments based on the weights and samples. We also calculate and save MI, TC, additivity, and
    the value of the objective. Note it is assumed that <X_i^2> = 1!"""
    m = {}  # Dictionary of moments
    if self.gpu:
        y = cm.empty((self.n_samples, self.m))
        wc = cm.CUDAMatrix(ws)
        cm.dot(x, wc.T, target=y)  # + noise, but it is included analytically
        del wc
        tmp_sum = np.einsum('lj,lj->j', y.asarray(), y.asarray())  # TODO: Should be able to do on gpu...
    else:
        y = x.dot(ws.T)
        tmp_sum = np.einsum('lj,lj->j', y, y)
    m["uj"] = (1 - self.eps**2) * tmp_sum / self.n_samples + self.eps**2 * np.sum(ws**2, axis=1)
    if quick and np.max(m["uj"]) >= 1.:
        return False
    if self.gpu:
        tmp = cm.empty((self.nv, self.m))
        cm.dot(x.T, y, target=tmp)
        tmp_dot = tmp.asarray()
        del tmp
        del y
    else:
        tmp_dot = x.T.dot(y)
    m["rho"] = (1 - self.eps**2) * tmp_dot.T / self.n_samples + self.eps**2 * ws  # m by nv
    m["ry"] = ws.dot(m["rho"].T)  # normalized covariance of Y
    m["Y_j^2"] = self.yscale ** 2 / (1. - m["uj"])
    np.fill_diagonal(m["ry"], 1)
    m["invrho"] = 1. / (1. - m["rho"]**2)
    m["rhoinvrho"] = m["rho"] * m["invrho"]
    m["Qij"] = np.dot(m['ry'], m["rhoinvrho"])
    # m["Qi"] = np.einsum('ki,ki->i', m["rhoinvrho"], m["Qij"])
    m["Si"] = np.sum(m["rho"] * m["rhoinvrho"], axis=0)
    m["Qi-Si^2"] = np.einsum('ki,ki->i', m["rhoinvrho"], m["Qij"] - m["Si"] * m["rho"])
    # This is the objective, a lower bound for TC
    m["TC"] = np.sum(np.log(1 + m["Si"])) \
              - 0.5 * np.sum(np.log(1 + m["Qi-Si^2"])) \
              + 0.5 * np.sum(np.log(1 - m["uj"]))
    # - m["Si"]**2 + m["Qi"]))
    if not quick:
        m["MI"] = - 0.5 * np.log1p(-m["rho"]**2)
        m["X_i Y_j"] = m["rho"].T * np.sqrt(m["Y_j^2"])
        m["X_i Z_j"] = np.linalg.solve(m["ry"], m["rho"]).T
        m["X_i^2 | Y"] = (1. - np.einsum('ij,ji->i', m["X_i Z_j"], m["rho"])).clip(1e-6)
        m['I(Y_j ; X)'] = 0.5 * np.log(m["Y_j^2"]) - 0.5 * np.log(self.yscale ** 2)
        m['I(X_i ; Y)'] = - 0.5 * np.log(m["X_i^2 | Y"])
        m["TCs"] = m["MI"].sum(axis=1) - m['I(Y_j ; X)']
        m["TC_no_overlap"] = m["MI"].max(axis=0).sum() - m['I(Y_j ; X)'].sum()  # A direct calculation of TC where each variable is in exactly one group.
        m["TC_direct"] = m['I(X_i ; Y)'].sum() - m['I(Y_j ; X)']  # A direct calculation of TC. Should be an upper bound for "TC" and "TC_no_overlap"
        m["additivity"] = (m["MI"].sum(axis=0) - m['I(X_i ; Y)']).sum()
    return m