This article collects typical usage examples of the numpy.matmul function in Python. If you have been wondering what numpy.matmul does, how to call it, or what real-world uses look like, the curated examples below should help.
The following 15 code examples of matmul are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
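Before the examples, a quick refresher on the function itself: np.matmul multiplies 2-D arrays as matrices, treats 1-D operands as vectors, and broadcasts any leading dimensions as a batch. A minimal sketch:

import numpy as np

a = np.arange(6).reshape(2, 3)      # 2x3 matrix
b = np.arange(12).reshape(3, 4)     # 3x4 matrix
print(np.matmul(a, b).shape)        # (2, 4); same as the a @ b operator

batch = np.random.rand(5, 2, 3)     # stack of five 2x3 matrices
print(np.matmul(batch, b).shape)    # (5, 2, 4): b is broadcast across the batch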
Example 1: projectBackBFM_withExpr
def projectBackBFM_withExpr(model, features, expr_paras):
    alpha = model.shapeEV * 0
    for it in range(0, 99):
        alpha[it] = model.shapeEV[it] * features[it]
    S = np.matmul(model.shapePC, alpha)
    expr = model.expEV * 0
    for it in range(0, 29):
        expr[it] = model.expEV[it] * expr_paras[it]
    E = np.matmul(model.expPC, expr)
    ## Adding back average shape
    S = model.shapeMU + S + model.expMU + E
    numVert = S.shape[0] // 3  # integer division so reshape gets an int
    # (Texture)
    beta = model.texEV * 0
    for it in range(0, 99):
        beta[it] = model.texEV[it] * features[it + 99]
    T = np.matmul(model.texPC, beta)
    ## Adding back average texture
    T = model.texMU + T
    ## Some filtering
    T = [truncateUint8(value) for value in T]
    ## Final saving for visualization
    S = np.reshape(S, (numVert, 3))
    T = np.reshape(T, (numVert, 3))
    return S, T
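The function above follows the standard morphable-model recipe: weight each principal component by an eigenvalue-scaled coefficient, project back with np.matmul, and add the mean. A toy sketch of just that step, with made-up shapes standing in for the BFM model fields:

import numpy as np

num_vert, num_pc = 4, 3
shapePC = np.random.rand(num_vert * 3, num_pc)  # stand-in principal components
shapeEV = np.random.rand(num_pc)                # stand-in eigenvalues
shapeMU = np.random.rand(num_vert * 3)          # stand-in mean shape
coeffs = np.random.rand(num_pc)                 # plays the role of 'features'

S = shapeMU + np.matmul(shapePC, shapeEV * coeffs)
print(S.reshape(num_vert, 3).shape)             # (4, 3): one x, y, z row per vertex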
Example 2: computeJonesRes
def computeJonesRes(self):
    """Compute the Jones that results from applying the E-Jones to the
    right.
    The structure of the jonesrbasis is [timeIdx, sphIdx, skycompIdx].
    """
    idxshape = self.jonesrbasis.shape[0:-2]
    jonesrbasis = np.reshape(self.jonesrbasis, (-1, 3, 3))
    jonesrbasis_to = np.matmul(np.asarray(self.stnRot.T), jonesrbasis)
    (az_from, el_from) = crt2sph(jonesrbasis[..., 0].squeeze().T)
    theta_phi_view = (np.pi/2 - el_from.flatten(), az_from.flatten())
    ejones = self.dualPolElem.getJonesAlong(self.freqChan, theta_phi_view)
    # (theta_lcl, phi_lcl) = self.dualPolElem.getBuildCoordinates(math.pi/2 - r_sph[1], r_sph[0])
    # print(theta_lcl, phi_lcl)
    r_lcl = crt2sph(jonesrbasis_to[..., 0].squeeze().T)
    # print(np.rad2deg(r_lcl))
    jonesbasisMat = getSph2CartTransfMat(jonesrbasis_to[..., 0].squeeze())
    # paraRot = np.matmul(np.conjugate(jonesbasisMat), jonesrbasis_to)
    self.jonesbasis = np.reshape(jonesbasisMat,
                                 idxshape + jonesbasisMat.shape[1:])
    # This is the actual MEq multiplication:
    if ejones.ndim > 3:
        frqdimsz = (ejones.shape[0],)
    else:
        frqdimsz = ()
    self.jones = np.reshape(
        np.matmul(ejones, np.reshape(self.jonesr, (-1, 2, 2))),
        frqdimsz + idxshape + (2, 2))
    self.thisjones = np.reshape(ejones, frqdimsz + idxshape + (2, 2))
Example 3: predict_new
def predict_new(self, X, z):
    first_layer_output = np.zeros(self.units)
    for unit in range(self.units):
        first_layer_output[unit] = self.activation(np.matmul(
            np.transpose(X),
            z[unit*(self.ar+len(self.X_names)+1):(unit+1)*(self.ar+len(self.X_names)+1)]))
    params_used = self.units*(self.ar+len(self.X_names)+1)
    # Hidden layers
    hidden_layer_output = np.zeros((self.units, self.layers-1))
    for layer in range(1, self.layers):
        for unit in range(self.units):
            if layer == 1:
                hidden_layer_output[unit, layer-1] = self.activation(np.matmul(
                    first_layer_output,
                    z[params_used + unit*self.units + (layer-1)*self.units**2:
                      params_used + (unit+1)*self.units + (layer-1)*self.units**2]))
            else:
                hidden_layer_output[unit, layer-1] = self.activation(np.matmul(
                    hidden_layer_output[:, layer-1],
                    z[params_used + unit*self.units + (layer-1)*self.units**2:
                      params_used + (unit+1)*self.units + (layer-1)*self.units**2]))
    params_used = params_used + (self.layers-1)*self.units**2
    # Output layer
    if self.layers == 1:
        mu = np.matmul(first_layer_output, z[params_used:params_used+self.units])
    else:
        mu = np.matmul(hidden_layer_output[:, -1], z[params_used:params_used+self.units])
    return mu
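The slice arithmetic above pulls each unit's weights out of one flat parameter vector z. The same computation is easier to follow when a layer's slice is reshaped into a weight matrix so that a single np.matmul replaces the per-unit loop; a minimal sketch with illustrative shapes, not the model's actual parameter layout:

import numpy as np

n_in, units = 4, 3
z = np.random.rand(units * n_in + units)     # flat parameter vector
W = z[:units * n_in].reshape(units, n_in)    # first-layer weights, one row per unit
w_out = z[units * n_in:]                     # output-layer weights

x = np.random.rand(n_in)
hidden = np.tanh(np.matmul(W, x))            # all first-layer units in one matmul
mu = np.matmul(hidden, w_out)
print(mu)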
Example 4: _testSvdCorrectness
def _testSvdCorrectness(self, dtype, shape):
    np.random.seed(1)
    x_np = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
    m, n = shape[-2], shape[-1]
    _, s_np, _ = np.linalg.svd(x_np)
    with self.cached_session() as sess:
        x_tf = array_ops.placeholder(dtype)
        with self.test_scope():
            s, u, v = linalg_ops.svd(x_tf, full_matrices=True)
        s_val, u_val, v_val = sess.run([s, u, v], feed_dict={x_tf: x_np})
        u_diff = np.matmul(u_val, np.swapaxes(u_val, -1, -2)) - np.eye(m)
        v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
        # Check that u_val and v_val are orthogonal matrices.
        self.assertLess(np.linalg.norm(u_diff), 1e-2)
        self.assertLess(np.linalg.norm(v_diff), 1e-2)
        # Check that the singular values are correct, i.e., close to the ones
        # from numpy.linalg.svd.
        self.assertLess(np.linalg.norm(s_val - s_np), 1e-2)
        # The tolerance is set based on our tests on numpy's svd. As our tests
        # have batch dimensions and all our operations are on float32, we set
        # the tolerance a bit larger. Numpy's svd calls LAPACK's svd, which
        # operates on double precision.
        self.assertLess(
            np.linalg.norm(self._compute_usvt(s_val, u_val, v_val) - x_np), 2e-2)
        # Check behavior with compute_uv=False. We expect to still see 3
        # outputs, with a sentinel scalar 0 in the last two outputs.
        with self.test_scope():
            no_uv_s, no_uv_u, no_uv_v = gen_linalg_ops.svd(
                x_tf, full_matrices=True, compute_uv=False)
        no_uv_s_val, no_uv_u_val, no_uv_v_val = sess.run(
            [no_uv_s, no_uv_u, no_uv_v], feed_dict={x_tf: x_np})
        self.assertAllClose(no_uv_s_val, s_val, atol=1e-4, rtol=1e-4)
        self.assertEqual(no_uv_u_val, 0.0)
        self.assertEqual(no_uv_v_val, 0.0)
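The u_diff / v_diff construction above is a reusable orthogonality check: for an orthogonal U, np.matmul(U, U^T) should equal the identity, and swapaxes(-1, -2) keeps the transpose correct even on batched inputs. A standalone sketch:

import numpy as np

u, _ = np.linalg.qr(np.random.rand(5, 5))   # QR yields an orthogonal factor u
u_diff = np.matmul(u, np.swapaxes(u, -1, -2)) - np.eye(5)
print(np.linalg.norm(u_diff))               # ~0, up to floating-point error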
Example 5: forwardPropogation
def forwardPropogation(W, B, inputDataVector):
    A = []
    H = []
    A.append(np.add(B[0], np.matmul(W[0], inputDataVector)))
    if activation == "sigmoid":
        H.append(sigmoidFunctionToVector(A[0]))
    else:
        H.append(tanhFunctionToVector(A[0]))
    for k in range(1, num_hidden):
        A.append(np.add(B[k], np.matmul(W[k], H[k-1])))
        if activation == "sigmoid":
            H.append(sigmoidFunctionToVector(A[k]))
        else:
            H.append(tanhFunctionToVector(A[k]))
    A.append(np.add(B[-1], np.matmul(W[-1], H[-1])))
    y_hat = softmax(A[-1])
    return A, H, y_hat
Example 6: BackpropXOR
def BackpropXOR(W1, W2, X, D):
    alpha = 0.9
    N = 4
    for k in range(N):
        x = X[k, :].T
        d = D[k]
        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v = np.matmul(W2, y1)
        y = Sigmoid(v)
        e = d - y
        delta = y*(1-y) * e
        e1 = np.matmul(W2.T, delta)
        delta1 = y1*(1-y1) * e1
        dW1 = (alpha*delta1).reshape(4, 1) * x.reshape(1, 3)
        W1 = W1 + dW1
        dW2 = alpha * delta * y1
        W2 = W2 + dW2
    return W1, W2
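A hedged usage sketch for the function above, spelling out the Sigmoid helper and the XOR training data it assumes; the weight shapes follow from the reshape(4, 1) and reshape(1, 3) calls (four hidden units, three inputs including a bias column):

import numpy as np

def Sigmoid(x):
    return 1 / (1 + np.exp(-x))

X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])  # inputs + bias column
D = np.array([0, 1, 1, 0])                                  # XOR targets

W1 = 2 * np.random.rand(4, 3) - 1   # hidden layer: 4 units x 3 inputs
W2 = 2 * np.random.rand(1, 4) - 1   # output layer
for _ in range(10000):              # repeated epochs drive the error down
    W1, W2 = BackpropXOR(W1, W2, X, D)
print(Sigmoid(np.matmul(W2, Sigmoid(np.matmul(W1, X.T)))))  # approaches [0, 1, 1, 0]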
Example 7: BackPropMmt
def BackPropMmt(W1, W2, X, D):
    alpha = 0.9
    beta = 0.9
    mmt1 = np.zeros_like(W1)
    mmt2 = np.zeros_like(W2)
    N = 4
    for k in range(N):
        x = X[k, :].T
        d = D[k]
        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v = np.matmul(W2, y1)
        y = Sigmoid(v)
        e = d - y
        delta = y*(1-y) * e
        e1 = np.matmul(W2.T, delta)
        delta1 = y1*(1-y1) * e1
        dW1 = (alpha*delta1).reshape(4, 1) * x.reshape(1, 3)
        mmt1 = dW1 + beta*mmt1
        W1 = W1 + mmt1
        dW2 = alpha * delta * y1
        mmt2 = dW2 + beta*mmt2
        W2 = W2 + mmt2
    return W1, W2
Example 8: geometric_distort
def geometric_distort(image0):
    assert image0.shape[0] == image0.shape[1], 'need a square on input'
    assert is_bgra(image0), image0.shape
    # warp: shear, rotate, shear again
    shear1 = exp((np.random.rand() - 0.5) * COEF_SHEAR)
    rot = np.random.randn() * COEF_ROT
    shear2 = exp((np.random.rand() - 0.5) * COEF_SHEAR)
    Shear1 = np.asarray([[shear1, 0], [0, 1.0/shear1]])
    Rot = np.asarray([[cos(rot), sin(rot)], [-sin(rot), cos(rot)]])
    Shear2 = np.asarray([[shear2, 0], [0, 1.0/shear2]])
    # print(shear1, rot, shear2)
    M = np.matmul(np.matmul(Shear2, Rot), Shear1)
    image = warp_patch(image0, M, 2)
    # crop to roi = (y1, x1, y2, x2)
    nnz = np.nonzero(image[:, :, 3])
    roi = (min(nnz[0].tolist()), min(nnz[1].tolist()),
           max(nnz[0].tolist()), max(nnz[1].tolist()))
    # print(roi)
    image = image[roi[0]:roi[2], roi[1]:roi[3], :]
    return image
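One detail worth noting in M = np.matmul(np.matmul(Shear2, Rot), Shear1): matrix products apply right to left, so a column vector is sheared by Shear1 first, then rotated, then sheared by Shear2. A quick check of that ordering:

import numpy as np

Shear = np.array([[2.0, 0.0], [0.0, 0.5]])
Rot = np.array([[0.0, 1.0], [-1.0, 0.0]])   # 90-degree rotation
M = np.matmul(Rot, Shear)
v = np.array([1.0, 0.0])
print(np.matmul(M, v))                      # [ 0. -2.]
print(np.matmul(Rot, np.matmul(Shear, v)))  # same: Shear applied first, then Rot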
Example 9: rotate_smooth
def rotate_smooth(self, current_up, current_angvel, target_rot, speed=0.01):
    for i in range(len(target_rot)):
        if target_rot[i] > 360:
            target_rot[i] -= 360
        if target_rot[i] < 0:
            target_rot[i] += 360
    # direction = speed * (np.array(target_rot) - np.array(current_rot))
    target_rot = np.deg2rad(np.array(target_rot))
    # x axis rotation
    th = target_rot[0]
    rx = np.array([[1, 0, 0],
                   [0, np.cos(th), np.sin(th)],
                   [0, -np.sin(th), np.cos(th)]])
    # y axis rotation
    th = target_rot[1]
    ry = np.array([[np.cos(th), 0, -np.sin(th)],
                   [0, 1, 0],
                   [np.sin(th), 0, np.cos(th)]])
    # z axis rotation
    th = target_rot[2]
    rz = np.array([[np.cos(th), np.sin(th), 0],
                   [-np.sin(th), np.cos(th), 0],
                   [0, 0, 1]])
    target_axis = np.matmul(np.matmul(np.matmul(rx, ry), rz), current_up)
    # z rotation alone does not work with [0, 0, 1]; would have to rotate
    # around another axis first:
    # if (target_axis == np.array([0, 0, 1])).all():
    #     current_up = [0, 1, 0]
    #     target_axis = np.matmul(np.matmul(np.matmul(rx, ry), rz), current_up)
    return target_axis  # self.stabilize(current_up, current_angvel, target_axis)
Example 10: locallogisticHessian
def locallogisticHessian(self, theta, weights, reg_param):
    """
    Hessian of the regularized local logistic regression L2 loss.
    Args:
        theta (np.array): Current lwlr parameters of shape
            [1, n_features]
        weights (np.array): Training set weights of shape
            [n_samples, 1]
        reg_param (float): L2 regularization weight. If 0, no
            regularization is used.
    Returns:
        hessian (np.ndarray): Hessian of shape [n_features, n_features]
    """
    # Add bias to X
    X = np.insert(self.X, 0, 1, axis=1)
    D = []
    for row in range(np.shape(X)[0]):
        p = self.logistic_function(np.dot(X[row, :], np.transpose(theta)))
        D.append(weights[row] * p * (1 - p))
    D = np.diag(D)
    hessian = (np.matmul(np.matmul(X.T, D), X) -
               np.identity(np.shape(X)[1]) * reg_param)
    return hessian
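Because D above is diagonal, the X.T · D · X product can also be formed without materializing the full n_samples x n_samples matrix, which matters for large training sets. A small equivalence sketch:

import numpy as np

X = np.random.rand(6, 3)
d = np.random.rand(6)                               # per-sample weights
full = np.matmul(np.matmul(X.T, np.diag(d)), X)     # explicit diagonal matrix
compact = np.matmul(X.T * d, X)                     # broadcast instead; O(n) memory
print(np.allclose(full, compact))                   # True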
Example 11: MRlogL_sandwichCov
def MRlogL_sandwichCov(dt, Ic, Is):
    """
    Estimates the asymptotic covariance matrix with the sandwich method,
    evaluated at the maximum likelihood estimates of Ic, Is.
    It's Cov_hessian * Cov_OPG^-1 * Cov_hessian.
    INPUTS:
        dt: list of inter-arrival times [seconds]
        Ic: The maximum likelihood estimate of Ic [1/second]
        Is: The maximum likelihood estimate of Is [1/second]
    OUTPUTS:
        covariance matrix for the MLE Ic, Is from the sandwich method
        [[cov(Ic,Ic), cov(Ic,Is)], [cov(Is,Ic), cov(Is,Is)]]
    """
    h_cov = MRlogL_hessianCov(dt, Ic, Is)
    grad_Ic = -1./(1./dt + Is) + 1./(Ic + Is + dt*Is**2.)
    grad_Is = dt**2.*Ic/(1. + dt*Is)**2. - 3.*dt/(1. + dt*Is) + (1. + 2.*dt*Is)/(Ic + Is + dt*Is**2.)
    # n = 1.0*len(dt)
    grad_Ic2 = np.sum(grad_Ic**2.)
    grad_Is2 = np.sum(grad_Is**2.)
    grad_IcIs = np.sum(grad_Ic*grad_Is)
    opg_cov_inv = np.asarray([[grad_Ic2, grad_IcIs], [grad_IcIs, grad_Is2]])
    return np.matmul(np.matmul(h_cov, opg_cov_inv), h_cov)
Example 12: _mel_to_linear_matrix
def _mel_to_linear_matrix(self):
    """Get the inverse mel transformation matrix."""
    m = self._linear_to_mel_matrix()
    m_t = np.transpose(m)
    p = np.matmul(m, m_t)
    d = [1.0 / x if np.abs(x) > 1.0e-8 else x for x in np.sum(p, axis=0)]
    return np.matmul(m_t, np.diag(d))
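The column-sum normalization makes m_t · diag(d) an approximate right inverse of the mel matrix: each column of np.matmul(m, inverse) sums to one, which is the property worth sanity-checking. A sketch with a random nonnegative stand-in for the filterbank:

import numpy as np

m = np.random.rand(8, 64)                   # stand-in filterbank (n_mel x n_linear)
m_t = np.transpose(m)
p = np.matmul(m, m_t)
d = [1.0 / x if np.abs(x) > 1.0e-8 else x for x in np.sum(p, axis=0)]
m_inv = np.matmul(m_t, np.diag(d))
print(np.allclose(np.sum(np.matmul(m, m_inv), axis=0), 1.0))  # columns sum to 1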
Example 13: MultiClass
def MultiClass(W1, W2, X, D):
    alpha = 0.9
    N = 5
    for k in range(N):
        x = np.reshape(X[:, :, k], (25, 1))
        d = D[k, :].reshape(-1, 1)  # column vector, so e below keeps y's shape
        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v = np.matmul(W2, y1)
        y = Softmax(v)
        e = d - y
        delta = e  # softmax + cross-entropy: the output delta is just the error
        e1 = np.matmul(W2.T, delta)
        delta1 = y1*(1-y1) * e1
        dW1 = alpha * delta1 * x.T
        W1 = W1 + dW1
        dW2 = alpha * delta * y1.T
        W2 = W2 + dW2
    return W1, W2
Example 14: update_data
def update_data(t):
    """
    Is run each step.
    Calculates the seedbank size and plant population in the next step by
    multiplying M, the transition matrix, by X, the data matrix.
    """
    global M
    global D
    global M_original
    global D_original
    if STEP_OUTPUT:
        print("[t: {}] Updating data...".format(t))
    # Manual changes in the transition matrix and dispersion matrix
    if t == 30:
        # Initial inundation of right side. Good graphs with N = 50, T = 75.
        for cell_i in range(N-26, N):
            M[cell_i, 0] = [ss*(1 - g*0.001), 0.0]
            M[cell_i, 1] = [g*0.001, l*0.001]
    # Migrate seeds produced
    X[t + 1] = np.transpose([np.matmul(M[c], X[t, :, c]) for c in range(0, int(N))]) + \
        np.matmul(e * np.transpose([[X[t, 1, c], 0]
                                    for c in range(0, int(N))]), D)
    if STEP_OUTPUT:
        print(X[t])
    if t == T - 2:
        print("Data Calculation finished")
Example 15: backward
def backward(self, y, all_x):
    """backward
    :param y: the label, the actual class of the samples, in one-hot format
    :param all_x: input data and activation from every layer
    """
    # [TODO 1.5] Compute delta factor from the output
    delta = all_x[-1] - y
    delta /= y.shape[0]
    # print('last delta shape = ', delta.shape)
    # [TODO 1.5] Compute gradient of the loss function with respect to w of
    # the softmax layer, using delta from the output
    grad_last = np.matmul(np.transpose(all_x[-2]), delta)
    grad_list = []
    grad_list.append(grad_last)
    for i in range(len(self.layers) - 1)[::-1]:
        prev_layer = self.layers[i+1]
        layer = self.layers[i]
        x = all_x[i]
        # [TODO 1.5] Compute delta_prev factor for the previous layer
        # (in the backpropagation direction)
        # print('last layer shape = ', prev_layer.w.shape)
        delta_prev = np.matmul(delta, np.transpose(prev_layer.w))
        # Use delta_prev to compute the delta factor for the next layer
        # (in the backpropagation direction)
        grad_w, delta = layer.backward(x, delta_prev)
        grad_list.append(grad_w.copy())
    grad_list = grad_list[::-1]
    return grad_list
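The two matmul calls above encode the usual backprop shape contract: with activations x of shape (batch, n_in), error delta of shape (batch, n_out), and weights w of shape (n_in, n_out), x.T · delta yields the weight gradient and delta · w.T pushes the error back one layer. A shape-only sketch:

import numpy as np

batch, n_in, n_out = 32, 10, 5
x = np.random.rand(batch, n_in)
delta = np.random.rand(batch, n_out)
w = np.random.rand(n_in, n_out)

grad_w = np.matmul(np.transpose(x), delta)      # (n_in, n_out): matches w
delta_prev = np.matmul(delta, np.transpose(w))  # (batch, n_in): error one layer back
print(grad_w.shape, delta_prev.shape)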