This page collects typical usage examples of the Python function tensorflow.python.ops.math_ops.batch_matmul. If you are wondering what batch_matmul does or how to use it, the curated examples below should help.
Shown below are 15 code examples of the batch_matmul function, ordered by popularity by default.
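For orientation before the examples: batch_matmul multiplies the trailing two dimensions of its inputs once per batch entry, and adj_x/adj_y request the (conjugate) transpose of the respective operand; in later TensorFlow releases the op was folded into tf.matmul. A minimal NumPy sketch of these semantics (shapes are made up):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 3, 4))  # batch of two 3x4 matrices
y = rng.standard_normal((2, 4, 5))  # batch of two 4x5 matrices

# batch_matmul(x, y): one matrix product per batch entry.
out = np.einsum('bij,bjk->bik', x, y)       # shape (2, 3, 5)

# batch_matmul(xt, y, adj_x=True): transpose the trailing two dims of the
# left operand before multiplying.
xt = rng.standard_normal((2, 4, 3))
out_adj = np.einsum('bji,bjk->bik', xt, y)  # also shape (2, 3, 5)

assert out.shape == (2, 3, 5) and out_adj.shape == (2, 3, 5)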
Example 1: _batch_sqrt_solve
def _batch_sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  #   (M + VDV^T)^{-1}
  #   = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  #   = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=True)

  # The operators will use batch/singleton mode automatically.  We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.batch_matmul(v, minv_rhs, adj_x=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.batch_cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.batch_matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)

  # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  return minv_rhs - minv_v_cinv_vt_minv_rhs
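Note: a quick NumPy check of the Woodbury identity quoted in the comments, with small made-up matrices (M positive definite, D a positive diagonal), makes the capacitance-matrix step concrete:

import numpy as np

rng = np.random.default_rng(0)
k, r = 5, 2
M = 2.0 * np.eye(k)                    # a simple positive-definite M
V = rng.standard_normal((k, r))
D = np.diag(rng.uniform(1.0, 2.0, r))  # positive-definite diagonal D
rhs = rng.standard_normal((k, 1))

C = np.linalg.inv(D) + V.T @ np.linalg.inv(M) @ V  # capacitance matrix
minv_rhs = np.linalg.solve(M, rhs)
woodbury = minv_rhs - np.linalg.solve(M, V @ np.linalg.solve(C, V.T @ minv_rhs))

assert np.allclose(woodbury, np.linalg.solve(M + V @ D @ V.T, rhs))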
Example 2: _BatchMatrixInverseGrad
def _BatchMatrixInverseGrad(op, grad):
  """Gradient for BatchMatrixInverse."""
  ainv = op.outputs[0]
  return -math_ops.batch_matmul(
      ainv,
      math_ops.batch_matmul(grad, ainv, adj_y=True),
      adj_x=True)
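This is the matrix-calculus identity d(A^{-1}) = -A^{-1} dA A^{-1}, transposed for backprop: for an upstream gradient G = dL/dA^{-1}, the gradient with respect to A is -A^{-T} G A^{-T}. A NumPy finite-difference check with made-up inputs:

import numpy as np

rng = np.random.default_rng(1)
n = 4
A = rng.standard_normal((n, n)) + n * np.eye(n)  # well-conditioned A
G = rng.standard_normal((n, n))                  # upstream gradient dL/dA^{-1}

Ainv = np.linalg.inv(A)
grad_a = -Ainv.T @ G @ Ainv.T  # the formula returned above

# Central finite difference of L = sum(G * inv(A)) in one coordinate.
eps = 1e-6
E = np.zeros((n, n))
E[1, 2] = eps
fd = (np.sum(G * np.linalg.inv(A + E)) -
      np.sum(G * np.linalg.inv(A - E))) / (2 * eps)
assert np.isclose(fd, grad_a[1, 2], rtol=1e-4, atol=1e-6)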
Example 3: _overdetermined
def _overdetermined(op, grad):
  """Gradients for the overdetermined case of MatrixSolveLs.

  This is the backprop for the solution to the normal equations of the first
  kind:
     X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
  which solves the least squares problem
     min ||A * X - B||_F^2 + lambda ||X||_F^2.
  """
  a = op.inputs[0]
  b = op.inputs[1]
  l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
  x = op.outputs[0]
  a_shape = array_ops.shape(a)
  batch_shape = a_shape[:-2]
  n = a_shape[-1]

  identity = linalg_ops.eye(n, batch_shape=batch_shape, dtype=a.dtype)
  gramian = math_ops.batch_matmul(
      a, a, adj_x=True) + l2_regularizer * identity
  chol = linalg_ops.cholesky(gramian)
  # Temporary z = (A^T * A + lambda * I)^{-1} * grad.
  z = linalg_ops.cholesky_solve(chol, grad)
  xzt = math_ops.batch_matmul(x, z, adj_y=True)
  zx_sym = xzt + array_ops.matrix_transpose(xzt)
  grad_a = -math_ops.batch_matmul(a, zx_sym) + math_ops.batch_matmul(
      b, z, adj_y=True)
  grad_b = math_ops.batch_matmul(a, z)
  return (grad_a, grad_b, None)
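For reference, the forward computation this backprop assumes, restated in NumPy: the regularized normal equations X = (A^T A + lambda I)^{-1} A^T B, solved through a Cholesky factorization (shapes and the regularizer are made up):

import numpy as np

rng = np.random.default_rng(2)
m, n, lam = 8, 3, 0.1
A = rng.standard_normal((m, n))
B = rng.standard_normal((m, 1))

gramian = A.T @ A + lam * np.eye(n)  # A^T A + lambda I
L = np.linalg.cholesky(gramian)
X = np.linalg.solve(L.T, np.linalg.solve(L, A.T @ B))  # cholesky_solve

# Identical to solving the normal equations directly.
assert np.allclose(X, np.linalg.solve(gramian, A.T @ B))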
Example 4: matmul
def matmul(self, x, name='matmul'):
  """Left (batch) matrix multiplication of `x` by this operator."""
  chol = self._chol
  with ops.name_scope(self.name):
    with ops.op_scope(self.inputs, name):
      a_times_x = math_ops.batch_matmul(chol, x, adj_x=True)
      return math_ops.batch_matmul(chol, a_times_x)
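The operator stores only the Cholesky factor chol of A = C C^T, so A x is assembled from two factor products, C (C^T x), exactly the two batch_matmul calls above. A one-matrix NumPy sketch with made-up values:

import numpy as np

rng = np.random.default_rng(3)
n = 4
C = np.tril(rng.standard_normal((n, n))) + n * np.eye(n)  # Cholesky factor
x = rng.standard_normal((n, 2))

ax = C @ (C.T @ x)  # matmul via the factor, never forming A densely

assert np.allclose(ax, (C @ C.T) @ x)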
Example 5: _BatchMatrixSolveGrad
def _BatchMatrixSolveGrad(op, grad):
  """Gradient for BatchMatrixSolve."""
  a = op.inputs[0]
  c = op.outputs[0]
  # TODO(rmlarsen): Replace the following two lines with a single call to
  # batch_matrix_solve after adding in an option to solve for A^T X = Y.
  ainv = linalg_ops.batch_matrix_inverse(a)
  grad_b = math_ops.batch_matmul(ainv, grad, adj_x=True)
  grad_a = -math_ops.batch_matmul(grad_b, c, adj_y=True)
  return (grad_a, grad_b)
Example 6: _MatrixSolveGrad
def _MatrixSolveGrad(op, grad):
  """Gradient for MatrixSolve."""
  a = op.inputs[0]
  adjoint_a = op.get_attr("adjoint")
  c = op.outputs[0]
  grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)
  if adjoint_a:
    grad_a = -math_ops.batch_matmul(c, grad_b, adj_y=True)
  else:
    grad_a = -math_ops.batch_matmul(grad_b, c, adj_y=True)
  return (grad_a, grad_b)
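The formulas follow from L = sum(G * A^{-1} B) in the non-adjoint case: grad_b = A^{-T} G (hence the solve with adjoint=not adjoint_a) and grad_a = -grad_b C^T. A NumPy finite-difference check with made-up inputs:

import numpy as np

rng = np.random.default_rng(4)
n = 3
A = rng.standard_normal((n, n)) + n * np.eye(n)
B = rng.standard_normal((n, 1))
G = rng.standard_normal((n, 1))  # upstream gradient dL/dC

C = np.linalg.solve(A, B)         # forward: C = A^{-1} B
grad_b = np.linalg.solve(A.T, G)  # matrix_solve(a, grad, adjoint=True)
grad_a = -grad_b @ C.T

# Central finite difference of L = sum(G * solve(A, B)) in one coordinate.
eps = 1e-6
E = np.zeros((n, n))
E[0, 1] = eps
fd = (np.sum(G * np.linalg.solve(A + E, B)) -
      np.sum(G * np.linalg.solve(A - E, B))) / (2 * eps)
assert np.isclose(fd, grad_a[0, 1], rtol=1e-4, atol=1e-6)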
Example 7: _test1
def _test1(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2, derived with Joan, with no adjustment
  for the subspace."""
  e = op.outputs[0]
  v = op.outputs[1]
  # dim = v.get_shape()
  with ops.control_dependencies([grad_e.op, grad_v.op]):
    if grad_v is not None:
      E = array_ops.diag(e)
      v_proj = array_ops.slice(v, [0, 0], [20, 2])
      grad_grassman = grad_v - math_ops.batch_matmul(
          math_ops.batch_matmul(v_proj, array_ops.transpose(v_proj)), grad_v)
      grad_a = (
          math_ops.batch_matmul(
              grad_grassman,
              math_ops.batch_matmul(E, array_ops.transpose(grad_v))) +
          math_ops.batch_matmul(
              grad_v,
              math_ops.batch_matmul(E, array_ops.transpose(grad_grassman))))
      return grad_a
Example 8: _batch_sqrt_matmul
def _batch_sqrt_matmul(self, x, transpose_x=False):
  v = self._v
  m = self._operator
  d = self._diag_operator
  # The operators call the appropriate matmul/batch_matmul automatically.
  # We cannot override.
  # batch_matmul is defined as:  x * y, so adj_x and adj_y are the ways to
  # transpose the left and right.
  mx = m.matmul(x, transpose_x=transpose_x)
  vt_x = math_ops.batch_matmul(v, x, adj_x=True, adj_y=transpose_x)
  d_vt_x = d.matmul(vt_x)
  v_d_vt_x = math_ops.batch_matmul(v, d_vt_x)
  return mx + v_d_vt_x
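The square root being applied is M + V D V^T, so the product is assembled from three narrow products instead of densifying the operator. A NumPy sketch with made-up shapes:

import numpy as np

rng = np.random.default_rng(5)
k, r = 5, 2
M = 2.0 * np.eye(k)
V = rng.standard_normal((k, r))
D = np.diag(rng.uniform(1.0, 2.0, r))
x = rng.standard_normal((k, 3))

mx = M @ x                      # m.matmul(x)
v_d_vt_x = V @ (D @ (V.T @ x))  # V, D, V^T applied right to left

assert np.allclose(mx + v_d_vt_x, (M + V @ D @ V.T) @ x)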
Example 9: _BatchMatrixSolveGrad
def _BatchMatrixSolveGrad(op, grad):
  """Gradient for BatchMatrixSolve."""
  a = op.inputs[0]
  c = op.outputs[0]
  grad_b = linalg_ops.batch_matrix_solve(a, grad, adjoint=True)
  grad_a = -math_ops.batch_matmul(grad_b, c, adj_y=True)
  return (grad_a, grad_b)
Example 10: variance
def variance(self, name="variance"):
  """Variance of the Wishart distribution.

  This function should not be confused with the covariance of the Wishart,
  which would be a `q x q` matrix with `q = dimension * (dimension+1) / 2`,
  its elements corresponding to some mapping from a lower-triangular matrix
  to a vector space.

  This function returns the diagonal of that covariance matrix, shaped as a
  `dimension x dimension` matrix.

  Args:
    name: The name of this op.

  Returns:
    variance: `Tensor` of dtype `self.dtype`.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=list(self.inputs.values())):
      x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
      d = array_ops.expand_dims(array_ops.batch_matrix_diag_part(x), -1)
      v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
      if self.cholesky_input_output_matrices:
        return linalg_ops.batch_cholesky(v)
      else:
        return v
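The closed form being assembled is Var(W_ij) = df * (sigma_ij^2 + sigma_ii * sigma_jj) for W ~ Wishart(df, sigma): x**2 contributes the first term and the outer product d d^T the second. A NumPy check with a made-up scale matrix:

import numpy as np

df = 7.0
sigma = np.array([[2.0, 0.5],
                  [0.5, 1.0]])  # made-up Wishart scale matrix

x = np.sqrt(df) * sigma
d = np.diag(x).reshape(-1, 1)   # diagonal of x as a column
v = x ** 2 + d @ d.T

expected = df * (sigma ** 2 + np.outer(np.diag(sigma), np.diag(sigma)))
assert np.allclose(v, expected)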
Example 11: _variance
def _variance(self):
  scale = self.alpha_sum * math_ops.sqrt(1.0 + self.alpha_sum)
  alpha = self.alpha / scale

  outer_prod = -math_ops.batch_matmul(
      array_ops.expand_dims(alpha, dim=-1),  # column
      array_ops.expand_dims(alpha, dim=-2))  # row
  return array_ops.batch_matrix_set_diag(
      outer_prod, alpha * (self.alpha_sum / scale - alpha))
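This assembles the Dirichlet covariance Cov_ij = (delta_ij * alpha_i * alpha_0 - alpha_i * alpha_j) / (alpha_0^2 * (alpha_0 + 1)), with alpha_0 = sum(alpha): batch_matmul builds the off-diagonal outer product and set_diag overwrites the diagonal. A NumPy check with made-up concentrations:

import numpy as np

alpha = np.array([2.0, 3.0, 5.0])
alpha_sum = alpha.sum()

scale = alpha_sum * np.sqrt(1.0 + alpha_sum)
a = alpha / scale
cov = -np.outer(a, a)                               # the batch_matmul step
np.fill_diagonal(cov, a * (alpha_sum / scale - a))  # the set_diag step

expected = (np.diag(alpha) * alpha_sum - np.outer(alpha, alpha)) / (
    alpha_sum ** 2 * (alpha_sum + 1.0))
assert np.allclose(cov, expected)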
Example 12: _variance
def _variance(self):
  x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
  d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
  v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
  if self.cholesky_input_output_matrices:
    return linalg_ops.cholesky(v)
  return v
Example 13: _variance
def _variance(self):
  p = self.p * array_ops.expand_dims(array_ops.ones_like(self.n), -1)
  outer_prod = math_ops.batch_matmul(
      array_ops.expand_dims(self._mean_val, -1),
      array_ops.expand_dims(p, -2))
  return array_ops.batch_matrix_set_diag(
      -outer_prod, self._mean_val - self._mean_val * p)
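Same pattern for the multinomial: the outer product supplies the off-diagonal entries -n p_i p_j and set_diag the diagonal n p_i (1 - p_i). A NumPy check with made-up parameters:

import numpy as np

n, p = 10.0, np.array([0.2, 0.3, 0.5])
mean = n * p

cov = -np.outer(mean, p)                # -outer_prod
np.fill_diagonal(cov, mean - mean * p)  # batch_matrix_set_diag

expected = n * (np.diag(p) - np.outer(p, p))
assert np.allclose(cov, expected)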
Example 14: _MatrixTriangularSolveGrad
def _MatrixTriangularSolveGrad(op, grad):
  """Gradient for MatrixTriangularSolve."""
  a = op.inputs[0]
  adjoint_a = op.get_attr("adjoint")
  lower_a = op.get_attr("lower")
  c = op.outputs[0]
  grad_b = linalg_ops.matrix_triangular_solve(
      a, grad, lower=lower_a, adjoint=not adjoint_a)
  if adjoint_a:
    grad_a = -math_ops.batch_matmul(c, grad_b, adj_y=True)
  else:
    grad_a = -math_ops.batch_matmul(grad_b, c, adj_y=True)
  if lower_a:
    grad_a = array_ops.matrix_band_part(grad_a, -1, 0)
  else:
    grad_a = array_ops.matrix_band_part(grad_a, 0, -1)
  return (grad_a, grad_b)
Example 15: _sample_n
def _sample_n(self, n, seed):
  batch_shape = self.batch_shape()
  event_shape = self.event_shape()
  batch_ndims = array_ops.shape(batch_shape)[0]

  ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
  shape = array_ops.concat(0, ((n,), batch_shape, event_shape))

  # Complexity: O(nbk^2)
  x = random_ops.random_normal(shape=shape,
                               mean=0.,
                               stddev=1.,
                               dtype=self.dtype,
                               seed=seed)

  # Complexity: O(nbk)
  # This parametrization is equivalent to Chi2, i.e.,
  # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
  g = random_ops.random_gamma(shape=(n,),
                              alpha=self._multi_gamma_sequence(
                                  0.5 * self.df, self.dimension),
                              beta=0.5,
                              dtype=self.dtype,
                              seed=distribution_util.gen_new_seed(
                                  seed, "wishart"))

  # Complexity: O(nbk^2)
  x = array_ops.matrix_band_part(x, -1, 0)  # Tri-lower.

  # Complexity: O(nbk)
  x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))

  # Make batch-op ready.
  # Complexity: O(nbk^2)
  perm = array_ops.concat(0, (math_ops.range(1, ndims), (0,)))
  x = array_ops.transpose(x, perm)
  shape = array_ops.concat(0, (batch_shape, (event_shape[0], -1)))
  x = array_ops.reshape(x, shape)

  # Complexity: O(nbM) where M is the complexity of the operator solving a
  # vector system.  E.g., for OperatorPDDiag, each matmul is O(k^2), so
  # this complexity is O(nbk^2).  For OperatorPDCholesky, each matmul is
  # O(k^3) so this step has complexity O(nbk^3).
  x = self.scale_operator_pd.sqrt_matmul(x)

  # Undo make batch-op ready.
  # Complexity: O(nbk^2)
  shape = array_ops.concat(0, (batch_shape, event_shape, (n,)))
  x = array_ops.reshape(x, shape)
  perm = array_ops.concat(0, ((ndims - 1,), math_ops.range(0, ndims - 1)))
  x = array_ops.transpose(x, perm)

  if not self.cholesky_input_output_matrices:
    # Complexity: O(nbk^3)
    x = math_ops.batch_matmul(x, x, adj_y=True)

  return x
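This is the Bartlett decomposition: a lower-triangular matrix with standard normals below the diagonal and square roots of chi-square (Gamma) draws on the diagonal, pushed through the square root of the scale operator and symmetrized by the final batch_matmul(x, x, adj_y=True). A single-matrix NumPy sketch, with a made-up scale matrix and under the assumption that _multi_gamma_sequence yields the Gamma shapes (df - i) / 2:

import numpy as np

rng = np.random.default_rng(6)
df, dim = 7.0, 3
scale = np.array([[2.0, 0.3, 0.0],
                  [0.3, 1.0, 0.2],
                  [0.0, 0.2, 1.5]])
L = np.linalg.cholesky(scale)  # square root of the scale operator

# Lower-triangular Bartlett factor: normals below the diagonal, sqrt of
# chi-square draws (Gamma with beta = 1/2, i.e. scale = 2) on the diagonal.
x = np.tril(rng.standard_normal((dim, dim)), -1)
g = rng.gamma(shape=0.5 * (df - np.arange(dim)), scale=2.0)
np.fill_diagonal(x, np.sqrt(g))

lx = L @ x          # scale_operator_pd.sqrt_matmul(x)
sample = lx @ lx.T  # batch_matmul(x, x, adj_y=True)
assert np.allclose(sample, sample.T)  # a symmetric (PSD) Wishart draw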