This article collects typical usage examples of the Python method tensorflow.python.ops.array_ops.matrix_diag. If you have been wondering what array_ops.matrix_diag does and how to use it, the hand-picked code examples below should help; you can also explore the other members of the tensorflow.python.ops.array_ops module.
Below are 12 code examples of array_ops.matrix_diag, ordered by popularity by default.
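Before the examples, a quick orientation (a minimal sketch of my own, not from the original page): matrix_diag takes the last axis of its input and places it on the main diagonal of a (batched) square matrix.

import tensorflow as tf
from tensorflow.python.ops import array_ops

# Turn a vector into a diagonal matrix; a batch of vectors would
# likewise become a batch of diagonal matrices.
vec = tf.constant([1.0, 2.0, 3.0])
mat = array_ops.matrix_diag(vec)
# mat evaluates to:
# [[1., 0., 0.],
#  [0., 2., 0.],
#  [0., 0., 3.]]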
Example 1: _MatrixDiagPartGrad
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _MatrixDiagPartGrad(op, grad):
  """Gradient for MatrixDiagPart: scatter grad back onto the diagonal."""
  matrix_shape = op.inputs[0].get_shape()[-2:]
  if matrix_shape.is_fully_defined() and matrix_shape[0] == matrix_shape[1]:
    return array_ops.matrix_diag(grad)
  else:
    return array_ops.matrix_set_diag(array_ops.zeros_like(op.inputs[0]), grad)
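A hedged illustration of the two branches above (values are made up): for a square input the gradient can be rebuilt directly with matrix_diag, while for a rectangular input it has to be scattered into a zero matrix of the input's shape with matrix_set_diag.

import tensorflow as tf
from tensorflow.python.ops import array_ops

grad = tf.constant([1.0, 2.0])
# Square 2x2 input: matrix_diag reconstructs the gradient directly.
square_grad = array_ops.matrix_diag(grad)
# Rectangular 2x3 input: matrix_diag alone cannot recover the shape,
# so the gradient is written onto the diagonal of a zero matrix.
rect_grad = array_ops.matrix_set_diag(tf.zeros([2, 3]), grad)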
Example 2: _SelfAdjointEigV2Grad
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  v = op.outputs[1]
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e.op, grad_v.op]):
    if grad_v is not None:
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only
      # defined up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) + f * math_ops.matmul(
                  v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      grad_a = math_ops.matmul(
          v, math_ops.matmul(
              array_ops.matrix_diag(grad_e), v, adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here
    # we symmetrize and take the lower triangle.
    grad_a = array_ops.matrix_band_part(
        grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
    grad_a = array_ops.matrix_set_diag(
        grad_a, 0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a
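For reference, the grad_v branch implements the standard eigendecomposition gradient; written out in notation of my own (not from the original page), with $\bar{e}$ = grad_e and $\bar{V}$ = grad_v:

$$\bar{A} = V\left(\operatorname{diag}(\bar{e}) + F \circ (V^{T}\bar{V})\right)V^{T}, \qquad F_{ij} = \begin{cases}(e_i - e_j)^{-1}, & i \neq j\\ 0, & i = j,\end{cases}$$

after which the last three statements symmetrize the result and keep only the lower triangle, matching what the forward op actually reads.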
Example 3: _to_dense
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _to_dense(self):
  diag = array_ops.ones(self.vector_shape(), dtype=self.dtype)
  dense = array_ops.matrix_diag(diag)
  dense.set_shape(self.get_shape())
  return self._scale * dense
Example 4: _sqrt_to_dense
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _sqrt_to_dense(self):
  diag = array_ops.ones(self.vector_shape(), dtype=self.dtype)
  dense = array_ops.matrix_diag(diag)
  dense.set_shape(self.get_shape())
  return math_ops.sqrt(self._scale) * dense
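Examples 3 and 4 appear to come from a scaled-identity operator class; self._scale, vector_shape() and get_shape() belong to that class and are not shown on this page. A self-contained sketch of the same pattern, with assumed values:

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

scale = tf.constant(4.0)                  # assumed scalar scale
diag = array_ops.ones([3], dtype=tf.float32)
dense = scale * array_ops.matrix_diag(diag)                      # 4 * I_3
sqrt_dense = math_ops.sqrt(scale) * array_ops.matrix_diag(diag)  # 2 * I_3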
Example 5: _covariance
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _covariance(self):
  if distribution_util.is_diagonal_scale(self.scale):
    return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
  else:
    return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
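In other words, when the scale operator is diagonal, the covariance scale @ scale^T collapses to diag(scale_diag ** 2). A tiny numeric sketch (illustrative values):

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

scale_diag = tf.constant([2.0, 3.0])
cov = array_ops.matrix_diag(math_ops.square(scale_diag))
# cov evaluates to [[4., 0.], [0., 9.]]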
Example 6: _to_dense
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _to_dense(self):
  return array_ops.matrix_diag(self._diag)
Example 7: _sqrt_to_dense
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _sqrt_to_dense(self):
  return array_ops.matrix_diag(math_ops.sqrt(self._diag))
Example 8: _to_dense
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _to_dense(self):
  return array_ops.matrix_diag(math_ops.square(self._diag))
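Taken together, Examples 6 to 8 suggest two operator conventions (inferred from the snippets, not stated on the page): one class stores the diagonal d itself and takes its square root on demand, while another stores the square root and squares it when densifying. A sketch of the relationship:

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

d = tf.constant([4.0, 9.0])
dense = array_ops.matrix_diag(d)                      # operator stores d
sqrt_dense = array_ops.matrix_diag(math_ops.sqrt(d))  # its matrix square root
s = math_ops.sqrt(d)                                  # or store s = sqrt(d)...
dense_again = array_ops.matrix_diag(math_ops.square(s))  # ...and square it back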
Example 9: _SelfAdjointEigV2Grad
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  v = op.outputs[1]
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e.op, grad_v.op]):
    if grad_v is not None:
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only
      # defined up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.inv(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.batch_matmul(
          v,
          math_ops.batch_matmul(
              array_ops.matrix_diag(grad_e) + f * math_ops.batch_matmul(
                  v, grad_v, adj_x=True),
              v,
              adj_y=True))
    else:
      grad_a = math_ops.batch_matmul(
          v,
          math_ops.batch_matmul(
              array_ops.matrix_diag(grad_e), v, adj_y=True))
    # The forward op only depends on the lower triangular part of a, so here
    # we symmetrize and take the lower triangle.
    grad_a = array_ops.matrix_band_part(
        grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
    grad_a = array_ops.matrix_set_diag(
        grad_a, 0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a
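Example 9 is the same gradient written against an older TensorFlow API surface: math_ops.inv was later renamed math_ops.reciprocal, and math_ops.batch_matmul with its adj_x/adj_y flags was folded into math_ops.matmul with adjoint_a/adjoint_b, which is why it otherwise mirrors Example 2 line for line.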
Example 10: _to_dense
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _to_dense(self):
  diag = array_ops.ones(self.vector_shape(), dtype=self.dtype)
  dense = array_ops.matrix_diag(diag)
  dense.set_shape(self.get_shape())
  return dense
Example 11: _sqrt_to_dense
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _sqrt_to_dense(self):
  return array_ops.matrix_diag(self._diag)
Example 12: _SelfAdjointEigV2Grad
# Required module: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import matrix_diag [as alias]
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  compute_v = op.get_attr("compute_v")
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e, grad_v]):
    if compute_v:
      v = op.outputs[1]
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only
      # defined up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) +
              f * math_ops.matmul(v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      # Eigenvectors were not computed in the forward pass; recompute them.
      _, v = linalg_ops.self_adjoint_eig(op.inputs[0])
      grad_a = math_ops.matmul(v,
                               math_ops.matmul(
                                   array_ops.matrix_diag(grad_e),
                                   v,
                                   adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here
    # we symmetrize and take the lower triangle. The conj() keeps the rule
    # valid for complex (Hermitian) inputs.
    grad_a = array_ops.matrix_band_part(
        grad_a + math_ops.conj(array_ops.matrix_transpose(grad_a)), -1, 0)
    grad_a = array_ops.matrix_set_diag(
        grad_a, 0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 42, Source file: linalg_grad.py
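As a closing sanity check (a hedged sketch of my own, assuming TF 1.x graph mode for tf.gradients): with grad_e set to all ones, the formula above reduces to V V^T = I, consistent with sum(e) equaling trace(a).

import tensorflow as tf

# The sum of eigenvalues equals trace(a), so d(sum(e))/da should be I.
a = tf.constant([[2.0, 1.0], [1.0, 3.0]])
e, v = tf.linalg.eigh(a)
grad_a = tf.gradients(tf.reduce_sum(e), a)[0]  # expect [[1., 0.], [0., 1.]]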