This page collects typical usage examples of the Python method tensorflow.python.ops.math_ops.real. If you are wondering what math_ops.real does, how to call it, or what real-world code using it looks like, the curated samples below should help. You can also browse further usage examples for the containing module, tensorflow.python.ops.math_ops.
The following presents 14 code examples of math_ops.real, sorted by popularity by default. Upvote the examples you find useful; your votes help the system surface better Python examples.
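Before the examples, a minimal sketch of what math_ops.real itself computes (assuming a TensorFlow build where the internal tensorflow.python.ops modules are importable; the public alias tf.math.real behaves the same):

import tensorflow as tf
from tensorflow.python.ops import math_ops

z = tf.constant([1 + 2j, 3 - 4j], dtype=tf.complex64)
print(math_ops.real(z))   # [ 1.  3.]  float32
print(math_ops.imag(z))   # [ 2. -4.]  float32
# For a real-valued tensor, math_ops.real simply returns its input unchanged.
x = tf.constant([1.0, 2.0])
print(math_ops.real(x))   # [1. 2.]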
Example 1: _PowGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 25, Source: math_grad.py
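A quick numeric illustration of the formula in the docstring above, using the public TF 2 eager API rather than the registered gradient itself (a sanity-check sketch, not part of the original snippet):

import math
import tensorflow as tf

x = tf.constant(2.0)
y = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch([x, y])
  z = tf.pow(x, y)               # z = x**y = 8
gx, gy = tape.gradient(z, [x, y])
print(gx.numpy())                # y * x**(y-1) = 3 * 2**2 = 12
print(gy.numpy())                # z * log(x) = 8 * ln(2) ≈ 5.545
print(z.numpy() * math.log(2.0))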
Example 2: _assert_positive_definite
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def _assert_positive_definite(self):
  if self.dtype.is_complex:
    message = (
        "Diagonal operator had diagonal entries with non-positive real part, "
        "thus was not positive definite.")
  else:
    message = (
        "Real diagonal operator had non-positive diagonal entries, "
        "thus was not positive definite.")
  return check_ops.assert_positive(
      math_ops.real(self._diag),
      message=message)
Example 3: _assert_positive_definite
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def _assert_positive_definite(self):
  return check_ops.assert_positive(
      math_ops.real(self.multiplier),
      message="LinearOperator was not positive definite.")
Example 4: modrelu
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def modrelu(z, b, comp):
  # modReLU: apply a ReLU to the (bias-shifted) modulus of z while keeping
  # the phase of z; `comp` selects the complex branch.
  if comp:
    z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) +
                           math_ops.square(math_ops.imag(z))) + 0.00001
    step1 = nn_ops.bias_add(z_norm, b)
    step2 = math_ops.complex(nn_ops.relu(step1), array_ops.zeros_like(z_norm))
    step3 = z / math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
  else:
    z_norm = math_ops.abs(z) + 0.00001
    step1 = nn_ops.bias_add(z_norm, b)
    step2 = nn_ops.relu(step1)
    step3 = math_ops.sign(z)
  return math_ops.multiply(step3, step2)
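A small usage sketch for the modrelu above. The shapes and values are made up for illustration; it assumes the array_ops, math_ops and nn_ops imports from tensorflow.python.ops and runs eagerly under TF 2:

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops, nn_ops  # used inside modrelu

z = tf.complex(tf.random.normal([2, 4]), tf.random.normal([2, 4]))  # [batch, units], complex64
b = tf.zeros([4])                                                   # one real bias per unit
out = modrelu(z, b, comp=True)
print(out.shape, out.dtype)  # (2, 4) complex64; the phase of z is kept, its modulus is ReLU'd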
Example 5: _ComplexGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def _ComplexGrad(op, grad):
  """Returns the real and imaginary components of 'grad', respectively."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 11, Source: math_grad.py
Example 6: _RealGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def _RealGrad(_, grad):
  """Returns 'grad' as the real part and sets the imaginary part to 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(grad, zero)
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 6, Source: math_grad.py
Example 7: _ImagGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def _ImagGrad(_, grad):
  """Returns 'grad' as the imaginary part and sets the real part to 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(zero, grad)
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 6, Source: math_grad.py
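The three gradient functions above (for Complex, Real and Imag) cooperate whenever a model builds a complex tensor from real parts and later takes real or imaginary components again. A short end-to-end sketch with the public TF 2 API (values chosen only to make the gradients easy to read):

import tensorflow as tf

x = tf.Variable([1.0, 2.0])
y = tf.Variable([3.0, 4.0])
with tf.GradientTape() as tape:
  z = tf.complex(x, y)                                            # gradient: _ComplexGrad
  loss = tf.reduce_sum(tf.math.real(z) + 2.0 * tf.math.imag(z))   # gradients: _RealGrad / _ImagGrad
gx, gy = tape.gradient(loss, [x, y])
print(gx.numpy())   # [1. 1.]  -- contribution from the real part
print(gy.numpy())   # [2. 2.]  -- contribution from the imaginary part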
Example 8: _ComplexAbsGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def _ComplexAbsGrad(op, grad):
  """Returns the gradient of ComplexAbs."""
  # TODO(b/27786104): The cast to complex could be removed once arithmetic
  # supports mixtures of complex64 and real values.
  return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
          math_ops.sign(op.inputs[0]))
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 8, Source: math_grad.py
Example 9: svd
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
  """Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

  ```prettyprint
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`. The values are sorted in reverse
      order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the
      second largest, etc.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.svd, except that the order of output
  arguments here is `s`, `u`, `v` when `compute_uv` is `True`, as opposed to
  `u`, `s`, `v` for numpy.linalg.svd.
  @end_compatibility
  """
  # pylint: disable=protected-access
  s, u, v = gen_linalg_ops._svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices)
  # pylint: enable=protected-access
  if compute_uv:
    return math_ops.real(s), u, v
  else:
    return math_ops.real(s)
# pylint: disable=redefined-builtin
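The math_ops.real call at the end is the reason this wrapper appears on this page: singular values are mathematically real even for complex input, so the wrapper strips any (zero) imaginary component the underlying kernel may report. A brief sketch with the public tf.linalg.svd (arbitrary values):

import tensorflow as tf

a = tf.complex(tf.random.normal([3, 4]), tf.random.normal([3, 4]))
s, u, v = tf.linalg.svd(a)         # note the (s, u, v) order, unlike numpy.linalg.svd's (u, s, v)
print(s.dtype)                     # float32 -- singular values come back real
print(s.shape, u.shape, v.shape)   # (3,) (3, 3) (4, 3)
s_only = tf.linalg.svd(a, compute_uv=False)
print(s_only.shape)                # (3,)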
Example 10: __init__
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def __init__(self,
             diag,
             is_non_singular=None,
             is_self_adjoint=None,
             is_positive_definite=None,
             is_square=None,
             name="LinearOperatorDiag"):
  r"""Initialize a `LinearOperatorDiag`.

  Args:
    diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0`, `N >= 0`.
      The diagonal of the operator. Allowed dtypes: `float32`, `float64`,
      `complex64`, `complex128`.
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose. If `diag.dtype` is real, this is auto-set to `True`.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the quadratic form `x^H A x` has positive real part for all
      nonzero `x`. Note that we do not require the operator to be
      self-adjoint to be positive-definite. See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix\
          #Extension_for_non_symmetric_matrices
    is_square: Expect that this operator acts like square [batch] matrices.
    name: A name for this `LinearOperator`.

  Raises:
    TypeError: If `diag.dtype` is not an allowed type.
    ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.
  """
  with ops.name_scope(name, values=[diag]):
    self._diag = ops.convert_to_tensor(diag, name="diag")
    self._check_diag(self._diag)

    # Check and auto-set hints.
    if not self._diag.dtype.is_complex:
      if is_self_adjoint is False:
        raise ValueError("A real diagonal operator is always self adjoint.")
      else:
        is_self_adjoint = True

    if is_square is False:
      raise ValueError("Only square diagonal operators currently supported.")
    is_square = True

    super(LinearOperatorDiag, self).__init__(
        dtype=self._diag.dtype,
        graph_parents=[self._diag],
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name)
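A short usage sketch with the public tf.linalg.LinearOperatorDiag; the values are illustrative, and the hint behaviour shown matches the constructor above (is_self_adjoint is only auto-set when the diagonal dtype is real):

import tensorflow as tf

op = tf.linalg.LinearOperatorDiag(tf.constant([1 + 1j, 2 - 3j], dtype=tf.complex64))
print(op.is_self_adjoint)            # None -- not auto-set for a complex diagonal
v = tf.constant([1 + 0j, 1 + 0j], dtype=tf.complex64)
print(op.matvec(v).numpy())          # [1.+1.j 2.-3.j]

real_op = tf.linalg.LinearOperatorDiag([3.0, 4.0])
print(real_op.is_self_adjoint)       # True -- auto-set because the dtype is real
print(real_op.to_dense().numpy())    # [[3. 0.] [0. 4.]]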
Example 11: svd
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
  """Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

  ```prettyprint
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.svd, except that the order of output
  arguments here is `s`, `u`, `v` when `compute_uv` is `True`, as opposed to
  `u`, `s`, `v` for numpy.linalg.svd.
  @end_compatibility
  """
  # pylint: disable=protected-access
  s, u, v = gen_linalg_ops._svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices)
  # pylint: enable=protected-access
  if compute_uv:
    return math_ops.real(s), u, v
  else:
    return math_ops.real(s)
# pylint: disable=redefined-builtin
Example 12: __init__
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def __init__(self,
             diag,
             is_non_singular=None,
             is_self_adjoint=None,
             is_positive_definite=None,
             name="LinearOperatorDiag"):
  """Initialize a `LinearOperatorDiag`.

  Args:
    diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0`, `N >= 0`.
      The diagonal of the operator. Allowed dtypes: `float32`, `float64`,
      `complex64`, `complex128`.
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose. If `diag.dtype` is real, this is auto-set to `True`.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the real part of all eigenvalues is positive. We do not require
      the operator to be self-adjoint to be positive-definite. See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix
          #Extension_for_non_symmetric_matrices
    name: A name for this `LinearOperator`.

  Raises:
    TypeError: If `diag.dtype` is not an allowed type.
    ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.
  """
  allowed_dtypes = [
      dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]

  with ops.name_scope(name, values=[diag]):
    self._diag = ops.convert_to_tensor(diag, name="diag")
    dtype = self._diag.dtype
    if dtype not in allowed_dtypes:
      raise TypeError(
          "Argument diag must have dtype in %s. Found: %s"
          % (allowed_dtypes, dtype))

    # Check and auto-set hints.
    if not dtype.is_complex:
      if is_self_adjoint is False:
        raise ValueError("A real diagonal operator is always self adjoint.")
      else:
        is_self_adjoint = True

    super(LinearOperatorDiag, self).__init__(
        dtype=dtype,
        graph_parents=[self._diag],
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        name=name)
Example 13: __init__
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def __init__(self,
             tril,
             is_non_singular=None,
             is_self_adjoint=None,
             is_positive_definite=None,
             name="LinearOperatorTriL"):
  """Initialize a `LinearOperatorTriL`.

  Args:
    tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`.
      The lower triangular part of `tril` defines this operator. The strictly
      upper triangle is ignored. Allowed dtypes: `float32`, `float64`.
    is_non_singular: Expect that this operator is non-singular.
      This operator is non-singular if and only if its diagonal elements are
      all non-zero.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose. This operator is self-adjoint only if it is diagonal with
      real-valued diagonal entries. In this case it is advised to use
      `LinearOperatorDiag`.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the real part of all eigenvalues is positive. We do not require
      the operator to be self-adjoint to be positive-definite. See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix
          #Extension_for_non_symmetric_matrices
    name: A name for this `LinearOperator`.

  Raises:
    TypeError: If `tril.dtype` is not an allowed type.
  """
  # TODO(langmore) Add complex types once matrix_triangular_solve works for
  # them.
  allowed_dtypes = [dtypes.float32, dtypes.float64]

  with ops.name_scope(name, values=[tril]):
    self._tril = array_ops.matrix_band_part(tril, -1, 0)
    self._diag = array_ops.matrix_diag_part(self._tril)
    dtype = self._tril.dtype
    if dtype not in allowed_dtypes:
      raise TypeError(
          "Argument tril must have dtype in %s. Found: %s"
          % (allowed_dtypes, dtype))

    super(LinearOperatorTriL, self).__init__(
        dtype=self._tril.dtype,
        graph_parents=[self._tril],
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        name=name)
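In current TensorFlow releases this operator is exposed publicly as tf.linalg.LinearOperatorLowerTriangular (LinearOperatorTriL is the older name). A brief, hedged usage sketch with illustrative values:

import tensorflow as tf

tril = tf.constant([[4.0, 0.0],
                    [2.0, 3.0]])
op = tf.linalg.LinearOperatorLowerTriangular(tril)
print(op.matvec(tf.constant([1.0, 1.0])).numpy())    # [4. 5.]
print(op.solvevec(tf.constant([4.0, 5.0])).numpy())  # [1. 1.] -- a triangular solve
print(op.to_dense().numpy())                         # [[4. 0.] [2. 3.]]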
Example 14: svd
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import real [as alias]
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
  """Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

  ```prettyprint
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.
  """
  # pylint: disable=protected-access
  s, u, v = gen_linalg_ops._svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices)
  # pylint: enable=protected-access
  if compute_uv:
    return math_ops.real(s), u, v
  else:
    return math_ops.real(s)
# pylint: enable=invalid-name