This article collects typical usage examples of the tensorflow.python.ops.math_ops.sign function in Python. If you have been wondering what exactly the sign function does, how to use it, and what real code that calls it looks like, the curated examples below should help.
The following presents 15 code examples of the sign function, sorted by popularity by default.
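As a quick reference before the examples: for real inputs, sign maps negative values to -1, zero to 0, and positive values to +1 (for complex inputs TensorFlow defines it as x / |x|, or 0 at zero). A minimal standalone sketch using NumPy, whose np.sign matches this behavior for real arrays:

import numpy as np

# sign maps negatives to -1, zero to 0, positives to +1.
x = np.array([-3.5, 0.0, 2.0])
print(np.sign(x))  # [-1.  0.  1.]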
Example 1: _apply_sparse
def _apply_sparse(self, grad, var):
  lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
  alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
  beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
  m = self.get_slot(var, 'm')
  m_t = state_ops.assign(
      m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)
  sign_g = ops.IndexedSlices(
      math_ops.sign(grad.values), grad.indices, dense_shape=grad.dense_shape)
  sign_gm = ops.IndexedSlices(
      array_ops.gather(math_ops.sign(m_t), sign_g.indices) * sign_g.values,
      sign_g.indices,
      dense_shape=sign_g.dense_shape)
  sign_decayed = math_ops.cast(
      self._sign_decay_t, var.dtype.base_dtype)
  multiplier_values = alpha_t + sign_decayed * sign_gm.values
  multiplier = ops.IndexedSlices(
      multiplier_values, sign_gm.indices, dense_shape=sign_gm.dense_shape)
  final_update = ops.IndexedSlices(
      lr_t * multiplier.values * grad.values,
      multiplier.indices,
      dense_shape=multiplier.dense_shape)
  var_update = state_ops.scatter_sub(
      var,
      final_update.indices,
      final_update.values,
      use_locking=self._use_locking)
  return control_flow_ops.group(*[var_update, m_t])
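This looks like the sparse branch of an AddSign-style optimizer: the scaling alpha + sign_decay * sign(grad) * sign(m) enlarges the step when the gradient agrees in sign with the moving average m, and shrinks it when they disagree. A dense NumPy restatement of that update rule, with hypothetical names of my own choosing:

import numpy as np

def addsign_step(var, grad, m, lr=0.1, alpha=1.0, beta=0.9, sign_decay=1.0):
    # Hypothetical dense sketch of the update above, not the TF source.
    m = beta * m + (1.0 - beta) * grad            # moving average of gradients
    multiplier = alpha + sign_decay * np.sign(grad) * np.sign(m)
    return var - lr * multiplier * grad, m        # scaled gradient step

var, m = np.array([1.0, -2.0]), np.zeros(2)
var, m = addsign_step(var, np.array([0.5, 0.5]), m)
print(var, m)  # each step is scaled by 2, since gradient and m agree in sign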
Example 2: sample_n
def sample_n(self, n, seed=None, name="sample_n"):
  """Sample `n` observations from the Laplace Distributions.

  Args:
    n: `Scalar`, type int32, the number of observations to sample.
    seed: Python integer, the random seed.
    name: The name to give this op.

  Returns:
    samples: `[n, ...]`, a `Tensor` of `n` samples for each
      of the distributions determined by broadcasting the parameters.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self._loc, self._scale, n]):
      n = ops.convert_to_tensor(n)
      n_val = tensor_util.constant_value(n)
      shape = array_ops.concat(0, ([n], self.batch_shape()))
      # Sample uniformly-at-random from the open-interval (-1, 1).
      uniform_samples = random_ops.random_uniform(
          shape=shape,
          minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                              self.dtype.as_numpy_dtype(0.)),
          maxval=self.dtype.as_numpy_dtype(1.),
          dtype=self.dtype,
          seed=seed)
      # Provide some hints to shape inference
      inferred_shape = tensor_shape.vector(n_val).concatenate(
          self.get_batch_shape())
      uniform_samples.set_shape(inferred_shape)
      return (self._loc - self._scale * math_ops.sign(uniform_samples) *
              math_ops.log(1. - math_ops.abs(uniform_samples)))
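The return line is inverse-CDF sampling for the Laplace distribution: if U is uniform on the open interval (-1, 1), then loc - scale * sign(U) * log(1 - |U|) is Laplace-distributed with that location and scale (Examples 9 and 12 use the same transform). A standalone NumPy check of this claim, my own sketch rather than TF code:

import numpy as np

rng = np.random.default_rng(0)
loc, scale = 1.0, 2.0
u = rng.uniform(-1.0, 1.0, size=200_000)
samples = loc - scale * np.sign(u) * np.log(1.0 - np.abs(u))
# Laplace(loc, scale) has mean loc and variance 2 * scale**2.
print(samples.mean(), samples.var())  # roughly 1.0 and 8.0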
Example 3: random_sign_uniform
def random_sign_uniform(shape,
                        minval=None,
                        maxval=None,
                        dtype=dtypes.float32,
                        seed=None):
  """Tensor with (possibly complex) random entries from a "sign Uniform".

  Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
  samples from this `Op` are distributed like

  ```
  Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
  Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```

  Args:
    shape: `TensorShape` or Python list. Shape of the returned tensor.
    minval: `0-D` `Tensor` giving the minimum values.
    maxval: `0-D` `Tensor` giving the maximum values.
    dtype: `TensorFlow` `dtype` or Python dtype.
    seed: Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_sign_uniform"):
    unsigned_samples = random_uniform(
        shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
    if seed is not None:
      seed += 12
    signs = math_ops.sign(
        random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
    return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)
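A standalone NumPy version of the same idea, for intuition: draw magnitudes uniformly, then flip each one's sign with an independent fair coin obtained by taking sign of a symmetric uniform. Note also how the TF code above offsets the seed by 12 so the sign draw is decorrelated from the magnitude draw.

import numpy as np

rng = np.random.default_rng(42)
minval, maxval = 1.0, 2.0
magnitudes = rng.uniform(minval, maxval, size=5)
signs = np.sign(rng.uniform(-1.0, 1.0, size=5))  # +/-1 with equal probability
print(signs * magnitudes)  # each entry lands in [-2, -1] or [1, 2]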
Example 4: __call__
def __call__(self, shape, dtype=None, partition_info=None):
  if dtype is None:
    dtype = self.dtype
  # Check the shape
  if len(shape) < 2:
    raise ValueError("The tensor to initialize must be "
                     "at least two-dimensional")
  # Flatten the input shape with the last dimension remaining
  # its original shape so it works for conv2d
  num_rows = 1
  for dim in shape[:-1]:
    num_rows *= dim
  num_cols = shape[-1]
  flat_shape = (num_cols, num_rows) if num_rows < num_cols else (num_rows,
                                                                 num_cols)
  # Generate a random matrix
  a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
  # Compute the qr factorization
  q, r = linalg_ops.qr(a, full_matrices=False)
  # Make Q uniform
  d = array_ops.diag_part(r)
  q *= math_ops.sign(d)
  if num_rows < num_cols:
    q = array_ops.matrix_transpose(q)
  return self.gain * array_ops.reshape(q, shape)
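The q *= math_ops.sign(d) step is the standard correction for QR-based orthogonal initialization: multiplying each column of Q by the sign of the corresponding diagonal entry of R makes the factorization unique and, for a Gaussian input matrix, makes Q uniformly distributed over the orthogonal group. Examples 5 and 10 use the same recipe; a NumPy sketch:

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4))
q, r = np.linalg.qr(a)
q *= np.sign(np.diag(r))  # flip columns so diag(R) >= 0; Q stays orthogonal
print(np.allclose(q @ q.T, np.eye(4)))  # True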
Example 5: __call__
def __call__(self, shape, dtype=dtypes.float32):
  """Returns a tensor object initialized as specified by the initializer.

  Args:
    shape: Shape of the tensor.
    dtype: Optional dtype of the tensor. Only floating point types are
      supported.

  Raises:
    ValueError: If the dtype is not floating point or the input shape is not
      valid.
  """
  dtype = _assert_float_dtype(dtype)
  # Check the shape
  if len(shape) < 2:
    raise ValueError("The tensor to initialize must be "
                     "at least two-dimensional")
  # Flatten the input shape with the last dimension remaining
  # its original shape so it works for conv2d
  num_rows = 1
  for dim in shape[:-1]:
    num_rows *= dim
  num_cols = shape[-1]
  flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
  # Generate a random matrix
  a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
  # Compute the qr factorization
  q, r = gen_linalg_ops.qr(a, full_matrices=False)
  # Make Q uniform
  d = array_ops.diag_part(r)
  q *= math_ops.sign(d)
  if num_rows < num_cols:
    q = array_ops.matrix_transpose(q)
  return self.gain * array_ops.reshape(q, shape)
Example 6: _Solve
def _Solve(a, b, c):
  """Return solution of a quadratic minimization.

  The optimization equation is:
    f(a, b, c) = argmin_w{1/2 * a * w^2 + b * w + c * |w|}
  we get optimal solution w*:
    w* = -(b - sign(b)*c)/a if |b| > c else w* = 0

  REQUIRES: Dimensionality of a and b must be same

  Args:
    a: A Tensor
    b: A Tensor
    c: A Tensor with one element.

  Returns:
    A Tensor w, which is solution for the equation
  """
  with ops.name_scope("solve_" + b.op.name):
    c = ops.convert_to_tensor(c)
    k = array_ops.fill(array_ops.shape(b), c)
    zero_t = array_ops.zeros(array_ops.shape(b), dtype=b.dtype)
    w = (c * math_ops.sign(b) - b) / a
    w = math_ops.select(math_ops.less(math_ops.abs(b), k), zero_t, w)
    return w
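_Solve is the closed-form soft-threshold minimizer of f(w) = 1/2*a*w^2 + b*w + c*|w|: setting the subgradient to zero gives w* = -(b - sign(b)*c)/a when |b| > c, and w* = 0 otherwise. A quick NumPy sanity check of the closed form against a brute-force grid search (my own sketch):

import numpy as np

def solve(a, b, c):
    # Same closed form as above: soft-threshold b by c, then scale by -1/a.
    w = (c * np.sign(b) - b) / a
    return np.where(np.abs(b) < c, 0.0, w)

a, b, c = 2.0, -3.0, 1.0
grid = np.linspace(-5.0, 5.0, 100001)
f = 0.5 * a * grid**2 + b * grid + c * np.abs(grid)
print(solve(a, b, c), grid[np.argmin(f)])  # both ~1.0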
Example 7: Test
def Test(self):
  np.random.seed(1)
  n = shape_[-1]
  batch_shape = shape_[:-2]
  np_dtype = dtype_.as_numpy_dtype
  a = np.random.uniform(
      low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
  if dtype_.is_complex:
    a += 1j * np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
  a += np.conj(a.T)
  a = np.tile(a, batch_shape + (1, 1))
  # Optimal stepsize for central difference is O(epsilon^{1/3}).
  epsilon = np.finfo(np_dtype).eps
  delta = 0.1 * epsilon**(1.0 / 3.0)
  # tolerance obtained by looking at actual differences using
  # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
  if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
    tol = 1e-2
  else:
    tol = 1e-7
  with self.session(use_gpu=True):
    tf_a = constant_op.constant(a)
    if compute_v_:
      tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
      # (complex) Eigenvectors are only unique up to an arbitrary phase.
      # We normalize the vectors such that the first component has phase 0.
      top_rows = tf_v[..., 0:1, :]
      if tf_a.dtype.is_complex:
        angle = -math_ops.angle(top_rows)
        phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
      else:
        phase = math_ops.sign(top_rows)
      tf_v *= phase
      outputs = [tf_e, tf_v]
    else:
      tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
      outputs = [tf_e]
    for b in outputs:
      x_init = np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      if dtype_.is_complex:
        x_init += 1j * np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      x_init += np.conj(x_init.T)
      x_init = np.tile(x_init, batch_shape + (1, 1))
      theoretical, numerical = gradient_checker.compute_gradient(
          tf_a,
          tf_a.get_shape().as_list(),
          b,
          b.get_shape().as_list(),
          x_init_value=x_init,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Example 8: Compute
def Compute(x):
  e, v = linalg_ops.self_adjoint_eig(x)
  # (complex) Eigenvectors are only unique up to an arbitrary phase.
  # We normalize the vectors such that the first component has phase 0.
  top_rows = v[..., 0:1, :]
  if dtype_.is_complex:
    angle = -math_ops.angle(top_rows)
    phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
  else:
    phase = math_ops.sign(top_rows)
  v *= phase
  return e, v
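Examples 7, 8, and 13 all rely on the same trick: eigenvectors and singular vectors are only determined up to a phase (a sign, in the real case), so before comparing two decompositions each vector is rotated so that its first component is real and non-negative. For real matrices that rotation is simply multiplication by sign. A NumPy illustration for the real symmetric case (my own sketch; it assumes no first component is exactly zero, since sign(0) == 0):

import numpy as np

rng = np.random.default_rng(1)
a = rng.standard_normal((4, 4))
a = a + a.T                    # symmetric, so eigenvectors are real
e, v = np.linalg.eigh(a)
v *= np.sign(v[0:1, :])        # flip each column so its first entry is >= 0
print(v[0])                    # all entries now non-negative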
Example 9: _sample_n
def _sample_n(self, n, seed=None):
  shape = array_ops.concat(0, ([n], self.batch_shape()))
  # Sample uniformly-at-random from the open-interval (-1, 1).
  uniform_samples = random_ops.random_uniform(
      shape=shape,
      minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                          self.dtype.as_numpy_dtype(0.)),
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  return (self.loc - self.scale * math_ops.sign(uniform_samples) *
          math_ops.log(1. - math_ops.abs(uniform_samples)))
Example 10: _orthogonal_matrix
def _orthogonal_matrix(self, n):
  """Construct an n x n orthogonal matrix.

  Args:
    n: dimension.

  Returns:
    an n x n orthogonal matrix.
  """
  a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
  if self.seed:
    self.seed += 1
  q, r = linalg_ops.qr(a)
  d = array_ops.diag_part(r)
  # make q uniform
  q *= math_ops.sign(d)
  return q
Example 11: _BesselI1eGrad
def _BesselI1eGrad(op, grad):
  """Compute gradient of bessel_i1e(x) with respect to its argument."""
  x = op.inputs[0]
  y = op.outputs[0]
  with ops.control_dependencies([grad]):
    # For x = 0, the correct gradient is 0.5.
    # However, the main branch gives NaN because of the division by x, so
    # we impute the gradient manually.
    # An alternative solution is to express the gradient via bessel_i0e and
    # bessel_i2e, but the latter is not yet implemented in Eigen.
    eps = np.finfo(x.dtype.as_numpy_dtype).eps
    zeros = array_ops.zeros_like(x)
    x_is_not_tiny = math_ops.abs(x) > eps
    safe_x = array_ops.where(x_is_not_tiny, x, eps + zeros)
    dy_dx = math_ops.bessel_i0e(safe_x) - y * (
        math_ops.sign(safe_x) + math_ops.reciprocal(safe_x))
    return grad * array_ops.where(x_is_not_tiny, dy_dx, 0.5 + zeros)
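The expression follows from the Bessel recurrence I1'(x) = I0(x) - I1(x)/x combined with the exponential scaling in bessel_i1e, giving d/dx i1e(x) = i0e(x) - i1e(x) * (sign(x) + 1/x). A standalone check with SciPy against a central finite difference (my sketch; assumes scipy is installed):

import numpy as np
from scipy.special import i0e, i1e

x = 0.7
analytic = i0e(x) - i1e(x) * (np.sign(x) + 1.0 / x)
h = 1e-6
numeric = (i1e(x + h) - i1e(x - h)) / (2.0 * h)
print(analytic, numeric)  # agree to ~1e-9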
Example 12: _sample_n
def _sample_n(self, n, seed=None):
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  # Uniform variates must be sampled from the open-interval `(-1, 1)` rather
  # than `[-1, 1)`. In the case of `(0, 1)` we'd use
  # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
  # positive, "normal" number. However, the concept of subnormality exists
  # only at zero; here we need the smallest usable number larger than -1,
  # i.e., `-1 + eps/2`.
  uniform_samples = random_ops.random_uniform(
      shape=shape,
      minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                          self.dtype.as_numpy_dtype(0.)),
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  return (self.loc - self.scale * math_ops.sign(uniform_samples) *
          math_ops.log1p(-math_ops.abs(uniform_samples)))
Example 13: _NormalizingSvd
def _NormalizingSvd(tf_a):
  tf_s, tf_u, tf_v = linalg_ops.svd(tf_a, compute_uv=True, full_matrices=True)
  # Singular vectors are only unique up to an arbitrary phase. We normalize
  # the vectors such that the first component of u (if m >= n) or v (if n > m)
  # have phase 0.
  m = tf_a.shape[-2]
  n = tf_a.shape[-1]
  if m >= n:
    top_rows = tf_u[..., 0:1, :]
  else:
    top_rows = tf_v[..., 0:1, :]
  if tf_u.dtype.is_complex:
    angle = -math_ops.angle(top_rows)
    phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
  else:
    phase = math_ops.sign(top_rows)
  tf_u *= phase[..., :m]
  tf_v *= phase[..., :n]
  return tf_s, tf_u, tf_v
Example 14: cdf
def cdf(self, x, name="cdf"):
  """CDF of observations in `x` under the Laplace distribution(s).

  Args:
    x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
    name: The name to give this op.

  Returns:
    cdf: tensor of dtype `dtype`, the CDFs of `x`.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self._loc, self._scale, x]):
      x = ops.convert_to_tensor(x)
      if x.dtype != self.dtype:
        raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                        % (x.dtype, self.dtype))
      y = x - self._loc
      return 0.5 + 0.5 * math_ops.sign(y) * (
          1. - math_ops.exp(-math_ops.abs(y) / self._scale))
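This is the closed-form Laplace CDF, F(x) = 1/2 + 1/2 * sign(x - loc) * (1 - exp(-|x - loc| / scale)); sign folds the two branches (x below or above the location) into a single expression. It can be checked against scipy.stats.laplace (my sketch; assumes scipy is installed):

import numpy as np
from scipy import stats

loc, scale = 1.0, 2.0
x = np.array([-2.0, 1.0, 4.0])
y = x - loc
cdf = 0.5 + 0.5 * np.sign(y) * (1.0 - np.exp(-np.abs(y) / scale))
print(cdf)
print(stats.laplace.cdf(x, loc=loc, scale=scale))  # same values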
Example 15: build
def build(self, inputs_shape):
  if inputs_shape[1].value is None:
    raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                     % inputs_shape)
  input_depth = inputs_shape[1].value
  if self._input_initializer is None:
    self._input_initializer = init_ops.random_normal_initializer(mean=0.0,
                                                                 stddev=0.001)
  self._input_kernel = self.add_variable(
      "input_kernel",
      shape=[input_depth, self._num_units],
      initializer=self._input_initializer)
  if self._recurrent_initializer is None:
    self._recurrent_initializer = init_ops.constant_initializer(1.)
  self._recurrent_kernel = self.add_variable(
      "recurrent_kernel",
      shape=[self._num_units],
      initializer=self._recurrent_initializer)
  # Clip the absolute values of the recurrent weights to the specified minimum
  if self._recurrent_min_abs:
    abs_kernel = math_ops.abs(self._recurrent_kernel)
    min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
    self._recurrent_kernel = math_ops.multiply(
        math_ops.sign(self._recurrent_kernel), min_abs_kernel)
  # Clip the absolute values of the recurrent weights to the specified maximum
  if self._recurrent_max_abs:
    self._recurrent_kernel = clip_ops.clip_by_value(self._recurrent_kernel,
                                                    -self._recurrent_max_abs,
                                                    self._recurrent_max_abs)
  self._bias = self.add_variable(
      "bias",
      shape=[self._num_units],
      initializer=init_ops.zeros_initializer(dtype=self.dtype))
  self.built = True
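The min-abs clip is the part worth noting: a plain lower-bound clip would overwrite negative weights, so the code clips the magnitude with maximum(|w|, min_abs) and then restores each weight's sign via math_ops.sign. A NumPy restatement of just that step (my own sketch; exact zeros stay zero because sign(0) == 0, matching the TF code):

import numpy as np

def clip_min_abs(w, min_abs):
    # Raise each weight's magnitude to at least min_abs, preserving its sign.
    return np.sign(w) * np.maximum(np.abs(w), min_abs)

w = np.array([-0.05, 0.2, -0.8, 0.0])
print(clip_min_abs(w, 0.1))  # [-0.1  0.2 -0.8  0. ]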