This article collects typical usage examples of the math_ops.sign method from the Python module tensorflow.python.ops.math_ops. If you are asking what exactly math_ops.sign does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore the containing module tensorflow.python.ops.math_ops for related methods.
The sections below present 15 code examples of math_ops.sign, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
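Before the examples, a quick orientation: math_ops.sign computes the element-wise sign of a tensor, returning -1, 0, or 1 for real inputs (and x / |x|, or 0 at zero, for complex inputs). It is the internal op behind the public tf.sign / tf.math.sign alias. A minimal sketch, runnable in eager mode:

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([-3.0, 0.0, 2.5])
print(math_ops.sign(x).numpy())  # [-1.  0.  1.]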
Example 1: _sample_n
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def _sample_n(self, n, seed=None):
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  # Uniform variates must be sampled from the open interval `(-1, 1)` rather
  # than `[-1, 1)`. In the case of `(0, 1)` we'd use
  # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
  # positive, "normal" number. However, the concept of subnormality exists
  # only at zero; here we need the smallest usable number larger than -1,
  # i.e., `-1 + eps / 2`.
  uniform_samples = random_ops.random_uniform(
      shape=shape,
      minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                          self.dtype.as_numpy_dtype(0.)),
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  return (self.loc - self.scale * math_ops.sign(uniform_samples) *
          math_ops.log1p(-math_ops.abs(uniform_samples)))
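The return expression is the inverse-CDF (quantile) transform for the Laplace distribution: if U is uniform on the open interval (-1, 1), then loc - scale * sign(U) * log1p(-|U|) is Laplace(loc, scale) distributed. A NumPy sanity check of the same transform (an illustrative sketch, not the library code):

import numpy as np

rng = np.random.default_rng(0)
# Open interval (-1, 1): nudge the lower endpoint, as the code above does.
u = rng.uniform(np.nextafter(-1.0, 0.0), 1.0, size=100_000)
samples = 0.0 - 1.0 * np.sign(u) * np.log1p(-np.abs(u))  # loc=0, scale=1
print(np.var(samples))  # ~2.0, matching Var = 2 * scale**2 for Laplace(0, 1)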
Example 2: _apply_weight_decays
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def _apply_weight_decays(self, var, var_t):
  l1, l2 = self.weight_decays[var.name]
  if l1 == 0 and l2 == 0:
    if self.init_verbose and not self._init_notified:
      print("Both penalties are 0 for %s, will skip" % var.name)
    return var_t

  norm = math_ops.cast(
      math_ops.sqrt(self.batch_size / self.total_iterations_wd), 'float32')
  l1_normalized = l1 * norm
  l2_normalized = l2 * norm
  if l1 != 0 and l2 != 0:
    decay = l1_normalized * math_ops.sign(var) + l2_normalized * var
  elif l1 != 0:
    decay = l1_normalized * math_ops.sign(var)
  else:
    decay = l2_normalized * var
  var_t = var_t - self.eta_t * decay

  if self.init_verbose and not self._init_notified:
    norm_print = (self.batch_size / self.total_iterations_wd) ** (1 / 2)
    l1n_print, l2n_print = l1 * norm_print, l2 * norm_print
    decays_str = "{}(L1), {}(L2)".format(l1n_print, l2n_print)
    print('{} weight decay set for {}'.format(decays_str, var.name))
  return var_t
Example 3: _cdf
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def _cdf(self, x):
  z = self._z(x)
  return (0.5 + 0.5 * math_ops.sign(z) *
          (1. - math_ops.exp(-math_ops.abs(z))))
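Written out, and assuming _z standardizes the input as z = (x - mu) / b (the usual location-scale convention in TF distributions), this is a branch-free form of the Laplace CDF:

    F(x) = \tfrac{1}{2} + \tfrac{1}{2}\,\operatorname{sign}(z)\bigl(1 - e^{-|z|}\bigr), \qquad z = \frac{x - \mu}{b}

For z >= 0 it reduces to 1 - e^{-z}/2, and for z < 0 to e^{z}/2, so the sign trick merges the two branches of the piecewise definition.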
Example 4: _AbsGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def _AbsGrad(op, grad):
  x = op.inputs[0]
  return grad * math_ops.sign(x)
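This registers the gradient d|x|/dx = sign(x), with the subgradient at zero taken to be 0 because sign(0) = 0. The same behavior is visible through the public API; a quick eager-mode check:

import tensorflow as tf

x = tf.constant([-2.0, 0.0, 3.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.abs(x)
print(tape.gradient(y, x).numpy())  # [-1.  0.  1.]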
Example 5: _ComplexAbsGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def _ComplexAbsGrad(op, grad):
  """Returns the gradient of ComplexAbs."""
  # TODO(b/27786104): The cast to complex could be removed once arithmetic
  # supports mixtures of complex64 and real values.
  return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
          math_ops.sign(op.inputs[0]))
Example 6: sign
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def sign(x):
  """Element-wise sign.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.sign(x)
Example 7: _sample_n
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def _sample_n(self, n, seed=None):
  shape = array_ops.concat(([n], self.batch_shape()), 0)
  # Sample uniformly at random from the open interval (-1, 1).
  uniform_samples = random_ops.random_uniform(
      shape=shape,
      minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                          self.dtype.as_numpy_dtype(0.)),
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  return (self.loc - self.scale * math_ops.sign(uniform_samples) *
          math_ops.log(1. - math_ops.abs(uniform_samples)))
Example 8: _cdf
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def _cdf(self, x):
  y = x - self.loc
  return (0.5 + 0.5 * math_ops.sign(y) *
          (1. - math_ops.exp(-math_ops.abs(y) / self.scale)))
Example 9: setUp
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def setUp(self):
  super(CoreUnaryOpsTest, self).setUp()
  self.ops = [
      ('abs', operator.abs, math_ops.abs, core.abs_function),
      ('neg', operator.neg, math_ops.negative, core.neg),
      # TODO(shoyer): add unary + to core TensorFlow
      ('pos', None, None, None),
      ('sign', None, math_ops.sign, core.sign),
      ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
      ('square', None, math_ops.square, core.square),
      ('round', None, math_ops.round, core.round_function),
      ('sqrt', None, math_ops.sqrt, core.sqrt),
      ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
      ('log', None, math_ops.log, core.log),
      ('exp', None, math_ops.exp, core.exp),
      ('log', None, math_ops.log, core.log),
      ('ceil', None, math_ops.ceil, core.ceil),
      ('floor', None, math_ops.floor, core.floor),
      ('cos', None, math_ops.cos, core.cos),
      ('sin', None, math_ops.sin, core.sin),
      ('tan', None, math_ops.tan, core.tan),
      ('acos', None, math_ops.acos, core.acos),
      ('asin', None, math_ops.asin, core.asin),
      ('atan', None, math_ops.atan, core.atan),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
      ('digamma', None, math_ops.digamma, core.digamma),
      ('erf', None, math_ops.erf, core.erf),
      ('erfc', None, math_ops.erfc, core.erfc),
      ('lgamma', None, math_ops.lgamma, core.lgamma),
  ]
  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  self.test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
Example 10: _sample_n
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def _sample_n(self, n, seed=None):
  shape = array_ops.concat(0, ([n], self.batch_shape()))
  # Sample uniformly at random from the open interval (-1, 1).
  uniform_samples = random_ops.random_uniform(
      shape=shape,
      minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                          self.dtype.as_numpy_dtype(0.)),
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  return (self.loc - self.scale * math_ops.sign(uniform_samples) *
          math_ops.log(1. - math_ops.abs(uniform_samples)))
Example 11: modrelu
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def modrelu(z, b, comp):
  if comp:
    z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) +
                           math_ops.square(math_ops.imag(z))) + 0.00001
    step1 = nn_ops.bias_add(z_norm, b)
    step2 = math_ops.complex(nn_ops.relu(step1), array_ops.zeros_like(z_norm))
    step3 = z / math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
  else:
    z_norm = math_ops.abs(z) + 0.00001
    step1 = nn_ops.bias_add(z_norm, b)
    step2 = nn_ops.relu(step1)
    step3 = math_ops.sign(z)
  return math_ops.multiply(step3, step2)
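This is the modReLU nonlinearity from the unitary/complex RNN literature: f(z) = (z / |z|) * relu(|z| + b). It thresholds the magnitude with a learned bias b while preserving the phase of z (or its sign, in the real branch). A hypothetical NumPy re-implementation for intuition, not the code above:

import numpy as np

def modrelu_np(z, b, eps=1e-5):
  # Scale z by relu(|z| + b) / |z|: magnitude is thresholded, phase is kept.
  z_norm = np.abs(z) + eps
  return (z / z_norm) * np.maximum(z_norm + b, 0.0)

z = np.array([0.3 + 0.4j, -2.0 + 0.0j])
print(modrelu_np(z, b=-0.6))  # the small entry collapses to 0, the large one survives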
Example 12: modrelu
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def modrelu(z, b, comp):
  if comp:
    z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) +
                           math_ops.square(math_ops.imag(z))) + 0.00001
    step1 = nn_ops.bias_add(z_norm, b)
    step2 = math_ops.complex(nn_ops.relu(step1), array_ops.zeros_like(z_norm))
    step3 = z / math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
  else:
    z_norm = math_ops.abs(z) + 0.00001
    step1 = nn_ops.bias_add(z_norm, b)
    step2 = nn_ops.relu(step1)
    step3 = math_ops.sign(z)
  return math_ops.multiply(step3, step2)
Example 13: get_grow_tensor
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def get_grow_tensor(self, weight, method):
  if method.startswith('grad_scale'):
    masked_grad = self._weight2masked_grads[weight.name]
    divisor = extract_number(method)
    grow_tensor = masked_grad / divisor
  elif method.startswith('grad_sign'):
    masked_grad_sign = math_ops.sign(self._weight2masked_grads[weight.name])
    divisor = extract_number(method)
    grow_tensor = masked_grad_sign / divisor
  else:
    grow_tensor = super(
        SparseRigLOptimizer, self).get_grow_tensor(weight, method)
  return grow_tensor
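Read this way, a method string like 'grad_sign_100' (the exact naming convention is an assumption on my part) would grow new connections with sign(gradient) / 100, keeping only the direction of the masked gradient. A standalone sketch with a hypothetical stand-in for extract_number:

import re
import numpy as np

def extract_number(method):
  # Hypothetical stand-in for the helper used above: pull the trailing
  # number out of strings like 'grad_sign_100'.
  match = re.search(r'(\d+(?:\.\d+)?)$', method)
  return float(match.group(1)) if match else 1.0

masked_grad = np.array([0.3, -1.2, 0.0])
print(np.sign(masked_grad) / extract_number('grad_sign_100'))  # [ 0.01 -0.01  0.  ]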
Example 14: _apply_dense
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def _apply_dense(self, grad, var):
  lr_scale = self.get_slot(var, "lr_scale")
  momentum = self.get_slot(var, "momentum")
  gbar = self.get_slot(var, "gbar")
  gain = self.get_slot(var, "gain")
  counter = self.get_slot(var, "counter")
  counter_updated = state_ops.assign(counter, counter + 1)
  # The lr_scale update uses normalized grad and momentum to be independent
  # of dimension.
  normalized_grad = grad / (linalg_ops.norm(grad) + 1e-10)
  normalized_momentum = momentum / (linalg_ops.norm(momentum) + 1e-10)
  # Apply EG updates on lr_scale:
  # grad_lr_scale = -inner_product(current_grad, old_momentum)
  # lr_scale <- lr_scale * exp(-scale_learning_rate * grad_lr_scale)
  lr_scale_unnormalized_updated = clip_ops.clip_by_value(
      lr_scale * math_ops.exp(
          self._scale_learning_rate * math_ops.reduce_sum(grad * momentum)),
      self._min_scale, self._max_scale)
  lr_scale_normalized_updated = clip_ops.clip_by_value(
      lr_scale * math_ops.exp(self._scale_learning_rate * math_ops.reduce_sum(
          normalized_grad * normalized_momentum)), self._min_scale,
      self._max_scale)
  lr_scale_updated = state_ops.assign(
      lr_scale,
      array_ops.where(self._use_directions, lr_scale_normalized_updated,
                      lr_scale_unnormalized_updated))
  # Remove the bias of zero initialization in gbar.
  corrected_gbar = gbar / (
      1.0 - self._beta**math_ops.maximum(counter_updated - 1, 1))
  # Apply EG updates on gain:
  # grad_gain = -current_grad * old_gbar
  # gain <- gain * exp(-gain_learning_rate * grad_gain)
  gain_unnormalized_updated = clip_ops.clip_by_value(
      gain * math_ops.exp(self._gain_learning_rate * grad * corrected_gbar),
      self._min_gain, self._max_gain)
  # Normalized update uses sign(grad) * sign(gbar) as a proxy for grad_gain.
  gain_normalized_updated = clip_ops.clip_by_value(
      gain * math_ops.exp(self._gain_learning_rate * math_ops.sign(grad) *
                          math_ops.sign(gbar)), self._min_gain,
      self._max_gain)
  gain_updated = state_ops.assign(
      gain,
      array_ops.where(self._use_signs, gain_normalized_updated,
                      gain_unnormalized_updated))
  scaled_g = self._learning_rate_tensor * gain_updated * grad
  with ops.control_dependencies([lr_scale_updated, scaled_g]):
    momentum_updated = state_ops.assign(
        momentum, self._momentum_tensor * momentum + scaled_g)
    gbar_updated = state_ops.assign(
        gbar, self._beta * gbar + (1.0 - self._beta) * grad)
  with ops.control_dependencies([gbar_updated]):
    return state_ops.assign_sub(var, lr_scale_updated * momentum_updated)
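In equation form, the sign-based gain update is an exponentiated-gradient step (my reading of the comments above, with eta the gain learning rate, g the current gradient, and g-bar the debiased running average):

    \mathrm{gain} \leftarrow \operatorname{clip}\bigl(\mathrm{gain} \cdot \exp\bigl(\eta\, \operatorname{sign}(g) \odot \operatorname{sign}(\bar g)\bigr),\ \mathrm{gain}_{\min},\ \mathrm{gain}_{\max}\bigr)

Coordinates where the gradient agrees in sign with its history get their gain multiplied up; coordinates that disagree get shrunk, all within the clip bounds.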
Example 15: build
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import sign [as alias]
def build(self, inputs_shape):
  if inputs_shape[1].value is None:
    raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                     % inputs_shape)

  input_depth = inputs_shape[1].value
  if self._input_initializer is None:
    self._input_initializer = init_ops.random_normal_initializer(
        mean=0.0, stddev=0.001)
  self._input_kernel = self.add_variable(
      "input_kernel",
      shape=[input_depth, self._num_units],
      initializer=self._input_initializer)

  if self._recurrent_initializer is None:
    self._recurrent_initializer = init_ops.constant_initializer(1.)
  self._recurrent_kernel = self.add_variable(
      "recurrent_kernel",
      shape=[self._num_units],
      initializer=self._recurrent_initializer)

  # Clip the absolute values of the recurrent weights to the specified
  # minimum.
  if self._recurrent_min_abs:
    abs_kernel = math_ops.abs(self._recurrent_kernel)
    min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
    self._recurrent_kernel = math_ops.multiply(
        math_ops.sign(self._recurrent_kernel), min_abs_kernel)

  # Clip the absolute values of the recurrent weights to the specified
  # maximum.
  if self._recurrent_max_abs:
    self._recurrent_kernel = clip_ops.clip_by_value(
        self._recurrent_kernel, -self._recurrent_max_abs,
        self._recurrent_max_abs)

  self._bias = self.add_variable(
      "bias",
      shape=[self._num_units],
      initializer=init_ops.zeros_initializer(dtype=self.dtype))
  self.built = True
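The min-abs step is a sign-preserving magnitude floor: multiplying max(|w|, min_abs) by sign(w) pushes small recurrent weights out to the threshold without flipping their direction, and exactly-zero weights stay zero since sign(0) = 0. A NumPy sketch of the same idea:

import numpy as np

w = np.array([0.05, -0.01, 0.9, -0.4])
min_abs = 0.1
print(np.sign(w) * np.maximum(np.abs(w), min_abs))  # [ 0.1 -0.1  0.9 -0.4]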