This article collects typical usage examples of the Python method tensorflow.python.ops.math_ops.rsqrt. If you have been wondering what math_ops.rsqrt does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage of the module it belongs to, tensorflow.python.ops.math_ops.
The following presents 15 code examples of math_ops.rsqrt, sorted by popularity by default.
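As a quick orientation before the examples: math_ops.rsqrt(x) computes the element-wise reciprocal square root, 1 / sqrt(x). A minimal sketch of the op in isolation (assuming a TF 1.x graph-and-session setup, which matches the examples below; in TF 2.x the same op runs eagerly as tf.math.rsqrt):

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1.0, 4.0, 16.0])
y = math_ops.rsqrt(x)  # element-wise 1 / sqrt(x)

with tf.Session() as sess:
  print(sess.run(y))                      # [1.0, 0.5, 0.25]
print(1.0 / np.sqrt([1.0, 4.0, 16.0]))    # same values via NumPy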
Example 1: _sample_n

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def _sample_n(self, n, seed=None):
  # The sampling method comes from the fact that if:
  #   X ~ Normal(0, 1)
  #   Z ~ Chi2(df)
  #   Y = X / sqrt(Z / df)
  # then:
  #   Y ~ StudentT(df).
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
  df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
  gamma_sample = random_ops.random_gamma(
      [n],
      0.5 * df,
      beta=0.5,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, salt="student_t"))
  samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
  return samples * self.scale + self.loc  # Abs(scale) not wanted.
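To see the construction at work, here is a small NumPy verification sketch (not part of the original source; df, the sample count, and the seed are arbitrary). It uses the fact that Chi2(df) equals Gamma(shape=df/2, scale=2), which mirrors the random_gamma call above with beta=0.5:

import numpy as np

df, n = 5.0, 200000
rng = np.random.default_rng(0)
x = rng.standard_normal(n)                        # X ~ Normal(0, 1)
z = rng.gamma(shape=0.5 * df, scale=2.0, size=n)  # Z ~ Chi2(df)
y = x / np.sqrt(z / df)                           # Y ~ StudentT(df)
# For df > 2, Var(StudentT(df)) = df / (df - 2) = 5/3 here.
print(y.var())  # close to 1.667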
Example 2: LRSchedule

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def LRSchedule(global_step, d_model, warmup_steps=4000):
  if global_step is None:
    raise ValueError("global_step is required for learning_rate_schedule.")

  def deal_lr(global_step, d_model, warmup_steps):
    d_model = ops.convert_to_tensor(d_model, dtype=tf.float32)
    dtype = d_model.dtype
    warmup_steps = math_ops.cast(warmup_steps, dtype)
    global_step_recomp = math_ops.cast(global_step, dtype)
    arg1 = math_ops.rsqrt(global_step_recomp)
    arg2 = math_ops.multiply(global_step_recomp, math_ops.pow(warmup_steps, -1.5))
    return math_ops.multiply(math_ops.rsqrt(d_model), math_ops.minimum(arg1, arg2))

  return functools.partial(deal_lr, global_step, d_model, warmup_steps)
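This is the learning-rate schedule from "Attention Is All You Need": lr = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5). A NumPy sketch of the same formula, useful for checking the shape of the curve (d_model and warmup_steps are the paper's defaults):

import numpy as np

def transformer_lr(step, d_model=512, warmup_steps=4000):
  step = np.asarray(step, dtype=np.float64)
  return d_model ** -0.5 * np.minimum(step ** -0.5, step * warmup_steps ** -1.5)

# Rises linearly during warmup, peaks at step == warmup_steps, then decays
# proportionally to step**-0.5.
print(transformer_lr([1, 2000, 4000, 40000]))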
Example 3: _BatchNormWithGlobalNormalizationGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def _BatchNormWithGlobalNormalizationGrad(op, grad):
  """Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.

  We do not backprop anything for the mean and var intentionally as they are
  not being trained with backprop in the operation.

  Args:
    op: The BatchNormOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the BatchNormOp.

  Returns:
    dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
    dm: Backprop for mean, which is
        sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
    dv: Backprop for variance, which is
        sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
    db: Backprop for beta, which is grad reduced in all except the
        last dimension.
    dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
  """
  dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
      op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
      op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
  return dx, dm, dv, db, dg

Author: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 26 | Source: nn_grad.py
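For reference, the forward pass these gradients correspond to normalizes with the same rsqrt(v + epsilon) factor. A NumPy sketch using the docstring's notation (x, m, v, g, b; all values made up):

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]])              # input
m, v = x.mean(axis=0), x.var(axis=0)                # mean and variance
g, b = np.array([1.5, 0.5]), np.array([0.1, -0.1])  # gamma and beta
epsilon = 0.001

y = (x - m) * g / np.sqrt(v + epsilon) + b          # the rsqrt(v + epsilon) factor
print(y)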
Example 4: poincare_normalize

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def poincare_normalize(x, axis=1, epsilon=1e-5, name=None):
  """Project into the Poincare ball with norm <= 1.0 - epsilon.

  https://en.wikipedia.org/wiki/Poincare_ball_model

  Used in
  Poincare Embeddings for Learning Hierarchical Representations
  Maximilian Nickel, Douwe Kiela
  https://arxiv.org/pdf/1705.08039.pdf

  For a 1-D tensor with `axis = 0`, computes

             (x * (1 - epsilon)) / ||x||   if ||x|| > 1 - epsilon
    output =
             x                             otherwise

  For `x` with more dimensions, independently normalizes each 1-D slice along
  dimension `axis`.

  Args:
    x: A `Tensor`.
    axis: Axis along which to normalize. A scalar or a vector of integers.
    epsilon: A small deviation from the edge of the unit sphere for numerical
      stability.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same shape as `x`.
  """
  with ops.name_scope(name, 'poincare_normalize', [x]) as name:
    x = ops.convert_to_tensor(x, name='x')
    square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
    x_inv_norm = math_ops.rsqrt(square_sum)
    x_inv_norm = math_ops.minimum((1. - epsilon) * x_inv_norm, 1.)
    return math_ops.multiply(x, x_inv_norm, name=name)
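A NumPy rendering of the same projection, for intuition (a sketch, not the TF op; the rows are chosen so one lies outside the unit ball and one inside):

import numpy as np

def poincare_normalize_np(x, epsilon=1e-5):
  norm = np.sqrt((x ** 2).sum(axis=-1, keepdims=True))
  return x * np.minimum((1.0 - epsilon) / norm, 1.0)

x = np.array([[3.0, 4.0],    # norm 5.0 -> projected to norm 1 - epsilon
              [0.3, 0.4]])   # norm 0.5 -> left unchanged
print(poincare_normalize_np(x))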
Example 5: _apply_dense

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def _apply_dense(self, grad, var):
  # Calculates the preconditioner statistics for each tensor.
  partitioned_grads = TensorPartitioner.partition_tensor(
      grad, self._partition_info)
  shape = var.get_shape()
  fallback_to_diagonal = self._fallback_to_diagonal_for_shape(shape)

  precond_statistics_update = []
  if not fallback_to_diagonal:
    precond_statistics_update = self._updated_statistics(
        var, partitioned_grads)

  accumulator = self.get_slot(var, "accumulator")
  accumulator_updated = state_ops.assign_add(accumulator, grad * grad)
  accumulator_inv_sqrt = math_ops.rsqrt(accumulator_updated + 1e-30)
  if self._momentum > 0.0:
    scaled_g = (1.0 - self._momentum_tensor) * (grad * accumulator_inv_sqrt)
    gbar = self.get_slot(var, "momentum")
    gbar_updated = state_ops.assign_add(
        gbar, gbar * (self._momentum_tensor - 1.0) + scaled_g)
  else:
    gbar_updated = (grad * accumulator_inv_sqrt)

  if not fallback_to_diagonal:
    # Update the preconditioner statistics followed by computing the
    # preconditioned gradient.
    with ops.control_dependencies(precond_statistics_update):
      s = tf.cast(self._run_nondiagonal_update, tf.float32)
      preconditioned_grad = self._preconditioned_update(
          var, partitioned_grads, gbar_updated)
      # Slowly adapt from the diagonal to the preconditioned gradient.
      w = self._run_nondiagonal_update_warmup
      warmup_update = s * self._learning_rate_tensor * (
          w * preconditioned_grad + (1.0 - w) * gbar_updated)
      fallback_update = (1 - s) * (self._learning_rate_tensor * gbar_updated)
      return state_ops.assign_sub(var, warmup_update + fallback_update)
  else:
    return state_ops.assign_sub(var,
                                self._learning_rate_tensor * gbar_updated)
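The diagonal fallback path above is essentially Adagrad (with optional momentum): accumulate squared gradients and scale each update by rsqrt of the accumulator. A minimal NumPy sketch of that path alone (learning rate and loss are illustrative):

import numpy as np

var = np.array([1.0, -2.0])
accumulator = np.zeros_like(var)
lr, eps = 0.5, 1e-30

for _ in range(5):
  grad = 2.0 * var                               # gradient of sum(var**2)
  accumulator += grad * grad                     # accumulator slot update
  var -= lr * grad / np.sqrt(accumulator + eps)  # grad * rsqrt(accumulator)
print(var)  # moves toward the minimum at [0, 0]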
Example 6: clip_by_average_norm

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def clip_by_average_norm(t, clip_norm, name=None):
  """Clips tensor values to a maximum average L2-norm.

  Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
  normalizes `t` so that its average L2-norm is less than or equal to
  `clip_norm`. Specifically, if the average L2-norm is already less than or
  equal to `clip_norm`, then `t` is not modified. If the average L2-norm is
  greater than `clip_norm`, then this operation returns a tensor of the same
  type and shape as `t` with its values set to:

    `t * clip_norm / l2norm_avg(t)`

  In this case, the average L2-norm of the output tensor is `clip_norm`.

  This operation is typically used to clip gradients before applying them with
  an optimizer.

  Args:
    t: A `Tensor`.
    clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
    name: A name for the operation (optional).

  Returns:
    A clipped `Tensor`.
  """
  with ops.name_scope(name, "clip_by_average_norm", [t, clip_norm]) as name:
    t = ops.convert_to_tensor(t, name="t")

    # Calculate L2-norm per element, clip elements by ratio of clip_norm to
    # L2-norm per element.
    n_element = math_ops.cast(array_ops.size(t), dtypes.float32)
    l2norm_inv = math_ops.rsqrt(
        math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))
    tclip = array_ops.identity(
        t * clip_norm * math_ops.minimum(
            l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm),
        name=name)

  return tclip
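A usage sketch (TF 1.x; tf.clip_by_average_norm was removed in TF 2.x). With two elements and an L2-norm of 5, the average L2-norm is 2.5, so clipping to 1.0 rescales the tensor:

import tensorflow as tf

t = tf.constant([[3.0, 4.0]])                 # average L2-norm = 5 / 2 = 2.5
clipped = tf.clip_by_average_norm(t, clip_norm=1.0)

with tf.Session() as sess:
  print(sess.run(clipped))                    # [[1.2, 1.6]]: average norm 1.0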
Example 7: per_image_standardization

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def per_image_standardization(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0 / sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform images.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The standardized image with same shape as `image`.

  Raises:
    ValueError: if the shape of `image` is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.subtract(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image
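The same computation in plain NumPy, for reference (a sketch; the TF op additionally validates the 3-D shape):

import numpy as np

def per_image_standardization_np(image):
  image = image.astype(np.float32)
  stddev = image.std()
  min_stddev = 1.0 / np.sqrt(image.size)      # the rsqrt(num_pixels) floor
  return (image - image.mean()) / max(stddev, min_stddev)

img = np.arange(12.0).reshape(2, 2, 3)
out = per_image_standardization_np(img)
print(out.mean(), out.std())                  # approximately 0.0 and 1.0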
Example 8: _variance_scale_term

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def _variance_scale_term(self):
  """Helper to `_covariance` and `_variance` which computes a shared scale."""
  return math_ops.rsqrt(1. + self.total_concentration[..., array_ops.newaxis])
Example 9: per_image_standardization

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def per_image_standardization(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0 / sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform images.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The standardized image with same shape as `image`.

  Raises:
    ValueError: if the shape of `image` is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.subtract(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image
Example 10: l2_normalize

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def l2_normalize(x, dim, epsilon=1e-12, name=None):
  """Normalizes along dimension `dim` using an L2 norm.

  For a 1-D tensor with `dim = 0`, computes

    output = x / sqrt(max(sum(x**2), epsilon))

  For `x` with more dimensions, independently normalizes each 1-D slice along
  dimension `dim`.

  Args:
    x: A `Tensor`.
    dim: Dimension along which to normalize. A scalar or a vector of
      integers.
    epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
      divisor if `norm < sqrt(epsilon)`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same shape as `x`.
  """
  with ops.name_scope(name, "l2_normalize", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    square_sum = math_ops.reduce_sum(math_ops.square(x), dim, keep_dims=True)
    x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
    return math_ops.multiply(x, x_inv_norm, name=name)
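A quick NumPy check of what the op computes for a 1-D input (a sketch; epsilon as in the signature above):

import numpy as np

def l2_normalize_np(x, epsilon=1e-12):
  square_sum = (x ** 2).sum()
  return x / np.sqrt(max(square_sum, epsilon))  # x * rsqrt(max(...))

print(l2_normalize_np(np.array([3.0, 4.0])))    # [0.6, 0.8], unit L2 norm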
Example 11: l2_normalize

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def l2_normalize(x, dim, epsilon=1e-12, name=None):
  """Normalizes along dimension `dim` using an L2 norm.

  For a 1-D tensor with `dim = 0`, computes

    output = x / sqrt(max(sum(x**2), epsilon))

  For `x` with more dimensions, independently normalizes each 1-D slice along
  dimension `dim`.

  Args:
    x: A `Tensor`.
    dim: Dimension along which to normalize. A scalar or a vector of
      integers.
    epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
      divisor if `norm < sqrt(epsilon)`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same shape as `x`.
  """
  with ops.name_scope(name, "l2_normalize", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    square_sum = math_ops.reduce_sum(math_ops.square(x), dim, keep_dims=True)
    x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
    return math_ops.mul(x, x_inv_norm, name=name)
Example 12: call

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def call(self, inputs):
  inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
  ndim = self._input_rank

  if self.rectify:
    inputs = nn.relu(inputs)

  # Compute normalization pool.
  if ndim == 2:
    norm_pool = math_ops.matmul(math_ops.square(inputs), self.gamma)
    norm_pool = nn.bias_add(norm_pool, self.beta)
  elif self.data_format == "channels_last" and ndim <= 5:
    shape = self.gamma.shape.as_list()
    gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)
    norm_pool = nn.convolution(math_ops.square(inputs), gamma, "VALID")
    norm_pool = nn.bias_add(norm_pool, self.beta)
  else:  # generic implementation
    # This puts channels in the last dimension regardless of input.
    norm_pool = math_ops.tensordot(
        math_ops.square(inputs), self.gamma, [[self._channel_axis()], [0]])
    norm_pool += self.beta
    if self.data_format == "channels_first":
      # Return to channels_first format if necessary.
      axes = list(range(ndim - 1))
      axes.insert(1, ndim - 1)
      norm_pool = array_ops.transpose(norm_pool, axes)

  if self.inverse:
    norm_pool = math_ops.sqrt(norm_pool)
  else:
    norm_pool = math_ops.rsqrt(norm_pool)
  outputs = inputs * norm_pool

  if not context.executing_eagerly():
    outputs.set_shape(self.compute_output_shape(inputs.shape))
  return outputs
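For intuition, the ndim == 2 branch reduces to a divisive normalization: each squared input is pooled through gamma, offset by beta, and the input is divided by the square root of the pool. A NumPy sketch with illustrative gamma/beta values (rectify=False, inverse=False):

import numpy as np

x = np.array([[1.0, 2.0]])             # one example, two channels
gamma = np.array([[0.1, 0.0],
                  [0.0, 0.1]])
beta = np.array([1.0, 1.0])

norm_pool = x ** 2 @ gamma + beta      # the matmul branch for ndim == 2
print(x / np.sqrt(norm_pool))          # inverse=False: multiply by rsqrt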
Example 13: _test_rsqrt

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def _test_rsqrt(data):
  """One iteration of rsqrt."""
  return _test_unary_elemwise(math_ops.rsqrt, data)
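_test_unary_elemwise is a helper from the surrounding test suite (presumably it builds a one-op graph around the given op and compares backend outputs against TensorFlow). A self-contained check in the same spirit (a sketch, TF 1.x session style):

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops

data = np.array([1.0, 4.0, 9.0], dtype=np.float32)
with tf.Session() as sess:
  result = sess.run(math_ops.rsqrt(tf.constant(data)))
np.testing.assert_allclose(result, 1.0 / np.sqrt(data), rtol=1e-6)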
Example 14: call

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def call(self, inputs, training=None):
  if training is None:
    training = K.learning_phase()

  conv_out = super(_DepthwiseConvBatchNorm2D, self).call(inputs)

  self.batchnorm.call(conv_out)

  folded_conv_kernel_multiplier = self.batchnorm.gamma * math_ops.rsqrt(
      self.batchnorm.moving_variance + self.batchnorm.epsilon)

  folded_conv_bias = math_ops.subtract(
      self.batchnorm.beta,
      self.batchnorm.moving_mean * folded_conv_kernel_multiplier,
      name='folded_conv_bias')

  depthwise_weights_shape = [
      self.depthwise_kernel.get_shape().as_list()[2],
      self.depthwise_kernel.get_shape().as_list()[3]
  ]
  folded_conv_kernel_multiplier = array_ops.reshape(
      folded_conv_kernel_multiplier, depthwise_weights_shape)

  folded_conv_kernel = math_ops.mul(
      folded_conv_kernel_multiplier,
      self.depthwise_kernel,
      name='folded_conv_kernel')

  if self.is_quantized:
    folded_conv_kernel = self._apply_weight_quantizer(training,
                                                      folded_conv_kernel)

  # TODO(alanchiao): this is an internal API.
  # See if Keras would make this public, like
  # backend.conv2d is.
  #
  # From DepthwiseConv2D layer call() function.
  folded_conv_out = K.depthwise_conv2d(
      inputs,
      folded_conv_kernel,
      strides=self.strides,
      padding=self.padding,
      dilation_rate=self.dilation_rate,
      data_format=self.data_format,
  )

  outputs = K.bias_add(
      folded_conv_out, folded_conv_bias, data_format=self.data_format)

  if self.post_activation is not None:
    outputs = self.post_activation(outputs)
  if self.is_quantized:
    outputs = self._apply_activation_quantizer(training, outputs)
  return outputs
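The folding relies on the identity BN(conv(x)) == multiplier * conv(x) + folded_bias, with multiplier = gamma * rsqrt(moving_variance + epsilon). A scalar NumPy check of that identity (all statistics made up):

import numpy as np

gamma, beta = 1.5, 0.2
moving_mean, moving_variance, epsilon = 0.3, 4.0, 1e-3
conv_out = 2.7                          # stand-in for a depthwise-conv output

bn_out = gamma * (conv_out - moving_mean) / np.sqrt(moving_variance + epsilon) + beta

multiplier = gamma / np.sqrt(moving_variance + epsilon)  # gamma * rsqrt(v + eps)
folded_bias = beta - moving_mean * multiplier
print(np.isclose(bn_out, conv_out * multiplier + folded_bias))  # True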
Example 15: clip_by_norm

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or:              from tensorflow.python.ops.math_ops import rsqrt [as alias]
def clip_by_norm(t, clip_norm, axes=None, name=None):
  """Clips tensor values to a maximum L2-norm.

  Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
  normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,
  along the dimensions given in `axes`. Specifically, in the default case
  where all dimensions are used for calculation, if the L2-norm of `t` is
  already less than or equal to `clip_norm`, then `t` is not modified. If
  the L2-norm is greater than `clip_norm`, then this operation returns a
  tensor of the same type and shape as `t` with its values set to:

    `t * clip_norm / l2norm(t)`

  In this case, the L2-norm of the output tensor is `clip_norm`.

  As another example, if `t` is a matrix and `axes == [1]`, then each row
  of the output will have L2-norm equal to `clip_norm`. If `axes == [0]`
  instead, each column of the output will be clipped.

  This operation is typically used to clip gradients before applying them with
  an optimizer.

  Args:
    t: A `Tensor`.
    clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
    axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions
      to use for computing the L2-norm. If `None` (the default), uses all
      dimensions.
    name: A name for the operation (optional).

  Returns:
    A clipped `Tensor`.
  """
  with ops.name_scope(name, "clip_by_norm", [t, clip_norm]) as name:
    t = ops.convert_to_tensor(t, name="t")

    # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm.
    l2norm_inv = math_ops.rsqrt(
        math_ops.reduce_sum(t * t, axes, keep_dims=True))
    intermediate = t * clip_norm
    # Assert that the shape is compatible with the initial shape,
    # to prevent unintentional broadcasting.
    _ = t.shape.merge_with(intermediate.shape)
    tclip = array_ops.identity(
        intermediate * math_ops.minimum(
            l2norm_inv, constant_op.constant(1.0, dtype=t.dtype) / clip_norm),
        name=name)

  return tclip
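A usage sketch (TF 1.x session style; tf.clip_by_norm also exists in TF 2.x). A tensor with L2-norm 5 is rescaled when the limit is smaller and left alone when it is larger:

import tensorflow as tf

t = tf.constant([[3.0, 4.0]])                           # L2-norm = 5.0
with tf.Session() as sess:
  print(sess.run(tf.clip_by_norm(t, clip_norm=1.0)))    # [[0.6, 0.8]]
  print(sess.run(tf.clip_by_norm(t, clip_norm=10.0)))   # unchanged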