This article compiles typical usage examples of the Python method tensorflow.compat.v1.rsqrt. If you are wondering what v1.rsqrt does, or how and where to use it, the curated code samples below should help. You can also explore the other methods of the module tensorflow.compat.v1.
The following presents 15 code examples of v1.rsqrt, ordered by popularity by default.
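Before the examples, a minimal sketch of what the method computes may help: rsqrt is the element-wise reciprocal square root, 1 / sqrt(x). The snippet below is illustrative only; it assumes TensorFlow 2.x with the v1 compatibility API and runs in graph mode, like the examples that follow.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # graph mode, as in the examples below

x = tf.constant([1.0, 4.0, 16.0])
y = tf.rsqrt(x)  # element-wise 1 / sqrt(x)
with tf.Session() as sess:
    print(sess.run(y))  # [1.   0.5  0.25]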
Example 1: diet_expert
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import rsqrt [as an alias]
def diet_expert(x, hidden_size, params):
  """A two-layer feed-forward network with relu activation on hidden layer.

  Uses diet variables.
  Recomputes hidden layer on backprop to save activation memory.

  Args:
    x: a Tensor with shape [batch, io_size]
    hidden_size: an integer
    params: a diet variable HParams object.

  Returns:
    a Tensor with shape [batch, io_size]
  """

  @fn_with_diet_vars(params)
  def diet_expert_internal(x):
    dim = x.get_shape().as_list()[-1]
    h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False)
    y = tf.layers.dense(h, dim, use_bias=False)
    y *= tf.rsqrt(tf.to_float(dim * hidden_size))
    return y

  return diet_expert_internal(x)
Example 2: group_norm
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
  """Group normalization as in https://arxiv.org/abs/1803.08494."""
  x_shape = shape_list(x)
  if filters is None:
    filters = x_shape[-1]
  assert len(x_shape) == 4
  assert filters % num_groups == 0
  # Prepare variables.
  scale = tf.get_variable(
      "group_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "group_norm_bias", [filters], initializer=tf.zeros_initializer())
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  # Reshape and compute group norm.
  x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
  # Calculate mean and variance on heights, width, channels (not groups).
  mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return tf.reshape(norm_x, x_shape) * scale + bias
Example 3: apply_batch_norm
def apply_batch_norm(self, wrapper, mean, variance, scale, bias, epsilon):
  # Element-wise multiplier.
  multiplier = tf.rsqrt(variance + epsilon)
  if scale is not None:
    multiplier *= scale
  w = multiplier
  # Element-wise bias.
  b = -multiplier * mean
  if bias is not None:
    b += bias
  b = tf.squeeze(b, axis=0)
  # Because the scale might be negative, we need to apply a strategy similar
  # to linear.
  c = (self.lower + self.upper) / 2.
  r = (self.upper - self.lower) / 2.
  c = tf.multiply(c, w) + b
  r = tf.multiply(r, tf.abs(w))
  return IntervalBounds(c - r, c + r)
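The centre/radius computation above is plain interval arithmetic: for y = w * x + b with x in [lower, upper], the image is [c - r, c + r], where c = w * (lower + upper) / 2 + b and r = |w| * (upper - lower) / 2. Taking |w| is what keeps the bounds valid when the batch-norm scale is negative. A small standalone check with hypothetical numbers:

w, b = -2.0, 1.0                    # a negative multiplier, as scale < 0 would produce
lower, upper = -1.0, 3.0
c = w * (lower + upper) / 2.0 + b   # new centre: -1.0
r = abs(w) * (upper - lower) / 2.0  # new radius: 4.0
print(c - r, c + r)                 # -5.0 3.0, i.e. [w * upper + b, w * lower + b]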
Example 4: apply_norm
def apply_norm(x, epsilon=1e-6):
  """Applies layer normalization to x.

  Based on "Layer Normalization":
  https://arxiv.org/abs/1607.06450

  Args:
    x: <float>[..., input_size]
    epsilon: Used to avoid division by 0.

  Returns:
    <float>[..., input_size]
  """
  input_size = x.get_shape()[-1]
  with tf.variable_scope("layer_norm", values=[x]):
    scale = tf.get_variable(
        "layer_norm_scale", [input_size], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "layer_norm_bias", [input_size], initializer=tf.zeros_initializer())
    mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
    variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
    norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
    result = norm_x * scale + bias
    return result
Example 5: _learning_rate_default
def _learning_rate_default(self, multiply_by_parameter_scale):
  learning_rate = tf.minimum(tf.rsqrt(step_num() + 1.0), 0.01)
  if not multiply_by_parameter_scale:
    learning_rate *= 0.05
  return learning_rate
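For intuition: tf.rsqrt(step_num() + 1.0) falls below the 0.01 cap once step + 1 exceeds 10^4, so this is a constant rate of 0.01 for roughly the first 10,000 steps followed by 1/sqrt(step) decay (the Adafactor-style schedule). A standalone NumPy sketch of the same curve, with illustrative step values and step_num() assumed to return the current global step:

import numpy as np

for step in [0, 1000, 9999, 100000, 1000000]:
    lr = min(1.0 / np.sqrt(step + 1.0), 0.01)
    print(step, lr)  # 0.01 up to ~step 10k, then ~1 / sqrt(step)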
Example 6: standardize_images
def standardize_images(x):
  """Image standardization on batches and videos."""
  with tf.name_scope("standardize_images", values=[x]):
    x_shape = shape_list(x)
    x = to_float(tf.reshape(x, [-1] + x_shape[-3:]))
    x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    x_variance = tf.reduce_mean(
        tf.squared_difference(x, x_mean), axis=[1, 2], keepdims=True)
    num_pixels = to_float(x_shape[-2] * x_shape[-3])
    x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
    return tf.reshape(x, x_shape)
Example 7: l2_norm
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalization with l2 norm."""
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse):
    scale = tf.get_variable(
        "l2_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "l2_norm_bias", [filters], initializer=tf.zeros_initializer())
    epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
    mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
    l2norm = tf.reduce_sum(
        tf.squared_difference(x, mean), axis=[-1], keepdims=True)
    norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
    return norm_x * scale + bias
Example 8: ae_latent_softmax
def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams):
  """Latent prediction and loss.

  Args:
    latents_pred: Tensor of shape [..., depth].
    latents_discrete_hot: Tensor of shape [..., vocab_size].
    vocab_size: an int representing the vocab size.
    hparams: HParams.

  Returns:
    sample: Tensor of shape [...], a sample from a multinomial distribution.
    loss: Tensor of shape [...], the softmax cross-entropy.
  """
  with tf.variable_scope("latent_logits"):
    latents_logits = tf.layers.dense(latents_pred, vocab_size,
                                     name="logits_dense")
    if hparams.logit_normalization:
      latents_logits *= tf.rsqrt(1e-8 +
                                 tf.reduce_mean(tf.square(latents_logits)))
    loss = tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=latents_discrete_hot, logits=latents_logits)

    # TODO(trandustin): tease this out from ae_latent_softmax.
    # We use just the loss portion to anchor prior / encoder on text.
    sample = multinomial_sample(latents_logits,
                                vocab_size,
                                hparams.sampling_method,
                                hparams.sampling_temp)
    return sample, loss
Example 9: preprocess_example
def preprocess_example(self, example, mode, hparams):
  p = hparams
  if p.audio_preproc_in_bottom:
    example["inputs"] = tf.expand_dims(
        tf.expand_dims(example["waveforms"], -1), -1)
  else:
    waveforms = tf.expand_dims(example["waveforms"], 0)
    mel_fbanks = common_audio.compute_mel_filterbank_features(
        waveforms,
        sample_rate=p.audio_sample_rate,
        dither=p.audio_dither,
        preemphasis=p.audio_preemphasis,
        frame_length=p.audio_frame_length,
        frame_step=p.audio_frame_step,
        lower_edge_hertz=p.audio_lower_edge_hertz,
        upper_edge_hertz=p.audio_upper_edge_hertz,
        num_mel_bins=p.audio_num_mel_bins,
        apply_mask=False)
    if p.audio_add_delta_deltas:
      mel_fbanks = common_audio.add_delta_deltas(mel_fbanks)
    fbank_size = common_layers.shape_list(mel_fbanks)
    assert fbank_size[0] == 1
    # This replaces CMVN estimation on data.
    var_epsilon = 1e-09
    mean = tf.reduce_mean(mel_fbanks, keepdims=True, axis=1)
    variance = tf.reduce_mean(tf.squared_difference(mel_fbanks, mean),
                              keepdims=True, axis=1)
    mel_fbanks = (mel_fbanks - mean) * tf.rsqrt(variance + var_epsilon)
    # Later models like to flatten the two spatial dims. Instead, we add a
    # unit spatial dim and flatten the frequencies and channels.
    example["inputs"] = tf.concat([
        tf.reshape(mel_fbanks, [fbank_size[1], fbank_size[2], fbank_size[3]]),
        tf.zeros((p.num_zeropad_frames, fbank_size[2], fbank_size[3]))], 0)
  if not p.audio_keep_example_waveforms:
    del example["waveforms"]
  return super(SpeechRecognitionProblem, self
              ).preprocess_example(example, mode, hparams)
Example 10: layer_norm
def layer_norm(input_tensor, name=None, epsilon=1e-5):
    """Run layer normalization on the last dimension of the tensor."""
    name2use = f'LayerNorm_{name}' if name is not None else name
    with tf.variable_scope(name2use, default_name='LayerNorm'):
        dim = input_tensor.shape[-1].value
        gamma = tf.get_variable('gamma', [dim], initializer=tf.constant_initializer(1))
        beta = tf.get_variable('beta', [dim], initializer=tf.constant_initializer(0))
        mean = tf.reduce_mean(input_tensor, axis=-1, keepdims=True)
        # NB: despite the name, `std` holds the variance; tf.rsqrt below
        # supplies the square root.
        std = tf.reduce_mean(tf.square(input_tensor - mean), axis=-1, keepdims=True)
        input_tensor = (input_tensor - mean) * tf.rsqrt(std + epsilon)
        input_tensor = input_tensor * gamma + beta
    return input_tensor
Example 11: layer_norm_all
def layer_norm_all(h,
                   batch_size,
                   base,
                   num_units,
                   scope='layer_norm',
                   reuse=False,
                   gamma_start=1.0,
                   epsilon=1e-3,
                   use_bias=True):
  """Layer Norm (faster version, but not using defun)."""
  # Performs layer norm on multiple bases at once (i.e. i, g, j, o for an LSTM).
  # Reshapes h to perform layer norm in parallel.
  h_reshape = tf.reshape(h, [batch_size, base, num_units])
  mean = tf.reduce_mean(h_reshape, [2], keep_dims=True)
  var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True)
  epsilon = tf.constant(epsilon)
  rstd = tf.rsqrt(var + epsilon)
  h_reshape = (h_reshape - mean) * rstd
  # Reshape back to the original shape.
  h = tf.reshape(h_reshape, [batch_size, base * num_units])
  with tf.variable_scope(scope):
    if reuse:
      tf.get_variable_scope().reuse_variables()
    gamma = tf.get_variable(
        'ln_gamma', [4 * num_units],
        initializer=tf.constant_initializer(gamma_start))
    if use_bias:
      beta = tf.get_variable(
          'ln_beta', [4 * num_units], initializer=tf.constant_initializer(0.0))
  if use_bias:
    return gamma * h + beta
  return gamma * h
Example 12: layer_norm
def layer_norm(x,
               num_units,
               scope='layer_norm',
               reuse=False,
               gamma_start=1.0,
               epsilon=1e-3,
               use_bias=True):
  """Calculate layer norm."""
  axes = [1]
  mean = tf.reduce_mean(x, axes, keep_dims=True)
  x_shifted = x - mean
  var = tf.reduce_mean(tf.square(x_shifted), axes, keep_dims=True)
  inv_std = tf.rsqrt(var + epsilon)
  with tf.variable_scope(scope):
    if reuse:
      tf.get_variable_scope().reuse_variables()
    gamma = tf.get_variable(
        'ln_gamma', [num_units],
        initializer=tf.constant_initializer(gamma_start))
    if use_bias:
      beta = tf.get_variable(
          'ln_beta', [num_units], initializer=tf.constant_initializer(0.0))
  output = gamma * x_shifted * inv_std
  if use_bias:
    output += beta
  return output
Example 13: pixel_norm
def pixel_norm(images, epsilon=1.0e-8):
  """Pixel normalization.

  For each pixel a[i,j,k] of an image in HWC format, normalize its value to
  b[i,j,k] = a[i,j,k] / SQRT(SUM_k(a[i,j,k]^2) / C + eps).

  Args:
    images: A 4D `Tensor` of NHWC format.
    epsilon: A small positive number to avoid division by zero.

  Returns:
    A 4D `Tensor` with pixel-wise normalized channels.
  """
  return images * tf.rsqrt(
      tf.reduce_mean(tf.square(images), axis=3, keepdims=True) + epsilon)
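A quick sanity check of that invariant, reusing pixel_norm from above (shapes and the fixed seed are arbitrary): after normalization, the mean square over the channel axis should be approximately 1 for every pixel.

import numpy as np

imgs = tf.constant(np.random.RandomState(0).rand(2, 4, 4, 3), dtype=tf.float32)
normed = pixel_norm(imgs)
mean_sq = tf.reduce_mean(tf.square(normed), axis=3)  # ~1.0 everywhere
with tf.Session() as sess:
    print(np.allclose(sess.run(mean_sq), 1.0, atol=1e-4))  # True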
Example 14: norm
def norm(x, scope, *, axis=-1, epsilon=1e-5):
    """Normalize to mean = 0, std = 1, then do a diagonal affine transform."""
    with tf.variable_scope(scope):
        n_state = x.shape[-1]
        g = tf.get_variable(
            'g', [n_state], initializer=tf.constant_initializer(1))
        b = tf.get_variable(
            'b', [n_state], initializer=tf.constant_initializer(0))
        u = tf.reduce_mean(x, axis=axis, keepdims=True)
        s = tf.reduce_mean(tf.square(x - u), axis=axis, keepdims=True)
        x = (x - u) * tf.rsqrt(s + epsilon)
        x = x * g + b
        return x
Example 15: test_forward_rsqrt
def test_forward_rsqrt():
    """Test Rsqrt."""
    np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
        tf.rsqrt(in_data, name="rsqrt")
        compare_tf_with_tvm([np_data], ['in_data:0'], 'rsqrt:0')