This article collects typical usage examples of the tensorflow.compat.v1.abs method in Python. If you have been wondering what exactly Python's v1.abs does, or how to use it, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.compat.v1.
The following presents 15 code examples of the v1.abs method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def __init__(self, pad_mask):
"""Compute and store the location of the padding.
Args:
pad_mask (tf.Tensor): Reference padding tensor of shape
[batch_size,length] or [dim_origin] (dim_origin=batch_size*length)
containing non-zero positive values to indicate padding location.
"""
self.nonpad_ids = None
self.dim_origin = None
with tf.name_scope("pad_reduce/get_ids"):
pad_mask = tf.reshape(pad_mask, [-1]) # Flatten the batch
# nonpad_ids contains the coordinates of zero rows (as pad_mask is
# float32, zero equality is checked with |x| < epsilon, with
# epsilon=1e-9 as the standard choice; pad_mask contains only
# non-negative values here, so tf.abs would be redundant)
self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))
self.dim_origin = tf.shape(pad_mask)[:1]
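This __init__ appears to come from a pad-removal helper (in tensor2tensor this logic lives in expert_utils.PadRemover). The index computation it performs can be exercised on its own; a minimal sketch:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # no-op on TF1; required for tf.Session under TF2

pad_mask = tf.constant([0., 1., 0., 0., 1.])        # 1.0 marks padding positions
nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))  # rows that are not padding
x = tf.constant([[1.], [2.], [3.], [4.], [5.]])
x_nopad = tf.gather_nd(x, nonpad_ids)                # drops the padded rows

with tf.Session() as sess:
  print(sess.run(x_nopad))                           # [[1.] [3.] [4.]]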
Example 2: _quantize
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def _quantize(x, params, randomize=True):
"""Quantize x according to params, optionally randomizing the rounding."""
if not params.quantize:
return x
if not randomize:
return tf.bitcast(
tf.cast(x / params.quantization_scale, tf.int16), tf.float16)
abs_x = tf.abs(x)
sign_x = tf.sign(x)
y = abs_x / params.quantization_scale
y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
y = tf.minimum(y, tf.int16.max) * sign_x
q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
return q
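_quantize depends on a params object (its quantize and quantization_scale fields) and on common_layers.shape_list, none of which are shown. The core trick, stochastic rounding by adding uniform noise before tf.floor, can be sketched standalone with illustrative values:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant([0.2, 1.7, -3.4])
scale = 0.5                                   # stand-in for params.quantization_scale
y = tf.abs(x) / scale
# Adding U[0, 1) noise before flooring rounds up with probability equal to the
# fractional part, so the quantized value is unbiased in expectation.
y = tf.floor(y + tf.random_uniform(tf.shape(x)))
y = tf.minimum(y, tf.int16.max) * tf.sign(x)

with tf.Session() as sess:
  print(sess.run(y))                          # e.g. [ 0.  3. -7.] (stochastic)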
Example 3: neural_gpu_body
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def neural_gpu_body(inputs, hparams, name=None):
"""The core Neural GPU."""
with tf.variable_scope(name, "neural_gpu"):
def step(state, inp): # pylint: disable=missing-docstring
x = tf.nn.dropout(state, 1.0 - hparams.dropout)
for layer in range(hparams.num_hidden_layers):
x = common_layers.conv_gru(
x, (hparams.kernel_height, hparams.kernel_width),
hparams.hidden_size,
name="cgru_%d" % layer)
# Padding input is zeroed-out in the modality; we check for it by summing.
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
new_state = tf.where(padding_inp, state, x) # No-op where inp is padding.
return new_state
return tf.foldl(
step,
tf.transpose(inputs, [1, 0, 2, 3]),
initializer=inputs,
parallel_iterations=1,
swap_memory=True)
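neural_gpu_body needs hparams and common_layers.conv_gru, so it is not runnable in isolation, but its padding test can be demonstrated by itself:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

inp = tf.constant([[[0.0, 0.0]], [[0.3, -0.2]]])        # [batch=2, 1, 2]
# A step is treated as padding when the sum of |values| is (near) zero.
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)

with tf.Session() as sess:
  print(sess.run(padding_inp))                          # [ True False]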
Example 4: get_kl_loss
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def get_kl_loss(self, means, log_vars, means_p=None, log_vars_p=None):
"""Get KL loss for all the predicted Gaussians."""
kl_loss = 0.0
if means_p is None:
means_p = tf.unstack(tf.zeros_like(means))
if log_vars_p is None:
log_vars_p = tf.unstack(tf.zeros_like(log_vars))
enumerated_inputs = enumerate(zip(means, log_vars, means_p, log_vars_p))
if self.is_training and self.hparams.stochastic_model:
for i, (mean, log_var, mean_p, log_var_p) in enumerated_inputs:
kl_loss += common_layers.kl_divergence(mean, log_var, mean_p, log_var_p)
tf.summary.histogram("posterior_mean_%d" % i, mean)
tf.summary.histogram("posterior_log_var_%d" % i, log_var)
tf.summary.histogram("prior_mean_%d" % i, mean_p)
tf.summary.histogram("prior_log_var_%d" % i, log_var_p)
tf.summary.scalar("kl_raw", tf.reduce_mean(kl_loss))
beta = self.get_beta(kl_loss)
# information capacity from "Understanding disentangling in beta-VAE"
if self.hparams.information_capacity > 0.0:
kl_loss = tf.abs(kl_loss - self.hparams.information_capacity)
return beta * kl_loss
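common_layers.kl_divergence is not shown here; assuming it computes the standard closed-form KL between diagonal Gaussians parameterized by mean and log variance, the per-element formula looks like this (a sketch, not the tensor2tensor implementation):
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

def diag_gaussian_kl(mean, log_var, mean_p, log_var_p):
  # KL(N(mean, exp(log_var)) || N(mean_p, exp(log_var_p))), element-wise.
  return 0.5 * (log_var_p - log_var
                + (tf.exp(log_var) + tf.square(mean - mean_p))
                / tf.exp(log_var_p) - 1.0)

with tf.Session() as sess:
  # KL of N(0, 1) against itself is 0.
  print(sess.run(diag_gaussian_kl(0.0, 0.0, 0.0, 0.0)))  # 0.0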
Example 5: group_norm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
"""Group normalization as in https://arxiv.org/abs/1803.08494."""
x_shape = shape_list(x)
if filters is None:
filters = x_shape[-1]
assert len(x_shape) == 4
assert filters % num_groups == 0
# Prepare variables.
scale = tf.get_variable(
"group_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"group_norm_bias", [filters], initializer=tf.zeros_initializer())
epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
# Reshape and compute group norm.
x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
# Calculate mean and variance on heights, width, channels (not groups).
mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return tf.reshape(norm_x, x_shape) * scale + bias
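group_norm relies on shape_list and cast_like from tensor2tensor's common_layers. With minimal stand-ins for those helpers (an assumption; the real shape_list also handles dynamic shapes), the definition above can be exercised:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

def shape_list(t):          # stand-in: static shapes only
  return t.shape.as_list()
def cast_like(t, ref):      # stand-in: cast t to ref's dtype
  return tf.cast(t, ref.dtype)

x = tf.random_normal([2, 4, 4, 16])           # NHWC, 16 channels
y = group_norm(x, num_groups=8)               # 8 groups of 2 channels each

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(y).shape)                    # (2, 4, 4, 16)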
Example 6: gated_linear_unit_layer
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def gated_linear_unit_layer(x, name=None):
"""Gated linear unit layer.
Paper: Language Modeling with Gated Convolutional Networks.
Link: https://arxiv.org/abs/1612.08083
x = Wx * sigmoid(W'x).
Args:
x: A tensor
name: A string
Returns:
A tensor of the same shape as x.
"""
with tf.variable_scope(name, default_name="glu_layer", values=[x]):
depth = shape_list(x)[-1]
x = layers().Dense(depth * 2, activation=None)(x)
x, gating_x = tf.split(x, 2, axis=-1)
return x * tf.nn.sigmoid(gating_x)
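layers() is a tensor2tensor indirection that appears to resolve to tf.keras.layers. Substituting tf.layers.dense for it, the GLU computation can be sketched standalone:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.random_normal([2, 7, 16])
with tf.variable_scope("glu"):
  h = tf.layers.dense(x, 16 * 2, activation=None)  # W and W' fused in one matmul
  h, gate = tf.split(h, 2, axis=-1)
  out = h * tf.nn.sigmoid(gate)                    # Wx * sigmoid(W'x)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(out).shape)                       # (2, 7, 16)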
Example 7: stfts_to_specgrams
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def stfts_to_specgrams(self, stfts):
"""Converts stfts to specgrams.
Args:
stfts: Complex64 tensor of stft, shape [batch, time, freq, 1].
Returns:
specgrams: Tensor of log magnitudes and instantaneous frequencies,
shape [batch, time, freq, 2].
"""
stfts = stfts[:, :, :, 0]
logmag = self._safe_log(tf.abs(stfts))
phase_angle = tf.angle(stfts)
if self._ifreq:
p = spectral_ops.instantaneous_frequency(phase_angle)
else:
p = phase_angle / np.pi
return tf.concat(
[logmag[:, :, :, tf.newaxis], p[:, :, :, tf.newaxis]], axis=-1)
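This method references instance state (_safe_log, _ifreq) and magenta's spectral_ops, so it is shown out of context. The log-magnitude/phase decomposition at its core, with a simple epsilon-log standing in for _safe_log (tf.signal.stft requires TF >= 1.14):
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

audio = tf.sin(tf.linspace(0.0, 100.0, 16384))[tf.newaxis, :]   # [1, samples]
stfts = tf.signal.stft(audio, frame_length=512, frame_step=128, fft_length=512)
logmag = tf.log(tf.abs(stfts) + 1e-6)        # stand-in for _safe_log
phase = tf.angle(stfts) / np.pi              # phase normalized to [-1, 1]

with tf.Session() as sess:
  lm, p = sess.run([logmag, phase])
  print(lm.shape, p.shape)                   # (1, 125, 257) (1, 125, 257)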
Example 8: mu_law
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def mu_law(x, mu=255, int8=False):
"""A TF implementation of Mu-Law encoding.
Args:
x: The audio samples to encode.
mu: The Mu to use in our Mu-Law.
int8: Use int8 encoding.
Returns:
out: The Mu-Law encoded data (int8 if int8 is True, otherwise float).
"""
out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
out = tf.floor(out * 128)
if int8:
out = tf.cast(out, tf.int8)
return out
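mu_law needs only numpy besides TensorFlow, so it is easy to check directly (inputs are typically normalized audio in [-1, 1)):
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

samples = tf.constant([-0.5, -0.1, 0.0, 0.1, 0.5])
encoded = mu_law(samples, mu=255, int8=True)

with tf.Session() as sess:
  print(sess.run(encoded))    # [-113  -76    0   75  112]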
Example 9: apply_batch_norm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def apply_batch_norm(self, wrapper, mean, variance, scale, bias, epsilon):
# Element-wise multiplier.
multiplier = tf.rsqrt(variance + epsilon)
if scale is not None:
multiplier *= scale
w = multiplier
# Element-wise bias.
b = -multiplier * mean
if bias is not None:
b += bias
b = tf.squeeze(b, axis=0)
# Because the scale might be negative, we need to apply a strategy similar
# to linear.
c = (self.lower + self.upper) / 2.
r = (self.upper - self.lower) / 2.
c = tf.multiply(c, w) + b
r = tf.multiply(r, tf.abs(w))
return IntervalBounds(c - r, c + r)
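Outside the wrapper, the same center/radius interval propagation through an element-wise affine map can be checked numerically (the returned IntervalBounds is essentially the (lower, upper) pair):
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

lower = tf.constant([0.0, -1.0])
upper = tf.constant([2.0, 1.0])
w = tf.constant([3.0, -2.0])     # element-wise multiplier (may be negative)
b = tf.constant([1.0, 0.0])

c = (lower + upper) / 2.0        # interval center
r = (upper - lower) / 2.0        # interval radius
c = c * w + b
r = r * tf.abs(w)                # |w| keeps the radius non-negative

with tf.Session() as sess:
  print(sess.run([c - r, c + r]))  # [array([ 1., -2.], ...), array([7., 2.], ...)]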
Example 10: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def __init__(self, c, d=None, prune_irrelevant=True, collapse=True):
"""Builds a linear specification module."""
super(LinearSpecification, self).__init__(name='specs', collapse=collapse)
# c has shape [batch_size, num_specifications, num_outputs]
# d has shape [batch_size, num_specifications]
# Some specifications may be irrelevant (not a function of the output).
# We automatically remove them for clarity. We expect the number of
# irrelevant specs to be equal for all elements of a batch.
# Shape is [batch_size, num_specifications]
if prune_irrelevant:
irrelevant = tf.equal(tf.reduce_sum(
tf.cast(tf.abs(c) > 1e-6, tf.int32), axis=-1, keepdims=True), 0)
batch_size = tf.shape(c)[0]
num_outputs = tf.shape(c)[2]
irrelevant = tf.tile(irrelevant, [1, 1, num_outputs])
self._c = tf.reshape(
tf.boolean_mask(c, tf.logical_not(irrelevant)),
[batch_size, -1, num_outputs])
else:
self._c = c
self._d = d
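The pruning step keeps only specifications whose row of c contains a coefficient with magnitude above 1e-6; the same masking can be run standalone:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

c = tf.constant([[[1.0, -2.0, 0.0],
                  [0.0,  0.0, 0.0]]])        # [batch=1, 2 specs, 3 outputs]
irrelevant = tf.equal(tf.reduce_sum(
    tf.cast(tf.abs(c) > 1e-6, tf.int32), axis=-1, keepdims=True), 0)
num_outputs = tf.shape(c)[2]
kept = tf.reshape(
    tf.boolean_mask(c, tf.logical_not(tf.tile(irrelevant, [1, 1, num_outputs]))),
    [tf.shape(c)[0], -1, num_outputs])

with tf.Session() as sess:
  print(sess.run(kept))                      # [[[ 1. -2.  0.]]]  (spec 2 pruned)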
Example 11: get_perf_timing
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def get_perf_timing(batch_size, step_train_times, ewma_alpha=None, scale=1):
"""Calculate benchmark processing speed."""
times = np.array(step_train_times)
speeds = batch_size / times
if ewma_alpha:
weights = np.logspace(len(times)-1, 0, len(times), base=1-ewma_alpha)
time_mean = np.average(times, weights=weights)
else:
time_mean = np.mean(times)
speed_mean = scale * batch_size / time_mean
speed_uncertainty = np.std(speeds) / np.sqrt(float(len(speeds)))
speed_jitter = 1.4826 * np.median(np.abs(speeds - np.median(speeds)))
return speed_mean, speed_uncertainty, speed_jitter
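get_perf_timing needs only numpy, so with the definition above in scope it can be exercised directly:
import numpy as np

step_train_times = [0.105, 0.100, 0.098, 0.101, 0.099]   # seconds per step
speed, uncertainty, jitter = get_perf_timing(256, step_train_times)
print("%.1f images/sec (+/- %.1f, jitter %.1f)" % (speed, uncertainty, jitter))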
Example 12: gradient_histogram_summary
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def gradient_histogram_summary(self, avg_grads):
"""Create histogram of log values of all non-zero gradients."""
with tf.name_scope('log_gradients_summary'):
all_grads = []
for grad, _ in avg_grads:
all_grads.append(tf.reshape(grad, [-1]))
grads = tf.abs(tf.concat(all_grads, 0))
# exclude grads with zero values.
indices_for_non_zero_grads = tf.where(tf.not_equal(grads, 0))
log_grads = tf.reshape(
tf.log(tf.gather(grads, indices_for_non_zero_grads)), [-1])
tf.summary.histogram('log_gradients', log_grads)
Example 13: abs_error
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def abs_error(predictions, labels, weights_fn=None):
"""Computes mean(abs(preds-target))."""
del weights_fn # Unused
targets = tf.squeeze(labels, axis=[2, 3])
batch_abs_error = tf.abs(predictions - targets)
den = tf.ones(tf.shape(batch_abs_error), dtype=tf.float32)
return (batch_abs_error, den)
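abs_error expects labels of shape [batch, length, 1, 1] (the squeeze removes axes 2 and 3); a usage sketch with the definition above in scope:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

predictions = tf.constant([[1.0, 2.0], [3.0, 4.0]])           # [batch, length]
labels = tf.constant([[1.5, 2.0], [2.0, 6.0]])[:, :, None, None]
errors, weights = abs_error(predictions, labels)

with tf.Session() as sess:
  print(sess.run(errors))   # [[0.5 0. ] [1.  2. ]]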
Example 14: variance_loss
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def variance_loss(self, b):
part = tf.random_uniform(common_layers.shape_list(b))
selection = tf.to_float(tf.less(part, tf.random_uniform([])))
selection_size = tf.reduce_sum(selection)
part_avg = tf.abs(tf.reduce_sum(b * selection)) / (selection_size + 1)
return part_avg
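variance_loss uses only shape_list from common_layers besides self; replacing it with tf.shape, the random-subset average it computes can be sketched:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

b = tf.random_normal([8, 4])
part = tf.random_uniform(tf.shape(b))
selection = tf.to_float(tf.less(part, tf.random_uniform([])))  # random 0/1 mask
selection_size = tf.reduce_sum(selection)
part_avg = tf.abs(tf.reduce_sum(b * selection)) / (selection_size + 1)

with tf.Session() as sess:
  print(sess.run(part_avg))     # scalar penalty; value is random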
Example 15: lenpred_stats
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import abs [as alias]
def lenpred_stats(targets_length_pred, targets_length):
  """Computes exact and within-5 accuracy of length predictions."""
  lenpred_diff = tf.abs(targets_length_pred - tf.cast(targets_length, tf.int32))
lenpred_acc = tf.cast(tf.equal(lenpred_diff, 0), tf.float32)
lenpred_acc = tf.reduce_mean(lenpred_acc)
lenpred_acc5 = tf.cast(tf.less_equal(lenpred_diff, 5), tf.float32)
lenpred_acc5 = tf.reduce_mean(lenpred_acc5)
return lenpred_acc, lenpred_acc5
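A quick numeric check with the definition above in scope:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

pred = tf.constant([10, 12, 30])
true = tf.constant([10, 15, 50])
acc, acc5 = lenpred_stats(pred, true)

with tf.Session() as sess:
  print(sess.run([acc, acc5]))   # [0.33333334, 0.6666667]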