This article collects typical usage examples of the tensorflow.compat.v1.Tensor method in Python. If you have been wondering what v1.Tensor does, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from its containing module, tensorflow.compat.v1.
Fifteen code examples of the v1.Tensor method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: compute_logits
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def compute_logits(self, token_ids: tf.Tensor) -> tf.Tensor:
"""
Implements a language model, where each output is conditional on the current
input and inputs processed so far.
Args:
token_ids: int32 tensor of shape [B, T], storing integer IDs of tokens.
Returns:
tf.float32 tensor of shape [B, T, V], storing the distribution over output symbols
for each timestep for each batch element.
"""
# The exercise left these three steps as TODOs; below is a minimal sketch
# that assumes the model defines `self.embedding_matrix` (shape [V, D]),
# `self.rnn_cell` (a tf.nn.rnn_cell.RNNCell), and `self.vocab_size`,
# all hypothetical attribute names.
# 1) Embed tokens.
embedded = tf.nn.embedding_lookup(self.embedding_matrix, token_ids)
# 2) Run RNN on embedded tokens.
rnn_outputs, _ = tf.nn.dynamic_rnn(self.rnn_cell, embedded, dtype=tf.float32)
# 3) Project RNN outputs onto the vocabulary to obtain logits.
rnn_output_logits = tf.layers.dense(rnn_outputs, units=self.vocab_size)
return rnn_output_logits
Example 2: _normal_distribution_cdf
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def _normal_distribution_cdf(x, stddev):
"""Evaluates the CDF of the normal distribution.
Normal distribution with mean 0 and standard deviation stddev,
evaluated at x. Input and output `Tensor`s have matching shapes.
Args:
x: a `Tensor`
stddev: a `Tensor` with the same shape as `x`.
Returns:
a `Tensor` with the same shape as `x`.
"""
return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20)))
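As a quick sanity check: erf(0) is 0, so the CDF of a zero-mean normal evaluated at x = 0 is exactly 0.5 regardless of stddev. A minimal usage sketch, assuming TF1 graph mode (hence the disable_v2_behavior call):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # needed when running under TF2

sess = tf.Session()
cdf = _normal_distribution_cdf(tf.constant(0.0), tf.constant(1.0))
print(sess.run(cdf))  # ~0.5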
Example 3: cv_squared
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def cv_squared(x):
"""The squared coefficient of variation of a sample.
Useful as a loss to encourage a positive distribution to be more uniform.
Epsilons added for numerical stability.
Returns 0 for an empty Tensor.
Args:
x: a `Tensor`.
Returns:
a `Scalar`.
"""
epsilon = 1e-10
float_size = tf.to_float(tf.size(x)) + epsilon
mean = tf.reduce_sum(x) / float_size
variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size
return variance / (tf.square(mean) + epsilon)
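For intuition: a perfectly uniform sample has zero variance, so cv_squared returns (nearly) 0, while a skewed sample yields a positive value. A minimal sketch under the same TF1 assumptions:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # needed when running under TF2

sess = tf.Session()
print(sess.run(cv_squared(tf.constant([1.0, 1.0, 1.0, 1.0]))))  # ~0.0
# mean = 2 and variance = 1, so cv^2 is roughly 1 / 2**2 = 0.25
print(sess.run(cv_squared(tf.constant([1.0, 3.0]))))  # ~0.25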
Example 4: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def __init__(self, pad_mask):
"""Compute and store the location of the padding.
Args:
pad_mask (tf.Tensor): Reference padding tensor of shape
[batch_size,length] or [dim_origin] (dim_origin=batch_size*length)
containing non-zero positive values to indicate padding locations.
"""
self.nonpad_ids = None
self.dim_origin = None
with tf.name_scope("pad_reduce/get_ids"):
pad_mask = tf.reshape(pad_mask, [-1]) # Flatten the batch
# nonpad_ids contains coordinates of zero rows (as pad_mask is
# float32, checking zero equality is done with |x| < epsilon, with
# epsilon=1e-9 as standard, here pad_mask only contains positive values
# so tf.abs would be redundant)
self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))
self.dim_origin = tf.shape(pad_mask)[:1]
Example 5: remove
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def remove(self, x):
"""Remove padding from the given tensor.
Args:
x (tf.Tensor): of shape [dim_origin,...]
Returns:
a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
"""
with tf.name_scope("pad_reduce/remove"):
x_shape = x.get_shape().as_list()
x = tf.gather_nd(
x,
indices=self.nonpad_ids,
)
if not tf.executing_eagerly():
# This is a hack, but for some reason gather_nd returns a tensor of
# undefined shape, so the shape is set manually.
x.set_shape([None] + x_shape[1:])
return x
Example 6: restore
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def restore(self, x):
"""Add padding back to the given tensor.
Args:
x (tf.Tensor): of shape [dim_compressed,...]
Returns:
a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
dim is restored from the original reference tensor.
"""
with tf.name_scope("pad_reduce/restore"):
x = tf.scatter_nd(
indices=self.nonpad_ids,
updates=x,
shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
)
return x
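Examples 4-6 are methods of one helper class (PadRemover in tensor2tensor; that class name does not appear in the snippets above, so treat it as an assumption). A minimal round-trip sketch of how the three methods fit together:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # needed when running under TF2

sess = tf.Session()
pad_mask = tf.constant([0.0, 1.0, 0.0, 1.0])  # non-zero marks padding
x = tf.constant([[1.0], [0.0], [2.0], [0.0]])
pad_remover = PadRemover(pad_mask)       # assumed class name for Examples 4-6
compact = pad_remover.remove(x)          # [[1.], [2.]] (pad rows dropped)
restored = pad_remover.restore(compact)  # [[1.], [0.], [2.], [0.]]
print(sess.run([compact, restored]))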
Example 7: combine
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, weighted by the gates.
The slice corresponding to a particular batch element `b` is computed
as the sum over all experts `i` of the expert output, weighted by the
corresponding gate values. If `multiply_by_gates` is set to False, the
gate values are ignored.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean
Returns:
a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
"""
# see comments on convert_gradient_to_tensor
stitched = common_layers.convert_gradient_to_tensor(
tf.concat(expert_out, 0))
if multiply_by_gates:
stitched *= tf.expand_dims(self._nonzero_gates, 1)
combined = tf.unsorted_segment_sum(stitched, self._batch_index,
tf.shape(self._gates)[0])
return combined
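The core of combine is tf.unsorted_segment_sum, which routes each expert-output row back to the batch element it came from. A standalone sketch of just that mechanism (batch_index and nonzero_gates are hypothetical stand-ins for the dispatcher state, and the gradient-friendly concat wrapper is omitted):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # needed when running under TF2

sess = tf.Session()
# Expert 0 handled batch elements 0 and 2; expert 1 handled element 1.
stitched = tf.concat([tf.constant([[1.0], [2.0]]), tf.constant([[3.0]])], 0)
batch_index = tf.constant([0, 2, 1])
nonzero_gates = tf.constant([0.5, 1.0, 0.25])
weighted = stitched * tf.expand_dims(nonzero_gates, 1)
combined = tf.unsorted_segment_sum(weighted, batch_index, 3)
print(sess.run(combined))  # [[0.5], [0.75], [2.0]]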
Example 8: dispatch
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def dispatch(self, inp):
"""Create one input Tensor for each expert.
Args:
inp: a list of length num_datashards `Tensor`s with shapes
`[batch_size[d], <extra_input_dims>]`.
Returns:
a list of `num_experts` `Tensor`s with shapes
`[num_examples[i], <extra_input_dims>]`.
"""
dispatched = self._dp(lambda a, b: a.dispatch(b), self._dispatchers, inp)
ret = self._ep(tf.concat, transpose_list_of_lists(dispatched), 0)
if ret[0].dtype == tf.float32:
# see comments on common_layers.convert_gradient_to_tensor
ret = self._ep(common_layers.convert_gradient_to_tensor, ret)
return ret
Example 9: body
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def body(self, features):
"""Computes the targets' pre-logit activations given transformed inputs.
Most `T2TModel` subclasses will override this method.
Args:
features: dict of str to Tensor, where each Tensor has shape [batch_size,
..., hidden_size]. It typically contains keys `inputs` and `targets`.
Returns:
output: Tensor of pre-logit activations with shape [batch_size, ...,
hidden_size].
losses: Either single loss as a scalar, a list, a Tensor (to be averaged),
or a dictionary of losses. If losses is a dictionary with the key
"training", losses["training"] is considered the final training
loss and output is considered logits; self.top and self.loss will
be skipped.
"""
raise NotImplementedError("Abstract Method")
Example 10: eval_autoregressive
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def eval_autoregressive(self, features=None, decode_length=50):
"""Autoregressive eval.
Quadratic time in decode_length.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
"""
results = self._slow_greedy_infer(features, decode_length=decode_length)
return results["logits"], results["losses"]
Example 11: _beam_decode
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def _beam_decode(self,
features,
decode_length,
beam_size,
top_beams,
alpha,
use_tpu=False):
"""Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
stronger the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
return self._beam_decode_slow(features, decode_length, beam_size, top_beams,
alpha, use_tpu)
Example 12: average_sharded_losses
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def average_sharded_losses(sharded_losses):
"""Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
"""
losses = {}
for loss_name in sorted(sharded_losses[0]):
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(
tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses
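A minimal sketch exercising both supported loss formats, plain tensors and (numerator, denominator) tuples, under the same TF1 assumptions:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # needed when running under TF2

sess = tf.Session()
sharded = [
    {"training": tf.constant(1.0), "extra": (tf.constant(2.0), tf.constant(1.0))},
    {"training": tf.constant(3.0), "extra": (tf.constant(4.0), tf.constant(1.0))},
]
# "training" is averaged directly; "extra" is sum(num) / max(1, sum(den)).
print(sess.run(average_sharded_losses(sharded)))  # {'training': 2.0, 'extra': 3.0}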
Example 13: summarize_features
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def summarize_features(features, num_shards=1):
"""Generate summaries for features."""
if not common_layers.should_generate_summaries():
return
with tf.name_scope("input_stats"):
for (k, v) in sorted(six.iteritems(features)):
if (isinstance(v, tf.Tensor) and (v.get_shape().ndims > 1) and
(v.dtype != tf.string)):
tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)
tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
tf.summary.scalar("%s_nonpadding_fraction" % k,
tf.reduce_mean(nonpadding))
Example 14: ror
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def ror(x, n, p=1):
"""Bitwise right rotation.
Args:
x: Input tensor
n: Bit count to represent x
p: Bit positions to shift
Returns:
tf.Tensor: x rotated right by p positions within n bits
"""
a = tf.bitwise.right_shift(x, p)
b = tf.bitwise.left_shift(1, p) - 1
c = tf.bitwise.bitwise_and(x, b)
d = tf.bitwise.left_shift(c, n - p)
return a + d
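For example, rotating the 8-bit value 1 right by one position wraps the low bit around to the high bit:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # needed when running under TF2

sess = tf.Session()
print(sess.run(ror(tf.constant(1), 8, 1)))  # 128, i.e. 0b10000000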
Example 15: rol
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Tensor [as alias]
def rol(x, n, p=1):
"""Bitwise left rotation.
Args:
x: Input tensor
n: Bit count to represent x
p: Bit positions to shift
Returns:
tf.Tensor: x rotated left by p positions within n bits
"""
a = tf.bitwise.left_shift(x, p)
b = tf.bitwise.left_shift(1, n) - 1
c = tf.bitwise.bitwise_and(a, b)
d = tf.bitwise.right_shift(x, n - p)
return tf.bitwise.bitwise_or(c, d)
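rol inverts ror at the same bit width, so a left rotation undoes a right rotation:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # needed when running under TF2

sess = tf.Session()
print(sess.run(rol(tf.constant(128), 8, 1)))                 # 1
print(sess.run(rol(ror(tf.constant(0b1011), 8, 3), 8, 3)))   # 11 (round trip)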