This article collects typical usage examples of the Python method tensorflow.compat.v1.zeros_like. If you are wondering exactly how to use v1.zeros_like, how to call it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.compat.v1.
Below are 15 code examples of the v1.zeros_like method, sorted by popularity by default.
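
Before the examples, here is a minimal sketch of what zeros_like itself does (this sketch is ours, not taken from any of the sources below): it returns a tensor of zeros with the same shape, and by default the same dtype, as its input.

from tensorflow.compat import v1 as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
z = tf.zeros_like(x)                      # shape (2, 2), dtype float32, all zeros
z_int = tf.zeros_like(x, dtype=tf.int32)  # the dtype can be overridden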

Example 1: _distributional_to_value

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def _distributional_to_value(value_d, size, subscale, threshold):
  """Get a scalar value out of a value distribution in distributional RL."""
  half = size // 2
  value_range = (tf.to_float(tf.range(-half, half)) + 0.5) * subscale
  probs = tf.nn.softmax(value_d)
  if threshold == 0.0:
    return tf.reduce_sum(probs * value_range, axis=-1)
  # accumulated_probs[..., i] is the sum of probabilities in buckets up to i,
  # so it is the probability that value <= i'th bucket value.
  accumulated_probs = tf.cumsum(probs, axis=-1)
  # New probs are 0 on all lower buckets, until the threshold.
  probs = tf.where(accumulated_probs < threshold, tf.zeros_like(probs), probs)
  probs /= tf.reduce_sum(probs, axis=-1, keepdims=True)  # Re-normalize.
  return tf.reduce_sum(probs * value_range, axis=-1)
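
A hypothetical call of the function above; the shapes and hyperparameter values are our own assumptions, chosen only for illustration:

value_d = tf.random_uniform([4, 8])  # a batch of 4 value distributions over 8 buckets
values = _distributional_to_value(
    value_d, size=8, subscale=0.5, threshold=0.0)  # -> shape [4]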

Example 2: calculate_generalized_advantage_estimator

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  # pylint: disable=g-doc-args
  """Generalized advantage estimator.

  Returns:
    GAE estimator. It will be one element shorter than the input; this is
    because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
  """
  # pylint: enable=g-doc-args
  next_value = value[1:, :]
  next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
  delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
           - value[:-1, :])
  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return")
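
A hypothetical call with time-major tensors; the [time, batch] layout is inferred from the slicing above, and the values are made up for illustration:

T, B = 10, 4
reward = tf.random_normal([T, B])
value = tf.random_normal([T, B])
done = tf.zeros([T, B], dtype=tf.bool)  # no episode ends in this toy rollout
gae = calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma=0.99, gae_lambda=0.95)  # -> shape [T - 1, B]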

Example 3: padded_accuracy_topk

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions match labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights

Example 4: image_summary

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def image_summary(predictions, targets, hparams):
  """Reshapes predictions and passes it to tensorboard.

  Args:
    predictions: The predicted image (logits).
    targets: The ground truth.
    hparams: model hparams.

  Returns:
    summary_proto: containing the summary images.
    weights: A Tensor of zeros of the same shape as predictions.
  """
  del hparams
  results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
  gold = tf.cast(targets, tf.uint8)
  summary1 = tf.summary.image("prediction", results, max_outputs=2)
  summary2 = tf.summary.image("data", gold, max_outputs=2)
  summary = tf.summary.merge([summary1, summary2])
  return summary, tf.zeros_like(predictions)

Example 5: _apply_cond

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
  """Apply conditionally if counter is zero."""
  grad_acc = self.get_slot(var, "grad_acc")

  def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
    total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
    adam_op = apply_fn(total_grad, var, *args, **kwargs)
    with tf.control_dependencies([adam_op]):
      grad_acc_to_zero_op = grad_acc.assign(
          tf.zeros_like(grad_acc), use_locking=self._use_locking)
    return tf.group(adam_op, grad_acc_to_zero_op)

  def accumulate_gradient(grad_acc, grad):
    assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
    return tf.group(assign_op)  # Strip return value.

  return tf.cond(
      tf.equal(self._get_iter_variable(), 0),
      lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
      lambda: accumulate_gradient(grad_acc, grad))

Example 6: _apply_cond

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
  """Apply conditionally if counter is zero."""
  grad_acc = self.get_slot(var, "grad_acc")

  def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
    total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
    adam_op = apply_fn(total_grad, var, *args, **kwargs)
    with tf.control_dependencies([adam_op]):
      grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                            use_locking=self._use_locking)
    return tf.group(adam_op, grad_acc_to_zero_op)

  def accumulate_gradient(grad_acc, grad):
    assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
    return tf.group(assign_op)  # Strip return value.

  return tf.cond(
      tf.equal(self._get_iter_variable(), 0),
      lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
      lambda: accumulate_gradient(grad_acc, grad))

Example 7: sample

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def sample(self, features=None, shape=None):
  del features
  hp = self.hparams
  div_x = 2**hp.num_hidden_layers
  div_y = 1 if self.is1d else 2**hp.num_hidden_layers
  size = [
      hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
      hp.bottleneck_bits
  ]
  size = size if shape is None else shape
  rand = tf.random_uniform(size)
  res = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
  # If you want to set some first bits to a fixed value, do this:
  # fixed = tf.zeros_like(rand) - 1.0
  # nbits = 3
  # res = tf.concat([fixed[:, :, :, :nbits], res[:, :, :, nbits:]], axis=-1)
  return res

Example 8: bottleneck

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def bottleneck(self, x):  # pylint: disable=arguments-differ
  hparams = self.hparams
  if hparams.unordered:
    return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
  noise = hparams.bottleneck_noise
  hparams.bottleneck_noise = 0.0  # We'll add noise below.
  x, loss = discretization.parametrized_bottleneck(x, hparams)
  hparams.bottleneck_noise = noise
  if hparams.mode == tf.estimator.ModeKeys.TRAIN:
    # We want a number p such that p^bottleneck_bits = 1 - noise.
    # So log(p) * bottleneck_bits = log(1 - noise).
    log_p = tf.log1p(-float(noise) / 2) / float(hparams.bottleneck_bits)
    # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
    noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
    # Having the no-noise mask, we can make noise just uniformly at random.
    ordered_noise = tf.random_uniform(tf.shape(x))
    # We want our noise to be 1s at the start and random {-1, 1} bits later.
    ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
    # Now we flip the bits of x on the noisy positions (ordered and normal).
    x *= 2.0 * ordered_noise - 1
  return x, loss

Example 9: testMultipleGradientsWithVariables

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def testMultipleGradientsWithVariables(self):
  gradient = tf.constant(self._grad_vec, dtype=tf.float32)
  variable = variables_lib.Variable(tf.zeros_like(gradient))
  grad_to_var = (gradient, variable)
  gradient_multipliers = {variable: self._multiplier}
  [grad_to_var] = learning.multiply_gradients([grad_to_var],
                                              gradient_multipliers)
  # Ensure the variable passed through.
  self.assertEqual(grad_to_var[1], variable)
  with self.cached_session() as sess:
    actual_gradient = sess.run(grad_to_var[0])
  np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec,
                                 5)

Example 10: unwrap

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def unwrap(p, discont=np.pi, axis=-1):
  """Unwrap a cyclical phase tensor.

  Args:
    p: Phase tensor.
    discont: Float, size of the cyclic discontinuity.
    axis: Axis along which to unwrap.

  Returns:
    unwrapped: Unwrapped tensor of same size as input.
  """
  dd = diff(p, axis=axis)
  ddmod = tf.mod(dd + np.pi, 2.0 * np.pi) - np.pi
  idx = tf.logical_and(tf.equal(ddmod, -np.pi), tf.greater(dd, 0))
  ddmod = tf.where(idx, tf.ones_like(ddmod) * np.pi, ddmod)
  ph_correct = ddmod - dd
  idx = tf.less(tf.abs(dd), discont)
  ddmod = tf.where(idx, tf.zeros_like(ddmod), dd)
  ph_cumsum = tf.cumsum(ph_correct, axis=axis)
  shape = p.get_shape().as_list()
  shape[axis] = 1
  ph_cumsum = tf.concat([tf.zeros(shape, dtype=p.dtype), ph_cumsum], axis=axis)
  unwrapped = p + ph_cumsum
  return unwrapped
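
A hypothetical call; note that the snippet relies on a diff helper (not shown above) that, like np.diff, takes first differences along the given axis:

phase = tf.random_uniform([2, 128], -np.pi, np.pi)  # wrapped phases in [-pi, pi)
unwrapped = unwrap(phase, axis=-1)                  # same shape, with 2*pi jumps removed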

Example 11: iou

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def iou(boxlist1, boxlist2, scope=None):
  """Computes pairwise intersection-over-union between box collections.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise iou scores.
  """
  with tf.name_scope(scope, 'IOU'):
    intersections = intersection(boxlist1, boxlist2)
    areas1 = area(boxlist1)
    areas2 = area(boxlist2)
    unions = (
        tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
    return tf.where(
        tf.equal(intersections, 0.0),
        tf.zeros_like(intersections), tf.truediv(intersections, unions))

Example 12: CausallyMaskedSoftmax

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def CausallyMaskedSoftmax(x):
  """Causally masked softmax. Zero out probabilities before and after norm.

  Pre-softmax logits are masked by setting the entries above the diagonal
  to -inf:

  |a, 0, 0|   |0, -inf, -inf|
  |b, d, 0| + |0,    0, -inf|
  |c, e, f|   |0,    0,    0|

  Args:
    x: Batched tensor of shape [batch_size, T, T].

  Returns:
    Softmax where each row corresponds to the softmax vector for each query.
  """
  lower_diag = tf.linalg.band_part(x, -1, 0)
  upper_diag = -np.inf * tf.ones_like(x)
  upper_diag = tf.linalg.band_part(upper_diag, 0, -1)
  upper_diag = tf.linalg.set_diag(
      upper_diag, tf.zeros_like(tf.linalg.diag_part(x)))
  x = lower_diag + upper_diag
  softmax = tf.nn.softmax(x)
  return tf.linalg.band_part(softmax, -1, 0)
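
A hypothetical call; the batch size and sequence length are arbitrary:

logits = tf.random_normal([2, 5, 5])  # [batch_size, T, T]
attn = CausallyMaskedSoftmax(logits)  # row t only attends to positions <= t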

Example 13: test_flip

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def test_flip(self):
  with tf.Session() as sess:
    image_3d = self.constant_3d_image()
    image_3d_np = sess.run(image_3d)
    for flip_axis in [0, 1, 2]:
      image_3d_flip, _ = data_aug_lib.maybe_flip(
          image_3d, tf.zeros_like(image_3d), flip_axis, 0.0)
      image_3d_flip_np = sess.run(image_3d_flip)
      self.assertAllClose(image_3d_flip_np, image_3d_np)
    image_3d_flip = image_3d
    for flip_axis in [0, 1, 2]:
      if flip_axis == 0:
        image_3d_np = image_3d_np[::-1, ...]
      elif flip_axis == 1:
        image_3d_np = image_3d_np[:, ::-1, :]
      else:
        image_3d_np = image_3d_np[..., ::-1]
      image_3d_flip, _ = data_aug_lib.maybe_flip(
          image_3d_flip, tf.zeros_like(image_3d_flip), flip_axis, 1.0)
      image_3d_flip_np = sess.run(image_3d_flip)
      self.assertAllClose(image_3d_flip_np, image_3d_np)

Example 14: compute_thresholded_labels

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def compute_thresholded_labels(labels, null_threshold=4):
  """Computes thresholded labels.

  Args:
    labels: <int32> [batch_size, num_annotators]
    null_threshold: If the number of null annotations is greater than or equal
      to this threshold, all annotations are set to null for this example.

  Returns:
    thresholded_labels: <int32> [batch_size, num_annotators]
  """
  null_labels = tf.equal(labels, 0)
  # <int32> [batch_size]
  null_count = tf.reduce_sum(tf.to_int32(null_labels), 1)
  threshold_mask = tf.less(null_count, null_threshold)
  # <bool> [batch_size, num_annotators]
  threshold_mask = tf.tile(
      tf.expand_dims(threshold_mask, -1), [1, tf.shape(labels)[1]])
  # <int32> [batch_size, num_annotators]
  thresholded_labels = tf.where(
      threshold_mask, x=labels, y=tf.zeros_like(labels))
  return thresholded_labels
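
A hypothetical call with two examples and five annotators; label 0 denotes a null annotation:

labels = tf.constant([[0, 0, 0, 0, 3],
                      [1, 2, 0, 4, 5]])
thresholded = compute_thresholded_labels(labels, null_threshold=4)
# The first row has 4 nulls (>= threshold), so all of its labels become 0;
# the second row is returned unchanged.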

Example 15: f1_metric

# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import zeros_like [as alias]
def f1_metric(precision, precision_op, recall, recall_op):
  """Computes F1 based on precision and recall.

  Args:
    precision: <float> [batch_size]
    precision_op: Update op for precision.
    recall: <float> [batch_size]
    recall_op: Update op for recall.

  Returns:
    tensor and update op for F1.
  """
  f1_op = tf.group(precision_op, recall_op)
  numerator = 2 * tf.multiply(precision, recall)
  denominator = tf.add(precision, recall)
  f1 = tf.divide(numerator, denominator)
  # <float> [batch_size]
  zero_vec = tf.zeros_like(f1)
  is_valid = tf.greater(denominator, zero_vec)
  f1 = tf.where(is_valid, x=f1, y=zero_vec)
  return f1, f1_op
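
A hypothetical pairing with the streaming tf.metrics ops (running the update ops also requires initializing local variables in a session):

labels = tf.constant([1, 0, 1, 1])
predictions = tf.constant([1, 0, 0, 1])
precision, precision_op = tf.metrics.precision(labels, predictions)
recall, recall_op = tf.metrics.recall(labels, predictions)
f1, f1_op = f1_metric(precision, precision_op, recall, recall_op)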