This article collects typical usage examples of the Python method tensorflow.compat.v2.stack. If you are wondering what v2.stack does, how to use it, or want to see it in action, the curated code examples below may help. You can also explore other members of the module tensorflow.compat.v2.
Below are 15 code examples of v2.stack, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _key2seed

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def _key2seed(a):
  """Converts an RNG key to an RNG seed.

  Args:
    a: an RNG key, an ndarray of shape [] and dtype `np.int64`.

  Returns:
    an RNG seed, a tensor of shape [2] and dtype `tf.int32`.
  """

  def int64_to_int32s(a):
    """Converts an int64 tensor of shape [] to an int32 tensor of shape [2]."""
    a = tf.cast(a, tf.uint64)
    fst = tf.cast(a, tf.uint32)
    snd = tf.cast(
        tf.bitwise.right_shift(a, tf.constant(32, tf.uint64)), tf.uint32)
    a = [fst, snd]
    a = tf.nest.map_structure(lambda x: tf.cast(x, tf.int32), a)
    a = tf.stack(a)
    return a

  return int64_to_int32s(a.data)
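As a rough sanity check of the bit-splitting above (a standalone sketch, not from the original source; `tf` is assumed to be `tensorflow.compat.v2` and the key value is made up for illustration):

key = tf.constant((7 << 32) | 5, tf.int64)  # high 32 bits 7, low 32 bits 5
key = tf.cast(key, tf.uint64)
low = tf.cast(key, tf.uint32)  # truncation keeps the low word: 5
high = tf.cast(tf.bitwise.right_shift(key, tf.constant(32, tf.uint64)), tf.uint32)  # 7
seed = tf.stack([tf.cast(low, tf.int32), tf.cast(high, tf.int32)])  # [5, 7], shape [2]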
Example 2: test_forward_unconnected_gradient

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def test_forward_unconnected_gradient(self):
  t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
  zeros = tf.zeros([2], dtype=t.dtype)
  func = lambda t: tf.stack([zeros, zeros, zeros], axis=0)  # Shape [3, 2]
  expected_result = [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]
  with self.subTest("EagerExecution"):
    fwd_grad = self.evaluate(tff.math.fwd_gradient(
        func, t, unconnected_gradients=tf.UnconnectedGradients.ZERO))
    self.assertEqual(fwd_grad.shape, (3, 2))
    np.testing.assert_allclose(fwd_grad, expected_result)
  with self.subTest("GraphExecution"):
    @tf.function
    def grad_computation():
      y = func(t)
      return tff.math.fwd_gradient(
          y, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
    fwd_grad = self.evaluate(grad_computation())
    self.assertEqual(fwd_grad.shape, (3, 2))
    np.testing.assert_allclose(fwd_grad, expected_result)
Example 3: test_backward_unconnected_gradient

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def test_backward_unconnected_gradient(self):
  t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
  zeros = tf.zeros([2], dtype=t.dtype)
  expected_result = [0.0, 0.0]
  func = lambda t: tf.stack([zeros, zeros, zeros], axis=0)  # Shape [3, 2]
  with self.subTest("EagerExecution"):
    backward_grad = self.evaluate(tff.math.gradients(
        func, t, unconnected_gradients=tf.UnconnectedGradients.ZERO))
    self.assertEqual(backward_grad.shape, (2,))
    np.testing.assert_allclose(backward_grad, expected_result)
  with self.subTest("GraphExecution"):
    @tf.function
    def grad_computation():
      y = func(t)
      return tff.math.gradients(
          y, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
    backward_grad = self.evaluate(grad_computation())
    self.assertEqual(backward_grad.shape, (2,))
    np.testing.assert_allclose(backward_grad, expected_result)
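The zero-versus-missing behaviour being tested can also be reproduced with a plain `tf.GradientTape` (a minimal sketch, assuming `tf` is `tensorflow.compat.v2`); since the stacked zeros do not depend on `t`, the gradient exists only when `ZERO` is requested:

t = tf.range(1, 3, dtype=tf.float32)
with tf.GradientTape(persistent=True) as tape:
  tape.watch(t)
  y = tf.stack([tf.zeros([2]), tf.zeros([2]), tf.zeros([2])], axis=0)  # ignores t
print(tape.gradient(y, t))  # None: y is not connected to t
print(tape.gradient(y, t,
                    unconnected_gradients=tf.UnconnectedGradients.ZERO))  # [0. 0.]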
Example 4: to_tensor

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def to_tensor(self):
  """Packs the dates into a single Tensor.

  The Tensor has shape `date_tensor.shape() + (3,)`, where the last dimension
  represents years, months and days, in this order.

  This can be convenient when the dates are the final result of a computation
  in graph mode: a `tf.function` can return `date_tensor.to_tensor()`, or,
  if one uses `tf.compat.v1.Session`, they can call
  `session.run(date_tensor.to_tensor())`.

  Returns:
    A Tensor of shape `date_tensor.shape() + (3,)`.

  #### Example

  ```python
  dates = tff.datetime.dates_from_tuples([(2019, 1, 25), (2020, 3, 2)])
  dates.to_tensor()  # tf.Tensor with contents [[2019, 1, 25], [2020, 3, 2]].
  ```
  """
  return tf.stack((self.year(), self.month(), self.day()), axis=-1)
Example 5: _decode_and_center_crop

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def _decode_and_center_crop(image_bytes):
  """Crops to the center of the image with padding, then scales the image."""
  shape = tf.image.extract_jpeg_shape(image_bytes)
  image_height = shape[0]
  image_width = shape[1]

  padded_center_crop_size = tf.cast(
      ((_IMAGE_SIZE / (_IMAGE_SIZE + _CROP_PADDING)) *
       tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)

  offset_height = ((image_height - padded_center_crop_size) + 1) // 2
  offset_width = ((image_width - padded_center_crop_size) + 1) // 2
  crop_window = tf.stack([
      offset_height, offset_width, padded_center_crop_size,
      padded_center_crop_size
  ])
  image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
  image = tf.image.resize([image], [_IMAGE_SIZE, _IMAGE_SIZE],
                          method=tf.image.ResizeMethod.BICUBIC)[0]
  image = tf.cast(image, tf.int32)
  return image
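A possible way to call this helper (a sketch only; `_IMAGE_SIZE`, `_CROP_PADDING` and the file path are assumptions for illustration, and `tf` is assumed to be `tensorflow.compat.v2`):

_IMAGE_SIZE = 224   # assumed value, typical for ImageNet-style pipelines
_CROP_PADDING = 32  # assumed value
image_bytes = tf.io.read_file("/path/to/image.jpg")  # hypothetical path
image = _decode_and_center_crop(image_bytes)  # int32 image of shape [224, 224, 3]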
Example 6: labels_of_top_ranked_predictions_in_batch

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def labels_of_top_ranked_predictions_in_batch(labels, predictions):
  """Applying tf.metrics.mean to this gives precision at 1.

  Args:
    labels: minibatch of dense 0/1 labels, shape [batch_size, num_classes]
    predictions: minibatch of predictions of the same shape

  Returns:
    a one-dimensional tensor top_labels, where top_labels[i] = 1.0 iff the
    top-scoring prediction for batch element i has label 1.0
  """
  indices_of_top_preds = tf.cast(tf.argmax(input=predictions, axis=1), tf.int32)
  batch_size = tf.reduce_sum(input_tensor=tf.ones_like(indices_of_top_preds))
  row_indices = tf.range(batch_size)
  thresholded_labels = tf.where(labels > 0.0, tf.ones_like(labels),
                                tf.zeros_like(labels))
  label_indices_to_gather = tf.transpose(
      a=tf.stack([row_indices, indices_of_top_preds]))
  return tf.gather_nd(thresholded_labels, label_indices_to_gather)
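A tiny worked example (a sketch, assuming `tf` is `tensorflow.compat.v2`): with two rows, the top-scoring class of row 0 carries label 1 while that of row 1 carries label 0, so the function returns [1., 0.] and its mean, 0.5, is the precision at 1.

labels = tf.constant([[0., 1., 0.],
                      [1., 0., 0.]])
predictions = tf.constant([[0.1, 0.8, 0.1],   # argmax = 1, label 1 -> hit
                           [0.2, 0.3, 0.5]])  # argmax = 2, label 0 -> miss
top_labels = labels_of_top_ranked_predictions_in_batch(labels, predictions)
# top_labels == [1., 0.]; tf.reduce_mean(top_labels) == 0.5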
Example 7: stack_nested_tensors

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def stack_nested_tensors(list_of_nests):
  """Stacks a list of nested tensors.

  Args:
    list_of_nests: A list of nested tensors (or numpy arrays) of the same
      shape/structure.

  Returns:
    A nested structure containing batched items, where each batched item is
    obtained by stacking the corresponding items from the list of nests.
  """

  def stack_tensor(*tensors):
    result = [tf.convert_to_tensor(t) for t in tensors]
    return tf.stack(result)

  return tf.nest.map_structure(stack_tensor, *list_of_nests)
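For instance (a minimal sketch, assuming `tf` is `tensorflow.compat.v2`), stacking two dict-structured observations yields a dict of batched tensors:

obs1 = {"pos": tf.constant([1.0, 2.0]), "reward": 0.5}
obs2 = {"pos": tf.constant([3.0, 4.0]), "reward": 1.0}
batched = stack_nested_tensors([obs1, obs2])
# batched["pos"] has shape [2, 2]; batched["reward"] has shape [2]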
Example 8: process_multidoc_dataset

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def process_multidoc_dataset(dataset, batch_size, params):
  """Parses, organizes and batches the multi-doc dataset."""
  name_to_features, feature_list = multidoc_parse_spec(params)
  decode_fn = lambda record: decode_record(record, name_to_features)
  dataset = dataset.map(
      decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)

  def _select_data_from_record(record):
    """Filters out features to use for pretraining."""
    features = {"target_ids": record["input_ids_a"]}
    for feature in feature_list:
      tensors = [record["%s_%s" % (feature, i)] for i in params.passage_list]
      features[feature] = tf.stack(tensors)
    return features

  dataset = dataset.map(
      _select_data_from_record,
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.batch(batch_size, drop_remainder=True)
  return dataset
Example 9: convert_sharded_tensor_to_eager_tensor

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def convert_sharded_tensor_to_eager_tensor(value, *args, **kwargs):
  del args, kwargs
  # TODO(nareshmodi): Consider a collective op to gather the tensors from the
  # various devices for performance reasons.
  return tf.stack(value.tensors)
Example 10: stack

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def stack(arrays, axis=0):
  arrays = _promote_dtype(*arrays)  # pylint: disable=protected-access
  unwrapped_arrays = [
      a.data if isinstance(a, arrays_lib.ndarray) else a for a in arrays
  ]
  return asarray(tf.stack(unwrapped_arrays, axis))
Example 11: test_forward_gradient

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def test_forward_gradient(self):
  t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
  func = lambda t: tf.stack([t, t ** 2, t ** 3], axis=0)  # Shape [3, 2]
  with self.subTest("EagerExecution"):
    fwd_grad = self.evaluate(tff.math.fwd_gradient(func, t))
    self.assertEqual(fwd_grad.shape, (3, 2))
    np.testing.assert_allclose(fwd_grad, [[1., 1.], [2., 4.], [3., 12.]])
  with self.subTest("GraphExecution"):
    @tf.function
    def grad_computation():
      y = func(t)
      return tff.math.fwd_gradient(y, t)
    fwd_grad = self.evaluate(grad_computation())
    self.assertEqual(fwd_grad.shape, (3, 2))
    np.testing.assert_allclose(fwd_grad, [[1., 1.], [2., 4.], [3., 12.]])
Example 12: test_backward_gradient

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def test_backward_gradient(self):
  t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
  func = lambda t: tf.stack([t, t ** 2, t ** 3], axis=0)  # Shape [3, 2]
  with self.subTest("EagerExecution"):
    backward_grad = self.evaluate(tff.math.gradients(func, t))
    self.assertEqual(backward_grad.shape, (2,))
    np.testing.assert_allclose(backward_grad, [6., 17.])
  with self.subTest("GraphExecution"):
    @tf.function
    def grad_computation():
      y = func(t)
      return tff.math.gradients(y, t)
    backward_grad = self.evaluate(grad_computation())
    self.assertEqual(backward_grad.shape, (2,))
    np.testing.assert_allclose(backward_grad, [6., 17.])
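The expected values follow from summing the gradient over the three output rows: the derivative of t + t**2 + t**3 is 1 + 2t + 3t**2, which is 6 at t = 1 and 17 at t = 2. A minimal cross-check with a plain `tf.GradientTape` (assuming `tf` is `tensorflow.compat.v2`):

t = tf.range(1, 3, dtype=tf.float32)
with tf.GradientTape() as tape:
  tape.watch(t)
  y = tf.stack([t, t ** 2, t ** 3], axis=0)
print(tape.gradient(y, t))  # [ 6. 17.]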
Example 13: test_diffs_differentiable

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def test_diffs_differentiable(self):
  """Tests that the diffs op is differentiable."""
  x = tf.constant(2.0)
  xv = tf.stack([x, x * x, x * x * x], axis=0)

  # Produces [x, x^2 - x, x^3 - x^2]
  dxv = self.evaluate(math.diff(xv))
  np.testing.assert_array_equal(dxv, [2., 2., 4.])

  grad = self.evaluate(tf.gradients(math.diff(xv), x)[0])
  # Note that TF gradients adds up the components of the jacobian.
  # The sum of [1, 2x-1, 3x^2-2x] at x = 2 is 12.
  self.assertEqual(grad, 12.0)
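To make the Jacobian-sum argument concrete: the components of diff(xv) are x, x**2 - x and x**3 - x**2, whose derivatives at x = 2 are 1, 3 and 8, summing to 12. The same check with a hand-rolled diff and `tf.GradientTape` (a sketch, assuming `tf` is `tensorflow.compat.v2`, since `math.diff` above comes from the surrounding library):

x = tf.constant(2.0)
with tf.GradientTape() as tape:
  tape.watch(x)
  xv = tf.stack([x, x * x, x * x * x], axis=0)
  dxv = tf.concat([xv[:1], xv[1:] - xv[:-1]], axis=0)  # [x, x^2 - x, x^3 - x^2]
print(tape.gradient(dxv, x))  # 12.0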
Example 14: test_data_fitting

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def test_data_fitting(self):
  """Tests MLE estimation for a simple geometric GLM."""
  n, dim = 100, 3
  dtype = tf.float64
  np.random.seed(234095)
  x = np.random.choice([0, 1], size=[dim, n])
  s = 0.01 * np.sum(x, 0)
  p = 1. / (1 + np.exp(-s))
  y = np.random.geometric(p)
  x_data = tf.convert_to_tensor(value=x, dtype=dtype)
  y_data = tf.expand_dims(tf.convert_to_tensor(value=y, dtype=dtype), -1)

  def neg_log_likelihood(state):
    state_ext = tf.expand_dims(state, 0)
    linear_part = tf.matmul(state_ext, x_data)
    linear_part_ex = tf.stack([tf.zeros_like(linear_part), linear_part],
                              axis=0)
    term1 = tf.squeeze(
        tf.matmul(tf.reduce_logsumexp(linear_part_ex, axis=0), y_data), -1)
    term2 = (0.5 * tf.reduce_sum(state_ext * state_ext, axis=-1) -
             tf.reduce_sum(linear_part, axis=-1))
    return tf.squeeze(term1 + term2)

  self._check_algorithm(
      func=neg_log_likelihood,
      start_point=np.ones(shape=[dim]),
      expected_argmin=[-0.020460034354, 0.171708568111, 0.021200423717])
Example 15: test_spline_broadcast_batch

# Required import: from tensorflow.compat import v2 as tf
# Alternatively: from tensorflow.compat.v2 import stack
def test_spline_broadcast_batch(self, optimize_for_tpu):
  """Tests that the batch shapes of the spline and the interpolation are broadcast."""
  x_data1 = np.linspace(-5.0, 5.0, num=11)
  x_data2 = np.linspace(0.0, 10.0, num=11)
  x_data = np.array([x_data1, x_data2])
  y_data = 1.0 / (2.0 + x_data**2)
  x_data = tf.stack(x_data, axis=0)
  dtype = np.float64

  x_value_1 = tf.constant([[[-1.2, 0.0, 0.3]]], dtype=dtype)
  x_value_2 = tf.constant([-1.2, 0.0, 0.3], dtype=dtype)

  spline = tff.math.interpolation.cubic.build_spline(x_data, y_data)
  result_1 = tff.math.interpolation.cubic.interpolate(
      x_value_1, spline,
      optimize_for_tpu=optimize_for_tpu, dtype=dtype)
  result_2 = tff.math.interpolation.cubic.interpolate(
      x_value_2, spline,
      optimize_for_tpu=optimize_for_tpu, dtype=dtype)

  expected_1 = np.array([[[0.29131469, 0.5, 0.4779499],
                          [0.5, 0.5, 0.45159077]]], dtype=dtype)
  expected_2 = np.array([[0.29131469, 0.5, 0.4779499],
                         [0.5, 0.5, 0.45159077]], dtype=dtype)
  with self.subTest("BroadcastData"):
    self.assertAllClose(result_1, expected_1)
  with self.subTest("BroadcastValues"):
    self.assertAllClose(result_2, expected_2)