This article collects typical usage examples of the tensorflow.compat.v2.range method in Python. If you are wondering how v2.range is used in practice, the curated code samples below may help. You can also explore further usage examples for the module it belongs to, tensorflow.compat.v2.
The following presents 15 code examples of the v2.range method, sorted by popularity by default.
Example 1: geomspace
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def geomspace(start, stop, num=50, endpoint=True, dtype=float):  # pylint: disable=missing-docstring
  if dtype:
    dtype = utils.result_type(dtype)
  if num < 0:
    raise ValueError('Number of samples {} must be non-negative.'.format(num))
  if not num:
    return empty([0])
  step = 1.
  if endpoint:
    if num > 1:
      step = tf.pow((stop / start), 1 / (num - 1))
  else:
    step = tf.pow((stop / start), 1 / num)
  result = tf.cast(tf.range(num), step.dtype)
  result = tf.pow(step, result)
  result = tf.multiply(result, start)
  if dtype:
    result = tf.cast(result, dtype=dtype)
  return arrays_lib.tensor_to_ndarray(result)
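A minimal, self-contained sketch (values chosen for illustration, not from the original module) of the geometric progression geomspace builds with tf.range, assuming eager execution under TF 2.x:

import tensorflow.compat.v2 as tf

start, stop, num = 1.0, 1000.0, 4
# Common ratio for endpoint=True, mirroring the step computation above.
step = tf.pow(stop / start, 1 / (num - 1))
result = start * tf.pow(step, tf.cast(tf.range(num), step.dtype))
print(result.numpy())  # approximately [1., 10., 100., 1000.]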
Example 2: rot90
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def rot90(m, k=1, axes=(0, 1)):  # pylint: disable=missing-docstring
  m_rank = tf.rank(m)
  ax1, ax2 = utils._canonicalize_axes(axes, m_rank)  # pylint: disable=protected-access
  k = k % 4
  if k == 0:
    return m
  elif k == 2:
    return flip(flip(m, ax1), ax2)
  else:
    perm = tf.range(m_rank)
    perm = tf.tensor_scatter_nd_update(perm, [[ax1], [ax2]], [ax2, ax1])
    if k == 1:
      return transpose(flip(m, ax2), perm)
    else:
      return flip(transpose(m, perm), ax2)
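A small standalone illustration (not from the original module) of the axis-swap trick rot90 uses: start from an identity permutation built with tf.range and swap two entries with tf.tensor_scatter_nd_update.

import tensorflow.compat.v2 as tf

m_rank, ax1, ax2 = 3, 0, 2
perm = tf.range(m_rank)  # [0, 1, 2]
perm = tf.tensor_scatter_nd_update(perm, [[ax1], [ax2]], [ax2, ax1])
print(perm.numpy())  # [2, 1, 0]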
Example 3: diff
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def diff(a, n=1, axis=-1):
  def f(a):
    nd = a.shape.rank
    if (axis + nd if axis < 0 else axis) >= nd:
      raise ValueError("axis %s is out of bounds for array of dimension %s" %
                       (axis, nd))
    if n < 0:
      raise ValueError("order must be non-negative but got %s" % n)
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    op = tf.not_equal if a.dtype == tf.bool else tf.subtract
    for _ in range(n):
      a = op(a[slice1], a[slice2])
    return a
  return _scalar(f, a)
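A self-contained sketch (example values invented) of the first-order slicing diff applies along the last axis:

import tensorflow.compat.v2 as tf

a = tf.constant([1, 4, 9, 16])
first_diff = tf.subtract(a[1:], a[:-1])  # same as slice1/slice2 above with n=1
print(first_diff.numpy())  # [3, 5, 7]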
Example 4: test_many_small_batches_same_as_one_big_batch
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def test_many_small_batches_same_as_one_big_batch(self):
  dim = 2
  num_results_per_batch = 1
  num_batches = 3
  seed = 1925
  sample1, _ = random.halton.sample(
      dim, num_results_per_batch * num_batches, seed=seed)
  batch_indices = (
      tf.range(i * num_results_per_batch, (i + 1) * num_results_per_batch)
      for i in range(num_batches))
  sample2 = (
      random.halton.sample(dim, sequence_indices=sequence_indices, seed=seed)
      for sequence_indices in batch_indices)
  result_set1 = set(tuple(row) for row in self.evaluate(sample1))
  result_set2 = set()
  for batch, _ in sample2:
    result_set2.update(tuple(row) for row in self.evaluate(batch))
  self.assertEqual(result_set1, result_set2)
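A minimal standalone illustration (numbers taken from the test's constants) of how the generator of batch indices splits the index range with tf.range:

import tensorflow.compat.v2 as tf

num_results_per_batch, num_batches = 1, 3
batch_indices = [
    tf.range(i * num_results_per_batch, (i + 1) * num_results_per_batch)
    for i in range(num_batches)
]
print([b.numpy().tolist() for b in batch_indices])  # [[0], [1], [2]]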
Example 5: testOutputIsPermutation
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def testOutputIsPermutation(self):
  """Checks that stateless_random_shuffle outputs a permutation."""
  for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
    identity_permutation = tf.range(10, dtype=dtype)
    random_shuffle_seed_1 = tff_rnd.stateless_random_shuffle(
        identity_permutation, seed=tf.constant((1, 42), tf.int64))
    random_shuffle_seed_2 = tff_rnd.stateless_random_shuffle(
        identity_permutation, seed=tf.constant((2, 42), tf.int64))
    # Check that the shuffles are of the correct dtype
    for shuffle in (random_shuffle_seed_1, random_shuffle_seed_2):
      np.testing.assert_equal(shuffle.dtype, dtype.as_numpy_dtype)
    random_shuffle_seed_1 = self.evaluate(random_shuffle_seed_1)
    random_shuffle_seed_2 = self.evaluate(random_shuffle_seed_2)
    identity_permutation = self.evaluate(identity_permutation)
    # Check that the shuffles are different
    self.assertTrue(
        np.abs(random_shuffle_seed_1 - random_shuffle_seed_2).max())
    # Check that the shuffles are indeed permutations
    for shuffle in (random_shuffle_seed_1, random_shuffle_seed_2):
      self.assertAllEqual(set(shuffle), set(identity_permutation))
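A quick standalone sketch (using numpy's shuffle as a stand-in for tff_rnd.stateless_random_shuffle) of the permutation check the test performs: a shuffle of tf.range(n) must contain exactly the same set of values.

import numpy as np
import tensorflow.compat.v2 as tf

identity = tf.range(10, dtype=tf.int64).numpy()
shuffled = np.random.permutation(identity)  # stand-in for the stateless shuffle
assert set(shuffled) == set(identity)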
Example 6: testOutputIsIndependentOfInputValues
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def testOutputIsIndependentOfInputValues(self):
  """stateless_random_shuffle output is independent of input_tensor values."""
  # Generate a sorted array of random numbers to check that the result
  # is independent of `input_tensor` values
  np.random.seed(25)
  random_input = np.random.normal(size=[10])
  random_input.sort()
  for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
    # Permutation of a sequence [0, 1, .., 9]
    random_permutation = tff_rnd.stateless_random_shuffle(
        tf.range(10, dtype=dtype), seed=(100, 42))
    random_permutation = self.evaluate(random_permutation)
    # Shuffle `random_input` with the same seed
    random_shuffle_control = tff_rnd.stateless_random_shuffle(
        random_input, seed=(100, 42))
    random_shuffle_control = self.evaluate(random_shuffle_control)
    # Check that the generated permutation does not depend on the underlying
    # values
    np.testing.assert_array_equal(
        np.argsort(random_permutation), np.argsort(random_shuffle_control))
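A standalone illustration (with made-up data, not the tff_rnd API) of the argsort comparison at the end of the test: two arrays shuffled by the same permutation yield identical argsort results.

import numpy as np

perm = np.array([3, 0, 2, 1])
a = np.arange(4)[perm]                        # a permutation of [0..3]
b = np.sort(np.random.normal(size=4))[perm]   # different values, same permutation
np.testing.assert_array_equal(np.argsort(a), np.argsort(b))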
Example 7: test_forward_unconnected_gradient
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def test_forward_unconnected_gradient(self):
  t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
  zeros = tf.zeros([2], dtype=t.dtype)
  func = lambda t: tf.stack([zeros, zeros, zeros], axis=0)  # Shape [3, 2]
  expected_result = [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]
  with self.subTest("EagerExecution"):
    fwd_grad = self.evaluate(tff.math.fwd_gradient(
        func, t, unconnected_gradients=tf.UnconnectedGradients.ZERO))
    self.assertEqual(fwd_grad.shape, (3, 2))
    np.testing.assert_allclose(fwd_grad, expected_result)
  with self.subTest("GraphExecution"):
    @tf.function
    def grad_computation():
      y = func(t)
      return tff.math.fwd_gradient(
          y, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
    fwd_grad = self.evaluate(grad_computation())
    self.assertEqual(fwd_grad.shape, (3, 2))
    np.testing.assert_allclose(fwd_grad, expected_result)
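A self-contained sketch of the unconnected-gradient behaviour this (and the next) test exercise, written with plain tf.GradientTape rather than the tff.math helpers: when the output does not depend on `t`, UnconnectedGradients.ZERO returns zeros instead of None.

import tensorflow.compat.v2 as tf

t = tf.range(1, 3, dtype=tf.float32)
with tf.GradientTape() as tape:
  tape.watch(t)
  y = tf.zeros([3, 2])  # does not depend on t
grad = tape.gradient(y, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
print(grad.numpy())  # [0., 0.]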
Example 8: test_backward_unconnected_gradient
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def test_backward_unconnected_gradient(self):
  t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
  zeros = tf.zeros([2], dtype=t.dtype)
  expected_result = [0.0, 0.0]
  func = lambda t: tf.stack([zeros, zeros, zeros], axis=0)  # Shape [3, 2]
  with self.subTest("EagerExecution"):
    backward_grad = self.evaluate(tff.math.gradients(
        func, t, unconnected_gradients=tf.UnconnectedGradients.ZERO))
    self.assertEqual(backward_grad.shape, (2,))
    np.testing.assert_allclose(backward_grad, expected_result)
  with self.subTest("GraphExecution"):
    @tf.function
    def grad_computation():
      y = func(t)
      return tff.math.gradients(
          y, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
    backward_grad = self.evaluate(grad_computation())
    self.assertEqual(backward_grad.shape, (2,))
    np.testing.assert_allclose(backward_grad, expected_result)
Example 9: test_expected_continuation
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def test_expected_continuation(self):
  """Tests that expected continuation works in V=1 case.

  In particular this verifies that the regression done to get the expected
  continuation value is performed on those elements which have a positive
  exercise value.
  """
  for dtype in (np.float32, np.float64):
    a = tf.range(start=-2, limit=3, delta=1, dtype=dtype)
    design = tf.concat([a, a], axis=0)
    design = tf.concat([[tf.ones_like(design), design]], axis=1)
    # These values ensure that the expected continuation value is `(1,...,1).`
    exercise_now = tf.expand_dims(
        tf.concat([tf.ones_like(a), tf.zeros_like(a)], axis=0), -1)
    cashflow = tf.expand_dims(
        tf.concat([tf.ones_like(a), -tf.ones_like(a)], axis=0), -1)
    expected_exercise = lsm.expected_exercise_fn(
        design, cashflow, exercise_now)
    self.assertAllClose(expected_exercise, tf.ones_like(cashflow))
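Standalone illustration of the tf.range call used to build the inputs above: five evenly spaced values from -2 to 2.

import tensorflow.compat.v2 as tf

a = tf.range(start=-2, limit=3, delta=1, dtype=tf.float64)
print(a.numpy())  # [-2. -1.  0.  1.  2.]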
Example 10: labels_of_top_ranked_predictions_in_batch
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def labels_of_top_ranked_predictions_in_batch(labels, predictions):
  """Applying tf.metrics.mean to this gives precision at 1.

  Args:
    labels: minibatch of dense 0/1 labels, shape [batch_size, num_classes]
    predictions: minibatch of predictions of the same shape

  Returns:
    one-dimensional tensor top_labels, where top_labels[i]=1.0 iff the
    top-scoring prediction for batch element i has label 1.0
  """
  indices_of_top_preds = tf.cast(tf.argmax(input=predictions, axis=1), tf.int32)
  batch_size = tf.reduce_sum(input_tensor=tf.ones_like(indices_of_top_preds))
  row_indices = tf.range(batch_size)
  thresholded_labels = tf.where(labels > 0.0, tf.ones_like(labels),
                                tf.zeros_like(labels))
  label_indices_to_gather = tf.transpose(
      a=tf.stack([row_indices, indices_of_top_preds]))
  return tf.gather_nd(thresholded_labels, label_indices_to_gather)
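A self-contained sketch (made-up two-row minibatch) of the gather_nd pattern the function uses: for each row, pick the label at the top-scoring column.

import tensorflow.compat.v2 as tf

labels = tf.constant([[0., 1.], [1., 0.]])
predictions = tf.constant([[0.2, 0.8], [0.9, 0.1]])
top = tf.cast(tf.argmax(predictions, axis=1), tf.int32)  # [1, 0]
rows = tf.range(tf.shape(labels)[0])
picked = tf.gather_nd(labels, tf.stack([rows, top], axis=1))
print(picked.numpy())  # [1., 1.] -> precision at 1 is 1.0 for this batch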
Example 11: train
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def train(self,
          num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]:
  """Implements model training with multiple steps.

  In training, it is common to break the total training steps into several
  training loops, so users can do checkpointing, write summaries and run some
  python callbacks. This is necessary for getting good performance in TPU
  training, as the overhead of launching a multi-worker tf.function may be
  large in Eager mode. It is usually encouraged to create a host training loop
  (e.g. a loop over `tf.range` wrapping `strategy.run` inside a
  `tf.function`) in the TPU case. For cases that don't require a host
  training loop to achieve peak performance, users can just implement a simple
  python loop to drive each step.

  Args:
    num_steps: A guideline for how many training steps to run. Note that it is
      up to the model what constitutes a "step" (this may involve more than
      one update to model parameters, e.g. if training a GAN).

  Returns:
    The function may return a dictionary of `Tensors`, which will be
    written to logs and as TensorBoard summaries.
  """
  pass
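A hedged sketch of the host-loop pattern the docstring describes (the `train_loop`, `step_fn`, and `counter` names are placeholders, not part of the original class): iterating over tf.range inside a tf.function lets the whole multi-step loop be staged as a single graph.

import tensorflow.compat.v2 as tf

@tf.function
def train_loop(num_steps, step_fn):
  for _ in tf.range(num_steps):  # AutoGraph turns this into a tf.while_loop
    step_fn()

counter = tf.Variable(0)
train_loop(tf.constant(5), lambda: counter.assign_add(1))
print(counter.numpy())  # 5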
Example 12: arange
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def arange(start, stop=None, step=1, dtype=None):
  """Returns `step`-separated values in the range [start, stop).

  Args:
    start: Start of the interval. Included in the range.
    stop: End of the interval. If not specified, `start` is treated as 0 and
      the `start` value is used as `stop`. If specified, it is not included in
      the range if `step` is integer. When `step` is floating point, it may or
      may not be included.
    step: The difference between 2 consecutive values in the output range. It
      is recommended to use `linspace` instead of using non-integer values for
      `step`.
    dtype: Optional. Type of the resulting ndarray. Could be a python type, a
      NumPy type or a TensorFlow `DType`. If not provided, the largest type of
      `start`, `stop`, `step` is used.

  Raises:
    ValueError: If step is zero.
  """
  if not step:
    raise ValueError('step must be non-zero.')
  if dtype:
    dtype = utils.result_type(dtype)
  else:
    if stop is None:
      dtype = utils.result_type(start, step)
    else:
      dtype = utils.result_type(start, step, stop)
  if step > 0 and ((stop is not None and start > stop) or
                   (stop is None and start < 0)):
    return array([], dtype=dtype)
  if step < 0 and ((stop is not None and start < stop) or
                   (stop is None and start > 0)):
    return array([], dtype=dtype)
  # TODO(srbs): There are some bugs when start or stop is float type and dtype
  # is integer type.
  return arrays_lib.tensor_to_ndarray(
      tf.cast(tf.range(start, limit=stop, delta=step), dtype=dtype))
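Standalone illustration of the underlying tf.range call; as the docstring warns, non-integer steps can behave surprisingly, so linspace is generally preferred in that case.

import tensorflow.compat.v2 as tf

print(tf.range(5).numpy())                   # [0 1 2 3 4]
print(tf.range(2, 10, delta=3).numpy())      # [2 5 8]
print(tf.range(0., 1., delta=0.25).numpy())  # [0.   0.25 0.5  0.75]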
Example 13: swapaxes
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def swapaxes(a, axis1, axis2):  # pylint: disable=missing-docstring
  a = asarray(a)
  a_rank = tf.rank(a)
  if axis1 < 0:
    axis1 += a_rank
  if axis2 < 0:
    axis2 += a_rank
  perm = tf.range(a_rank)
  perm = tf.tensor_scatter_nd_update(perm, [[axis1], [axis2]], [axis2, axis1])
  a = tf.transpose(a, perm)
  return utils.tensor_to_ndarray(a)
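A self-contained sketch of the transpose swapaxes performs: swapping axes 0 and 2 of a [2, 3, 4] tensor gives shape [4, 3, 2].

import tensorflow.compat.v2 as tf

a = tf.zeros([2, 3, 4])
perm = tf.tensor_scatter_nd_update(tf.range(tf.rank(a)), [[0], [2]], [2, 0])
print(tf.transpose(a, perm).shape)  # (4, 3, 2)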
Example 14: diag_indices
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def diag_indices(n, ndim=2):  # pylint: disable=missing-docstring,redefined-outer-name
  if n < 0:
    raise ValueError(
        'n argument to diag_indices must be nonnegative, got {}'.format(n))
  if ndim < 0:
    raise ValueError(
        'ndim argument to diag_indices must be nonnegative, got {}'.format(ndim))
  return (tf.range(n),) * ndim
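Standalone sketch of what diag_indices returns: the same tf.range repeated once per dimension, which indexes the main diagonal.

import tensorflow.compat.v2 as tf

idx = (tf.range(3),) * 2                       # what diag_indices(3) returns
m = tf.eye(3)
diag = tf.gather_nd(m, tf.stack(idx, axis=1))  # indices [[0,0],[1,1],[2,2]]
print(diag.numpy())  # [1. 1. 1.]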
Example 15: vander
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import range [as alias]
def vander(x, N=None, increasing=False):  # pylint: disable=missing-docstring,invalid-name
  x = asarray(x).data
  x_shape = tf.shape(x)
  N = N or x_shape[0]
  N_temp = utils.get_static_value(N)  # pylint: disable=invalid-name
  if N_temp is not None:
    N = N_temp
    if N < 0:
      raise ValueError('N must be nonnegative')
  else:
    tf.debugging.Assert(N >= 0, [N])
  rank = tf.rank(x)
  rank_temp = utils.get_static_value(rank)
  if rank_temp is not None:
    rank = rank_temp
    if rank != 1:
      raise ValueError('x must be a one-dimensional array')
  else:
    tf.debugging.Assert(rank == 1, [rank])
  if increasing:
    start = 0
    limit = N
    delta = 1
  else:
    start = N - 1
    limit = -1
    delta = -1
  x = tf.expand_dims(x, -1)
  return utils.tensor_to_ndarray(
      tf.math.pow(x, tf.cast(tf.range(start, limit, delta), dtype=x.dtype)))
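A self-contained illustration (small made-up vector) of the power pattern vander builds with tf.range: decreasing powers of each element by default.

import tensorflow.compat.v2 as tf

x = tf.constant([1., 2., 3.])
powers = tf.cast(tf.range(2, -1, -1), x.dtype)  # [2., 1., 0.]
print(tf.math.pow(tf.expand_dims(x, -1), powers).numpy())
# [[1. 1. 1.]
#  [4. 2. 1.]
#  [9. 3. 1.]]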