This article collects typical usage examples of the Python function tensorflow.python.data.experimental.ops.batching.unbatch. If you are unsure what exactly unbatch does or how to use it, the curated code examples below may help.
The following lists 15 code examples of the unbatch function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
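Before the examples, here is a minimal sketch of what the transformation does. It is written against the public tf.data API (tf.data.Dataset.unbatch(), assuming TF 2.x) rather than the internal batching module, but it exercises the same behavior: unbatch splits each batched element of a dataset back into individual elements.

import tensorflow as tf

# Minimal sketch, assuming TF 2.x; Dataset.unbatch() wraps the same
# transformation as the internal batching.unbatch() used in the examples below.
dataset = tf.data.Dataset.range(10).batch(4)   # elements: [0..3], [4..7], [8, 9]
dataset = dataset.unbatch()                    # elements: 0, 1, 2, ..., 9
print(list(dataset.as_numpy_iterator()))       # [0, 1, ..., 9]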
Example 1: testUnbatchDatasetWithDenseAndSparseTensor
def testUnbatchDatasetWithDenseAndSparseTensor(self):
  st = sparse_tensor.SparseTensorValue(
      indices=[[i, i] for i in range(10)],
      values=list(range(10)),
      dense_shape=[10, 10])
  data = dataset_ops.Dataset.from_tensors((list(range(10)), st))
  data = data.apply(batching.unbatch())
  data = data.batch(5)
  data = data.apply(batching.unbatch())
  expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10]))
                     for i in range(10)]
  self.assertDatasetProduces(data, expected_output=expected_output)
Example 2: testUnbatchDatasetWithRaggedTensor
def testUnbatchDatasetWithRaggedTensor(self):
  rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
                                          [[5]], [[6]], [[7]], [[8]], [[9]]])
  data = dataset_ops.Dataset.from_tensors(rt)
  data = data.apply(batching.unbatch())
  data = data.batch(5)
  data = data.batch(2)
  data = data.apply(batching.unbatch())
  expected_output = [
      ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]]]),
      ragged_factory_ops.constant_value([[[5]], [[6]], [[7]], [[8]], [[9]]]),
  ]
  self.assertDatasetProduces(
      data, expected_output=expected_output)
Example 3: testUnbatchDatasetWithDenseSparseAndRaggedTensor
def testUnbatchDatasetWithDenseSparseAndRaggedTensor(self):
  st = sparse_tensor.SparseTensorValue(
      indices=[[i, i] for i in range(10)],
      values=list(range(10)),
      dense_shape=[10, 10])
  rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
                                          [[5]], [[6]], [[7]], [[8]], [[9]]])
  data = dataset_ops.Dataset.from_tensors((list(range(10)), st, rt))
  data = data.apply(batching.unbatch())
  data = data.batch(5)
  data = data.apply(batching.unbatch())
  expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10]),
                      ragged_factory_ops.constant_value([[i]]))
                     for i in range(10)]
  self.assertDatasetProduces(
      data, expected_output=expected_output)
Example 4: testSkipEagerUnbatchDynamicShapeMismatch
def testSkipEagerUnbatchDynamicShapeMismatch(self):
  ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
  ph2 = array_ops.placeholder(dtypes.int32, shape=None)
  data = dataset_ops.Dataset.from_tensors((ph1, ph2))
  data = data.apply(batching.unbatch())
  iterator = dataset_ops.make_initializable_iterator(data)
  next_element = iterator.get_next()

  with self.cached_session() as sess:
    # Mismatch in the 0th dimension.
    sess.run(
        iterator.initializer,
        feed_dict={
            ph1: np.arange(7).astype(np.int32),
            ph2: np.arange(8).astype(np.int32)
        })
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(next_element)

    # No 0th dimension (i.e. scalar value) for one component.
    sess.run(
        iterator.initializer,
        feed_dict={
            ph1: np.arange(7).astype(np.int32),
            ph2: 7
        })
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(next_element)
Example 5: benchmarkNativeUnbatch
def benchmarkNativeUnbatch(self):
  batch_sizes = [1, 2, 5, 10, 20, 50]
  elems_per_trial = 10000
  with ops.Graph().as_default():
    dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
    batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
    dataset = dataset.batch(batch_size_placeholder)
    dataset = dataset.apply(batching.unbatch())
    dataset = dataset.skip(elems_per_trial)
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()

    with session.Session() as sess:
      for batch_size in batch_sizes:
        deltas = []
        for _ in range(5):
          sess.run(
              iterator.initializer,
              feed_dict={batch_size_placeholder: batch_size})
          start = time.time()
          sess.run(next_element.op)
          end = time.time()
          deltas.append((end - start) / elems_per_trial)

        median_wall_time = np.median(deltas)
        print("Unbatch (native) batch size: %d Median wall time per element:"
              " %f microseconds" % (batch_size, median_wall_time * 1e6))
        self.report_benchmark(
            iters=10000,
            wall_time=median_wall_time,
            name="benchmark_unbatch_dataset_native_batch_size_%d" % batch_size)
Example 6: benchmarkNativeUnbatch
def benchmarkNativeUnbatch(self):
  batch_sizes = [1, 2, 5, 10, 20, 50]
  elems_per_trial = 10000
  with ops.Graph().as_default():
    dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
    batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
    dataset = dataset.batch(batch_size_placeholder)
    dataset = dataset.apply(batching.unbatch())
    dataset = dataset.skip(elems_per_trial)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    dataset = dataset.with_options(options)
    iterator = dataset_ops.make_initializable_iterator(dataset)
    next_element = iterator.get_next()

    with session.Session() as sess:
      for batch_size in batch_sizes:
        deltas = []
        for _ in range(5):
          sess.run(
              iterator.initializer,
              feed_dict={batch_size_placeholder: batch_size})
          start = time.time()
          sess.run(next_element.op)
          end = time.time()
          deltas.append((end - start) / elems_per_trial)

        median_wall_time = np.median(deltas)
        self.report_benchmark(
            iters=10000,
            wall_time=median_wall_time,
            name="native_batch_size_%d" % batch_size)
Example 7: build_dataset
def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2):
  components = (
      np.arange(tensor_slice_len),
      np.array([[1, 2, 3]]) * np.arange(tensor_slice_len)[:, np.newaxis],
      np.array(multiplier) * np.arange(tensor_slice_len))
  return dataset_ops.Dataset.from_tensor_slices(components).batch(
      batch_size).apply(batching.unbatch())
Example 8: testUnbatchSingleElementTupleDataset
def testUnbatchSingleElementTupleDataset(self):
  data = tuple([(math_ops.range(10),) for _ in range(3)])
  data = dataset_ops.Dataset.from_tensor_slices(data)
  expected_types = ((dtypes.int32,),) * 3
  data = data.batch(2)
  self.assertEqual(expected_types, data.output_types)
  data = data.apply(batching.unbatch())
  self.assertEqual(expected_types, data.output_types)
  self.assertDatasetProduces(data, [((i,),) * 3 for i in range(10)])
Example 9: testUnbatchScalarDataset
def testUnbatchScalarDataset(self):
  data = tuple([math_ops.range(10) for _ in range(3)])
  data = dataset_ops.Dataset.from_tensor_slices(data)
  expected_types = (dtypes.int32,) * 3
  data = data.batch(2)
  self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
  data = data.apply(batching.unbatch())
  self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
  self.assertDatasetProduces(data, [(i,) * 3 for i in range(10)])
Example 10: testUnbatchEmpty
def testUnbatchEmpty(self):
  data = dataset_ops.Dataset.from_tensors(
      (constant_op.constant([]), constant_op.constant([], shape=[0, 4]),
       constant_op.constant([], shape=[0, 4, 0])))
  data = data.apply(batching.unbatch())
  iterator = data.make_one_shot_iterator()
  next_element = iterator.get_next()

  with self.cached_session() as sess:
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)
Example 11: testUnbatchDatasetWithSparseTensor
def testUnbatchDatasetWithSparseTensor(self):
  st = sparse_tensor.SparseTensorValue(
      indices=[[i, i] for i in range(10)],
      values=list(range(10)),
      dense_shape=[10, 10])
  data = dataset_ops.Dataset.from_tensors(st)
  data = data.apply(batching.unbatch())
  data = data.batch(5)
  data = data.apply(batching.unbatch())
  iterator = data.make_one_shot_iterator()
  next_element = iterator.get_next()

  with self.cached_session() as sess:
    for i in range(10):
      st_row = self.evaluate(next_element)
      self.assertEqual([i], st_row.indices)
      self.assertEqual([i], st_row.values)
      self.assertEqual([10], st_row.dense_shape)
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)
Example 12: testUnbatchDatasetWithStrings
def testUnbatchDatasetWithStrings(self):
  data = tuple([math_ops.range(10) for _ in range(3)])
  data = dataset_ops.Dataset.from_tensor_slices(data)
  data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
  expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
  data = data.batch(2)
  self.assertEqual(expected_types, data.output_types)
  data = data.apply(batching.unbatch())
  self.assertEqual(expected_types, data.output_types)
  self.assertDatasetProduces(
      data, [(i, compat.as_bytes(str(i)), i) for i in range(10)])
Example 13: make_dataset_iterator
def make_dataset_iterator(self, dataset):
  """Make iterators for each of the TPU hosts.

  We first unbatch the user's input dataset and then rebatch it with the
  per replica batch size that is calculated using
  `global_batch_size // num_replicas_in_sync`. The currently supported cases
  are as follows:
  `dataset.batch()` is the last operation on the dataset.
  `dataset.apply(map_and_batch)` is the last operation on the dataset.
  `dataset.batch().prefetch()` are the last 2 operations on the dataset.
  `dataset.apply(map_and_batch).prefetch()` are the last 2 operations.

  Args:
    dataset: The `tf.data` dataset passed by the user.

  Returns:
    iterator: InputIterator created for each of the host machines.
  """
  # TODO(sourabhbajaj): Remove this in lieu of distributed datasets
  def _get_dataset_batch_size(dataset):
    """Get the global batch size from the dataset object."""
    # pylint: disable=protected-access
    if isinstance(dataset, dataset_ops.BatchDataset):
      return tensor_util.constant_value(dataset._batch_size)
    elif isinstance(dataset, batching._MapAndBatchDataset):
      return dataset._batch_size
    elif isinstance(dataset, dataset_ops.PrefetchDataset):
      return _get_dataset_batch_size(dataset._input_dataset)
    # pylint: enable=protected-access
    raise ValueError(
        "Unable to fetch the batch size from the input dataset. `batch` "
        "`map_and_batch` need to be the last operations on the dataset. "
        "The batch operations can be followed by a prefetch.")

  global_batch_size = _get_dataset_batch_size(dataset)
  if global_batch_size % self.num_replicas_in_sync:
    raise ValueError(
        "Batch size %s cannot be sharded evenly across replicas %s" % (
            global_batch_size, self.num_replicas_in_sync))
  per_replica_batch_size = global_batch_size // self.num_replicas_in_sync
  dataset = dataset.apply(batching.unbatch())
  dataset = dataset.batch(per_replica_batch_size, drop_remainder=True)

  worker_devices = [
      (self.get_host(hid), [self.get_host_cpu_device(hid)])
      for hid in range(self.num_hosts)
  ]
  distributed_dataset = values.MultiWorkerDataset(
      functools.partial(self._call_dataset_fn, lambda: dataset),
      worker_devices)

  # TODO(priyag): Return distribution strategy specific InputIterator
  return distributed_dataset.make_initializable_iterator()
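The docstring in Example 13 describes an unbatch-then-rebatch pattern: the globally batched dataset is flattened and then rebatched at the per-replica batch size. A minimal, self-contained sketch of that pattern follows; the replica count and batch sizes are hypothetical, and the public tf.data API is used in place of the internal module.

import tensorflow as tf

# Hypothetical sizes, for illustration only.
global_batch_size = 8
num_replicas_in_sync = 2
per_replica_batch_size = global_batch_size // num_replicas_in_sync  # 4

dataset = tf.data.Dataset.range(16).batch(global_batch_size)  # two batches of 8
dataset = dataset.unbatch()                                   # back to 16 scalar elements
dataset = dataset.batch(per_replica_batch_size, drop_remainder=True)  # four batches of 4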
Example 14: testUnbatchWithUnknownRankInput
def testUnbatchWithUnknownRankInput(self):
  placeholder = array_ops.placeholder(dtypes.int32)
  dataset = dataset_ops.Dataset.from_tensors(placeholder).apply(
      batching.unbatch())
  iterator = dataset.make_initializable_iterator()
  next_elem = iterator.get_next()

  with self.cached_session() as sess:
    sess.run(iterator.initializer, feed_dict={placeholder: [0, 1, 2, 3]})
    for i in range(4):
      self.assertEqual(i, self.evaluate(next_elem))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_elem)
Example 15: testUnbatchMultiElementTupleDataset
def testUnbatchMultiElementTupleDataset(self):
  data = tuple([(math_ops.range(10 * i, 10 * i + 10),
                 array_ops.fill([10], "hi")) for i in range(3)])
  data = dataset_ops.Dataset.from_tensor_slices(data)
  expected_types = ((dtypes.int32, dtypes.string),) * 3
  data = data.batch(2)
  self.assertAllEqual(expected_types, data.output_types)
  data = data.apply(batching.unbatch())
  self.assertAllEqual(expected_types, data.output_types)
  self.assertDatasetProduces(
      data,
      [((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")) for i in range(10)])