This article collects typical code examples showing how the Python function tensorflow.python.data.experimental.ops.prefetching_ops.copy_to_device is used. If you are wondering what copy_to_device does, how to call it, or what real code that uses it looks like, the curated examples below should help.
A total of 15 code examples of the copy_to_device function are shown below, sorted by popularity by default.
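Before the individual examples, here is a minimal sketch of the pattern most of them exercise, written against the public alias tf.data.experimental.copy_to_device rather than the internal module. It assumes TensorFlow 1.13+ running in graph mode with a visible GPU, and the device names are illustrative only.

import tensorflow as tf  # assumes TensorFlow 1.x (graph mode) with a visible GPU

dataset = tf.data.Dataset.range(10)
# copy_to_device returns a transformation for Dataset.apply(): elements are
# produced on the source device ("/cpu:0" by default) and copied to the target.
dataset = dataset.apply(tf.data.experimental.copy_to_device("/gpu:0"))
# Prefetching after the copy keeps a small buffer resident on the target device.
dataset = dataset.prefetch(1)

with tf.device("/gpu:0"):
  iterator = dataset.make_initializable_iterator()
  next_element = iterator.get_next()

with tf.Session() as sess:
  sess.run(iterator.initializer)
  for _ in range(10):
    print(sess.run(next_element))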
Example 1: testInsideFunction
def testInsideFunction(self):
  if test_util.is_gpu_available():
    self.skipTest(
        "b/123899495: Colocation errors for critical sections in map on GPU")
  cs = critical_section_ops.CriticalSection()
  with ops.device("/gpu:0" if test_util.is_gpu_available() else "/cpu:0"):
    v = resource_variable_ops.ResourceVariable(1)

  def fn():
    return v.read_value()

  # map() creates a TensorFlow function.
  ds = dataset_ops.Dataset.range(1)
  if test_util.is_gpu_available():
    ds = (ds.apply(prefetching_ops.copy_to_device("/gpu:0"))
          .apply(prefetching_ops.map_on_gpu(lambda _: cs.execute(fn))))
  else:
    ds = ds.map(lambda _: cs.execute(fn))

  def get_first():
    if context.executing_eagerly():
      return self.evaluate(ds.make_one_shot_iterator().get_next())
    itr = ds.make_initializable_iterator()
    self.evaluate([v.initializer, itr.initializer])
    return self.evaluate(itr.get_next())

  self.assertEqual(1, get_first())
Example 2: testCopyToDeviceWithReInitAndPrefetch
def testCopyToDeviceWithReInitAndPrefetch(self):
  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element.dtype)
  self.assertEqual([], next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    self.evaluate(iterator.initializer)
    for i in range(5):
      self.assertEqual(i, self.evaluate(next_element))
    self.evaluate(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)
Example 3: testCopyToSameDevice
def testCopyToSameDevice(self):
  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:0"))

  with ops.device("/cpu:0"):
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element.dtype)
  self.assertEqual([], next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config) as sess:
    for i in range(10):
      self.assertEqual(i, sess.run(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)
Example 4: testCopyToDeviceGpuWithMap
def testCopyToDeviceGpuWithMap(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  def generator():
    for i in range(10):
      yield i, float(i), str(i)

  host_dataset = dataset_ops.Dataset.from_generator(
      generator, output_types=(dtypes.int32, dtypes.float32, dtypes.string))
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))

  def gpu_map_func(x, y, z):
    return math_ops.square(x), math_ops.square(y), z

  device_dataset = device_dataset.apply(
      prefetching_ops.map_on_gpu(gpu_map_func))
  options = dataset_ops.Options()
  options.experimental_autotune = False
  device_dataset = device_dataset.with_options(options)

  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()

  with self.cached_session() as sess:
    sess.run(iterator.initializer)
    for i in range(10):
      x, y, z = sess.run(next_element)
      self.assertEqual(i**2, x)
      self.assertEqual(float(i**2), y)
      self.assertEqual(util_compat.as_bytes(str(i)), z)
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)
Example 5: testIteratorGetNextAsOptionalOnGPU
def testIteratorGetNextAsOptionalOnGPU(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.range(3)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))
  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_elem = iterator_ops.get_next_as_optional(iterator)
    elem_has_value_t = next_elem.has_value()
    elem_value_t = next_elem.get_value()

  with self.cached_session() as sess:
    # Before initializing the iterator, evaluating the optional fails with
    # a FailedPreconditionError.
    with self.assertRaises(errors.FailedPreconditionError):
      sess.run(elem_has_value_t)
    with self.assertRaises(errors.FailedPreconditionError):
      sess.run(elem_value_t)

    # For each element of the dataset, assert that the optional evaluates to
    # the expected value.
    sess.run(iterator.initializer)
    for i in range(3):
      elem_has_value, elem_value = sess.run([elem_has_value_t, elem_value_t])
      self.assertTrue(elem_has_value)
      self.assertEqual(i, elem_value)

    # After exhausting the iterator, `next_elem.has_value()` will evaluate to
    # false, and attempting to get the value will fail.
    for _ in range(2):
      self.assertFalse(sess.run(elem_has_value_t))
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(elem_value_t)
Example 6: testCopySparseTensorsToDeviceWithPrefetch
def testCopySparseTensorsToDeviceWithPrefetch(self):

  def make_tensor(i):
    return sparse_tensor.SparseTensorValue(
        indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])

  host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element.dtype)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config) as sess:
    for i in range(10):
      actual = sess.run(next_element)
      self.assertAllEqual([i], actual.values)
      self.assertAllEqual([[0, 0]], actual.indices)
      self.assertAllEqual([2, 2], actual.dense_shape)
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)
Example 7: testCopyDictToDeviceWithPrefetch
def testCopyDictToDeviceWithPrefetch(self):
  host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = dataset_ops.make_one_shot_iterator(device_dataset)
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element["a"].dtype)
  self.assertEqual([], next_element["a"].shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    for i in range(10):
      self.assertEqual({"a": i}, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)
Example 8: __init__
def __init__(self, dataset):
  """Creates a new iterator over the given dataset.

  For example:

  ```python
  dataset = tf.data.Dataset.range(4)
  for x in Iterator(dataset):
    print(x)
  ```

  Tensors produced will be placed on the device on which this iterator object
  was created.

  Args:
    dataset: A `tf.data.Dataset` object.

  Raises:
    TypeError: If `dataset` is an unsupported type.
    RuntimeError: When invoked without eager execution enabled.
  """
  if not context.context().device_spec.device_type:
    is_remote_device = False
  else:
    is_remote_device = context.context().device_spec.device_type != "CPU"
  if is_remote_device:
    with ops.device(None):
      # Let the placer figure out where to place the various functions etc.
      # created by the CopyToDeviceDataset.
      dataset = dataset.apply(prefetching_ops.copy_to_device(
          context.context().device_name))
      dataset = dataset.prefetch(1)
  super(Iterator, self).__init__(dataset)
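The is_remote_device branch above only fires when the iterator is constructed inside a non-CPU device scope. Below is a hedged sketch of how that looks from user code, assuming TF 1.x eager execution, an available GPU, and that the class above is the one exposed as tf.contrib.eager.Iterator; it is an illustration of the code path, not an excerpt from the library.

import tensorflow as tf
import tensorflow.contrib.eager as tfe  # TF 1.x only; assumed to expose the Iterator above

tf.enable_eager_execution()

dataset = tf.data.Dataset.range(4)
with tf.device("/gpu:0"):
  # Inside this scope device_spec.device_type is "GPU", so __init__ wraps the
  # dataset in copy_to_device(<current device name>) followed by prefetch(1)
  # before iteration starts.
  for x in tfe.Iterator(dataset):
    print(x)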
Example 9: copy_to_device
def copy_to_device(target_device, source_device="/cpu:0"):
  """A transformation that copies dataset elements to the given `target_device`.

  Args:
    target_device: The name of a device to which elements will be copied.
    source_device: The original device on which `input_dataset` will be placed.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  return prefetching_ops.copy_to_device(target_device, source_device)
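As the docstring says, the return value is a transformation meant for Dataset.apply. The following is a hedged sketch of how a caller might use the wrapper defined just above, reusing the internal module aliases seen in the other examples; the device names are illustrative.

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops

dataset = dataset_ops.Dataset.range(100)
# Elements are built on source_device and copied to target_device; copy_to_device
# here refers to the wrapper defined above.
dataset = dataset.apply(
    copy_to_device(target_device="/gpu:0", source_device="/cpu:0"))

with ops.device("/gpu:0"):
  iterator = dataset.make_initializable_iterator()
  next_element = iterator.get_next()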
Example 10: testCopyToDevicePingPongCPUGPU
def testCopyToDevicePingPongCPUGPU(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  with compat.forward_compatibility_horizon(2018, 8, 4):
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
    back_to_cpu_dataset = device_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))

    with ops.device("/cpu:0"):
      iterator = back_to_cpu_dataset.make_initializable_iterator()
      next_element = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
Example 11: testCopyToDeviceGpuStringsAndPrefetch
def testCopyToDeviceGpuStringsAndPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))

  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()

  with self.cached_session() as sess:
    sess.run(iterator.initializer)
    self.assertAllEqual([b"a", b"b", b"c"], sess.run(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)
Example 12: testCopyToDeviceGpuWithPrefetch
def testCopyToDeviceGpuWithPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)

  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()

  with self.cached_session() as sess:
    sess.run(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, sess.run(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)
Example 13: testCopyToDeviceGpuStrings
def testCopyToDeviceGpuStrings(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))

  with ops.device("/gpu:0"):
    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

  with self.cached_session(
      config=config_pb2.ConfigProto(allow_soft_placement=False)):
    self.evaluate(iterator.initializer)
    self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)
Example 14: testCopyToDeviceGpuWithPrefetch
def testCopyToDeviceGpuWithPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)

  with ops.device("/gpu:0"):
    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

  with self.cached_session(
      config=config_pb2.ConfigProto(allow_soft_placement=False)):
    self.evaluate(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)
Example 15: __init__
def __init__(self, dataset):
  """Creates a new iterator over the given dataset.

  For example:

  ```python
  dataset = tf.data.Dataset.range(4)
  for x in Iterator(dataset):
    print(x)
  ```

  Tensors produced will be placed on the device on which this iterator object
  was created.

  Args:
    dataset: A `tf.data.Dataset` object.

  Raises:
    TypeError: If `dataset` is an unsupported type.
    RuntimeError: When invoked without eager execution enabled.
  """
  # pylint: disable=protected-access
  if (isinstance(dataset, prefetching_ops._PrefetchToDeviceDataset)
      or (isinstance(dataset, dataset_ops.DatasetV1Adapter)
          and isinstance(
              dataset._dataset, prefetching_ops._PrefetchToDeviceDataset))):
    raise TypeError(
        "`tf.data.experimental.prefetch_to_device()` is not compatible with "
        "`tf.contrib.eager.Iterator`. Use `for ... in dataset:` to iterate "
        "over the dataset instead.")
  # pylint: enable=protected-access
  if not context.context().device_spec.device_type:
    is_remote_device = False
  else:
    is_remote_device = context.context().device_spec.device_type != "CPU"
  if is_remote_device:
    with ops.device(None):
      # Let the placer figure out where to place the various functions etc.
      # created by the CopyToDeviceDataset.
      dataset = dataset.apply(prefetching_ops.copy_to_device(
          context.context().device_name))
      dataset = dataset.prefetch(1)
  super(Iterator, self).__init__(dataset)
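The isinstance guard at the top is what produces the TypeError: tf.data.experimental.prefetch_to_device wraps the dataset in a _PrefetchToDeviceDataset, which this iterator refuses. Here is a hedged sketch of the failing call and the workaround the error message itself suggests, assuming TF 1.x eager execution with an available GPU; Iterator refers to the class defined above.

import tensorflow as tf
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.experimental.ops import prefetching_ops

tf.enable_eager_execution()  # TF 1.x; eager is on by default in TF 2.x

dataset = dataset_ops.Dataset.range(4)
prefetched = dataset.apply(prefetching_ops.prefetch_to_device("/gpu:0"))

# Rejected: prefetched is a _PrefetchToDeviceDataset, so the constructor above
# raises TypeError.
#   Iterator(prefetched)

# Workaround suggested by the error message: iterate the dataset directly.
for x in prefetched:
  print(x)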