This article collects typical usage examples of the Python function tensorflow.python.data.util.sparse.as_dense_types. If you are unsure what as_dense_types does, how to call it, or what real uses of it look like, the curated examples below should help.
The following sections present 15 code examples of the as_dense_types function, sorted by popularity by default.
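Before working through the examples, it helps to know what as_dense_types computes: it walks a nested structure of component dtypes together with a matching structure of component classes, and replaces the dtype of every SparseTensor component with the dtype of that component's serialized (dense) representation, leaving plain Tensor components unchanged. The sketch below is a minimal stand-in for illustration only, assuming the dtypes.variant serialization used by the TF 1.x line these examples come from; the real implementation lives in tensorflow/python/data/util/sparse.py.

from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor


def as_dense_types_sketch(types, classes):
  """Illustrative stand-in for sparse.as_dense_types (not the real source)."""
  # Components whose class is SparseTensor cross device and resource
  # boundaries in serialized form, so their declared dtype becomes the
  # dtype of that serialized representation (assumed here: dtypes.variant).
  return nest.map_structure(
      lambda typ, cls: dtypes.variant
      if cls is sparse_tensor.SparseTensor else typ,
      types, classes)
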
Example 1: materialize
def materialize(self, shared_name=None, container=None):
  """Materialize creates a MaterializedIndexedDataset.

  IndexedDatasets can be combined through operations such as TBD. Therefore,
  they are only materialized when absolutely required.

  Args:
    shared_name: a string for the shared name to use for the resource.
    container: a string for the container to store the resource.

  Returns:
    A MaterializedIndexedDataset.
  """
  if container is None:
    container = ""
  if shared_name is None:
    shared_name = ""
  materialized_resource = (
      ged_ops.experimental_materialized_index_dataset_handle(
          container=container,
          shared_name=shared_name,
          output_types=nest.flatten(
              sparse.as_dense_types(self.output_types, self.output_classes)),
          output_shapes=nest.flatten(
              sparse.as_dense_shapes(self.output_shapes,
                                     self.output_classes))))
  with ops.colocate_with(materialized_resource):
    materializer = ged_ops.experimental_indexed_dataset_materialize(
        self._as_variant_tensor(), materialized_resource)
  return MaterializedIndexedDataset(materialized_resource, materializer,
                                    self.output_classes, self.output_types,
                                    self.output_shapes)

Example 2: get_next
def get_next(self, name=None):
  """See `tf.data.Iterator.get_next`."""
  self._get_next_call_count += 1
  if self._get_next_call_count > iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD:
    warnings.warn(iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE)

  flat_result = []
  # TODO(priyag): This will fail if the input size (typically number of
  # batches) is not divisible by number of devices.
  # How do we handle that more gracefully / let the user know?
  for buffer_resource in self._buffering_resources:
    flat_ret = gen_dataset_ops.function_buffering_resource_get_next(
        buffer_resource,
        output_types=data_nest.flatten(sparse.as_dense_types(
            self.output_types, self.output_classes)), name=name)

    ret = sparse.deserialize_sparse_tensors(
        data_nest.pack_sequence_as(self.output_types, flat_ret),
        self.output_types, self.output_shapes, self.output_classes)

    for tensor, shape in zip(
        data_nest.flatten(ret), data_nest.flatten(self.output_shapes)):
      if isinstance(tensor, ops.Tensor):
        tensor.set_shape(shape)
    flat_result.append(ret)

  return nest.pack_sequence_as(self._devices, flat_result)

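The deserialize_sparse_tensors call above is the other half of a round trip: a sparse component is serialized into a single dense tensor on one side of a buffer and reconstructed on the other. Below is a minimal, hypothetical round trip using the same internal helpers, with the four-argument deserialize signature exactly as it appears in these examples (TF 1.x graph mode assumed):

import tensorflow as tf
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape

sp = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
element = {"x": sp}

# Serialize: each SparseTensor component becomes one dense tensor.
serialized = sparse.serialize_sparse_tensors(element)

# Deserialize: (tensors, types, shapes, classes), as in the examples above.
restored = sparse.deserialize_sparse_tensors(
    serialized,
    {"x": tf.float32},
    {"x": tensor_shape.TensorShape([2, 2])},
    {"x": sparse_tensor.SparseTensor})
assert isinstance(restored["x"], sparse_tensor.SparseTensor)
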
Example 3: get_next
def get_next(self, name=None):
  """Returns a nested structure of `tf.Tensor`s containing the next element.

  Args:
    name: (Optional.) A name for the created operation.

  Returns:
    A nested structure of `tf.Tensor` objects.
  """
  self._get_next_call_count += 1
  if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
    warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)
  return sparse.deserialize_sparse_tensors(
      nest.pack_sequence_as(self._output_types,
                            gen_dataset_ops.iterator_get_next(
                                self._iterator_resource,
                                output_types=nest.flatten(
                                    sparse.as_dense_types(
                                        self._output_types,
                                        self._output_classes)),
                                output_shapes=nest.flatten(
                                    sparse.as_dense_shapes(
                                        self._output_shapes,
                                        self._output_classes)),
                                name=name)), self._output_types,
      self._output_shapes, self._output_classes)

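To make the recurring call pattern concrete, here is a small hypothetical driver (the module paths are TF 1.x internals and may move between releases). It builds the output_types/output_classes structures that the iterators above carry, then converts and flattens them the way every example in this article does:

from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor

# Element structure of a hypothetical dataset: one dense, one sparse component.
output_types = {"features": dtypes.float32, "ids": dtypes.int64}
output_classes = {"features": ops.Tensor, "ids": sparse_tensor.SparseTensor}

# Convert, then flatten: the "features" entry keeps dtypes.float32, while
# "ids" (a SparseTensor component) is replaced by the dtype of its serialized
# form before the structure is flattened into the list the iterator ops expect.
flat_types = nest.flatten(
    sparse.as_dense_types(output_types, output_classes))
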
Example 4: _make_key_func
def _make_key_func(self, key_func, input_dataset):
  """Make wrapping Defun for key_func."""

  @function.Defun(*nest.flatten(
      sparse.as_dense_types(input_dataset.output_types,
                            input_dataset.output_classes)))
  def tf_key_func(*args):
    """A wrapper for Defun that facilitates shape inference."""
    # Pass in shape information from the input_dataset.
    dense_shapes = sparse.as_dense_shapes(input_dataset.output_shapes,
                                          input_dataset.output_classes)
    for arg, shape in zip(args, nest.flatten(dense_shapes)):
      arg.set_shape(shape)

    nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
    nested_args = sparse.deserialize_sparse_tensors(
        nested_args, input_dataset.output_types, input_dataset.output_shapes,
        input_dataset.output_classes)
    # pylint: disable=protected-access
    if dataset_ops._should_unpack_args(nested_args):
      ret = key_func(*nested_args)
    # pylint: enable=protected-access
    else:
      ret = key_func(nested_args)

    ret = ops.convert_to_tensor(ret, dtype=dtypes.int64)
    if ret.dtype != dtypes.int64:
      raise ValueError("`key_func` must return a single tf.int64 tensor.")
    return ret

  self._key_func = tf_key_func
  self._key_func.add_to_graph(ops.get_default_graph())

Example 5: _as_variant_tensor
def _as_variant_tensor(self):
  return gen_dataset_ops.ignore_errors_dataset(
      self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
      output_shapes=nest.flatten(
          sparse.as_dense_shapes(self.output_shapes, self.output_classes)),
      output_types=nest.flatten(
          sparse.as_dense_types(self.output_types, self.output_classes)))

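This output_types/output_shapes pair recurs almost verbatim in the _as_variant_tensor examples throughout this article. A hypothetical helper that factors it out (not part of TensorFlow at this point in its history, though later releases added a similar internal _flat_structure property on datasets):

from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse


def _flat_structure_kwargs(dataset):
  """Flattened dense output_types/output_shapes kwargs for a dataset op."""
  return {
      "output_types": nest.flatten(
          sparse.as_dense_types(dataset.output_types,
                                dataset.output_classes)),
      "output_shapes": nest.flatten(
          sparse.as_dense_shapes(dataset.output_shapes,
                                 dataset.output_classes)),
  }

With such a helper, the body above would reduce to gen_dataset_ops.ignore_errors_dataset(self._input_dataset._as_variant_tensor(), **_flat_structure_kwargs(self)).
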
Example 6: get_next_as_optional
def get_next_as_optional(iterator):
  """Returns an `Optional` that contains the next value from the iterator.

  If `iterator` has reached the end of the sequence, the returned `Optional`
  will have no value.

  Args:
    iterator: A `tf.data.Iterator` object.

  Returns:
    An `Optional` object representing the next value from the iterator (if it
    has one) or no value.
  """
  # pylint: disable=protected-access
  return optional_ops._OptionalImpl(
      gen_dataset_ops.iterator_get_next_as_optional(
          iterator._iterator_resource,
          output_types=nest.flatten(
              sparse.as_dense_types(iterator.output_types,
                                    iterator.output_classes)),
          output_shapes=nest.flatten(
              sparse.as_dense_shapes(iterator.output_shapes,
                                     iterator.output_classes))),
      structure.Structure._from_legacy_structure(iterator.output_types,
                                                 iterator.output_shapes,
                                                 iterator.output_classes))

Example 7: _as_variant_tensor
def _as_variant_tensor(self):
  return gen_dataset_ops.set_stats_aggregator_dataset(
      self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
      self._stats_aggregator._resource,  # pylint: disable=protected-access
      output_types=nest.flatten(
          sparse.as_dense_types(self.output_types, self.output_classes)),
      output_shapes=nest.flatten(
          sparse.as_dense_shapes(self.output_shapes, self.output_classes)))

Example 8: _as_variant_tensor
def _as_variant_tensor(self):
  return self._op_function(
      self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
      self._tag,
      output_types=nest.flatten(
          sparse.as_dense_types(self.output_types, self.output_classes)),
      output_shapes=nest.flatten(
          sparse.as_dense_shapes(self.output_shapes, self.output_classes)))

Example 9: _as_variant_tensor
def _as_variant_tensor(self):
  return gen_dataset_ops.random_dataset(
      seed=self._seed,
      seed2=self._seed2,
      output_shapes=nest.flatten(
          sparse.as_dense_shapes(self.output_shapes, self.output_classes)),
      output_types=nest.flatten(
          sparse.as_dense_types(self.output_types, self.output_classes)))

Example 10: get
def get(self, index):
  """Get retrieves a value (or set of values) from the IndexedDataset.

  Args:
    index: A uint64 scalar or vector tensor with the indices to retrieve.

  Returns:
    A tensor containing the values corresponding to `index`.
  """
  # TODO(saeta): nest.pack_sequence_as(...)
  return ged_ops.experimental_indexed_dataset_get(
      self._materialized_resource,
      index,
      output_types=nest.flatten(
          sparse.as_dense_types(self._output_types, self._output_classes)),
      output_shapes=nest.flatten(
          sparse.as_dense_shapes(self._output_shapes, self._output_classes)))

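Shapes take the parallel path through as_dense_shapes, as the other examples here show. For symmetry with the earlier as_dense_types sketch, here is a minimal illustrative stand-in (again an assumption about TF 1.x internals, not the real source): because the dense shape of a serialized sparse component is not statically known, it maps to an unknown shape.

from tensorflow.python.data.util import nest
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape


def as_dense_shapes_sketch(shapes, classes):
  """Illustrative stand-in for sparse.as_dense_shapes (not the real source)."""
  return nest.map_structure(
      lambda shape, cls: tensor_shape.unknown_shape()
      if cls is sparse_tensor.SparseTensor else shape,
      shapes, classes)
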
Example 11: _as_variant_tensor
def _as_variant_tensor(self):
  # pylint: disable=protected-access
  return gen_dataset_ops.directed_interleave_dataset(
      self._selector_input._as_variant_tensor(),
      [data_input._as_variant_tensor() for data_input in self._data_inputs],
      output_shapes=nest.flatten(
          sparse.as_dense_shapes(self.output_shapes, self.output_classes)),
      output_types=nest.flatten(
          sparse.as_dense_types(self.output_types, self.output_classes)))

Example 12: _as_variant_tensor
def _as_variant_tensor(self):
  return gen_dataset_ops.slide_dataset(
      self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
      window_size=self._window_size,
      stride=self._stride,
      output_shapes=nest.flatten(
          sparse.as_dense_shapes(self.output_shapes, self.output_classes)),
      output_types=nest.flatten(
          sparse.as_dense_types(self.output_types, self.output_classes)))

Example 13: _as_variant_tensor
def _as_variant_tensor(self):
  return gen_dataset_ops.dense_to_sparse_batch_dataset(
      self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
      self._batch_size,
      row_shape=dataset_ops._partial_shape_to_tensor(self._row_shape),  # pylint: disable=protected-access
      output_shapes=nest.flatten(
          sparse.as_dense_shapes(self.output_shapes, self.output_classes)),
      output_types=nest.flatten(
          sparse.as_dense_types(self.output_types, self.output_classes)))

Example 14: __init__
def __init__(self,
             input_dataset,
             one_shot,
             devices,
             buffer_size,
             shared_name=None):
  self._input_dataset = input_dataset
  self._get_next_call_count = 0
  self._one_shot = one_shot
  if shared_name is None:
    shared_name = ""
  self._devices = devices

  if self._one_shot:
    self._input_iterator = input_dataset.make_one_shot_iterator()
  else:
    self._input_iterator = iterator_ops.Iterator.from_structure(
        self._input_dataset.output_types, self._input_dataset.output_shapes,
        shared_name, self._input_dataset.output_classes)
  input_iterator_handle = self._input_iterator.string_handle()

  @function.Defun(dtypes.string)
  def _prefetch_fn(handle):
    """Prefetches one element from `input_iterator`."""
    remote_iterator = iterator_ops.Iterator.from_string_handle(
        handle, self._input_iterator.output_types,
        self._input_iterator.output_shapes,
        self._input_iterator.output_classes)
    ret = remote_iterator.get_next()
    return nest.flatten(sparse.serialize_sparse_tensors(ret))

  target_device = ged_ops.experimental_iterator_get_device(
      self._input_iterator._iterator_resource)
  self._buffering_resources = []
  for device in nest.flatten(self._devices):
    with ops.device(device):
      buffer_resource_handle = prefetching_ops.function_buffering_resource(
          f=_prefetch_fn,
          output_types=data_nest.flatten(
              sparse.as_dense_types(self._input_dataset.output_types,
                                    self._input_dataset.output_classes)),
          target_device=target_device,
          string_arg=input_iterator_handle,
          buffer_size=buffer_size,
          shared_name=shared_name)
      self._buffering_resources.append(buffer_resource_handle)

  if not self._one_shot:
    reset_ops = []
    for buffer_resource in self._buffering_resources:
      reset_ops.append(
          ged_ops.experimental_function_buffering_resource_reset(
              buffer_resource))
    with ops.control_dependencies(reset_ops):
      self._initializer = self._input_iterator.make_initializer(
          self._input_dataset)

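The reason as_dense_types appears in this constructor: _prefetch_fn returns nest.flatten(sparse.serialize_sparse_tensors(ret)), so every sparse component crosses the buffer as a single serialized tensor, and the buffering resource's declared output_types must use the serialized dtype for exactly those components. Below is a small self-contained check of that invariant, which the examples in this article implicitly rely on (TF 1.x internals assumed; the sparse example data is hypothetical):

import tensorflow as tf
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor

element = {
    "dense": tf.constant([1, 2]),
    "sparse": tf.SparseTensor(indices=[[0]], values=[1.0], dense_shape=[3]),
}
types = {"dense": tf.int32, "sparse": tf.float32}
classes = {"dense": ops.Tensor, "sparse": sparse_tensor.SparseTensor}

# The dtypes of the flattened serialized element line up one-to-one with
# as_dense_types of the element structure.
flat_serialized = nest.flatten(sparse.serialize_sparse_tensors(element))
flat_types = nest.flatten(sparse.as_dense_types(types, classes))
assert [t.dtype for t in flat_serialized] == flat_types
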
Example 15: __init__
def __init__(self,
             dataset,
             devices,
             prefetch_buffer_size=1,
             source_device="/cpu:0"):
  self._dataset = dataset
  self._devices = devices
  self._source_device = source_device
  self._source_device_tensor = ops.convert_to_tensor(source_device)

  self._flat_output_shapes = nest.flatten(
      sparse.as_dense_shapes(self._dataset.output_shapes,
                             self._dataset.output_classes))
  self._flat_output_types = nest.flatten(
      sparse.as_dense_types(self._dataset.output_types,
                            self._dataset.output_classes))

  # Create the MultiDeviceIterator.
  with ops.device(self._source_device):
    self._multi_device_iterator_resource = (
        gen_dataset_ops.multi_device_iterator(
            devices=self._devices,
            shared_name="",
            container="",
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes))

    # The incarnation ID is used to ensure consistency between the per-device
    # iterators and the multi-device iterator.
    self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
        self._dataset._as_variant_tensor(),  # pylint: disable=protected-access
        self._multi_device_iterator_resource)

  # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
  # initialize the device side of the pipeline. This would allow the
  # MultiDeviceIterator to choose, for example, to move some transformations
  # into the device side from its input. It might be useful in rewriting.
  # Create the per device iterators.
  self._device_iterators = []
  i = 0
  for device in self._devices:
    ds = _PerDeviceGenerator(
        i, self._multi_device_iterator_resource, self._incarnation_id,
        self._source_device_tensor, device, self._dataset.output_shapes,
        self._dataset.output_types, self._dataset.output_classes)
    ds = ds.prefetch(prefetch_buffer_size)
    with ops.device(device):
      self._device_iterators.append(ds.make_initializable_iterator())
    i += 1

  device_iterator_initializers = [
      iterator.initializer for iterator in self._device_iterators
  ]
  self._initializer = control_flow_ops.group(*device_iterator_initializers)