This article collects typical usage examples of the Python method tensorflow.compat.v1.FixedLenSequenceFeature. If you are unsure what v1.FixedLenSequenceFeature does or how to use it, the curated code samples below may help. You can also explore other usage examples from its containing module, tensorflow.compat.v1.
Below are 9 code examples of the v1.FixedLenSequenceFeature method, sorted by popularity.
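Before the examples, here is a minimal, self-contained sketch of the method's typical use; the feature name "token_ids" and the parse function are hypothetical, not taken from the examples below. When parsing a tf.Example with tf.parse_single_example, a FixedLenSequenceFeature must set allow_missing=True and yields a tensor with a dynamic leading dimension.

import tensorflow.compat.v1 as tf

# Hypothetical spec: "token_ids" holds a variable-length list of int64 values.
# With tf.parse_single_example, FixedLenSequenceFeature requires
# allow_missing=True; the parsed tensor has shape [None].
feature_spec = {
    "token_ids": tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
}

def parse_fn(serialized_example):
  return tf.parse_single_example(serialized_example, feature_spec)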
Example 1: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenSequenceFeature [as alias]
def example_reading_spec(self):
data_fields, data_items_to_decoders = (
super(ImageVqav2Tokens10kLabels3k, self).example_reading_spec())
data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64)
data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64)
data_fields["image/question"] = tf.FixedLenSequenceFeature(
(), tf.int64, allow_missing=True)
data_fields["image/answer"] = tf.FixedLenSequenceFeature(
(), tf.int64, allow_missing=True)
slim = contrib.slim()
data_items_to_decoders["question"] = slim.tfexample_decoder.Tensor(
"image/question")
data_items_to_decoders["targets"] = slim.tfexample_decoder.Tensor(
"image/answer")
return data_fields, data_items_to_decoders
Example 2: parse_example
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenSequenceFeature [as alias]
def parse_example(serialized_example):
"""Parse example."""
features = tf.parse_single_example(
serialized_example,
features={
"question":
tf.FixedLenFeature([], tf.string),
"context":
tf.FixedLenSequenceFeature(
dtype=tf.string, shape=[], allow_missing=True),
"long_answer_indices":
tf.FixedLenSequenceFeature(
dtype=tf.int64, shape=[], allow_missing=True)
})
features["question"] = features["question"]
features["context"] = features["context"]
features["long_answer_indices"] = tf.to_int32(features["long_answer_indices"])
return features
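A hedged usage sketch (the file path is hypothetical): parse_example is a per-record function, so it is typically mapped over a tf.data pipeline.

# Hypothetical usage: map parse_example over a TFRecord file.
dataset = tf.data.TFRecordDataset("/path/to/records.tfrecord")
dataset = dataset.map(parse_example)
# Each element is now a dict with "question" (scalar string), "context"
# (variable-length string vector), and "long_answer_indices" (int32 vector).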
Example 3: parse_and_preprocess
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenSequenceFeature [as alias]
def parse_and_preprocess(self, value, batch_position):
"""Parse an TFRecord."""
del batch_position
assert self.supports_datasets()
context_features = {
'labels': tf.VarLenFeature(dtype=tf.int64),
'input_length': tf.FixedLenFeature([], dtype=tf.int64),
'label_length': tf.FixedLenFeature([], dtype=tf.int64),
}
sequence_features = {
'features': tf.FixedLenSequenceFeature([161], dtype=tf.float32)
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=value,
context_features=context_features,
sequence_features=sequence_features,
)
return [
# Input
tf.expand_dims(sequence_parsed['features'], axis=2),
# Label
tf.cast(
tf.reshape(
tf.sparse_tensor_to_dense(context_parsed['labels']), [-1]),
dtype=tf.int32),
# Input length
tf.cast(
tf.reshape(context_parsed['input_length'], [1]),
dtype=tf.int32),
# Label length
tf.cast(
tf.reshape(context_parsed['label_length'], [1]),
dtype=tf.int32),
]
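Since each utterance has a different number of frames, the parsed tensors cannot be batched directly. A minimal sketch, assuming parse_and_preprocess feeds a tf.data pipeline (the dataset variable and batch size are hypothetical, and the padded_shapes structure must match the 4-tuple returned above):

# Hypothetical: pad variable-length utterances so they can be batched.
# Element shapes: input [time, 161, 1], labels [None], two length vectors [1].
dataset = dataset.padded_batch(
    batch_size=16,
    padded_shapes=([None, 161, 1], [None], [1], [1]))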
Example 4: _get_feature
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenSequenceFeature [as alias]
def _get_feature(tensor_spec, decode_images=True):
  """Get a FixedLenFeature or FixedLenSequenceFeature for a tensor spec."""
varlen_default_value = getattr(tensor_spec, 'varlen_default_value', None)
if getattr(tensor_spec, 'is_sequence', False):
cls = tf.FixedLenSequenceFeature
elif varlen_default_value is not None:
cls = tf.VarLenFeature
else:
cls = tf.FixedLenFeature
if decode_images and is_encoded_image_spec(tensor_spec):
if varlen_default_value is not None:
# Contains a variable length list of images.
return cls(tf.string)
elif len(tensor_spec.shape) > 3:
# Contains a fixed length list of images.
return cls((tensor_spec.shape[0]), tf.string)
else:
return cls((), tf.string)
elif varlen_default_value is not None:
return cls(tensor_spec.dtype)
else:
return cls(tensor_spec.shape, tensor_spec.dtype)
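A hedged illustration of the three branches for non-image specs (ExtendedTensorSpec is the spec class shown in Example 8 below; all names and values are hypothetical):

# is_sequence=True          -> tf.FixedLenSequenceFeature(shape, dtype)
# varlen_default_value set  -> tf.VarLenFeature(dtype)
# otherwise                 -> tf.FixedLenFeature(shape, dtype)
seq_spec = ExtendedTensorSpec(
    shape=(8,), dtype=tf.int64, name='ids', is_sequence=True)
# Returns tf.FixedLenSequenceFeature(shape=(8,), dtype=tf.int64).
feature = _get_feature(seq_spec)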
Example 5: tensorspec_to_feature_dict
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenSequenceFeature [as alias]
def tensorspec_to_feature_dict(tensor_spec_struct, decode_images=True):
  """Converts a collection of tensorspecs to a dict of FixedLenFeature specs.

  Args:
    tensor_spec_struct: A (possibly nested) collection of TensorSpec.
    decode_images: If True, TensorSpec with data_format 'JPEG' or 'PNG' are
      interpreted as encoded image strings.

  Returns:
    features: A dict mapping feature keys to FixedLenFeature and
      FixedLenSequenceFeature values.
    tensor_spec_dict: A dict mapping the same feature keys to their
      corresponding TensorSpec.

  Raises:
    ValueError: If duplicate keys are found in the TensorSpecs.
  """
assert_valid_spec_structure(tensor_spec_struct)
features = {}
tensor_spec_dict = {}
  # Note: it is valid to iterate over all tensors since
  # assert_valid_spec_structure ensures that non-unique tensor_spec names
  # have identical properties.
flat_tensor_spec_struct = flatten_spec_structure(tensor_spec_struct)
for key, tensor_spec in flat_tensor_spec_struct.items():
if tensor_spec.name is None:
# Do not attempt to parse TensorSpecs whose name attribute is not set.
logging.info(
'TensorSpec name attribute for %s is not set; will not parse this '
'Tensor from TFExamples.', key)
continue
features[tensor_spec.name] = _get_feature(tensor_spec, decode_images)
tensor_spec_dict[tensor_spec.name] = tensor_spec
return features, tensor_spec_dict
Example 6: get_sequence_features
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenSequenceFeature [as alias]
def get_sequence_features(use_segment_ids, use_foreign_key_features,
string_alignment_features):
"""Gets sequence features (i.e., for input/output sequence to the model)."""
keys_to_sequence_features = {
constants.SOURCE_WORDPIECES_KEY:
tf.FixedLenSequenceFeature([], dtype=tf.int64),
constants.TARGET_ACTION_TYPES_KEY:
tf.FixedLenSequenceFeature([], dtype=tf.int64),
constants.TARGET_ACTION_IDS_KEY:
tf.FixedLenSequenceFeature([], dtype=tf.int64),
constants.COPIABLE_INPUT_KEY:
tf.FixedLenSequenceFeature([], dtype=tf.int64)
}
  if use_segment_ids:
    keys_to_sequence_features[constants.SEGMENT_ID_KEY] = (
        tf.FixedLenSequenceFeature([], dtype=tf.int64))
  if use_foreign_key_features:
    keys_to_sequence_features[constants.FOREIGN_KEY_KEY] = (
        tf.FixedLenSequenceFeature([], dtype=tf.int64))
  if string_alignment_features:
    keys_to_sequence_features[constants.ALIGNED_KEY] = (
        tf.FixedLenSequenceFeature([], dtype=tf.int64))
return keys_to_sequence_features
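A hedged sketch of how the resulting dict is typically consumed (the serialized SequenceExample tensor is hypothetical):

# Hypothetical: parse one serialized tf.SequenceExample with these features.
sequence_features = get_sequence_features(
    use_segment_ids=True,
    use_foreign_key_features=False,
    string_alignment_features=False)
_, parsed = tf.parse_single_sequence_example(
    serialized, sequence_features=sequence_features)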
Example 7: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenSequenceFeature [as alias]
def __init__(self, keys_to_context_features, keys_to_sequence_features,
items_to_handlers):
"""Constructs the decoder.
Args:
keys_to_context_features: A dictionary from TF-SequenceExample context
keys to either tf.VarLenFeature or tf.FixedLenFeature instances.
See tensorflow's parsing_ops.py.
keys_to_sequence_features: A dictionary from TF-SequenceExample sequence
keys to either tf.VarLenFeature or tf.FixedLenSequenceFeature instances.
items_to_handlers: A dictionary from items (strings) to ItemHandler
instances. Note that the ItemHandler's are provided the keys that they
use to return the final item Tensors.
Raises:
ValueError: If the same key is present for context features and sequence
features.
"""
unique_keys = set()
unique_keys.update(keys_to_context_features)
unique_keys.update(keys_to_sequence_features)
if len(unique_keys) != (
len(keys_to_context_features) + len(keys_to_sequence_features)):
# This situation is ambiguous in the decoder's keys_to_tensors variable.
raise ValueError('Context and sequence keys are not unique. \n'
' Context keys: %s \n Sequence keys: %s' %
(list(keys_to_context_features.keys()),
list(keys_to_sequence_features.keys())))
self._keys_to_context_features = keys_to_context_features
self._keys_to_sequence_features = keys_to_sequence_features
self._items_to_handlers = items_to_handlers
Example 8: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenSequenceFeature [as alias]
def __init__(self,
             shape,
             dtype,
             name=None,
             is_optional=None,
             is_sequence=False,
             is_extracted=False,
             data_format=None,
             dataset_key=None,
             varlen_default_value=None):
"""Creates a TensorSpec.
Args:
shape: Value convertible to `tf.TensorShape`. The shape of the tensor.
dtype: Value convertible to `tf.DType`. The type of the tensor values.
name: Optional name for the Tensor.
is_optional: If True, the tensor is optional, required otherwise.
is_sequence: If True, interpret as tf.FixedLenSequenceFeature instead of
tf.FixedLenFeature.
is_extracted: If True, implies this spec was inferred from a Tensor or
np.array.
data_format: Optional name of the data_format, e.g. jpeg, png.
dataset_key: Optional key name of which dataset to pull this tensor from.
varlen_default_value: Optional if a value other than None is provided
the spec is assumed to be a VarLenFeature with the default value in the
corrensponding data type. When using a VarLenFeature, the 0th index in
the shape corresponds to the length that the feature will be padded or
clipped to. When padded, the varlen_default_value will be used for
padding. When clipped, some data might be ignored.
Raises:
TypeError: If shape is not convertible to a `tf.TensorShape`, or dtype is
not convertible to a `tf.DType`.
"""
super(ExtendedTensorSpec, self).__init__(
shape=shape, dtype=dtype, name=name)
if is_optional is None:
is_optional = False
self._is_optional = is_optional
self._is_sequence = is_sequence
self._is_extracted = is_extracted
self._data_format = data_format
if dataset_key is None:
dataset_key = ''
self._dataset_key = dataset_key
self._varlen_default_value = varlen_default_value
if self._varlen_default_value is not None:
if data_format is None and len(self.shape) != 1:
raise ValueError(
('VarLenFeatures are only supported for shapes of rank 1 ({}) when '
'not using an image spec.').format(shape))
if data_format is not None and len(self.shape) != 4:
raise ValueError(
('VarLenFeatures are only supported for shapes of rank 4 ({}) when '
'using an image spec.').format(shape))
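A hedged construction sketch (all names and values hypothetical):

# A sequence spec: will be parsed as tf.FixedLenSequenceFeature.
spec = ExtendedTensorSpec(
    shape=(128,), dtype=tf.int64, name='token_ids', is_sequence=True)
# A rank-1 variable-length spec: will be parsed as tf.VarLenFeature, then
# padded or clipped to length 10 using 0.0 as the padding value.
varlen_spec = ExtendedTensorSpec(
    shape=(10,), dtype=tf.float32, name='scores', varlen_default_value=0.0)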
Example 9: file_based_input_fn_builder
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenSequenceFeature [as alias]
def file_based_input_fn_builder(input_file, seq_length, fewshot_num_classes,
fewshot_num_examples_per_class, drop_remainder):
"""Creates an `input_fn` closure to be passed to tf.Estimator."""
# Add one for the 'query' example.
fewshot_batch = fewshot_num_classes * fewshot_num_examples_per_class + 1
name_to_features = {
"input_ids": tf.FixedLenSequenceFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenSequenceFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenSequenceFeature([seq_length], tf.int64),
"guid": tf.FixedLenSequenceFeature([], tf.string),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
_, example = tf.parse_single_sequence_example(
record, sequence_features=name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
shape = tf.shape(example[name])
      # SequenceExamples come with a dynamic/unknown leading dimension, which
      # we reshape to an explicit dimension for the few-shot "batch" size.
example[name] = tf.reshape(t, tf.concat([[fewshot_batch], shape[1:]], 0))
return example
def input_fn(params):
"""The actual input function."""
d = tf.data.TFRecordDataset(input_file)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=params["batch_size"],
drop_remainder=drop_remainder))
return d
return input_fn
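A hedged usage sketch (all argument values hypothetical). Note that input_fn reads params["batch_size"], which tf.contrib.tpu.TPUEstimator populates automatically; with a plain tf.estimator.Estimator it must be supplied via params:

input_fn = file_based_input_fn_builder(
    input_file="/tmp/fewshot.tfrecord",  # hypothetical path
    seq_length=128,
    fewshot_num_classes=5,
    fewshot_num_examples_per_class=2,
    drop_remainder=True)
# estimator = tf.estimator.Estimator(model_fn, params={"batch_size": 8})
# estimator.train(input_fn=input_fn)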