This article collects typical code examples of the tensorflow.compat.v1.VarLenFeature method in Python. If you have been wondering what exactly v1.VarLenFeature does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the module this method belongs to, tensorflow.compat.v1.
The following shows 15 code examples of v1.VarLenFeature, sorted by popularity by default.
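Before diving into the examples, here is a minimal, self-contained sketch of what VarLenFeature does: it declares a variable-length entry in a parsing spec, and parsing returns a tf.SparseTensor that can be densified. The feature names and values below are illustrative assumptions, not taken from any of the examples that follow.

import tensorflow.compat.v1 as tf

# Build one serialized tf.train.Example with a variable-length token list
# and a scalar label (purely illustrative data).
serialized_example = tf.train.Example(features=tf.train.Features(feature={
    'tokens': tf.train.Feature(int64_list=tf.train.Int64List(value=[3, 1, 4, 1, 5])),
    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),
})).SerializeToString()

# VarLenFeature handles lists whose length differs from example to example;
# FixedLenFeature requires a known shape.
feature_spec = {
    'tokens': tf.VarLenFeature(tf.int64),
    'label': tf.FixedLenFeature([], tf.int64, default_value=0),
}

parsed = tf.parse_single_example(serialized_example, feature_spec)
tokens = tf.sparse.to_dense(parsed['tokens'])  # SparseTensor -> dense [5] tensor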
Example 1: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def __init__(self, include_mask=False, regenerate_source_id=False):
  self._include_mask = include_mask
  self._regenerate_source_id = regenerate_source_id
  self._keys_to_features = {
      'image/encoded': tf.FixedLenFeature((), tf.string),
      'image/source_id': tf.FixedLenFeature((), tf.string, ''),
      'image/height': tf.FixedLenFeature((), tf.int64, -1),
      'image/width': tf.FixedLenFeature((), tf.int64, -1),
      'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
      'image/object/class/label': tf.VarLenFeature(tf.int64),
      'image/object/area': tf.VarLenFeature(tf.float32),
      'image/object/is_crowd': tf.VarLenFeature(tf.int64),
  }
  if include_mask:
    self._keys_to_features.update({
        'image/object/mask':
            tf.VarLenFeature(tf.string),
    })
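As a point of reference, each VarLenFeature key in a map like the one above is returned by tf.parse_single_example as a tf.SparseTensor whose length varies per image. A common follow-up step, sketched here under the assumption that serialized holds one serialized tf.train.Example (this is not code from the original class), is to densify and stack the box coordinates:

# Sketch (assumption): decode one record with the feature map defined above.
parsed = tf.parse_single_example(serialized, self._keys_to_features)
boxes = tf.stack([
    tf.sparse.to_dense(parsed['image/object/bbox/ymin']),
    tf.sparse.to_dense(parsed['image/object/bbox/xmin']),
    tf.sparse.to_dense(parsed['image/object/bbox/ymax']),
    tf.sparse.to_dense(parsed['image/object/bbox/xmax']),
], axis=-1)  # shape [num_boxes, 4]; num_boxes differs from image to image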
Example 2: test_pad_or_clip_tensor_to_spec_shape
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def test_pad_or_clip_tensor_to_spec_shape(self, input_data, expected_output):
  varlen_spec = utils.ExtendedTensorSpec(
      shape=(3,), dtype=tf.int64, name='varlen', varlen_default_value=3.0)
  tmp_dir = self.create_tempdir().full_path
  file_path_padded_to_size_two = os.path.join(tmp_dir, 'size_two.tfrecord')
  self._write_test_examples(input_data, file_path_padded_to_size_two)
  dataset = tf.data.TFRecordDataset(
      filenames=tf.constant([file_path_padded_to_size_two]))
  dataset = dataset.batch(len(input_data), drop_remainder=True)

  def parse_fn(example):
    return tf.parse_example(example, {'varlen': tf.VarLenFeature(tf.int64)})

  dataset = dataset.map(parse_fn)
  sparse_tensors = dataset.make_one_shot_iterator().get_next()['varlen']
  default_value = tf.cast(
      tf.constant(varlen_spec.varlen_default_value), dtype=varlen_spec.dtype)
  tensor = utils.pad_or_clip_tensor_to_spec_shape(
      tf.sparse.to_dense(sparse_tensors, default_value), varlen_spec)
  with self.session() as sess:
    np_tensor = sess.run(tensor)
    self.assertAllEqual(np_tensor, np.array(expected_output))
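The helper self._write_test_examples is referenced above but not shown. A plausible implementation, given purely as an assumption for illustration rather than the project's actual code, serializes each variable-length row under the 'varlen' key and writes it to a TFRecord file:

def _write_test_examples(self, data, filename):
  # Hypothetical helper (assumption): one tf.train.Example per row, storing
  # the row's integers as a variable-length int64 list under 'varlen'.
  with tf.python_io.TFRecordWriter(filename) as writer:
    for row in data:
      example = tf.train.Example(
          features=tf.train.Features(feature={
              'varlen': tf.train.Feature(
                  int64_list=tf.train.Int64List(value=row)),
          }))
      writer.write(example.SerializeToString())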
Example 3: _get_feature
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def _get_feature(tensor_spec, decode_images=True):
  """Get FixedLenFeature or FixedLenSequenceFeature for a tensor spec."""
  varlen_default_value = getattr(tensor_spec, 'varlen_default_value', None)
  if getattr(tensor_spec, 'is_sequence', False):
    cls = tf.FixedLenSequenceFeature
  elif varlen_default_value is not None:
    cls = tf.VarLenFeature
  else:
    cls = tf.FixedLenFeature
  if decode_images and is_encoded_image_spec(tensor_spec):
    if varlen_default_value is not None:
      # Contains a variable length list of images.
      return cls(tf.string)
    elif len(tensor_spec.shape) > 3:
      # Contains a fixed length list of images.
      return cls((tensor_spec.shape[0]), tf.string)
    else:
      return cls((), tf.string)
  elif varlen_default_value is not None:
    return cls(tensor_spec.dtype)
  else:
    return cls(tensor_spec.shape, tensor_spec.dtype)
Example 4: parse_and_preprocess
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def parse_and_preprocess(self, value, batch_position):
  """Parse a TFRecord."""
  del batch_position
  assert self.supports_datasets()
  context_features = {
      'labels': tf.VarLenFeature(dtype=tf.int64),
      'input_length': tf.FixedLenFeature([], dtype=tf.int64),
      'label_length': tf.FixedLenFeature([], dtype=tf.int64),
  }
  sequence_features = {
      'features': tf.FixedLenSequenceFeature([161], dtype=tf.float32)
  }
  context_parsed, sequence_parsed = tf.parse_single_sequence_example(
      serialized=value,
      context_features=context_features,
      sequence_features=sequence_features,
  )
  return [
      # Input
      tf.expand_dims(sequence_parsed['features'], axis=2),
      # Label
      tf.cast(
          tf.reshape(
              tf.sparse_tensor_to_dense(context_parsed['labels']), [-1]),
          dtype=tf.int32),
      # Input length
      tf.cast(
          tf.reshape(context_parsed['input_length'], [1]),
          dtype=tf.int32),
      # Label length
      tf.cast(
          tf.reshape(context_parsed['label_length'], [1]),
          dtype=tf.int32),
  ]
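For completeness, a record this parser can read pairs the three context features with a 'features' feature list of 161-dimensional frames. A minimal writer-side sketch follows; the concrete frame count and label values are assumptions:

num_frames = 5
example = tf.train.SequenceExample(
    context=tf.train.Features(feature={
        'labels': tf.train.Feature(
            int64_list=tf.train.Int64List(value=[7, 2, 9])),
        'input_length': tf.train.Feature(
            int64_list=tf.train.Int64List(value=[num_frames])),
        'label_length': tf.train.Feature(
            int64_list=tf.train.Int64List(value=[3])),
    }),
    feature_lists=tf.train.FeatureLists(feature_list={
        'features': tf.train.FeatureList(feature=[
            tf.train.Feature(float_list=tf.train.FloatList(value=[0.0] * 161))
            for _ in range(num_frames)
        ]),
    }),
)
serialized_record = example.SerializeToString()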
Example 5: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  data_fields = {
      "inputs": tf.VarLenFeature(tf.int64),
      "targets": tf.VarLenFeature(tf.int64),
      "floats": tf.VarLenFeature(tf.float32),
  }
  data_items_to_decoders = None
  return (data_fields, data_items_to_decoders)
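Returning None for data_items_to_decoders, as this and several later examples do, typically means the consuming tensor2tensor code fills in a default Tensor decoder per field, roughly as sketched below (an assumption about the downstream framework behavior, not part of this example):

# Rough sketch (assumption) of the default applied when the decoder map is None:
if data_items_to_decoders is None:
  data_items_to_decoders = {
      field: contrib.slim().tfexample_decoder.Tensor(field)
      for field in data_fields
  }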
Example 6: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  label_key = "image/unpadded_label"
  data_fields, data_items_to_decoders = (
      super(ImageFSNS, self).example_reading_spec())
  data_fields[label_key] = tf.VarLenFeature(tf.int64)
  data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor(
      label_key)
  return data_fields, data_items_to_decoders
Example 7: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  data_fields = {"dist_targets": tf.VarLenFeature(tf.int64)}
  if self.has_inputs:
    data_fields["inputs"] = tf.VarLenFeature(tf.int64)
  # hack: ignoring true targets and putting dist_targets in targets
  data_items_to_decoders = {
      "inputs": contrib.slim().tfexample_decoder.Tensor("inputs"),
      "targets": contrib.slim().tfexample_decoder.Tensor("dist_targets"),
  }
  return (data_fields, data_items_to_decoders)
Example 8: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  data_fields = {
      "inputs": tf.VarLenFeature(tf.int64),
      "targets": tf.VarLenFeature(tf.int64),
      "section_boundaries": tf.VarLenFeature(tf.int64),
  }
  data_items_to_decoders = None
  return (data_fields, data_items_to_decoders)
Example 9: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  data_fields = {
      "inputs": tf.VarLenFeature(tf.int64),
      "audio/sample_count": tf.FixedLenFeature((), tf.int64),
      "audio/sample_width": tf.FixedLenFeature((), tf.int64),
      "targets": tf.VarLenFeature(tf.int64),
  }
  return data_fields, None
Example 10: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  data_fields = {
      "inputs": tf.VarLenFeature(tf.float32),
      "targets": tf.VarLenFeature(tf.float32),
  }
  data_items_to_decoders = None
  return (data_fields, data_items_to_decoders)
Example 11: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  label_key = "image/class/label"
  data_fields, data_items_to_decoders = (
      super(Image2TextProblem, self).example_reading_spec())
  data_fields[label_key] = tf.VarLenFeature(tf.int64)
  data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor(
      label_key)
  return data_fields, data_items_to_decoders
Example 12: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  """Define how data is serialized to file and read back.

  Returns:
    data_fields: A dictionary mapping data names to their feature type.
    data_items_to_decoders: A dictionary mapping data names to TF Example
      decoders, to be used when reading back TF examples from disk.
  """
  data_fields = {
      "inputs": tf.VarLenFeature(tf.int64),
      "targets": tf.VarLenFeature(tf.int64)
  }
  data_items_to_decoders = None
  return (data_fields, data_items_to_decoders)
Example 13: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  data_fields = {
      "waveforms": tf.VarLenFeature(tf.float32),
      "targets": tf.VarLenFeature(tf.int64),
  }
  data_items_to_decoders = None
  return data_fields, data_items_to_decoders
Example 14: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  data_fields, data_items_to_decoders = (
      super(QuestionAndContext2TextProblem, self).example_reading_spec())
  data_fields["context"] = tf.VarLenFeature(tf.int64)
  return (data_fields, data_items_to_decoders)
Example 15: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import VarLenFeature [as alias]
def example_reading_spec(self):
  data_fields = {
      "inputs": tf.VarLenFeature(tf.int64),
      "targets": tf.VarLenFeature(tf.float32),
  }
  data_items_to_decoders = None
  return (data_fields, data_items_to_decoders)