This article collects typical usage examples of the Python method tensorflow.compat.v1.FixedLenFeature. If you are wondering what v1.FixedLenFeature does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore the enclosing module, tensorflow.compat.v1, for more context.
The following shows 15 code examples of v1.FixedLenFeature, sorted by popularity by default.
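Before the examples, a minimal, self-contained sketch of the usual pattern may help: declare a feature spec with tf.FixedLenFeature and hand it to tf.parse_single_example. The feature names and shapes below are purely illustrative and do not come from any of the examples that follow.

import tensorflow.compat.v1 as tf

# Hypothetical spec: a scalar int64 label and a fixed-size float vector.
feature_spec = {
    "label": tf.FixedLenFeature([], tf.int64),
    "embedding": tf.FixedLenFeature([128], tf.float32),
}

def parse_fn(serialized_example):
  # Returns a dict of dense tensors with the shapes declared above.
  return tf.parse_single_example(serialized_example, features=feature_spec)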
Example 1: example_reading_spec
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def example_reading_spec(self):
  extra_data_fields, extra_data_items_to_decoders = self.extra_reading_spec

  data_fields = {
      "image/encoded": tf.FixedLenFeature((), tf.string),
      "image/format": tf.FixedLenFeature((), tf.string),
  }
  data_fields.update(extra_data_fields)

  data_items_to_decoders = {
      "frame":
          contrib.slim().tfexample_decoder.Image(
              image_key="image/encoded",
              format_key="image/format",
              shape=[self.frame_height, self.frame_width, self.num_channels],
              channels=self.num_channels),
  }
  data_items_to_decoders.update(extra_data_items_to_decoders)

  return data_fields, data_items_to_decoders
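For comparison, the two image fields above can also be parsed without slim's Image decoder, using tf.parse_single_example and a decode op directly. This is only a rough sketch that assumes JPEG-encoded frames; the function name is illustrative and not part of the original problem class.

def decode_frame(serialized_example, height, width, channels):
  # Parse the two fixed-length string features declared above.
  parsed = tf.parse_single_example(serialized_example, features={
      "image/encoded": tf.FixedLenFeature((), tf.string),
      "image/format": tf.FixedLenFeature((), tf.string),
  })
  # slim's Image handler dispatches on "image/format"; here JPEG is assumed.
  frame = tf.image.decode_jpeg(parsed["image/encoded"], channels=channels)
  frame.set_shape([height, width, channels])
  return frame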
Example 2: example_reading_spec
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def example_reading_spec(self):
  data_fields, data_items_to_decoders = (
      super(ImageVqav2Tokens10kLabels3k, self).example_reading_spec())
  data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)
  data_fields["image/answer"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)

  slim = contrib.slim()
  data_items_to_decoders["question"] = slim.tfexample_decoder.Tensor(
      "image/question")
  data_items_to_decoders["targets"] = slim.tfexample_decoder.Tensor(
      "image/answer")
  return data_fields, data_items_to_decoders
Example 3: example_reading_spec
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def example_reading_spec(self):
  """Return a mix of env and video data fields and decoders."""
  slim = contrib.slim()
  video_fields, video_decoders = (
      video_utils.VideoProblem.example_reading_spec(self))
  env_fields, env_decoders = (
      gym_env_problem.GymEnvProblem.example_reading_spec(self))

  # Remove raw observations field since we want to capture them as videos.
  env_fields.pop(env_problem.OBSERVATION_FIELD)
  env_decoders.pop(env_problem.OBSERVATION_FIELD)

  # Add frame number spec and decoder.
  env_fields[_FRAME_NUMBER_FIELD] = tf.FixedLenFeature((1,), tf.int64)
  env_decoders[_FRAME_NUMBER_FIELD] = slim.tfexample_decoder.Tensor(
      _FRAME_NUMBER_FIELD)

  # Add video fields and decoders.
  env_fields.update(video_fields)
  env_decoders.update(video_decoders)
  return env_fields, env_decoders
Example 4: serving_input_receiver_fn
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def serving_input_receiver_fn():
  """Creates an input function for serving."""
  seq_len = FLAGS.max_seq_length
  serialized_example = tf.placeholder(
      dtype=tf.string, shape=[None], name="serialized_example")
  features = {
      "input_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "input_mask": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
  }
  feature_map = tf.parse_example(serialized_example, features=features)
  feature_map["is_real_example"] = tf.constant(1, dtype=tf.int32)
  feature_map["label_ids"] = tf.constant(0, dtype=tf.int32)

  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # So cast all int64 to int32.
  for name in feature_map.keys():
    t = feature_map[name]
    if t.dtype == tf.int64:
      t = tf.to_int32(t)
    feature_map[name] = t

  return tf.estimator.export.ServingInputReceiver(
      features=feature_map, receiver_tensors=serialized_example)
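A receiver function like this is typically handed to an Estimator when exporting a SavedModel for serving. A minimal sketch, assuming `estimator` is a tf.estimator.Estimator built elsewhere and the export path is illustrative:

export_dir = estimator.export_saved_model(
    export_dir_base="/tmp/exported_model",
    serving_input_receiver_fn=serving_input_receiver_fn)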
Example 5: __init__
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def __init__(self, include_mask=False, regenerate_source_id=False):
  self._include_mask = include_mask
  self._regenerate_source_id = regenerate_source_id
  self._keys_to_features = {
      'image/encoded': tf.FixedLenFeature((), tf.string),
      'image/source_id': tf.FixedLenFeature((), tf.string, ''),
      'image/height': tf.FixedLenFeature((), tf.int64, -1),
      'image/width': tf.FixedLenFeature((), tf.int64, -1),
      'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
      'image/object/class/label': tf.VarLenFeature(tf.int64),
      'image/object/area': tf.VarLenFeature(tf.float32),
      'image/object/is_crowd': tf.VarLenFeature(tf.int64),
  }
  if include_mask:
    self._keys_to_features.update({
        'image/object/mask': tf.VarLenFeature(tf.string),
    })
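The dictionary built in this constructor is typically consumed by a decode step. The sketch below shows roughly what that could look like; the method name and the post-processing are illustrative rather than the original class code. Note that the VarLenFeature entries come back as SparseTensors and usually need to be densified.

def decode(self, serialized_example):
  parsed = tf.parse_single_example(serialized_example, self._keys_to_features)
  # Densify the sparse box coordinates and stack them into [num_boxes, 4].
  ymin = tf.sparse_tensor_to_dense(parsed['image/object/bbox/ymin'])
  xmin = tf.sparse_tensor_to_dense(parsed['image/object/bbox/xmin'])
  ymax = tf.sparse_tensor_to_dense(parsed['image/object/bbox/ymax'])
  xmax = tf.sparse_tensor_to_dense(parsed['image/object/bbox/xmax'])
  boxes = tf.stack([ymin, xmin, ymax, xmax], axis=-1)
  return parsed, boxes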
Example 6: test_tensorspec_to_feature_dict
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def test_tensorspec_to_feature_dict(self):
  features, tensor_spec_dict = utils.tensorspec_to_feature_dict(
      mock_nested_subset_spec, decode_images=True)
  self.assertDictEqual(tensor_spec_dict, {
      'images': T1,
      'actions': T2,
  })
  self.assertDictEqual(
      features, {
          'images': tf.FixedLenFeature((), tf.string),
          'actions': tf.FixedLenFeature(T2.shape, T2.dtype),
      })
  features, tensor_spec_dict = utils.tensorspec_to_feature_dict(
      mock_nested_subset_spec, decode_images=False)
  self.assertDictEqual(tensor_spec_dict, {
      'images': T1,
      'actions': T2,
  })
  self.assertDictEqual(
      features, {
          'images': tf.FixedLenFeature(T1.shape, T1.dtype),
          'actions': tf.FixedLenFeature(T2.shape, T2.dtype),
      })
Example 7: _get_feature
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def _get_feature(tensor_spec, decode_images=True):
  """Get FixedLenFeature or FixedLenSequenceFeature for a tensor spec."""
  varlen_default_value = getattr(tensor_spec, 'varlen_default_value', None)
  if getattr(tensor_spec, 'is_sequence', False):
    cls = tf.FixedLenSequenceFeature
  elif varlen_default_value is not None:
    cls = tf.VarLenFeature
  else:
    cls = tf.FixedLenFeature
  if decode_images and is_encoded_image_spec(tensor_spec):
    if varlen_default_value is not None:
      # Contains a variable length list of images.
      return cls(tf.string)
    elif len(tensor_spec.shape) > 3:
      # Contains a fixed length list of images.
      return cls((tensor_spec.shape[0]), tf.string)
    else:
      return cls((), tf.string)
  elif varlen_default_value is not None:
    return cls(tensor_spec.dtype)
  else:
    return cls(tensor_spec.shape, tensor_spec.dtype)
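Note the dispatch order in this helper: a sequence spec takes precedence over a variable-length default, and an encoded-image spec always yields a tf.string feature whichever class was chosen, since the raw bytes are decoded into pixels later rather than being stored as a numeric tensor.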
Example 8: main
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def main(_):
  # Define schema.
  raw_metadata = dataset_metadata.DatasetMetadata(
      dataset_schema.from_feature_spec({
          'text': tf.FixedLenFeature([], tf.string),
          'language_code': tf.FixedLenFeature([], tf.string),
      }))

  # Add in padding tokens.
  reserved_tokens = FLAGS.reserved_tokens
  if FLAGS.num_pad_tokens:
    padded_tokens = ['<pad>']
    padded_tokens += ['<pad%d>' % i for i in range(1, FLAGS.num_pad_tokens)]
    reserved_tokens = padded_tokens + reserved_tokens

  params = learner.Params(FLAGS.upper_thresh, FLAGS.lower_thresh,
                          FLAGS.num_iterations, FLAGS.max_input_tokens,
                          FLAGS.max_token_length, FLAGS.max_unique_chars,
                          FLAGS.vocab_size, FLAGS.slack_ratio,
                          FLAGS.include_joiner_token, FLAGS.joiner,
                          reserved_tokens)

  generate_vocab(FLAGS.data_file, FLAGS.vocab_file, FLAGS.metrics_file,
                 raw_metadata, params)
Example 9: parse_example
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def parse_example(serialized_example):
  """Parses a single serialized tf.Example into a feature dict."""
  features = tf.parse_single_example(
      serialized_example,
      features={
          "question":
              tf.FixedLenFeature([], tf.string),
          "context":
              tf.FixedLenSequenceFeature(
                  dtype=tf.string, shape=[], allow_missing=True),
          "long_answer_indices":
              tf.FixedLenSequenceFeature(
                  dtype=tf.int64, shape=[], allow_missing=True)
      })
  # Cast the int64 indices to int32.
  features["long_answer_indices"] = tf.to_int32(features["long_answer_indices"])
  return features
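A parser like this is usually mapped over a TFRecord dataset. A minimal sketch, with an illustrative file path:

dataset = tf.data.TFRecordDataset("/tmp/examples.tfrecord")  # illustrative path
dataset = dataset.map(
    parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)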
Example 10: _parse_fn
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def _parse_fn(self, value):
  """Parses an image and its label from a serialized TFExample.

  Args:
    value: serialized string containing a TFExample.

  Returns:
    A dict of parsed features from the TFExample.
  """
  if FLAGS.get_flag_value('pseudo_label_key', None):
    self.ORIGINAL_LABEL_KEY = FLAGS.get_flag_value(
        'original_label_key', None)
    assert self.ORIGINAL_LABEL_KEY is not None, (
        'You must set original_label_key for pseudo labeling.')

    # Replace the original label_key with pseudo_label_key.
    self.LABEL_KEY = FLAGS.get_flag_value('pseudo_label_key', None)
    self.FEATURE_MAP.update({
        self.LABEL_KEY: tf.FixedLenFeature(shape=[], dtype=tf.int64),
        self.ORIGINAL_LABEL_KEY: tf.FixedLenFeature(
            shape=[], dtype=tf.int64),
        self.FLAG_KEY: tf.FixedLenFeature(shape=[], dtype=tf.int64),
    })
  return tf.parse_single_example(value, self.FEATURE_MAP)
Example 11: __call__
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def __call__(self, example_string):
  """Processes a single example string.

  Extracts and processes the feature, and ignores the label.

  Args:
    example_string: str, an Example protocol buffer.

  Returns:
    feat: The feature tensor.
  """
  feat = tf.parse_single_example(
      example_string,
      features={
          'image/embedding':
              tf.FixedLenFeature([self.feat_len], dtype=tf.float32),
          'image/class/label':
              tf.FixedLenFeature([], tf.int64)
      })['image/embedding']
  return feat
Example 12: parse_and_preprocess
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def parse_and_preprocess(self, value, batch_position):
  """Parse a TFRecord."""
  del batch_position
  assert self.supports_datasets()

  context_features = {
      'labels': tf.VarLenFeature(dtype=tf.int64),
      'input_length': tf.FixedLenFeature([], dtype=tf.int64),
      'label_length': tf.FixedLenFeature([], dtype=tf.int64),
  }
  sequence_features = {
      'features': tf.FixedLenSequenceFeature([161], dtype=tf.float32)
  }
  context_parsed, sequence_parsed = tf.parse_single_sequence_example(
      serialized=value,
      context_features=context_features,
      sequence_features=sequence_features,
  )

  return [
      # Input
      tf.expand_dims(sequence_parsed['features'], axis=2),
      # Label
      tf.cast(
          tf.reshape(
              tf.sparse_tensor_to_dense(context_parsed['labels']), [-1]),
          dtype=tf.int32),
      # Input length
      tf.cast(
          tf.reshape(context_parsed['input_length'], [1]),
          dtype=tf.int32),
      # Label length
      tf.cast(
          tf.reshape(context_parsed['label_length'], [1]),
          dtype=tf.int32),
  ]
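Because the parsed sequence and label lengths vary per record, outputs from a parser like this are commonly combined with padded batching. A minimal sketch, where the batch size and padded shapes are illustrative and `dataset` is assumed to yield the four tensors returned above:

dataset = dataset.padded_batch(
    batch_size=16,
    padded_shapes=([None, 161, 1], [None], [1], [1]))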
Example 13: example_reading_spec
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def example_reading_spec(self):
  data_fields, data_items_to_decoders = (
      super(BabiQa, self).example_reading_spec())
  data_fields["targets"] = tf.FixedLenFeature([1], tf.int64)
  return (data_fields, data_items_to_decoders)
Example 14: _references_content
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def _references_content(ref_files):
  """Returns dict<str ref_url, str ref_content>."""
  example_spec = {
      "url": tf.FixedLenFeature([], tf.string),
      "content": tf.FixedLenFeature([], tf.string),
  }
  data = {}
  for ex in generator_utils.tfrecord_iterator(
      ref_files, gzipped=True, example_spec=example_spec):
    data[ex["url"]] = text_encoder.to_unicode(ex["content"])
  return data
Example 15: extra_reading_spec
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import FixedLenFeature [as alias]
def extra_reading_spec(self):
  """Additional data fields to store on disk and their decoders."""
  data_fields = {
      "frame_number": tf.FixedLenFeature([1], tf.int64),
      "action": tf.FixedLenFeature([4], tf.float32),
  }
  decoders = {
      "frame_number":
          contrib.slim().tfexample_decoder.Tensor(tensor_key="frame_number"),
      "action":
          contrib.slim().tfexample_decoder.Tensor(tensor_key="action"),
  }
  return data_fields, decoders
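This extra_reading_spec pairs directly with Example 1: the video problem's example_reading_spec merges these extra fields and decoders into its own image fields, so the per-frame frame_number and action are read back alongside each decoded frame.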