This article collects typical usage examples of the Python function tensorflow.parse_single_example. If you have been wondering what parse_single_example does, how to call it, or what real-world usage looks like, the curated code samples below should help.
The following shows 15 code examples of parse_single_example, sorted by popularity by default.
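Before the examples, here is a minimal sketch of the basic call pattern, assuming TensorFlow 1.x (where the function lives at the top level as tf.parse_single_example) and a hypothetical TFRecord file and feature keys: you pass one serialized tf.train.Example string plus a dict of feature specs, and you get back a dict of decoded tensors.

# Minimal sketch of the basic call pattern (assumes TensorFlow 1.x;
# the file name and feature keys below are hypothetical).
import tensorflow as tf

def parse_fn(serialized_example):
    # Map each feature key to a spec describing its type and shape.
    feature_spec = {
        'image_raw': tf.FixedLenFeature([], tf.string),   # raw image bytes
        'label': tf.FixedLenFeature([], tf.int64),        # scalar class id
    }
    # Returns a dict: {'image_raw': <string scalar>, 'label': <int64 scalar>}
    parsed = tf.parse_single_example(serialized_example, feature_spec)
    image = tf.decode_raw(parsed['image_raw'], tf.uint8)
    label = tf.cast(parsed['label'], tf.int32)
    return image, label

# Typical use with the tf.data API (one record at a time, hence "single"):
dataset = tf.data.TFRecordDataset('train.tfrecords').map(parse_fn)
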
Example 1: read_and_decode
def read_and_decode(filename, is_train):
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    if is_train == True:
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               "hat_label": tf.FixedLenFeature([], tf.int64),
                                               "hair_label": tf.FixedLenFeature([], tf.int64),
                                               "gender_label": tf.FixedLenFeature([], tf.int64),
                                               "top_label": tf.FixedLenFeature([], tf.int64),
                                               "down_label": tf.FixedLenFeature([], tf.int64),
                                               "shoes_label": tf.FixedLenFeature([], tf.int64),
                                               "bag_label": tf.FixedLenFeature([], tf.int64),
                                               "img_raw": tf.FixedLenFeature([], tf.string),
                                           })
        img = tf.decode_raw(features['img_raw'], tf.uint8)
        img = tf.reshape(img, [128, 256, 3])
        # image = Image.frombytes('RGB', (224, 224), img[0])
        img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
        # print(type(img))
        # img = np.asarray(img, dtype=np.uint8)
        # print(type(img))
        # tl.visualize.frame(I=img, second=5, saveable=False, name='frame', fig_idx=12836)
        hat_label = tf.cast(features['hat_label'], tf.int32)
        hair_label = tf.cast(features['hair_label'], tf.int32)
        gender_label = tf.cast(features['gender_label'], tf.int32)
        top_label = tf.cast(features['top_label'], tf.int32)
        down_label = tf.cast(features['down_label'], tf.int32)
        shoes_label = tf.cast(features['shoes_label'], tf.int32)
        bag_label = tf.cast(features['bag_label'], tf.int32)
        labels = {"hat": hat_label, "hair": hair_label, "gender": gender_label,
                  "top": top_label, "down": down_label, "shoes": shoes_label,
                  "bag": bag_label}
        return img, labels
    else:
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               "img_raw": tf.FixedLenFeature([], tf.string),
                                           })
        img = tf.decode_raw(features['img_raw'], tf.uint8)
        img = tf.reshape(img, [128, 256, 3])
        img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
        # tl.visualize.frame(I=img, second=5, saveable=False, name='frame', fig_idx=12833)
        return img
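Example 1 only builds the per-record decode ops; to actually pull data you still need to batch the tensors and start the queue runners. The following is a rough usage sketch under that assumption (the file path and batch size are hypothetical, and only the "hat" label is batched for brevity):

# Usage sketch for Example 1 (queue-runner style, TensorFlow 1.x).
img, labels = read_and_decode('train.tfrecords', is_train=True)  # hypothetical path
img_batch, hat_batch = tf.train.shuffle_batch(
    [img, labels['hat']], batch_size=32,
    capacity=2000, min_after_dequeue=1000)

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    images, hats = sess.run([img_batch, hat_batch])  # one decoded batch
    coord.request_stop()
    coord.join(threads)
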
Example 2: parse_labeled_example
def parse_labeled_example(
        example_proto, view_index, preprocess_fn, image_attr_keys, label_attr_keys):
    """Parses a labeled test example from a specified view.

    Args:
      example_proto: A scalar string Tensor.
      view_index: Int, index on which view to parse.
      preprocess_fn: A function with the signature (raw_images, is_training) ->
        preprocessed_images, where raw_images is a 4-D float32 image `Tensor`
        of raw images, is_training is a Boolean describing if we're in training,
        and preprocessed_images is a 4-D float32 image `Tensor` holding
        preprocessed images.
      image_attr_keys: List of Strings, names for image keys.
      label_attr_keys: List of Strings, names for label attributes.

    Returns:
      data: A tuple of images, attributes and tasks `Tensors`.
    """
    features = {}
    for attr_key in image_attr_keys:
        features[attr_key] = tf.FixedLenFeature((), tf.string)
    for attr_key in label_attr_keys:
        features[attr_key] = tf.FixedLenFeature((), tf.int64)
    parsed_features = tf.parse_single_example(example_proto, features)
    image_only_keys = [i for i in image_attr_keys if 'image' in i]
    view_image_key = image_only_keys[view_index]
    image = preprocessing.decode_image(parsed_features[view_image_key])
    preprocessed = preprocess_fn(image, is_training=False)
    attributes = [parsed_features[k] for k in label_attr_keys]
    task = parsed_features['task']
    return tuple([preprocessed] + attributes + [task])
Example 3: read_and_decode
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'vector': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    # features = tf.parse_single_example(serialized_example, dense_keys=['vector', 'label'], dense_types=[tf.string, tf.int64])

    # Convert from a scalar string tensor (whose single string has
    # length tf_model.IMAGE_PIXELS) to a float32 tensor with shape
    # [tf_model.IMAGE_PIXELS].
    image = tf.decode_raw(features['vector'], tf.float32)
    image.set_shape([FEATURE_DIMENSIONALITY])

    if FLAGS.transpose_input:
        image = tf.reshape(image, FEATURE_INPUT_SHAPE)
        image = tf.transpose(image, perm=[0, 2, 1])
        image = tf.reshape(image, [-1])

    # print("Image shape is %s" % (image.shape))

    # OPTIONAL: Could reshape into a 28x28 image and apply distortions
    # here. Since we are not applying any distortions in this
    # example, and the next step expects the image to be flattened
    # into a vector, we don't bother.

    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    # image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)
    return image, label
Example 4: read_and_decode
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        dense_keys=['image_raw', 'label'],
        # Defaults are not specified since both keys are required.
        dense_types=[tf.string, tf.int64])

    # Convert from a scalar string tensor (whose single string has
    # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
    # [mnist.IMAGE_PIXELS].
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([mnist.IMAGE_PIXELS])

    # OPTIONAL: Could reshape into a 28x28 image and apply distortions
    # here. Since we are not applying any distortions in this
    # example, and the next step expects the image to be flattened
    # into a vector, we don't bother.

    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)
    return image, label
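Note that Example 4 uses the very old dense_keys/dense_types signature from pre-1.0 TensorFlow, which later releases removed. For reference, a sketch of the equivalent call with the features= dict used by the other examples on this page would be:

# Equivalent parse with the features= dict accepted by TensorFlow 1.x
# (same keys and dtypes as the dense_keys/dense_types call above).
features = tf.parse_single_example(
    serialized_example,
    features={
        'image_raw': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
    })
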
Example 5: record_parser_fn
def record_parser_fn(value, is_training):
    """Parse an image record from `value`."""
    keys_to_features = {
        'width': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
        'height': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
        'image': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
        'label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
        'name': tf.FixedLenFeature([], dtype=tf.string, default_value='')
    }
    parsed = tf.parse_single_example(value, keys_to_features)

    image = tf.image.decode_image(tf.reshape(parsed['image'], shape=[]),
                                  FLAGS.image_channels)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)

    bbox = tf.concat(axis=0, values=[[[]], [[]], [[]], [[]]])
    bbox = tf.transpose(tf.expand_dims(bbox, 0), [0, 2, 1])
    image = image_preprocess.preprocess_image(
        image=image,
        output_height=FLAGS.image_size,
        output_width=FLAGS.image_size,
        object_cover=0.0,
        area_cover=0.05,
        is_training=is_training,
        bbox=bbox)

    label = tf.cast(tf.reshape(parsed['label'], shape=[]), dtype=tf.int32)
    label = tf.one_hot(label, FLAGS.class_num)
    return image, label
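Because record_parser_fn takes a single serialized record plus an is_training flag, it plugs naturally into a Dataset pipeline. A possible wiring, sketched under the assumption of a hypothetical file pattern and batch size, is:

# Sketch: feeding record_parser_fn through tf.data (TensorFlow 1.x;
# the file pattern and batch size are hypothetical).
filenames = tf.gfile.Glob('train-*.tfrecord')
dataset = (tf.data.TFRecordDataset(filenames)
           .map(lambda value: record_parser_fn(value, is_training=True))
           .shuffle(1024)
           .batch(32)
           .prefetch(1))
images, labels = dataset.make_one_shot_iterator().get_next()
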
Example 6: read_and_decode
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64)
        })

    image = tf.decode_raw(features['image_raw'], tf.uint8)
    img_height = tf.cast(features['height'], tf.int32)
    img_width = tf.cast(features['width'], tf.int32)
    img_depth = tf.cast(features['depth'], tf.int32)

    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)

    image.set_shape([IMG_PIXELS])
    image = tf.reshape(image, [IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])

    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    return image, label
Example 7: deserialize
def deserialize(examples_serialized):
    """Called by Dataset.map() to convert batches of records to tensors."""
    features = tf.parse_single_example(examples_serialized, feature_map)
    users = tf.reshape(tf.decode_raw(
        features[movielens.USER_COLUMN], tf.int32), (batch_size,))
    items = tf.reshape(tf.decode_raw(
        features[movielens.ITEM_COLUMN], tf.uint16), (batch_size,))

    if params["use_tpu"] or params["use_xla_for_gpu"]:
        items = tf.cast(items, tf.int32)  # TPU and XLA disallows uint16 infeed.

    if not training:
        dupe_mask = tf.reshape(tf.cast(tf.decode_raw(
            features[rconst.DUPLICATE_MASK], tf.int8), tf.bool), (batch_size,))
        return {
            movielens.USER_COLUMN: users,
            movielens.ITEM_COLUMN: items,
            rconst.DUPLICATE_MASK: dupe_mask,
        }

    labels = tf.reshape(tf.cast(tf.decode_raw(
        features["labels"], tf.int8), tf.bool), (batch_size,))
    return {
        movielens.USER_COLUMN: users,
        movielens.ITEM_COLUMN: items,
    }, labels
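deserialize depends on a feature_map built elsewhere in the surrounding code. Since every column here is unpacked with tf.decode_raw, a plausible shape for that map is one bytes feature per column; the sketch below is an assumption, not taken from the source:

# Assumption: each column is serialized as one bytes feature per record,
# which tf.decode_raw above unpacks into a fixed-size batch.  Training
# records carry "labels"; evaluation records carry rconst.DUPLICATE_MASK.
feature_map = {
    movielens.USER_COLUMN: tf.FixedLenFeature([], tf.string),
    movielens.ITEM_COLUMN: tf.FixedLenFeature([], tf.string),
    "labels": tf.FixedLenFeature([], tf.string),
}
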
Example 8: read_cifar10
def read_cifar10(filename_queue):
    """Reads and parses examples from CIFAR10 data files.

    Recommendation: if you want N-way read parallelism, call this function
    N times. This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.

    Args:
      filename_queue: A queue of strings with the filenames to read from.

    Returns:
      An object representing a single example, with the following fields:
        height: number of rows in the result (32)
        width: number of columns in the result (32)
        depth: number of color channels in the result (3)
        key: a scalar string Tensor describing the filename & record number
          for this example.
        label: an int32 Tensor with the label in the range 0..9.
        uint8image: a [height, width, depth] uint8 Tensor with the image data
    """

    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()

    # Dimensions of the images in the CIFAR-10 dataset.
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    label_bytes = 1  # 2 for CIFAR-100
    result.height = 256
    result.width = 256
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    # Every record consists of a label followed by the image, with a
    # fixed number of bytes for each.
    record_bytes = label_bytes + image_bytes

    # Read a record, getting filenames from the filename_queue. No
    # header or footer in the CIFAR-10 format, so we leave header_bytes
    # and footer_bytes at their default of 0.
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    record_bytes = tf.decode_raw(features['image_raw'], tf.uint8)

    # depth_major = tf.reshape(record_bytes, [result.depth, result.height, result.width])
    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.reshape(record_bytes, [result.height, result.width, result.depth])

    # The first bytes represent the label, which we convert from uint8->int32.
    result.label = tf.cast(features['label'], tf.int32)

    return result
Example 9: read_and_preprocess
def read_and_preprocess(example_data):
    """parses tfrecord and returns image, label.

    Args:
      example_data (str): tfrecord
    Returns:
      img, label
    """
    height = width = PATCH_SIZE(params)
    parsed = tf.parse_single_example(
        example_data, {
            'ref': tf.VarLenFeature(tf.float32),
            'ltg': tf.VarLenFeature(tf.float32),
            'has_ltg': tf.FixedLenFeature([], tf.int64, 1),
        })
    parsed['ref'] = _sparse_to_dense(parsed['ref'], height * width)
    parsed['ltg'] = _sparse_to_dense(parsed['ltg'], height * width)

    # keras wants labels to be float32
    label = tf.cast(
        tf.reshape(parsed['has_ltg'], shape=[]),
        dtype=tf.float32)
    print('shape of label {}'.format(label.shape))

    img = reshape_into_image(parsed, params)
    return img, label
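The _sparse_to_dense helper is referenced but not shown here. A minimal sketch of what such a helper typically does with a VarLenFeature follows; it is an assumption, not the author's actual code:

# Hypothetical helper: densify a VarLenFeature and pin its static length.
def _sparse_to_dense(sparse_tensor, size):
    dense = tf.sparse_tensor_to_dense(sparse_tensor, default_value=0.0)
    dense = tf.reshape(dense, [size])  # assumes exactly `size` values were written
    return dense
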
Example 10: tfrecord_to_graph_ops
def tfrecord_to_graph_ops(filenames, num_epochs):
    file_queue = tf.train.string_input_producer(
        filenames, name='file_queue', num_epochs=num_epochs
    )
    reader = tf.TFRecordReader(
        options=tf.python_io.TFRecordOptions(
            compression_type=tf.python_io.TFRecordCompressionType.GZIP
        )
    )
    _, tfrecord = reader.read(file_queue)

    tfrecord_features = tf.parse_single_example(
        tfrecord,
        features={
            'images': tf.FixedLenFeature([], tf.string),
            'labels': tf.FixedLenFeature([], tf.string),
        },
        name='data'
    )
    tfeat = tf.decode_raw(tfrecord_features['images'], tf.uint8)
    # note, 'NCHW' is only supported on GPUs, so use 'NHWC'...
    tfeat = tf.reshape(tfeat, [-1, 28, 28, 1])
    ttarg = tf.decode_raw(tfrecord_features['labels'], tf.uint8)
    ttarg = tf.one_hot(indices=ttarg, depth=10, on_value=1, off_value=0)
    return tfeat, ttarg
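This reader only succeeds if the records were written with the same GZIP option and with 'images'/'labels' stored as raw uint8 bytes. A writer-side sketch matching those assumptions, using dummy data and a hypothetical file name, would be:

# Sketch: writing GZIP-compressed TFRecords in the layout the reader above expects.
import numpy as np
import tensorflow as tf

image_bytes = np.zeros([28, 28, 1], np.uint8).tobytes()  # dummy image
label_bytes = np.array([3], np.uint8).tobytes()           # dummy label
options = tf.python_io.TFRecordOptions(
    compression_type=tf.python_io.TFRecordCompressionType.GZIP)
writer = tf.python_io.TFRecordWriter('mnist_train.tfrecord.gz', options=options)
example = tf.train.Example(features=tf.train.Features(feature={
    'images': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])),
    'labels': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label_bytes])),
}))
writer.write(example.SerializeToString())
writer.close()
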
Example 11: getImage
def getImage(filenames):
    # convert filenames to a queue for an input pipeline.
    filenameQ = tf.train.string_input_producer(filenames, num_epochs=None)

    # object to read records
    recordReader = tf.TFRecordReader()

    # read the full set of features for a single example
    key, fullExample = recordReader.read(filenameQ)

    # parse the full example into its component features.
    features = tf.parse_single_example(
        fullExample,
        features={
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64),
            'image/depth': tf.FixedLenFeature([], tf.int64),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
            'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
            'image/filename': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
            'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value='')
        })

    label = features['image/class/label']
    image_buffer = features['image/encoded']
    image = tf.decode_raw(image_buffer, tf.float32)
    image = tf.reshape(image, tf.stack([FLAGS.width * FLAGS.height * FLAGS.depth]))
    label = tf.stack(tf.one_hot(label - 1, nLabel))
    return label, image
Example 12: _test
def _test(self, kwargs, expected_values=None, expected_err_re=None):
    with self.test_session() as sess:
        # Pull out some keys to check shape inference
        dense_keys = kwargs["dense_keys"] if "dense_keys" in kwargs else []
        sparse_keys = kwargs["sparse_keys"] if "sparse_keys" in kwargs else []
        dense_shapes = kwargs["dense_shapes"] if "dense_shapes" in kwargs else []

        # Returns dict w/ Tensors and SparseTensors
        out = tf.parse_single_example(**kwargs)

        # Check shapes
        self.assertEqual(len(dense_keys), len(dense_shapes))
        for (k, s) in zip(dense_keys, dense_shapes):
            self.assertEqual(tuple(out[k].get_shape()), s)
        for k in sparse_keys:
            self.assertEqual(tuple(out[k].indices.get_shape().as_list()), (None, 1))
            self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
            self.assertEqual(tuple(out[k].shape.get_shape().as_list()), (1,))

        # Check values
        result = flatten_values_tensors_or_sparse(out.values())  # flatten values
        if expected_err_re is None:
            tf_result = sess.run(result)
            _compare_output_to_expected(self, out, expected_values, tf_result)
        else:
            with self.assertRaisesOpError(expected_err_re):
                sess.run(result)
Example 13: _input_fn
def _input_fn():
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
            filenames, num_epochs=num_epochs)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        features = tf.parse_single_example(
            serialized_example,
            {
                'words': tf.VarLenFeature(tf.string),
                'subreddit': tf.FixedLenFeature([1], tf.int64)
            }
        )
        padded_words = tf.sparse_to_dense(
            features['words'].indices,
            [sentence_length],
            features['words'].values,
            default_value='UNK'
        )
        word_indices = tf.string_to_hash_bucket_fast(
            padded_words,
            vocab_size)

        sentences, subreddits = tf.train.shuffle_batch(
            [word_indices, features['subreddit']],
            batch_size,
            capacity=1000 + 3 * batch_size,
            min_after_dequeue=1000,
            enqueue_many=False
        )
    return sentences, subreddits
Example 14: read_and_decode
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })

    # Convert from a scalar string tensor (whose single string has
    # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
    # [mnist.IMAGE_PIXELS].
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([57600])

    # OPTIONAL: Could reshape into a 28x28 image and apply distortions
    # here. Since we are not applying any distortions in this
    # example, and the next step expects the image to be flattened
    # into a vector, we don't bother.

    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

    # Convert label from a scalar uint8 tensor to an int32 scalar.
    # label = tf.cast(features['label'], tf.int32) <-- placeholder instead
    return tf.reshape(image, [160, 120, 3]), tf.placeholder(tf.int32)  # TODO doublecheck this
Example 15: _parse_example_proto
def _parse_example_proto(example_serialized):
    """Parses an Example proto containing a training example of an image.

    The dataset contains serialized Example protocol buffers.
    The Example proto is expected to contain features named
    image/encoded (a JPEG-encoded string) and image/class/label (int).

    Args:
      example_serialized: scalar Tensor tf.string containing a serialized
        Example protocol buffer.

    Returns:
      image_buffer: Tensor tf.string containing the contents of a JPEG file.
      label: Tensor tf.int64 containing the label.
    """
    # Dense features in Example proto.
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                            default_value=''),
        'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                                default_value=-1)
    }
    features = tf.parse_single_example(example_serialized, feature_map)
    return features['image/encoded'], features['image/class/label']
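The function intentionally returns the still-encoded JPEG bytes; decoding is left to the caller. A short follow-up sketch of that step (the resize target is arbitrary) might be:

# Sketch: decoding the JPEG bytes returned by _parse_example_proto.
image_buffer, label = _parse_example_proto(example_serialized)
image = tf.image.decode_jpeg(image_buffer, channels=3)
image = tf.image.convert_image_dtype(image, tf.float32)  # [0, 1] floats
image = tf.image.resize_images(image, [224, 224])        # arbitrary target size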