This article collects typical usage examples of the tensorflow.decode_raw function in Python. If you are unsure what decode_raw does, how to call it, or what it looks like in real code, the curated examples below should help.
A total of 15 decode_raw code examples are shown below, sorted by popularity by default.
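Before the project examples, here is a minimal self-contained sketch of what tf.decode_raw does, assuming TensorFlow 1.x (where the function lives at the top level): it reinterprets the bytes of a string tensor as a flat vector of fixed-width numbers, so the output dtype must match whatever dtype the bytes were serialized from.

import numpy as np
import tensorflow as tf

# tf.decode_raw reinterprets raw bytes as a flat numeric vector.
# The out_type must match the dtype used when the bytes were produced,
# otherwise the values (and even the length) come out wrong.
raw_bytes = tf.constant(np.arange(4, dtype=np.float32).tobytes())
decoded = tf.decode_raw(raw_bytes, tf.float32)  # shape: [4]

with tf.Session() as sess:
    print(sess.run(decoded))  # [0. 1. 2. 3.]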
Example 1: read_and_decode
def read_and_decode(filename_queue):
    # input: filename
    # output: image, label pair
    # setup a TF record reader
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # list the features we want to extract, i.e., the image and the label
    features = tf.parse_single_example(
        serialized_example,
        features={
            'img_raw': tf.FixedLenFeature([], tf.string),
            'label_raw': tf.FixedLenFeature([], tf.string),
        })
    # Decode the training image
    # Convert from a scalar string tensor (whose single string has
    # length 256*256) to a float tensor
    image = tf.decode_raw(features['img_raw'], tf.int64)
    image.set_shape([65536])
    image_re = tf.reshape(image, (256, 256))
    # Scale input pixels by 1024
    image_re = tf.cast(image_re, tf.float32) * (1. / 1024)
    # decode the label image, an image with all 0's except 1's where the left
    # ventricle exists
    label = tf.decode_raw(features['label_raw'], tf.uint8)
    label.set_shape([65536])
    label_re = tf.reshape(label, [256, 256])
    return image_re, label_re
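A hedged sketch of how a reader like Example 1 is usually wired into a TF 1.x queue-based input pipeline; the file name 'train.tfrecords' and the batch size are illustrative placeholders, not values from the original project.

# Sketch only: feeding read_and_decode from Example 1 into a batch queue.
filename_queue = tf.train.string_input_producer(['train.tfrecords'])
image, label = read_and_decode(filename_queue)
image_batch, label_batch = tf.train.shuffle_batch(
    [image, label], batch_size=8, capacity=256, min_after_dequeue=64)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    images, labels = sess.run([image_batch, label_batch])  # one training batch
    coord.request_stop()
    coord.join(threads)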
Example 2: read_and_decode
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'mask_raw': tf.FixedLenFeature([], tf.string),
        }
    )
    # Must be read back as uint8 here, matching the dtype the records were written with.
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    segmentation = tf.decode_raw(features['mask_raw'], tf.uint8)
    image.set_shape([224 * 224 * 3])
    segmentation.set_shape([224 * 224 * 1])
    image = tf.reshape(image, [224, 224, 3])
    segmentation = tf.reshape(segmentation, [224, 224])
    # Scale the RGB image to [0, 1].
    rgb = tf.cast(image, tf.float32)
    rgb = rgb * (1. / 255)
    # Map the mask values to integer class ids in [0, 20].
    mask = tf.cast(segmentation, tf.float32)
    mask = (mask / 255.) * 20
    mask = tf.cast(mask, tf.int64)
    return rgb, mask
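For context, a hedged sketch of the writer side these readers assume: the raw image and mask bytes come from NumPy's tobytes() and are stored as bytes_list features. The arrays image_array (uint8, 224x224x3) and mask_array (uint8, 224x224), the helper _bytes_feature, and the output file name are all hypothetical.

# Sketch only: writing one record that Example 2 could read back.
def _bytes_feature(value):
    # Wrap a raw byte string in a TFRecord bytes_list feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

with tf.python_io.TFRecordWriter('segmentation.tfrecords') as writer:
    example = tf.train.Example(features=tf.train.Features(feature={
        'image_raw': _bytes_feature(image_array.tobytes()),  # uint8, 224x224x3 (assumed)
        'mask_raw': _bytes_feature(mask_array.tobytes()),    # uint8, 224x224 (assumed)
    }))
    writer.write(example.SerializeToString())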
Example 3: read_and_decode
def read_and_decode(filename_queue, label_type, shape):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label_raw': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.cast(image, tf.float32)
    image = (image - 127.5) * (1. / 128.0)
    image.set_shape([shape * shape * 3])
    image = tf.reshape(image, [shape, shape, 3])
    label = tf.decode_raw(features['label_raw'], tf.float32)
    if label_type == 'cls':
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_flip_up_down(image)
        label.set_shape([2])
    elif label_type == 'bbx':
        label.set_shape([4])
    elif label_type == 'pts':
        label.set_shape([10])
    return image, label
Example 4: get_batch
def get_batch():
    '''Makes batch queues from the training data.
    Returns:
        A tuple of x (Tensor), y (Tensor), and num_batch (int).
        x and y have the shape [batch_size, maxlen].
    '''
    import tensorflow as tf
    # Load data
    X, Y = load_train_data()
    # Create queues
    x, y = tf.train.slice_input_producer([tf.convert_to_tensor(X),
                                          tf.convert_to_tensor(Y)])
    x = tf.decode_raw(x, tf.int32)
    y = tf.decode_raw(y, tf.int32)
    x, y = tf.train.batch([x, y],
                          shapes=[(None,), (None,)],
                          num_threads=8,
                          batch_size=hp.batch_size,
                          capacity=hp.batch_size * 64,
                          allow_smaller_final_batch=False,
                          dynamic_pad=True)
    num_batch = len(X) // hp.batch_size
    return x, y, num_batch  # (N, None) int32, (N, None) int32, ()
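A hedged note on what load_train_data is assumed to return here: because tf.decode_raw only reinterprets bytes, each element of X and Y must already be a byte string of packed int32 ids. A minimal illustration with hypothetical data:

import numpy as np

# Sketch only: byte-string inputs that get_batch could decode.
sentences = [[2, 15, 7], [4, 9]]  # hypothetical variable-length id sequences
X = [np.array(ids, dtype=np.int32).tobytes() for ids in sentences]
# tf.decode_raw(x, tf.int32) later recovers each sequence, and
# dynamic_pad=True pads every sequence in a batch to the longest one.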
Example 5: tfrecord_to_graph_ops
def tfrecord_to_graph_ops(filenames, num_epochs):
    file_queue = tf.train.string_input_producer(
        filenames, name='file_queue', num_epochs=num_epochs
    )
    reader = tf.TFRecordReader(
        options=tf.python_io.TFRecordOptions(
            compression_type=tf.python_io.TFRecordCompressionType.GZIP
        )
    )
    _, tfrecord = reader.read(file_queue)
    tfrecord_features = tf.parse_single_example(
        tfrecord,
        features={
            'images': tf.FixedLenFeature([], tf.string),
            'labels': tf.FixedLenFeature([], tf.string),
        },
        name='data'
    )
    tfeat = tf.decode_raw(tfrecord_features['images'], tf.uint8)
    # note, 'NCHW' is only supported on GPUs, so use 'NHWC'...
    tfeat = tf.reshape(tfeat, [-1, 28, 28, 1])
    ttarg = tf.decode_raw(tfrecord_features['labels'], tf.uint8)
    ttarg = tf.one_hot(indices=ttarg, depth=10, on_value=1, off_value=0)
    return tfeat, ttarg
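One operational detail: because string_input_producer is given num_epochs, it creates a local variable, so the session must also run tf.local_variables_initializer() before the queue runners start. A hedged sketch of running these ops (the file name is illustrative):

# Sketch only: evaluating the ops from Example 5 until the epoch ends.
features_op, targets_op = tfrecord_to_graph_ops(['train.tfrecord.gz'], num_epochs=1)
with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])  # required because of num_epochs
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            features, targets = sess.run([features_op, targets_op])
    except tf.errors.OutOfRangeError:
        pass  # the epoch limit was reached
    finally:
        coord.request_stop()
        coord.join(threads)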
Example 6: read_single_example_and_decode
def read_single_example_and_decode(filename_queue):
    tfrecord_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
    reader = tf.TFRecordReader(options=tfrecord_options)
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized=serialized_example,
        features={
            'img_name': tf.FixedLenFeature([], tf.string),
            'img_height': tf.FixedLenFeature([], tf.int64),
            'img_width': tf.FixedLenFeature([], tf.int64),
            'img': tf.FixedLenFeature([], tf.string),
            'gtboxes_and_label': tf.FixedLenFeature([], tf.string),
            'num_objects': tf.FixedLenFeature([], tf.int64)
        }
    )
    img_name = features['img_name']
    img_height = tf.cast(features['img_height'], tf.int32)
    img_width = tf.cast(features['img_width'], tf.int32)
    img = tf.decode_raw(features['img'], tf.uint8)
    img = tf.reshape(img, shape=[img_height, img_width, 3])
    gtboxes_and_label = tf.decode_raw(features['gtboxes_and_label'], tf.int32)
    gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 9])
    num_objects = tf.cast(features['num_objects'], tf.int32)
    return img_name, img, gtboxes_and_label, num_objects
Example 7: parse_sequence_example
def parse_sequence_example(self, record_string):
    features_dict = {
        'images_raw': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'depth': tf.FixedLenFeature([], tf.int64),
        'sequence_length': tf.FixedLenFeature([], tf.int64)
    }
    if ADD_GEOLOCATIONS:
        features_dict['geo'] = tf.FixedLenFeature([], tf.string)
    features = tf.parse_single_example(record_string, features_dict)
    images = tf.decode_raw(features['images_raw'], tf.float32)
    width = tf.cast(features['width'], tf.int32)
    height = tf.cast(features['height'], tf.int32)
    depth = tf.cast(features['depth'], tf.int32)
    label = tf.cast(features['label'], tf.int32)
    sequence_length = tf.cast(features['sequence_length'], tf.int32)
    images = tf.reshape(images, [sequence_length, height, width, depth])
    if ADD_GEOLOCATIONS:
        geo = tf.decode_raw(features['geo'], tf.float32)
        geo = tf.reshape(geo, [2, ])
        return images, label, geo
    else:
        return images, label
Example 8: build_next_batch_op
def build_next_batch_op(self):
    reader = tf.TFRecordReader()
    _, serialized_experience = reader.read(self.filename_queue)
    features = tf.parse_single_example(serialized_experience, features={
        'state': tf.FixedLenFeature([], tf.string),
        'action': tf.FixedLenFeature([2], tf.float32),
        'reward': tf.FixedLenFeature([], tf.float32),
        'next_state': tf.FixedLenFeature([], tf.string),
        'is_episode_finished': tf.FixedLenFeature([], tf.int64)})
    state = tf.decode_raw(features['state'], tf.uint8)
    state.set_shape([86 * 86 * 4])
    action = features['action']
    reward = features['reward']
    next_state = tf.decode_raw(features['next_state'], tf.uint8)
    next_state.set_shape([86 * 86 * 4])
    is_episode_finished = features['is_episode_finished']
    state = tf.reshape(state, [86, 86, 4])
    next_state = tf.reshape(next_state, [86, 86, 4])
    state_batch, action_batch, reward_batch, next_state_batch, is_episode_finished_batch = tf.train.shuffle_batch(
        [state, action, reward, next_state, is_episode_finished],
        batch_size=self.batch_size, capacity=100, min_after_dequeue=0)
    return state_batch, action_batch, reward_batch, next_state_batch, is_episode_finished_batch
Example 9: read_and_decode
def read_and_decode(self, filename_queue):
    """
    A definition of how TF should read the file record.
    Slightly altered version from https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/how_tos/ \
    reading_data/fully_connected_reader.py

    :param filename_queue: The file name queue to be read.
    :type filename_queue: tf.QueueBase
    :return: The read file data including the image data and depth data.
    :rtype: (tf.Tensor, tf.Tensor)
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'depth_raw': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [self.height, self.width, self.channels])
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    depth = tf.decode_raw(features['depth_raw'], tf.float32)
    depth = tf.reshape(depth, [self.height, self.width, 1])
    return image, depth
Example 10: buildSpImageConverter
def buildSpImageConverter(channelOrder, img_dtype):
    """
    Convert an imageIO byte-encoded image into an image tensor suitable as input to ConvNets.
    The name of the input must be a subset of those specified in `image.imageIO.imageSchema`.
    :param img_dtype: the type of data the underlying image bytes represent
    """
    with IsolatedSession() as issn:
        # Flat image data -> image dimensions
        # This has to conform to `imageIO.imageSchema`
        height = tf.placeholder(tf.int32, [], name="height")
        width = tf.placeholder(tf.int32, [], name="width")
        num_channels = tf.placeholder(tf.int32, [], name="nChannels")
        image_buffer = tf.placeholder(tf.string, [], name="data")
        # The image is packed into bytes with height as the leading dimension,
        # which is the default behavior of the Python Imaging Library.
        shape = tf.reshape(tf.stack([height, width, num_channels], axis=0),
                           shape=(3,), name='shape')
        if img_dtype == 'uint8':
            image_uint8 = tf.decode_raw(image_buffer, tf.uint8, name="decode_raw")
            image_float = tf.to_float(image_uint8)
        elif img_dtype == 'float32':
            image_float = tf.decode_raw(image_buffer, tf.float32, name="decode_raw")
        else:
            raise ValueError('''unsupported image data type "%s", currently only know how to
            handle uint8 and float32''' % img_dtype)
        image_reshaped = tf.reshape(image_float, shape, name="reshaped")
        image_reshaped = imageIO.fixColorChannelOrdering(channelOrder, image_reshaped)
        image_input = tf.expand_dims(image_reshaped, 0, name="image_input")
        gfn = issn.asGraphFunction([height, width, image_buffer, num_channels], [image_input])
    return gfn
Example 11: parser
def parser(self, record):
    keys_to_features = {
        'labels': tf.FixedLenFeature([], tf.string),
        'userIds': tf.VarLenFeature(tf.int64),
        'itemIds': tf.VarLenFeature(tf.int64),
        'user_profiles_indices': tf.FixedLenFeature([], tf.string),
        'user_profiles_values': tf.VarLenFeature(tf.int64),
        'user_profiles_weights': tf.VarLenFeature(tf.float32),
        'user_profiles_shape': tf.FixedLenFeature([2], tf.int64),
        'item_profiles_indices': tf.FixedLenFeature([], tf.string),
        'item_profiles_values': tf.VarLenFeature(tf.int64),
        'item_profiles_weights': tf.VarLenFeature(tf.float32),
        'item_profiles_shape': tf.FixedLenFeature([2], tf.int64)
    }
    parsed = tf.parse_single_example(record, keys_to_features)
    labels = tf.reshape(tf.decode_raw(parsed['labels'], tf.float32), [-1, 1])
    userIds = tf.sparse_tensor_to_dense(parsed['userIds'])
    itemIds = tf.sparse_tensor_to_dense(parsed['itemIds'])
    user_profiles_indices = tf.reshape(tf.decode_raw(parsed['user_profiles_indices'], tf.int64), [-1, 2])
    user_profiles_values = tf.sparse_tensor_to_dense(parsed['user_profiles_values'])
    user_profiles_weights = tf.sparse_tensor_to_dense(parsed['user_profiles_weights'])
    user_profiles_shape = parsed['user_profiles_shape']
    item_profiles_indices = tf.reshape(tf.decode_raw(parsed['item_profiles_indices'], tf.int64), [-1, 2])
    item_profiles_values = tf.sparse_tensor_to_dense(parsed['item_profiles_values'])
    item_profiles_weights = tf.sparse_tensor_to_dense(parsed['item_profiles_weights'])
    item_profiles_shape = parsed['item_profiles_shape']
    return labels, userIds, itemIds, \
        user_profiles_indices, user_profiles_values, user_profiles_weights, user_profiles_shape, \
        item_profiles_indices, item_profiles_values, item_profiles_weights, item_profiles_shape
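A parser with this signature is typically attached to a tf.data pipeline rather than a queue; a minimal hedged sketch (the file name and tuning parameters are assumptions, and batching of the variable-length sparse fields is left out):

# Sketch only: wiring the parser from Example 11 into a tf.data pipeline.
# Run inside the class that defines parser().
dataset = tf.data.TFRecordDataset(['interactions.tfrecord'])
dataset = dataset.map(self.parser, num_parallel_calls=4)
dataset = dataset.prefetch(16)
next_example = dataset.make_one_shot_iterator().get_next()  # the 11-tuple returned by parser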
Example 12: _binary_parse_function_example
def _binary_parse_function_example(serialized_example_protocol):
    '''
    DESCRIPTION:
        This function will deserialize, decompress and then transform
        the image and label into the appropriate shape based on the (new)
        merged structure of the dataset.
    '''
    # Parse the example from the binary format.
    features = {
        'image': tf.FixedLenFeature((), tf.string),
        'label': tf.FixedLenFeature((), tf.string)
    }
    parsed_feature = tf.parse_single_example(serialized_example_protocol,
                                             features)
    # Now set up the appropriate transformation (decoding and reshape).
    height = 514
    width = 513
    depth = 40
    # Decode the image from binary. BEWARE of the dtype: it must match the writer.
    image = tf.decode_raw(parsed_feature['image'], tf.float32)
    image.set_shape([depth * height * width])
    # Reshape in the usual way, since reshape automatically reads in C order.
    image = tf.reshape(image, [height, width, depth])
    # Now decode the label.
    target_len = 6
    label = tf.decode_raw(parsed_feature['label'], tf.float32)
    label.set_shape([target_len])
    # Reshape appropriately.
    label = tf.reshape(label, [target_len, ])
    # Finally, return the example tuple.
    return image, label
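The same tf.data pattern fits this parse function as well; a hedged sketch (the file name, shuffle buffer, and batch size are illustrative, and whether the records are compressed depends on how they were written):

# Sketch only: dataset pipeline around _binary_parse_function_example.
dataset = tf.data.TFRecordDataset(['merged_dataset.tfrecords'])
# Pass compression_type='ZLIB' or 'GZIP' above if the files were written compressed.
dataset = dataset.map(_binary_parse_function_example)
dataset = dataset.shuffle(buffer_size=64).batch(4)
images, labels = dataset.make_one_shot_iterator().get_next()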
Example 13: read_to_numpy
def read_to_numpy(self, file_name, data_type=None):
    """
    Reads an entire TFRecords file as NumPy arrays.

    :param file_name: The TFRecords file name to read.
    :type file_name: str
    :param data_type: Data type of the data. Used if that data type doesn't include things like labels.
    :type data_type: str
    :return: The images and labels as NumPy arrays.
    :rtype: (np.ndarray, np.ndarray)
    """
    feature_types = self.attain_feature_types(data_type)
    images = []
    labels = []
    for tfrecord in tf.python_io.tf_record_iterator(file_name):
        with tf.Graph().as_default() as graph:  # Create a separate graph, as this runs slowly on a single graph.
            features = tf.parse_single_example(tfrecord, features=feature_types)
            image_shape, label_shape = self.extract_shapes_from_tfrecords_features(features, data_type)
            flat_image = tf.decode_raw(features['image_raw'], tf.uint8)
            image_tensor = tf.reshape(flat_image, image_shape)
            image_tensor = tf.squeeze(image_tensor)
            if data_type != 'deploy':
                flat_label = tf.decode_raw(features['label_raw'], tf.float32)
                label_tensor = tf.reshape(flat_label, label_shape)
                label_tensor = tf.squeeze(label_tensor)
            else:
                label_tensor = tf.constant(-1.0, dtype=tf.float32, shape=[1, 1, 1])
            with tf.Session(graph=graph) as session:
                initialize_op = tf.global_variables_initializer()
                session.run(initialize_op)
                image, label = session.run([image_tensor, label_tensor])
                images.append(image)
                labels.append(label)
    return np.stack(images), np.stack(labels)
Example 14: create_image_and_label_inputs_from_file_name_queue
def create_image_and_label_inputs_from_file_name_queue(self, file_name_queue, data_type=None):
    """
    Creates the inputs for the image and label for a given file name queue.

    :param file_name_queue: The file name queue to be used.
    :type file_name_queue: tf.Queue
    :param data_type: The type of data (train, validation, test, deploy, etc) to determine how to process.
    :type data_type: str
    :return: The image and label inputs.
    :rtype: (tf.Tensor, tf.Tensor)
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(file_name_queue)
    feature_types = self.attain_feature_types(data_type)
    features = tf.parse_single_example(serialized_example, features=feature_types)
    image_shape, label_shape = self.extract_shapes_from_tfrecords_features(features, data_type)
    flat_image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(flat_image, image_shape)
    if data_type != 'deploy':
        flat_label = tf.decode_raw(features['label_raw'], tf.float32)
        label = tf.reshape(flat_label, label_shape)
    else:
        # Makes a fake label tensor for preprocessing to work on.
        label = tf.constant(-1.0, dtype=tf.float32, shape=[1, 1, 1])
    return image, label
Example 15: read_decode_tfrecord_list
def read_decode_tfrecord_list(file_list, do_augment=False):
    '''Read TFRecord content.'''
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(file_list)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'shape': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.float32),
        })
    shape = tf.decode_raw(features['shape'], tf.uint8)
    # print('Shape (shape) is:', shape.shape)
    image = tf.decode_raw(features['image'], tf.uint8)
    # print('Shape (image) is:', image.shape)
    label = tf.cast(features['label'], tf.float32)
    # TODO: Infer this from the shape field of the TFRecord.
    image.set_shape([256 * 256 * 3])
    image = tf.reshape(image, [256, 256, 3])
    image, label = process_features(image, label, do_augment)
    return image, label
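The TODO above could be resolved by decoding the stored shape field and reshaping dynamically; a hedged sketch, assuming the writer serialized the shape as an int32 array (the decode dtype must match the writer, and a uint8 decode could not represent a value such as 256):

# Sketch only: using the serialized shape instead of a hard-coded size.
shape = tf.decode_raw(features['shape'], tf.int32)  # e.g. [256, 256, 3]; dtype assumed
image = tf.decode_raw(features['image'], tf.uint8)
image = tf.reshape(image, shape)  # note: the static shape is then unknown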