This article collects typical usage examples of tensorflow.uint8 in Python. If you are wondering what tensorflow.uint8 is for, or how to use it in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing tensorflow module.
Below are 15 code examples of tensorflow.uint8, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code examples.
Example 1: __init__
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
# (the snippet below also assumes: import numpy as np)
def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'):
    self.resolution = resolution
    self.resolution_log2 = int(np.log2(resolution))
    self.shape = [num_channels, resolution, resolution]
    self.dtype = dtype
    self.dynamic_range = dynamic_range
    self.label_size = label_size
    self.label_dtype = label_dtype
    self._tf_minibatch_var = None
    self._tf_lod_var = None
    self._tf_minibatch_np = None
    self._tf_labels_np = None
    assert self.resolution == 2 ** self.resolution_log2
    with tf.name_scope('Dataset'):
        self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
        self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')
Example 2: from_float32_to_uint8
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
def from_float32_to_uint8(
        tensor,
        tensor_key='tensor',
        min_key='min',
        max_key='max'):
    """Quantize a float32 tensor to uint8, keeping track of its original range.

    :param tensor: float32 tensor to encode.
    :param tensor_key: key under which the uint8 tensor is returned.
    :param min_key: key under which the original minimum is returned.
    :param max_key: key under which the original maximum is returned.
    :returns: dict mapping tensor_key to the uint8-encoded tensor and
        min_key/max_key to its original minimum and maximum.
    """
    tensor_min = tf.reduce_min(tensor)
    tensor_max = tf.reduce_max(tensor)
    return {
        tensor_key: tf.cast(
            (tensor - tensor_min) / (tensor_max - tensor_min + 1e-16)
            * 255.9999, dtype=tf.uint8),
        min_key: tensor_min,
        max_key: tensor_max
    }
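As a complement to Example 2, here is a minimal sketch of the reverse transformation, assuming the dict returned above is available; the function name from_uint8_to_float32_sketch is illustrative only, and the 255.9999 constant simply mirrors the encoder.

def from_uint8_to_float32_sketch(data, tensor_key='tensor', min_key='min', max_key='max'):
    # Cast back to float32 and undo the [0, 255] scaling; up to one
    # quantization step of error remains from the uint8 rounding.
    tensor = tf.cast(data[tensor_key], tf.float32)
    return tensor / 255.9999 * (data[max_key] - data[min_key]) + data[min_key]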
Example 3: read_from_tfrecord
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)
    tfrecord_features = tf.parse_single_example(tfrecord_serialized, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'shape': tf.FixedLenFeature([], tf.string),
        'image': tf.FixedLenFeature([], tf.string),
    }, name='features')
    image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
    shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)
    image = tf.reshape(image, shape)
    label = tfrecord_features['label']
    return label, shape, image
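A short driver sketch for the queue-based reader in Example 3, assuming TensorFlow 1.x and an existing TFRecord file whose features match the keys above (the file name data.tfrecord is a placeholder):

label, shape, image = read_from_tfrecord(['data.tfrecord'])
with tf.Session() as sess:
    # string_input_producer relies on queue runners, which must be started.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    label_val, image_val = sess.run([label, image])
    coord.request_stop()
    coord.join(threads)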
Example 4: _extract_images
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
# (the snippet below also assumes: import gzip, import numpy as np, and the
#  module constants _IMAGE_SIZE and _NUM_CHANNELS)
def _extract_images(filename, num_images):
    """Extract the images into a numpy array.

    Args:
      filename: The path to an MNIST images file.
      num_images: The number of images in the file.

    Returns:
      A numpy array of shape [number_of_images, height, width, channels].
    """
    print('Extracting images from: ', filename)
    with gzip.open(filename) as bytestream:
        bytestream.read(16)
        buf = bytestream.read(
            _IMAGE_SIZE * _IMAGE_SIZE * num_images * _NUM_CHANNELS)
        data = np.frombuffer(buf, dtype=np.uint8)
        data = data.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
    return data
Example 5: _extract_labels
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
# (the snippet below also assumes: import gzip and import numpy as np)
def _extract_labels(filename, num_labels):
    """Extract the labels into a vector of int64 label IDs.

    Args:
      filename: The path to an MNIST labels file.
      num_labels: The number of labels in the file.

    Returns:
      A numpy array of shape [number_of_labels].
    """
    print('Extracting labels from: ', filename)
    with gzip.open(filename) as bytestream:
        bytestream.read(8)
        buf = bytestream.read(1 * num_labels)
        labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
    return labels
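For context, a minimal sketch of how Examples 4 and 5 might be driven together, assuming the standard gzipped MNIST training files are present and the module constants follow the usual MNIST layout:

_IMAGE_SIZE = 28    # MNIST images are 28x28
_NUM_CHANNELS = 1   # grayscale

images = _extract_images('train-images-idx3-ubyte.gz', 60000)
labels = _extract_labels('train-labels-idx1-ubyte.gz', 60000)
assert images.shape == (60000, 28, 28, 1)
assert labels.shape == (60000,)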
Example 6: read_and_decode
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since the key is required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [227, 227, 6])
    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    return tf.split(image, 2, 2)  # split the 3rd (channel) dimension into two parts
Example 7: read_and_decode_aug
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
def read_and_decode_aug(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since the key is required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.image.random_flip_left_right(tf.reshape(image, [227, 227, 6]))
    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    image = tf.image.random_brightness(image, 0.01)
    image = tf.image.random_contrast(image, 0.95, 1.05)
    return tf.split(image, 2, 2)  # split the 3rd (channel) dimension into two parts
Example 8: image_summary
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
def image_summary(predictions, targets, hparams):
    """Reshapes predictions and passes them to TensorBoard.

    Args:
      predictions: The predicted image (logits).
      targets: The ground truth.
      hparams: model hparams.

    Returns:
      summary_proto: containing the summary images.
      weights: A Tensor of zeros of the same shape as predictions.
    """
    del hparams
    results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
    gold = tf.cast(targets, tf.uint8)
    summary1 = tf.summary.image("prediction", results, max_outputs=2)
    summary2 = tf.summary.image("data", gold, max_outputs=2)
    summary = tf.summary.merge([summary1, summary2])
    return summary, tf.zeros_like(predictions)
Example 9: summarize_video
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
# (the snippet below also assumes a shape_list helper from the surrounding module)
def summarize_video(video, prefix, max_outputs=1):
    """Summarize the video using image summaries starting with prefix."""
    video_shape = shape_list(video)
    if len(video_shape) != 5:
        raise ValueError("Assuming videos given as tensors in the format "
                         "[batch, time, height, width, channels] but got one "
                         "of shape: %s" % str(video_shape))
    if tf.contrib.eager.in_eager_mode():
        return
    if video.get_shape().as_list()[1] is None:
        tf.summary.image(
            "%s_last_frame" % prefix,
            tf.cast(video[:, -1, :, :, :], tf.uint8),
            max_outputs=max_outputs)
    else:
        for k in range(video_shape[1]):
            tf.summary.image(
                "%s_frame_%d" % (prefix, k),
                tf.cast(video[:, k, :, :, :], tf.uint8),
                max_outputs=max_outputs)
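A minimal usage sketch for Example 9, assuming TensorFlow 1.x graph mode and the shape_list helper from the surrounding module; the random tensor simply stands in for a real [batch, time, height, width, channels] video:

video = tf.random_uniform([2, 4, 64, 64, 3], maxval=256, dtype=tf.int32)
summarize_video(video, prefix="train", max_outputs=2)
summaries = tf.summary.merge_all()  # collect the per-frame image summaries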
Example 10: get_mse_per_img
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
def get_mse_per_img(inp, otp, cast_to_int):
    """Mean squared error per image for NCHW batches.

    :param inp: input images, NCHW
    :param otp: output images, NCHW
    :param cast_to_int: if True, both inp and otp are cast to int32 before the error
        is calculated, to reflect real-world errors (image pixels are always quantized).
        The error is always cast back to float32 before the per-image mean is computed.
    :return: float32 tensor of shape (N,)
    """
    with tf.name_scope('mse_{}'.format('int' if cast_to_int else 'float')):
        if cast_to_int:
            # Values are expected to be in 0...255, i.e., uint8, but tf.square
            # does not support uint8, so cast to int32 instead.
            inp, otp = tf.cast(inp, tf.int32), tf.cast(otp, tf.int32)
        squared_error = tf.square(otp - inp)
        squared_error_float = tf.to_float(squared_error)
        mse_per_image = tf.reduce_mean(squared_error_float, axis=[1, 2, 3])
        return mse_per_image
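Building on Example 10, a small follow-up sketch that turns the per-image MSE into a per-image PSNR, assuming 8-bit pixel values in [0, 255] (TensorFlow 1.x style, matching the tf.to_float call above); the function name is illustrative:

def get_psnr_per_img(inp, otp):
    # PSNR = 10 * log10(MAX^2 / MSE), with MAX = 255 for 8-bit images.
    mse_per_image = get_mse_per_img(inp, otp, cast_to_int=True)
    return 10.0 * tf.log((255.0 ** 2) / mse_per_image) / tf.log(10.0)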
Example 11: main
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
# (the snippet below also assumes tf.contrib slim and the module helpers
#  parse_args, _preprocess and _network_factory)
def main():
    args = parse_args()
    with tf.Session(graph=tf.Graph()) as session:
        input_var = tf.placeholder(
            tf.uint8, (None, 128, 64, 3), name="images")
        image_var = tf.map_fn(
            lambda x: _preprocess(x), tf.cast(input_var, tf.float32),
            back_prop=False)

        factory_fn = _network_factory()
        features, _ = factory_fn(image_var, reuse=None)
        features = tf.identity(features, name="features")

        saver = tf.train.Saver(slim.get_variables_to_restore())
        saver.restore(session, args.checkpoint_in)

        output_graph_def = tf.graph_util.convert_variables_to_constants(
            session, tf.get_default_graph().as_graph_def(),
            [features.name.split(":")[0]])
        with tf.gfile.GFile(args.graphdef_out, "wb") as file_handle:
            file_handle.write(output_graph_def.SerializeToString())
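Example 11 relies on module helpers that are not shown (parse_args, _preprocess, _network_factory). As an illustration only, a hypothetical _preprocess compatible with the tf.map_fn call above could look like the sketch below; the real project may normalize differently.

def _preprocess(image):
    # image arrives as float32 in [0, 255]; map it to roughly [-0.5, 0.5].
    return image / 255.0 - 0.5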
Example 12: __init__
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
# (the snippet below also assumes: import re, import numpy as np and import cv2)
def __init__(self, fin, scale=1.0, fmask=None):
    self.fin = fin
    # read in the distortion maps
    with open(fin, 'r') as f:
        header = f.readline().rstrip()
        chunks = re.sub(r'[^0-9,]', '', header).split(',')
        self.mapu = np.zeros((int(chunks[1]), int(chunks[0])),
                             dtype=np.float32)
        self.mapv = np.zeros((int(chunks[1]), int(chunks[0])),
                             dtype=np.float32)
        for line in f.readlines():
            chunks = line.rstrip().split(' ')
            self.mapu[int(chunks[0]), int(chunks[1])] = float(chunks[3])
            self.mapv[int(chunks[0]), int(chunks[1])] = float(chunks[2])
    # generate a mask
    self.mask = np.ones(self.mapu.shape, dtype=np.uint8)
    self.mask = cv2.remap(self.mask, self.mapu, self.mapv, cv2.INTER_LINEAR)
    kernel = np.ones((30, 30), np.uint8)
    self.mask = cv2.erode(self.mask, kernel, iterations=1)
    # crop black regions out
    h, w = self.mask.shape
    self.x_lim = [f(np.where(self.mask[int(h/2), :])[0])
                  for f in [np.min, np.max]]
    self.y_lim = [f(np.where(self.mask[:, int(w/2)])[0])
                  for f in [np.min, np.max]]
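The maps built in Example 12 are typically applied with cv2.remap, the same call used above to build the mask. Below is a hedged sketch of a companion method; the name undistort and the crop-to-valid-region step are illustrative, not part of the original snippet.

def undistort(self, img, crop=True):
    # Warp the image with the u/v lookup maps, then optionally crop to the
    # region that the eroded mask marked as valid.
    out = cv2.remap(img, self.mapu, self.mapv, cv2.INTER_LINEAR)
    if crop:
        out = out[self.y_lim[0]:self.y_lim[1], self.x_lim[0]:self.x_lim[1]]
    return out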
Example 13: call
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
# (the snippet below is a Keras layer method; it also assumes the module
#  constant DEFAULT_INPUT_VALUE_NAME)
def call(self, inputs):
    """Standard Keras call() method."""
    if inputs.dtype not in [tf.uint8, tf.int32, tf.int64]:
        inputs = tf.cast(inputs, dtype=tf.int32)

    if self.default_input_value is not None:
        default_input_value_tensor = tf.constant(
            int(self.default_input_value),
            dtype=inputs.dtype,
            name=DEFAULT_INPUT_VALUE_NAME)
        replacement = tf.zeros_like(inputs) + (self.num_buckets - 1)
        inputs = tf.where(
            tf.equal(inputs, default_input_value_tensor), replacement, inputs)

    # We can't use tf.gather_nd(self.kernel, inputs) as it doesn't support
    # constraints (constraint functions are not supported for IndexedSlices).
    # Instead we use matrix multiplication by one-hot encoding of the index.
    if self.units == 1:
        # This can be slightly faster as it uses matmul.
        return tf.matmul(
            tf.one_hot(tf.squeeze(inputs, axis=[-1]), depth=self.num_buckets),
            self.kernel)
    return tf.reduce_sum(
        tf.one_hot(inputs, axis=1, depth=self.num_buckets) * self.kernel,
        axis=1)
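To see why the one-hot trick in Example 13 works, here is a tiny standalone check with illustrative values: multiplying a one-hot encoding of the bucket indices by the kernel matrix selects the corresponding kernel rows, just as a gather would.

kernel = tf.constant([[0.1], [0.2], [0.3]])   # num_buckets=3, units=1
indices = tf.constant([2, 0])                  # one bucket index per example
lookup = tf.matmul(tf.one_hot(indices, depth=3), kernel)
# lookup evaluates to [[0.3], [0.1]], the rows tf.gather(kernel, indices) would return.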
Example 14: parse_fn
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
# (parse_fn is a method; it also uses self.feature_list, self.height and self.width)
def parse_fn(self, serialized_example):
    features = {
        'image/id_name': tf.FixedLenFeature([], tf.string),
        'image/height': tf.FixedLenFeature([], tf.int64),
        'image/width': tf.FixedLenFeature([], tf.int64),
        'image/encoded': tf.FixedLenFeature([], tf.string),
    }
    for name in self.feature_list:
        features[name] = tf.FixedLenFeature([], tf.int64)
    example = tf.parse_single_example(serialized_example, features=features)
    image = tf.decode_raw(example['image/encoded'], tf.uint8)
    raw_height = tf.cast(example['image/height'], tf.int32)
    raw_width = tf.cast(example['image/width'], tf.int32)
    image = tf.reshape(image, [raw_height, raw_width, 3])
    image = tf.image.resize_images(image, size=[self.height, self.width])
    feature_val_list = [tf.cast(example[name], tf.float32) for name in self.feature_list]
    return image, feature_val_list
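A minimal sketch of wiring Example 14 into a tf.data pipeline, assuming loader is an instance of the surrounding class and faces.tfrecord (a placeholder name) was written with the matching feature keys:

dataset = (tf.data.TFRecordDataset('faces.tfrecord')
           .map(loader.parse_fn)
           .batch(16))
images, feature_vals = dataset.make_one_shot_iterator().get_next()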
Example 15: draw_keypoints_on_image_array
# Required module: import tensorflow [as alias]
# or: from tensorflow import uint8 [as alias]
# (the snippet below also assumes: import numpy as np, from PIL import Image,
#  and a draw_keypoints_on_image helper from the surrounding module)
def draw_keypoints_on_image_array(image,
                                  keypoints,
                                  color='red',
                                  radius=2,
                                  use_normalized_coordinates=True):
    """Draws keypoints on an image (numpy array).

    Args:
      image: a numpy array with shape [height, width, 3].
      keypoints: a numpy array with shape [num_keypoints, 2].
      color: color to draw the keypoints with. Default is red.
      radius: keypoint radius. Default value is 2.
      use_normalized_coordinates: if True (default), treat keypoint values as
        relative to the image. Otherwise treat them as absolute.
    """
    image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
    draw_keypoints_on_image(image_pil, keypoints, color, radius,
                            use_normalized_coordinates)
    np.copyto(image, np.array(image_pil))
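Finally, a small usage sketch for Example 15, assuming the companion draw_keypoints_on_image helper is importable from the same visualization module; the blank image and keypoint values are arbitrary:

image = np.zeros((100, 100, 3), dtype=np.uint8)
keypoints = np.array([[0.25, 0.25], [0.75, 0.75]])  # two normalized keypoints
draw_keypoints_on_image_array(image, keypoints, color='yellow', radius=3)
# image has now been modified in place via np.copyto.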