This article collects typical usage examples of the Python function tensorflow.image_summary. If you have been wondering what image_summary does, how to call it, or what real uses of it look like, the curated code samples below should help.
Fifteen code examples of the image_summary function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
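Note before the examples: tf.image_summary is the pre-1.0 TensorFlow API used throughout this page; TensorFlow 1.0 renamed it to tf.summary.image and its max_images argument to max_outputs. A minimal sketch of the two spellings:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
# Old API (TF < 1.0), as used in the examples below:
# tf.image_summary('images', images, max_images=3)
# Modern equivalent (TF 1.x):
tf.summary.image('images', images, max_outputs=3)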
Example 1: nerve_inputs
def nerve_inputs(batch_size):
    """ Construct nerve input net.
    Args:
      batch_size: Number of images per batch.
    Returns:
      images: Images. 4D tensor of shape [batch_size, height, width, channels].
      masks: Masks. 4D tensor of shape [batch_size, height, width, channels].
    """
    shape = (420, 580)
    tfrecord_filename = glb('../data/tfrecords/*')
    print(tfrecord_filename)
    filename_queue = tf.train.string_input_producer(tfrecord_filename)
    image, mask = read_data(filename_queue, shape)
    images, masks = _generate_image_label_batch(image, mask, batch_size)
    # Display in the TensorBoard summary page.
    tf.image_summary('images', images)
    tf.image_summary('mask', masks)
    return images, masks
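A hypothetical driver for nerve_inputs, assuming the TF 0.x queue-runner setup these examples target (read_data and _generate_image_label_batch come from the same module; the log directory is a stand-in):

images_op, masks_op = nerve_inputs(batch_size=8)
summary_op = tf.merge_all_summaries()      # TF 0.x name for tf.summary.merge_all
writer = tf.train.SummaryWriter('./log')   # TF 0.x name for tf.summary.FileWriter
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    tf.train.start_queue_runners(sess=sess)
    writer.add_summary(sess.run(summary_op), 0)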
Example 2: _conv
def _conv(inpOp, kH, kW, nOut, dH=1, dW=1, relu=True):
    global conv_counter
    global parameters
    name = 'conv' + str(conv_counter)
    conv_counter += 1
    with tf.name_scope(name) as scope:
        nIn = int(inpOp.get_shape()[-1])
        stddev = 5e-3
        kernel = tf.Variable(tf.truncated_normal([kH, kW, nIn, nOut],
                                                 dtype=tf.float32,
                                                 stddev=(kH * kW * nIn) ** 0.5 * stddev),
                             name='weights')
        # Use dH/dW as the spatial strides (the original hard-coded
        # [1, 1, 1, 1], leaving the dH and dW arguments unused).
        conv = tf.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding="SAME")
        biases = tf.Variable(tf.constant(0.0, shape=[nOut], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        if relu:
            bias = tf.nn.relu(bias, name=scope)
        #parameters += [kernel, biases]
        #bias = tf.Print(bias, [tf.sqrt(tf.reduce_mean(tf.square(inpOp - tf.reduce_mean(inpOp))))], message=kernel.name)
        tf.histogram_summary(scope + "/output", bias)
        tf.image_summary(scope + "/output", bias[:, :, :, 0:3])
        tf.image_summary(scope + "/kernel_weight", tf.expand_dims(kernel[:, :, 0:3, 0], 0))
        # tf.image_summary(scope+"/point_weight", pointwise_filter)
        return bias
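The 0:3 slices above exist because tf.image_summary only accepts tensors whose last dimension is 1 (grayscale), 3 (RGB), or 4 (RGBA). A hypothetical helper, not from the original repo, that instead emits every output channel as its own grayscale summary:

def summarize_all_channels(tag, tensor):
    # tensor: [batch, height, width, channels] activation map
    n_channels = int(tensor.get_shape()[-1])
    for c in range(n_channels):
        # keep a trailing channel dimension of 1 so each slice is grayscale
        tf.image_summary('%s/ch%d' % (tag, c), tensor[:, :, :, c:c + 1])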
Example 3: read_image_data
def read_image_data():
    dirname, filename = os.path.split(os.path.abspath(__file__))
    # Create a list of filenames (path must be assigned before globbing;
    # the original globbed first and assigned path afterwards)
    #path = '/home/david/datasets/fs_ready/Aaron_Eckhart/'
    path = '/home/david/datasets/fs_ready/Zooey_Deschanel/'
    jpeg_files = glob.glob(os.path.join(path, '*.jpg'))
    # Create a queue that produces the filenames to read
    filename_queue = tf.train.string_input_producer(jpeg_files)
    # Create a reader for the file queue
    reader = tf.WholeFileReader()
    # Read in the files
    key, value = reader.read(filename_queue)
    # Convert the string Tensor to a uint8 Tensor of shape
    # [height, width, channels] representing the image
    images = tf.image.decode_jpeg(value, channels=3)
    # Convert images to floats and attach an image summary
    float_images = tf.expand_dims(tf.cast(images, tf.float32), 0)
    tf.image_summary('images', float_images)
    # Create session
    sess = tf.Session()
    summary_op = tf.merge_all_summaries()
    # initialize_all_variables only builds the op; it has to be run
    sess.run(tf.initialize_all_variables())
    # Write summaries
    summary_writer = tf.train.SummaryWriter(dirname + '/log/', graph_def=sess.graph_def)
    tf.train.start_queue_runners(sess=sess)
    for i in xrange(10):
        summary_str, float_image = sess.run([summary_op, float_images])
        print(float_image.shape)
        summary_writer.add_summary(summary_str, i)  # pass the step so summaries don't collide
    # Close session
    sess.close()
Example 4: conv2d
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
           summary_tag=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = intprod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" / pooling size
        fan_out = intprod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer,
                            collections=collections)
        if summary_tag is not None:
            tf.image_summary(summary_tag,
                             tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
                                          [2, 0, 1, 3]),
                             max_images=10)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
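Tracing the summary reshape on a concrete, hypothetical shape makes the transpose trick clear: a 3x3 filter bank over 4 input maps with 8 output maps is split into one grayscale image per input/output slice.

import tensorflow as tf

w = tf.zeros([3, 3, 4, 8])  # stand-in for the learned filter bank
slices = tf.transpose(tf.reshape(w, [3, 3, -1, 1]), [2, 0, 1, 3])
print(slices.get_shape())   # (32, 3, 3, 1): 4*8 grayscale kernel images
# max_images=10 then keeps only the first 10 of those 32 slices.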
Example 5: inputs
def inputs(files, distort=False):
    fqueue = tf.train.string_input_producer(files)
    reader = tf.TFRecordReader()
    key, value = reader.read(fqueue)
    features = tf.parse_single_example(value, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image_raw': tf.FixedLenFeature([], tf.string),
    })
    image = tf.image.decode_jpeg(features['image_raw'], channels=3)
    image = tf.cast(image, tf.float32)
    if distort:
        cropsize = random.randint(INPUT_SIZE, IMAGE_SIZE)
        image = tf.image.random_crop(image, [cropsize, cropsize])
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_brightness(image, max_delta=0.63)
        image = tf.image.random_contrast(image, lower=0.8, upper=1.2)
        image = tf.image.random_hue(image, max_delta=0.02)
        image = tf.image.random_saturation(image, lower=0.8, upper=1.2)
    else:
        image = tf.image.random_crop(image, [IMAGE_SIZE, IMAGE_SIZE])
        image = tf.image.resize_image_with_crop_or_pad(image, INPUT_SIZE, INPUT_SIZE)
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(FLAGS.num_examples_per_epoch_for_train * min_fraction_of_examples_in_queue)
    images, labels = tf.train.shuffle_batch(
        [tf.image.per_image_whitening(image), tf.cast(features['label'], tf.int32)],
        batch_size=BATCH_SIZE,
        capacity=min_queue_examples + 3 * BATCH_SIZE,
        min_after_dequeue=min_queue_examples
    )
    images = tf.image.resize_images(images, INPUT_SIZE, INPUT_SIZE)
    tf.image_summary('images', images)
    return images, labels
Example 6: inputs
def inputs(eval_data, data_dir, batch_size):
    filename = os.path.join(data_dir, TEST_FILE)
    filename_queue = tf.train.string_input_producer([filename])
    image, label = read_and_decode(filename_queue)
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    print("THIS", image.get_shape())  # call get_shape() instead of printing the bound method
    resized_image = tf.image.resize_images(image, height, width)
    print(resized_image.get_shape())
    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(resized_image)
    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_EVAL *
                             min_fraction_of_examples_in_queue)
    # Batch the whitened image (the original batched the raw `image`,
    # leaving float_image unused).
    images, label_batch = tf.train.batch(
        [float_image, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=min_queue_examples + 3 * batch_size)
    tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [batch_size])
Example 7: distorted_inputs
def distorted_inputs(tfrecord_file_paths=[]):
    fqueue = tf.train.string_input_producer(tfrecord_file_paths)
    reader = tf.TFRecordReader()
    key, serialized_example = reader.read(fqueue)
    features = tf.parse_single_example(serialized_example, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image': tf.FixedLenFeature([], tf.string)
    })
    image = tf.image.decode_jpeg(features['image'], channels=size['depth'])
    image = tf.cast(image, tf.float32)
    image.set_shape([size['width'], size['height'], size['depth']])
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL * min_fraction_of_examples_in_queue)
    images, labels = tf.train.shuffle_batch(
        [tf.image.per_image_whitening(image), tf.cast(features['label'], tf.int32)],
        batch_size=BATCH_SIZE,
        capacity=min_queue_examples + 3 * BATCH_SIZE,
        min_after_dequeue=min_queue_examples
    )
    images = tf.image.resize_images(images, size['input_width'], size['input_height'])
    tf.image_summary('images', images)
    return images, labels
Example 8: _multichannel_image_summary
def _multichannel_image_summary(name, images, perm=[0, 3, 1, 2], max_summary_images=16):
    _min = tf.reduce_min(images)
    _max = tf.reduce_max(images)
    # Rescale to [0, 255]; the original added _min instead of subtracting it.
    _ = tf.mul(tf.div(tf.sub(images, _min), tf.sub(_max, _min)), 255.0)
    _ = tf.transpose(_, perm=perm)  # e.g. NHWC -> NCHW
    shape = _.get_shape().as_list()
    # Flatten batch and channel axes into one: [N * C, H, W, 1] (the original
    # applied the transpose a second time here, scrambling the axes).
    tf.image_summary(name,
                     tf.reshape(_, [reduce(lambda x, y: x * y, shape) / (shape[3] * shape[2]),
                                    shape[2], shape[3], 1]),
                     max_images=max_summary_images)
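Hypothetical usage of the helper above: dumping every channel of a convolutional activation as its own grayscale image (the default perm [0, 3, 1, 2] moves channels ahead of the spatial axes):

conv_out = tf.zeros([4, 24, 24, 16])  # stand-in NHWC activation map
_multichannel_image_summary('conv1/activations', conv_out)
# emits an image summary shaped [4 * 16, 24, 24, 1], capped at 16 images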
Example 9: _generate_image_and_label_batch
def _generate_image_and_label_batch(image, label, min_queue_examples):
    """Construct a queued batch of images and labels.
    Args:
      image: 3-D Tensor of [IMAGE_SIZE, IMAGE_SIZE, 3] of type float32.
      label: 1-D Tensor of type int32.
      min_queue_examples: int32, minimum number of samples to retain
        in the queue that provides batches of examples.
    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.
    """
    # Create a queue that shuffles the examples, and then
    # read 'FLAGS.batch_size' images + labels from the example queue.
    num_preprocess_threads = 16
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=FLAGS.batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * FLAGS.batch_size,
        min_after_dequeue=min_queue_examples)
    # Display the training images in the visualizer.
    tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [FLAGS.batch_size])
Example 10: _generate_image_and_label_batch
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
    """Construct a queued batch of images and labels.
    Args:
      image: 3-D Tensor of [height, width, 3] of type float32.
      label: 1-D Tensor of type int32.
      min_queue_examples: int32, minimum number of samples to retain
        in the queue that provides batches of examples.
      batch_size: Number of images per batch.
      shuffle: boolean indicating whether to use a shuffling queue.
    Returns:
      images: Images. 4D tensor of [batch_size, height, width, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 16
    if shuffle:
        images, label_batch = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images, label_batch = tf.train.batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size)
    # Display the training images in the visualizer.
    tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [batch_size])
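A worked sizing of the queues above, on hypothetical numbers, to make the capacity formula concrete:

min_fraction_of_examples_in_queue = 0.4
num_examples_per_epoch = 50000        # e.g. the CIFAR-10 training set
min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)
batch_size = 128
capacity = min_queue_examples + 3 * batch_size
print(min_queue_examples, capacity)   # 20000 20384
# shuffle_batch never mixes over fewer than 20000 buffered examples.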
Example 11: model
def model(self):
    """
    Define the model
    """
    # Reshape the input for batchSize, dims_in[0] X dims_in[1] image, dims_in[2] channels
    x_image = tf.reshape(self.input, [-1, self.dims_in[0], self.dims_in[1], self.dims_in[2]],
                         name='x_input_reshaped')
    # Apply image resize
    x_image_upscale = tf.image.resize_bilinear(x_image, np.array([self.dims_out[0],
                                                                  self.dims_out[1]]),
                                               align_corners=None, name='x_input_upscale')
    self.x_input_upscale = x_image_upscale
    # Dump input image out
    tf.image_summary('x_upscale', x_image_upscale)
    # Model convolutions
    conv_1 = ops.conv2d(x_image_upscale, output_dim=8, k_h=5, k_w=5, d_h=1, d_w=1, name="conv_1")
    relu_1 = tf.nn.relu(conv_1)
    conv_2 = ops.conv2d(relu_1, output_dim=4, k_h=3, k_w=3, d_h=1, d_w=1, name="conv_2")
    relu_2 = tf.nn.relu(conv_2)
    conv_3 = ops.conv2d(relu_2, output_dim=1, k_h=1, k_w=1, d_h=1, d_w=1, name="conv_3")
    relu_3 = tf.nn.relu(conv_3)
    conv_4 = ops.conv2d(relu_3, output_dim=1, k_h=3, k_w=3, d_h=1, d_w=1, name="conv_4")
    predict = tf.reshape(conv_4, [-1, self.dims_out[0], self.dims_out[1], self.dims_out[2]], name='predict')
    # Dump prediction out
    tf.image_summary('predict', predict)
    return predict
Example 12: _generate_image_and_label_batch
def _generate_image_and_label_batch(image, label, filename, min_queue_examples,
                                    batch_size, shuffle):
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 16
    capacity = min_queue_examples + 3 * batch_size
    if shuffle:
        images, label_batch, filename = tf.train.shuffle_batch(
            [image, label, filename],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=capacity,
            min_after_dequeue=min_queue_examples)
    else:
        images, label_batch, filename = tf.train.batch(
            [image, label, filename],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=capacity)
    # Display the training images in the visualizer.
    tf.image_summary('image', images, max_images=100)
    labels = tf.reshape(label_batch, [batch_size, NUM_CLASS])
    return images, labels, filename
Example 13: _generate_image_and_label_batch
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
    """Generate a batch of images and labels.
    Args:
      image: the training image.
      label: the label corresponding to the image.
      min_queue_examples: the minimum number of examples to retain in the
        example queue.
      batch_size: the size of a batch.
      shuffle: whether or not to shuffle the examples.
    Returns:
      A batch of examples, including images and the corresponding labels.
    """
    num_preprocess_threads = 16
    if shuffle:
        images, label_batch = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images, label_batch = tf.train.batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size)
    # Display the training images in the visualizer.
    tf.image_summary('images', images)
    return images, label_batch
Example 14: preprocess
def preprocess(self):
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 784], name='x-input')
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        tf.image_summary('input', image_shaped_input, max_images=100)
        y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
    return (x, y_)
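A hypothetical driver for preprocess(): feed MNIST-shaped stand-in data through the x placeholder and write the resulting image summary (TF 0.x summary names, matching the rest of the examples; net is an assumed instance of the surrounding class):

import numpy as np

x, y_ = net.preprocess()
summary_op = tf.merge_all_summaries()
writer = tf.train.SummaryWriter('./log')
with tf.Session() as sess:
    batch = np.random.rand(100, 784).astype(np.float32)  # stand-in images
    writer.add_summary(sess.run(summary_op, feed_dict={x: batch}), 0)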
Example 15: _deconv
def _deconv(inpOp, kH, kW, nOut, dH=1, dW=1, relu=True, name=None):
    global deconv_counter
    global parameters
    if not name:
        name = 'deconv' + str(deconv_counter)
    deconv_counter += 1
    with tf.variable_scope(name) as scope:
        nIn = int(inpOp.get_shape()[-1])
        in_shape = inpOp.get_shape()
        stddev = 1e-3
        kernel = tf.get_variable('weights', [kH, kW, nOut, nIn],
                                 initializer=tf.random_normal_initializer(stddev=(kH * kW * nIn) ** 0.5 * stddev))
        conv = tf.nn.deconv2d(inpOp, kernel,
                              [int(in_shape[0]), int(in_shape[1]), int(in_shape[2]), nOut],
                              [1, 1, 1, 1], padding="SAME")
        biases = tf.get_variable('biases', [nOut], initializer=tf.constant_initializer(value=0.0))
        bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        if relu:
            bias = tf.nn.relu(bias, name='relu')
        #parameters += [kernel, biases]
        #bias = tf.Print(bias, [tf.sqrt(tf.reduce_mean(tf.square(inpOp - tf.reduce_mean(inpOp))))], message=kernel.name)
        tf.histogram_summary(bias.name + "/output", bias)
        tf.image_summary(bias.name + "/output", bias[:, :, :, 0:3])
        #tf.image_summary(scope+"/depth_weight", depthwise_filter)
        # tf.image_summary(scope+"/point_weight", pointwise_filter)
        return bias
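tf.nn.deconv2d is the very early TensorFlow spelling; later releases renamed it to tf.nn.conv2d_transpose with the same (value, filter, output_shape, strides) argument order. A sketch of the rename, assuming the same arguments as the call above:

conv = tf.nn.conv2d_transpose(inpOp, kernel,
                              [int(in_shape[0]), int(in_shape[1]),
                               int(in_shape[2]), nOut],
                              [1, 1, 1, 1], padding="SAME")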