

Python tensorflow.image_summary Method Code Examples

This article collects typical usage examples of the tensorflow.image_summary method in Python. If you are wondering what tensorflow.image_summary does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the tensorflow module it belongs to.


The following presents 15 code examples of the tensorflow.image_summary method, sorted by popularity by default.
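Before the examples, here is a minimal sketch of how the method is typically wired into a graph and written to an event file. It assumes a TensorFlow 0.x environment where tf.image_summary still exists (the op was replaced by tf.summary.image in TensorFlow 1.0, as several examples below note); the placeholder shape, the '/tmp/logs' directory, and the random batch are illustrative assumptions rather than part of any example below.

import numpy as np
import tensorflow as tf

# Placeholder for a batch of single-channel 28x28 images (shape is illustrative).
images = tf.placeholder(tf.float32, [None, 28, 28, 1], name='images')
# Record up to 4 images from each batch under the tag 'input_images'.
summary_op = tf.image_summary('input_images', images, max_images=4)

with tf.Session() as sess:
    # SummaryWriter is the TF 0.x API; later versions use tf.summary.FileWriter.
    writer = tf.train.SummaryWriter('/tmp/logs', sess.graph)
    batch = np.random.rand(8, 28, 28, 1).astype(np.float32)
    summary_str = sess.run(summary_op, feed_dict={images: batch})
    writer.add_summary(summary_str, global_step=0)
    writer.close()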

Example 1: visualization

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test]) 
Developer: hanzhanggit, Project: StackGAN, Lines: 26, Source: trainer.py

Example 2: generator

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def generator(z, latent_c):
    depths = [32, 64, 64, 64, 64, 64, 3]
    sizes = zip(
        np.linspace(4, IMAGE_SIZE['resized'][0], len(depths)).astype(np.int),
        np.linspace(6, IMAGE_SIZE['resized'][1], len(depths)).astype(np.int))
    with slim.arg_scope([slim.conv2d_transpose],
                        normalizer_fn=slim.batch_norm,
                        kernel_size=3):
        with tf.variable_scope("gen"):
            size = sizes.pop(0)
            net = tf.concat(1, [z, latent_c])
            net = slim.fully_connected(net, depths[0] * size[0] * size[1])
            net = tf.reshape(net, [-1, size[0], size[1], depths[0]])
            for depth in depths[1:-1] + [None]:
                net = tf.image.resize_images(
                    net, sizes.pop(0),
                    tf.image.ResizeMethod.NEAREST_NEIGHBOR)
                if depth:
                    net = slim.conv2d_transpose(net, depth)
            net = slim.conv2d_transpose(
                net, depths[-1], activation_fn=tf.nn.tanh, stride=1, normalizer_fn=None)
            tf.image_summary("gen", net, max_images=8)
    return net 
Developer: marcbelmont, Project: gan-image-similarity, Lines: 25, Source: main.py

Example 3: zap_data

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def zap_data(FLAGS, shuffle):
    files = glob(FLAGS.file_pattern)
    filename_queue = tf.train.string_input_producer(
        files,
        shuffle=shuffle,
        num_epochs=None if shuffle else 1)
    image = read_image(filename_queue, shuffle)

    # Mini batch
    num_preprocess_threads = 1 if FLAGS.debug else 4
    min_queue_examples = 100 if FLAGS.debug else 10000
    if shuffle:
        images = tf.train.shuffle_batch(
            image,
            batch_size=FLAGS.batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * FLAGS.batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images = tf.train.batch(
            image,
            FLAGS.batch_size,
            allow_smaller_final_batch=True)
    # tf.image_summary('images', images, max_images=8)
    return dict(batch=images, size=len(files)) 
Developer: marcbelmont, Project: gan-image-similarity, Lines: 27, Source: zap50k.py

Example 4: get_input

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def get_input(self):
        # Input data.
        # Load the training, validation and test data into constants that are
        # attached to the graph.
        self.mnist = input_data.read_data_sets('data',
                                    one_hot=True,
                                    fake_data=False)
        # Input placeholders
        with tf.name_scope('input'):
            self.x = tf.placeholder(tf.float32, [None, 784], name='x-input')
            self.y_true = tf.placeholder(tf.float32, [None, 10], name='y-input')
        self.keep_prob = tf.placeholder(tf.float32, name='drop_out')
        # below is just for the sake of visualization
        with tf.name_scope('input_reshape'):
            image_shaped_input = tf.reshape(self.x, [-1, 28, 28, 1])
            tf.image_summary('input', image_shaped_input, 10)
        
        return 
Developer: LevinJ, Project: Supply-demand-forecasting, Lines: 20, Source: tfbasemodel.py

Example 5: preprocess_for_eval

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def preprocess_for_eval(image, output_height, output_width):
  """Preprocesses the given image for evaluation.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.

  Returns:
    A preprocessed image.
  """
  tf.image_summary('image', tf.expand_dims(image, 0))
  # Transform the image to floats.
  image = tf.to_float(image)

  # Resize and crop if needed.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                         output_width,
                                                         output_height)
  tf.image_summary('resized_image', tf.expand_dims(resized_image, 0))

  # Subtract off the mean and divide by the variance of the pixels.
  return tf.image.per_image_whitening(resized_image) 
Developer: coderSkyChen, Project: Action_Recognition_Zoo, Lines: 25, Source: cifarnet_preprocessing.py

Example 6: inputs

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def inputs():
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)

    # Create a queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)

    # Read examples from files in the filename queue.
    read_input = read_cifar10(filename_queue)
    num_preprocess_threads = 16
    min_queue_examples = int(0.4 * NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN)
    input_images, ref_images = tf.train.shuffle_batch([read_input.noise_image, read_input.uint8image],
                                                      batch_size=FLAGS.batch_size, num_threads=num_preprocess_threads,
                                                      capacity=min_queue_examples + 3 * FLAGS.batch_size,
                                                      min_after_dequeue=min_queue_examples)
    tf.image_summary("Input_Noise_images", input_images)
    tf.image_summary("Ref_images", ref_images)
    return input_images, ref_images 
Developer: shekkizh, Project: TensorflowProjects, Lines: 23, Source: Deblurring.py

Example 7: visualize_one_superimage

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def visualize_one_superimage(self, img_var, images, rows, filename):
        stacked_img = []
        for row in range(rows):
            img = images[row * rows, :, :, :]
            row_img = [img]  # real image
            for col in range(rows):
                row_img.append(img_var[row * rows + col, :, :, :])
            # each row is 1 real image followed by `rows` fake images
            stacked_img.append(tf.concat(1, row_img))
        imgs = tf.expand_dims(tf.concat(0, stacked_img), 0)
        current_img_summary = tf.image_summary(filename, imgs)
        return current_img_summary, imgs 
Developer: hanzhanggit, Project: StackGAN, Lines: 14, Source: trainer.py

Example 8: visualization

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test]) 
Developer: hanzhanggit, Project: StackGAN, Lines: 13, Source: trainer.py

Example 9: epoch_sum_images

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def epoch_sum_images(self, sess, n):
        images_train, _, embeddings_train, captions_train, _ =\
            self.dataset.train.next_batch(n * n, cfg.TRAIN.NUM_EMBEDDING)
        images_train = self.preprocess(images_train, n)
        embeddings_train = self.preprocess(embeddings_train, n)

        images_test, _, embeddings_test, captions_test, _ = \
            self.dataset.test.next_batch(n * n, 1)
        images_test = self.preprocess(images_test, n)
        embeddings_test = self.preprocess(embeddings_test, n)

        images = np.concatenate([images_train, images_test], axis=0)
        embeddings =\
            np.concatenate([embeddings_train, embeddings_test], axis=0)

        if self.batch_size > 2 * n * n:
            images_pad, _, embeddings_pad, _, _ =\
                self.dataset.test.next_batch(self.batch_size - 2 * n * n, 1)
            images = np.concatenate([images, images_pad], axis=0)
            embeddings = np.concatenate([embeddings, embeddings_pad], axis=0)
        feed_dict = {self.images: images,
                     self.embeddings: embeddings}
        gen_samples, img_summary =\
            sess.run([self.superimages, self.image_summary], feed_dict)

        # save images generated for train and test captions
        scipy.misc.imsave('%s/train.jpg' % (self.log_dir), gen_samples[0])
        scipy.misc.imsave('%s/test.jpg' % (self.log_dir), gen_samples[1])

        # pfi_train = open(self.log_dir + "/train.txt", "w")
        pfi_test = open(self.log_dir + "/test.txt", "w")
        for row in range(n):
            # pfi_train.write('\n***row %d***\n' % row)
            # pfi_train.write(captions_train[row * n])

            pfi_test.write('\n***row %d***\n' % row)
            pfi_test.write(captions_test[row * n])
        # pfi_train.close()
        pfi_test.close()

        return img_summary 
Developer: hanzhanggit, Project: StackGAN, Lines: 43, Source: trainer.py

Example 10: _generate_image_and_label_batch

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type.float32.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  # tf.image_summary('images', images)
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size]) 
Developer: hohoins, Project: ml, Lines: 32, Source: cifar10_input.py

Example 11: _generate_image_and_label_batch

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def _generate_image_and_label_batch(image, label, key, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 1] of type.float32.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 1] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch, key_batch = tf.train.shuffle_batch(
      [image, label, key],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size]), tf.reshape(key_batch, [batch_size]) 
Developer: twerkmeister, Project: iLID, Lines: 31, Source: image_input.py

Example 12: set_activation_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def set_activation_summary(self):
        '''Log each layers activations and sparsity.'''
        tf.image_summary("input images", self.input_layer.output, max_images=100)

        for var in tf.trainable_variables():
            tf.histogram_summary(var.op.name, var)

        for layer in self.hidden_layers:
            tf.histogram_summary(layer.name + '/activations', layer.output)
            tf.scalar_summary(layer.name + '/sparsity', tf.nn.zero_fraction(layer.output)) 
Developer: twerkmeister, Project: iLID, Lines: 12, Source: network.py

Example 13: testImageSummary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def testImageSummary(self):
    image = np.zeros((2, 2, 2, 3), dtype=np.uint8)
    self.check(tf.image_summary, (['img'], image), 'Tags must be a scalar') 
Developer: tobegit3hub, Project: deep_image_model, Lines: 5, Source: scalar_strict_test.py

Example 14: testTFSummaryImage

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def testTFSummaryImage(self):
    """Verify processing of tf.summary.image."""
    event_sink = _EventGenerator(zero_out_timestamps=True)
    writer = SummaryToEventTransformer(event_sink)
    with self.test_session() as sess:
      ipt = tf.ones([10, 4, 4, 3], tf.uint8)
      # This is an interesting example, because the old tf.image_summary op
      # would throw an error here, because it would be tag reuse.
      # Using the tf node name instead allows argument re-use to the image
      # summary.
      with tf.name_scope('1'):
        tf.summary.image('images', ipt, max_outputs=1)
      with tf.name_scope('2'):
        tf.summary.image('images', ipt, max_outputs=2)
      with tf.name_scope('3'):
        tf.summary.image('images', ipt, max_outputs=3)
      merged = tf.merge_all_summaries()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/images/image', u'2/images/image/0', u'2/images/image/1',
        u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.IMAGES: tags,
        ea.AUDIO: [],
        ea.SCALARS: [],
        ea.HISTOGRAMS: [],
        ea.COMPRESSED_HISTOGRAMS: [],
        ea.GRAPH: True,
        ea.META_GRAPH: False,
        ea.RUN_METADATA: []
    }) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 42, Source: event_accumulator_test.py

Example 15: _generate_image_and_label_batch

# Required module: import tensorflow [as alias]
# Or: from tensorflow import image_summary [as alias]
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type.float32.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size]) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 39, Source: cifar10_input.py


Note: The tensorflow.image_summary examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and the copyright of the source code belongs to those authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.