

Python TensorflowUtils.unprocess_image Method Code Examples

This article collects typical usage examples of the Python method TensorflowUtils.unprocess_image. If you are unsure what TensorflowUtils.unprocess_image does or how to call it, the curated examples below should help. You can also explore further usage examples of the TensorflowUtils module it belongs to.


The following presents 4 code examples of the TensorflowUtils.unprocess_image method, ordered by popularity by default.

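Before the examples, here is a minimal sketch of the preprocessing convention these snippets rely on. It assumes, as is common for VGG-based models, that process_image subtracts the network's mean pixel and unprocess_image adds it back; the actual implementation in TensorflowUtils may differ, and the helper bodies and mean values below are only illustrative.

import numpy as np

def process_image(image, mean_pixel):
    # Center pixel values by subtracting the per-channel mean pixel of the training data.
    return image - mean_pixel

def unprocess_image(image, mean_pixel):
    # Undo the centering so the optimised tensor can be viewed or saved as an image again.
    return image + mean_pixel

# Illustrative round trip with made-up values; real code would still clip to [0, 255]
# before saving, as Example 3 (save_image) below does.
mean_pixel = np.array([123.68, 116.779, 103.939])  # typical VGG mean, shown only as an example
image = np.full((4, 4, 3), 128.0)
restored = unprocess_image(process_image(image, mean_pixel), mean_pixel)
assert np.allclose(restored, image)
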
Example 1: main

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import unprocess_image [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    invert_image = get_image(FLAGS.image_path)
    print(invert_image.shape)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))  # per-channel mean pixel of the VGG training data

    processed_image = utils.process_image(invert_image, mean_pixel).astype(np.float32)
    weights = np.squeeze(model_data['layers'])

    invert_net = vgg_net(weights, processed_image)

    # Trainable "image" variable initialised with noise; optimisation drives its
    # features at INVERT_LAYER towards those of the input image.
    dummy_image = utils.weight_variable(invert_image.shape, stddev=np.std(invert_image) * 0.1)
    tf.histogram_summary("Image Output", dummy_image)  # legacy TF 0.x summary API
    image_net = vgg_net(weights, dummy_image)

    with tf.Session() as sess:
        invert_layer_features = invert_net[INVERT_LAYER].eval()
        loss = 2 * tf.nn.l2_loss(image_net[INVERT_LAYER] - invert_layer_features) / invert_layer_features.size
        tf.scalar_summary("Loss", loss)

        summary_op = tf.merge_all_summaries()
        train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

        best_loss = float('inf')
        best = None
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir)
        sess.run(tf.initialize_all_variables())

        for i in range(1, MAX_ITERATIONS):
            train_op.run()

            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d    total loss: %g' % (i, this_loss))
                summary_writer.add_summary(summary_op.eval(), global_step=i)
                if this_loss < best_loss:
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("invert_check.png", output)

    output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
    scipy.misc.imsave("output.png", output)
Author: RosieCampbell, Project: TensorflowProjects, Lines of code: 49, Source file: ImageInversion.py

Example 2: test

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import unprocess_image [as alias]
def test(sess, output_image, mean_pixel):
    best = sess.run(output_image)
    output = utils.unprocess_image(best.reshape(best.shape[1:]), mean_pixel).astype(np.float32)
    scipy.misc.imsave("output.jpg", output)
Author: RosieCampbell, Project: TensorflowProjects, Lines of code: 6, Source file: GenerativeNeuralStyle.py

Example 3: save_image

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import unprocess_image [as alias]
def save_image(filename, image, mean_pixel):
    output = utils.unprocess_image(image, mean_pixel)
    output = np.uint8(np.clip(output, 0, 255))
    scipy.misc.imsave(filename, output)
    print "Image saved!"
Author: RosieCampbell, Project: TensorflowProjects, Lines of code: 7, Source file: DeepDream.py

Example 4: main

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import unprocess_image [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    content_image = get_image(FLAGS.content_path)
    print(content_image.shape)
    processed_content = utils.process_image(content_image, mean_pixel).astype(np.float32)
    style_image = get_image(FLAGS.style_path)
    processed_style = utils.process_image(style_image, mean_pixel).astype(np.float32)

    content_net = vgg_net(weights, processed_content)

    style_net = vgg_net(weights, processed_style)

    dummy_image = utils.weight_variable(content_image.shape, stddev=np.std(content_image) * 0.1)
    image_net = vgg_net(weights, dummy_image)

    with tf.Session() as sess:
        content_losses = []
        for layer in CONTENT_LAYERS:
            feature = content_net[layer].eval()
            content_losses.append(tf.nn.l2_loss(image_net[layer] - feature))
        content_loss = CONTENT_WEIGHT * reduce(tf.add, content_losses)  # on Python 3, use functools.reduce

        style_losses = []
        for layer in STYLE_LAYERS:
            # Gram matrix of the style image's features: correlations between feature channels.
            features = style_net[layer].eval()
            features = np.reshape(features, (-1, features.shape[3]))
            style_gram = np.matmul(features.T, features) / features.size

            image_layer = image_net[layer]
            _, height, width, number = map(lambda i: i.value, image_layer.get_shape())
            size = height * width * number
            feats = tf.reshape(image_layer, (-1, number))
            image_gram = tf.matmul(tf.transpose(feats), feats) / size
            style_losses.append(0.5*tf.nn.l2_loss(image_gram - style_gram))
        style_loss = STYLE_WEIGHT * reduce(tf.add, style_losses)

        # Total-variation loss encourages spatial smoothness in the generated image.
        tv_y_size = utils.get_tensor_size(dummy_image[:, 1:, :, :])
        tv_x_size = utils.get_tensor_size(dummy_image[:, :, 1:, :])
        tv_loss = VARIATION_WEIGHT * (
            (tf.nn.l2_loss(dummy_image[:, 1:, :, :] - dummy_image[:, :content_image.shape[1] - 1, :, :]) /
             tv_y_size) +
           (tf.nn.l2_loss(dummy_image[:, :, 1:, :] - dummy_image[:, :, :content_image.shape[2] - 1, :]) /
             tv_x_size))

        loss = content_loss + style_loss + tv_loss
        train_step = tf.train.MomentumOptimizer(LEARNING_RATE,MOMENTUM).minimize(loss)

        best_loss = float('inf')
        best = None
        sess.run(tf.initialize_all_variables())

        for i in range(1, MAX_ITERATIONS):
            train_step.run()

            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d    total loss: %g' % (i, this_loss))

                if this_loss < best_loss:
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("output_check.png", output)

            if i % 100 == 0 or i == MAX_ITERATIONS - 1:
                print('  content loss: %g' % content_loss.eval())
                print('    style loss: %g' % style_loss.eval())
                print('       tv loss: %g' % tv_loss.eval())

    output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
    scipy.misc.imsave("output.png", output)
Author: RosieCampbell, Project: TensorflowProjects, Lines of code: 81, Source file: NeuralStyle.py


Note: The TensorflowUtils.unprocess_image examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects, and copyright remains with their original authors; please follow the corresponding project's license when redistributing or using the code. Do not reproduce without permission.