

Python TensorflowUtils.process_image Method Code Examples

This article collects typical usage examples of the Python method TensorflowUtils.process_image. If you are wondering what TensorflowUtils.process_image does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore the other utilities in the TensorflowUtils module for further usage examples.


The following presents 7 code examples of the TensorflowUtils.process_image method, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system surface better Python code samples. Note that all of these examples target Python 2 and the pre-1.0 TensorFlow API (print statements, xrange, tf.initialize_all_variables, queue-based input pipelines) and are preserved as originally written.
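Although the TensorflowUtils module itself is not reproduced on this page, the way the examples call it makes its contract clear: process_image subtracts a mean pixel from an image, and unprocess_image adds it back before saving. A minimal sketch consistent with that usage (an inferred reconstruction, not the project's actual source):

def process_image(image, mean_pixel):
    # Center the image on the VGG training-set mean pixel (inferred from the calls below)
    return image - mean_pixel

def unprocess_image(image, mean_pixel):
    # Invert process_image before writing results to disk
    return image + mean_pixel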

Example 1: read_input

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import process_image [as alias]
def read_input(model_params):
    if FLAGS.mode == "test":
        content_image = get_image(FLAGS.test_image_path)
        print content_image.shape
        processed_content = utils.process_image(content_image, model_params["mean_pixel"]).astype(np.float32) / 255.0
        return processed_content, None

    else:
        data_directory = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
        filenames = [os.path.join(data_directory, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
        for f in filenames:
            if not tf.gfile.Exists(f):
                raise ValueError('Failed to find file: ' + f)

        filename_queue = tf.train.string_input_producer(filenames)
        print "Reading cifar10 data"
        read_input = read_cifar10(model_params, filename_queue)
        num_preprocess_threads = 8
        min_queue_examples = int(0.4 * NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN)
        print "Shuffling train batch"
        input_images, input_content_features = tf.train.shuffle_batch([read_input.image, read_input.content_features],
                                                                      batch_size=FLAGS.batch_size,
                                                                      num_threads=num_preprocess_threads,
                                                                      capacity=min_queue_examples + 3 * FLAGS.batch_size,
                                                                      min_after_dequeue=min_queue_examples)
        return input_images, input_content_features
Developer: RosieCampbell | Project: TensorflowProjects | Lines: 28 | Source: GenerativeNeuralStyle.py

Example 2: read_cifar10

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import process_image [as alias]
def read_cifar10(model_params, filename_queue):
    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()

    label_bytes = 1  # 2 for CIFAR-100
    result.height = IMAGE_SIZE
    result.width = IMAGE_SIZE
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes

    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)

    record_bytes = tf.decode_raw(value, tf.uint8)

    depth_major = tf.cast(tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),
                                     [result.depth, result.height, result.width]), tf.float32)

    result.image = utils.process_image(tf.transpose(depth_major, [1, 2, 0]), model_params['mean_pixel']) / 255.0
    extended_image = 255 * tf.reshape(result.image, (1, result.height, result.width, result.depth))

    result.net = vgg_net(model_params["weights"], extended_image)
    content_feature = result.net[CONTENT_LAYER]
    result.content_features = content_feature
    return result
Developer: RosieCampbell | Project: TensorflowProjects | Lines: 30 | Source: GenerativeNeuralStyle.py
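A quick sanity check on the record layout: assuming IMAGE_SIZE is 32 (the native CIFAR-10 resolution), each fixed-length record is 1 + 32 * 32 * 3 = 3073 bytes — one label byte followed by the red, green, and blue planes stored depth-major, which is why the decoded bytes are reshaped to (depth, height, width) and then transposed to (height, width, depth).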

Example 3: deepdream_image

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import process_image [as alias]
def deepdream_image(model_params, image, octave_scale=1.4, no_of_octave=4):
    filename = "%s_deepdream_%s.jpg" % (os.path.splitext((FLAGS.image_path.split("/")[-1]))[0], DREAM_LAYER)

    processed_image = utils.process_image(image, model_params["mean_pixel"]).astype(np.float32)
    input_image = tf.placeholder(tf.float32)
    dream_net = vgg_net(model_params["weights"], input_image)

    def calc_grad_tiled(img, gradient, tile_size=512):
        sz = tile_size
        h, w = img.shape[1:3]
        sx, sy = np.random.randint(sz, size=2)
        img_shift = np.roll(np.roll(img, sx, 2), sy, 1)
        gradient_val = np.zeros_like(img)
        for y in xrange(0, max(h - sz // 2, sz), sz):
            for x in xrange(0, max(w - sz // 2, sz), sz):
                sub_img = img_shift[:, y:y + sz, x:x + sz]
                # print sub_img.shape
                g = sess.run(gradient, {input_image: sub_img})
                gradient_val[:, y:y + sz, x:x + sz] = g

        return np.roll(np.roll(gradient_val, -sx, 2), -sy, 1)

    step = LEARNING_RATE
    feature = DREAM_FEATURE
    with tf.Session() as sess:
        dream_layer_features = dream_net[DREAM_LAYER][:, :, :, feature]
        feature_score = tf.reduce_mean(dream_layer_features)
        grad_op = tf.gradients(feature_score, input_image)[0]

        dummy_image = processed_image.copy() + 100.0
        for itr in xrange(5):
            octaves = []
            for i in xrange(no_of_octave - 1):
                hw = dummy_image.shape[1:3]
                lo = resize_image(dummy_image, np.int32(np.float32(hw) / octave_scale))
                hi = dummy_image - resize_image(dummy_image, hw)
                dummy_image = lo
                octaves.append(hi)

            for octave in xrange(no_of_octave):
                if octave > 0:
                    hi = octaves[-octave]
                    dummy_image = resize_image(dummy_image, hi.shape[1:3]) + hi
                for i in xrange(MAX_ITERATIONS):
                    grad = calc_grad_tiled(dummy_image, grad_op)
                    dummy_image += grad * (step / (np.abs(grad).mean() + 1e-8))
                    print '.',
                print "."

            # step /= 2.0  # halving step size every itr
            feature += 15
            temp_file = "%d_%s" % (itr, filename)
            # print dummy_image.shape
            output = dummy_image.reshape(processed_image.shape[1:]) - 100.0
            save_image(os.path.join(FLAGS.logs_dir, "checkpoints", temp_file), output, model_params["mean_pixel"])
Developer: RosieCampbell | Project: TensorflowProjects | Lines: 57 | Source: DeepDream.py
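A design note on the calc_grad_tiled helper above: evaluating the gradient over 512-pixel tiles keeps memory bounded for large images, and the random np.roll shift applied before tiling (and undone afterwards) moves the tile boundaries on every call, so seams do not accumulate at fixed positions across iterations.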

Example 4: main

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import process_image [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    invert_image = get_image(FLAGS.image_path)
    print invert_image.shape

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    processed_image = utils.process_image(invert_image, mean_pixel).astype(np.float32)
    weights = np.squeeze(model_data['layers'])

    invert_net = vgg_net(weights, processed_image)

    dummy_image = utils.weight_variable(invert_image.shape, stddev=np.std(invert_image) * 0.1)
    tf.histogram_summary("Image Output", dummy_image)
    image_net = vgg_net(weights, dummy_image)

    with tf.Session() as sess:
        invert_layer_features = invert_net[INVERT_LAYER].eval()
        loss = 2 * tf.nn.l2_loss(image_net[INVERT_LAYER] - invert_layer_features) / invert_layer_features.size
        tf.scalar_summary("Loss", loss)

        summary_op = tf.merge_all_summaries()
        train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

        best_loss = float('inf')
        best = None
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir)
        sess.run(tf.initialize_all_variables())

        for i in range(1, MAX_ITERATIONS):
            train_op.run()

            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d' % (i)),
                print('    total loss: %g' % this_loss)
                summary_writer.add_summary(summary_op.eval(), global_step=i)
                if this_loss < best_loss:
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("invert_check.png", output)

    output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
    scipy.misc.imsave("output.png", output)
Developer: RosieCampbell | Project: TensorflowProjects | Lines: 49 | Source: ImageInversion.py

Example 5: main

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import process_image [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, MODEL_URL)
    utils.maybe_download_and_extract(FLAGS.data_dir, DATA_URL, is_tarfile=True)
    model_data = get_model_data()
    model_params = {}

    mean = model_data['normalization'][0][0][0]
    model_params['mean_pixel'] = np.mean(mean, axis=(0, 1))

    model_params['weights'] = np.squeeze(model_data['layers'])

    style_image = get_image(FLAGS.style_path)
    processed_style = utils.process_image(style_image, model_params['mean_pixel']).astype(np.float32)
    style_net = vgg_net(model_params['weights'], processed_style)
    tf.image_summary("Style_Image", style_image)

    with tf.Session() as sess:
        print "Evaluating style features..."
        style_features = {}
        for layer in STYLE_LAYERS:
            features = style_net[layer].eval()
            features = np.reshape(features, (-1, features.shape[3]))
            style_gram = np.matmul(features.T, features) / features.size
            style_features[layer] = style_gram

        print "Reading image inputs"
        input_image, input_content = read_input(model_params)

        print "Setting up inference"
        output_image = 255 * inference_strided(input_image)

        print "Creating saver.."
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(FLAGS.log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored..."

        if FLAGS.mode == "test":
            test(sess, output_image, model_params['mean_pixel'])
            return

        print "Calculating content loss..."
        image_net = vgg_net(model_params['weights'], output_image)
        content_loss = CONTENT_WEIGHT * tf.nn.l2_loss(image_net[CONTENT_LAYER] - input_content) / utils.get_tensor_size(
            input_content)
        print content_loss.get_shape()
        tf.scalar_summary("Content_loss", content_loss)

        print "Calculating style loss..."
        style_losses = []
        for layer in STYLE_LAYERS:
            image_layer = image_net[layer]
            _, height, width, number = map(lambda i: i.value, image_layer.get_shape())
            size = height * width * number
            feats = tf.reshape(image_layer, (-1, number))
            image_gram = tf.matmul(tf.transpose(feats), feats) / size
            style_losses.append(0.5 * tf.nn.l2_loss(image_gram - style_features[layer]))

        style_loss = STYLE_WEIGHT * reduce(tf.add, style_losses)
        print style_loss.get_shape()
        tf.scalar_summary("Style_loss", style_loss)

        print "Calculating variational loss..."
        tv_y_size = utils.get_tensor_size(output_image[:, 1:, :, :])
        tv_x_size = utils.get_tensor_size(output_image[:, :, 1:, :])
        tv_loss = VARIATION_WEIGHT * (
            (tf.nn.l2_loss(output_image[:, 1:, :, :] - output_image[:, :IMAGE_SIZE - 1, :, :]) /
             tv_y_size) +
            (tf.nn.l2_loss(output_image[:, :, 1:, :] - output_image[:, :, :IMAGE_SIZE - 1, :]) /
             tv_x_size))
        print tv_loss.get_shape()
        tf.scalar_summary("Variation_loss", tv_loss)

        loss = content_loss + style_loss + tv_loss
        tf.scalar_summary("Total_loss", loss)
        print "Setting up train operation..."
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

        print "Setting up summary write"
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph_def)
        summary_op = tf.merge_all_summaries()

        print "initializing all variables"
        sess.run(tf.initialize_all_variables())

        tf.train.start_queue_runners(sess=sess)
        print "Running training..."

        for step in range(MAX_ITERATIONS):

            if step % 10 == 0:
                this_loss, summary_str = sess.run([loss, summary_op])
                summary_writer.add_summary(summary_str, global_step=step)

                print('%s : Step %d' % (datetime.now(), step)),
                print('total loss: %g' % this_loss)

            if step % 100 == 0:
                print ("Step %d" % step),
#......... (remaining code omitted) .........
Developer: RosieCampbell | Project: TensorflowProjects | Lines: 103 | Source: GenerativeNeuralStyle.py

Example 6: inference

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import process_image [as alias]
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob: dropout keep probability applied after the fc6 and fc7 convolutions
    :return: per-pixel class predictions (argmax over classes, shape [N, H, W, 1]) and the conv_t3 logits
    """
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer)

        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")

        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")

    return tf.expand_dims(annotation_pred, dim=3), conv_t3
Developer: sdjsngs | Project: FCN.tensorflow | Lines: 68 | Source: FCN.py
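For context, a typical way to wire this inference function into a graph (a hedged sketch; the placeholder shapes, IMAGE_SIZE, and the surrounding training setup in FCN.tensorflow are assumptions, not shown on this page):

image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
keep_probability = tf.placeholder(tf.float32, name="keep_probability")
# pred_annotation holds the per-pixel class map; logits would feed a cross-entropy loss
pred_annotation, logits = inference(image, keep_probability)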

Example 7: main

# Required module: import TensorflowUtils [as alias]
# Alternatively: from TensorflowUtils import process_image [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    content_image = get_image(FLAGS.content_path)
    print content_image.shape
    processed_content = utils.process_image(content_image, mean_pixel).astype(np.float32)
    style_image = get_image(FLAGS.style_path)
    processed_style = utils.process_image(style_image, mean_pixel).astype(np.float32)

    content_net = vgg_net(weights, processed_content)

    style_net = vgg_net(weights, processed_style)

    dummy_image = utils.weight_variable(content_image.shape, stddev=np.std(content_image) * 0.1)
    image_net = vgg_net(weights, dummy_image)

    with tf.Session() as sess:
        content_losses = []
        for layer in CONTENT_LAYERS:
            feature = content_net[layer].eval()
            content_losses.append(tf.nn.l2_loss(image_net[layer] - feature))
        content_loss = CONTENT_WEIGHT * reduce(tf.add, content_losses)

        style_losses = []
        for layer in STYLE_LAYERS:
            features = style_net[layer].eval()
            features = np.reshape(features, (-1, features.shape[3]))
            style_gram = np.matmul(features.T, features) / features.size

            image_layer = image_net[layer]
            _, height, width, number = map(lambda i: i.value, image_layer.get_shape())
            size = height * width * number
            feats = tf.reshape(image_layer, (-1, number))
            image_gram = tf.matmul(tf.transpose(feats), feats) / size
            style_losses.append(0.5*tf.nn.l2_loss(image_gram - style_gram))
        style_loss = STYLE_WEIGHT * reduce(tf.add, style_losses)

        tv_y_size = utils.get_tensor_size(dummy_image[:, 1:, :, :])
        tv_x_size = utils.get_tensor_size(dummy_image[:, :, 1:, :])
        tv_loss = VARIATION_WEIGHT * (
            (tf.nn.l2_loss(dummy_image[:, 1:, :, :] - dummy_image[:, :content_image.shape[1] - 1, :, :]) /
             tv_y_size) +
            (tf.nn.l2_loss(dummy_image[:, :, 1:, :] - dummy_image[:, :, :content_image.shape[2] - 1, :]) /
             tv_x_size))

        loss = content_loss + style_loss + tv_loss
        train_step = tf.train.MomentumOptimizer(LEARNING_RATE, MOMENTUM).minimize(loss)

        best_loss = float('inf')
        best = None
        sess.run(tf.initialize_all_variables())

        for i in range(1, MAX_ITERATIONS):
            train_step.run()

            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d' % (i)),
                print('    total loss: %g' % this_loss)

                if this_loss < best_loss:
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("output_check.png", output)

            if i % 100 == 0 or i == MAX_ITERATIONS - 1:
                print('  content loss: %g' % content_loss.eval()),
                print('    style loss: %g' % style_loss.eval()),
                print('       tv loss: %g' % tv_loss.eval())

    output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
    scipy.misc.imsave("output.png", output)
Developer: RosieCampbell | Project: TensorflowProjects | Lines: 81 | Source: NeuralStyle.py
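For reference, the style term in Examples 5 and 7 is the Gram-matrix loss: each VGG activation map is flattened to (H*W, C) and correlated with itself. A standalone NumPy sketch of the same computation (the name gram_matrix is illustrative, not from the project):

import numpy as np

def gram_matrix(features):
    # features: (1, H, W, C) activations from one VGG style layer
    flat = features.reshape(-1, features.shape[3])  # (H*W, C)
    return np.matmul(flat.T, flat) / flat.size      # (C, C), size-normalized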


Note: The TensorflowUtils.process_image examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and you should consult the corresponding project's License before distributing or using it. Do not reproduce without permission.