

Python TensorflowUtils.maybe_download_and_extract Method Code Examples

This article collects typical usage examples of the Python method TensorflowUtils.maybe_download_and_extract. If you are wondering what TensorflowUtils.maybe_download_and_extract does, or how to call it, the curated code examples below should help. You can also explore further usage examples of the TensorflowUtils module in which the method lives.


The sections below present 10 code examples of the TensorflowUtils.maybe_download_and_extract method, sorted by popularity by default.
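
The TensorflowUtils source itself is not reproduced on this page. Judging from the call sites in the examples below, the method takes a target directory, a download URL, and optional is_tarfile/is_zipfile flags. A minimal sketch of what such a helper might look like (an illustration inferred from those call sites, not the project's actual source):

import os
import sys
import tarfile
import zipfile
from urllib.request import urlretrieve

def maybe_download_and_extract(dir_path, url_name, is_tarfile=False, is_zipfile=False):
    # Hypothetical sketch: fetch url_name into dir_path if it is not already
    # present, then extract it when a tarball/zip flag is set.
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    filename = url_name.split('/')[-1]
    filepath = os.path.join(dir_path, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%'
                             % (filename, count * block_size * 100.0 / total_size))
            sys.stdout.flush()
        filepath, _ = urlretrieve(url_name, filepath, _progress)
        print()
        if is_tarfile:
            tarfile.open(filepath, 'r:gz').extractall(dir_path)
        elif is_zipfile:
            with zipfile.ZipFile(filepath, 'r') as zf:
                zf.extractall(dir_path)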

Example 1: read_dataset

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def read_dataset(data_dir):
    pickle_filename = "flowers_data.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    if not os.path.exists(pickle_filepath):
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_tarfile=True)
        flower_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
        result = create_image_lists(os.path.join(data_dir, flower_folder))
        print "Training set: %d" % len(result['train'])
        print "Test set: %d" % len(result['test'])
        print "Validation set: %d" % len(result['validation'])
        print "Pickling ..."
        with open(pickle_filepath, 'wb') as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
    else:
        print "Found pickle file!"

    with open(pickle_filepath, 'rb') as f:
        result = pickle.load(f)
        training_images = result['train']
        testing_images = result['test']
        validation_images = result['validation']

        del result

    print ("Training: %d, Validation: %d, Test: %d" % (
        len(training_images), len(validation_images), len(testing_images)))
    return training_images, testing_images, validation_images
Author: shekkizh, Project: TensorflowProjects, Lines: 29, Source: read_FlowersDataset.py
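
A hypothetical call to the helper above (the directory name is an assumption, not taken from the project):

# Illustrative usage; assumes DATA_URL and create_image_lists are defined
# as in read_FlowersDataset.py.
train_images, test_images, val_images = read_dataset("Data_zoo/flowers")
print("loaded %d training images" % len(train_images))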

Example 2: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    model_params = {}
    mean = model_data['normalization'][0][0][0]
    model_params["mean_pixel"] = np.mean(mean, axis=(0, 1))
    model_params["weights"] = np.squeeze(model_data['layers'])
    visualize_layer(model_params)
Author: RosieCampbell, Project: TensorflowProjects, Lines: 10, Source: LayerVisualization.py
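
get_model_data is elided from this excerpt. A plausible sketch, assuming the weights come from a MatConvNet .mat release such as imagenet-vgg-verydeep-19.mat (both the file name and the directory are assumptions, inferred from the 'layers' and 'normalization' keys used above):

import os
import scipy.io

def get_model_data(model_dir="Model_zoo", model_file="imagenet-vgg-verydeep-19.mat"):
    # Hypothetical helper: load MatConvNet-format VGG weights as a dict
    # exposing the 'layers' and 'normalization' entries used above.
    return scipy.io.loadmat(os.path.join(model_dir, model_file))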

Example 3: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    dream_image = get_image(FLAGS.image_path)
    # dream_image = np.random.uniform(size=(1, 300, 300, 3)) + 100.0
    print(dream_image.shape)

    model_params = {}
    mean = model_data['normalization'][0][0][0]
    model_params["mean_pixel"] = np.mean(mean, axis=(0, 1))
    model_params["weights"] = np.squeeze(model_data['layers'])
    deepdream_image(model_params, dream_image, no_of_octave=3)
Author: RosieCampbell, Project: TensorflowProjects, Lines: 14, Source: DeepDream.py

Example 4: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    invert_image = get_image(FLAGS.image_path)
    print(invert_image.shape)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    processed_image = utils.process_image(invert_image, mean_pixel).astype(np.float32)
    weights = np.squeeze(model_data['layers'])

    invert_net = vgg_net(weights, processed_image)

    dummy_image = utils.weight_variable(invert_image.shape, stddev=np.std(invert_image) * 0.1)
    tf.histogram_summary("Image Output", dummy_image)
    image_net = vgg_net(weights, dummy_image)

    with tf.Session() as sess:
        invert_layer_features = invert_net[INVERT_LAYER].eval()
        loss = 2 * tf.nn.l2_loss(image_net[INVERT_LAYER] - invert_layer_features) / invert_layer_features.size
        tf.scalar_summary("Loss", loss)

        summary_op = tf.merge_all_summaries()
        train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

        best_loss = float('inf')
        best = None
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir)
        sess.run(tf.initialize_all_variables())

        for i in range(1, MAX_ITERATIONS):
            train_op.run()

            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d    total loss: %g' % (i, this_loss))
                summary_writer.add_summary(summary_op.eval(), global_step=i)
                if this_loss < best_loss:
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("invert_check.png", output)

    output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
    scipy.misc.imsave("output.png", output)
Author: RosieCampbell, Project: TensorflowProjects, Lines: 49, Source: ImageInversion.py
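
A side note on the objective above: 2 * tf.nn.l2_loss(d) / d.size is exactly the mean squared error of d, because tf.nn.l2_loss computes sum(d ** 2) / 2. A quick NumPy check (illustrative only):

import numpy as np

d = np.random.randn(4, 8).astype(np.float32)
l2 = np.sum(d ** 2) / 2.0                            # what tf.nn.l2_loss(d) returns
assert np.isclose(2 * l2 / d.size, np.mean(d ** 2))  # equals the MSE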

Example 5: read_dataset

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def read_dataset(data_dir):
    pickle_filename = "MITSceneParsing.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    if not os.path.exists(pickle_filepath):
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_zipfile=True)
        SceneParsing_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
        result = create_image_lists(os.path.join(data_dir, SceneParsing_folder))
        print ("Pickling ...")
        with open(pickle_filepath, 'wb') as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
    else:
        print ("Found pickle file!")

    with open(pickle_filepath, 'rb') as f:
        result = pickle.load(f)
        training_records = result['training']
        validation_records = result['validation']
        del result

    return training_records, validation_records
Author: hephaex, Project: tensorflow_note, Lines: 22, Source: read_MITSceneParsingData.py

Example 6: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.data_dir, DATA_URL, is_tarfile=True)
    print "Setting up model..."
    global_step = tf.Variable(0, trainable=False)
    gray, color = inputs()
    pred = 255 * inference(gray) + 128
    tf.image_summary("Gray", gray, max_images=1)
    tf.image_summary("Ground_truth", color, max_images=1)
    tf.image_summary("Prediction", pred, max_images=1)

    image_loss = loss(pred, color)
    train_op = train(image_loss, global_step)

    summary_op = tf.merge_all_summaries()
    with tf.Session() as sess:
        print "Setting up summary writer, queue, saver..."
        sess.run(tf.initialize_all_variables())

        summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print "Restoring model from checkpoint..."
            saver.restore(sess, ckpt.model_checkpoint_path)
        tf.train.start_queue_runners(sess)
        for step in range(MAX_ITERATIONS):
            if step % 400 == 0:
                loss_val, summary_str = sess.run([image_loss, summary_op])
                print "Step %d, Loss: %g" % (step, loss_val)
                summary_writer.add_summary(summary_str, global_step=step)

            if step % 1000 == 0:
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=step)
                print "%s" % datetime.now()

            sess.run(train_op)
Author: RosieCampbell, Project: TensorflowProjects, Lines: 39, Source: ImageColoring.py

Example 7: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, MODEL_URL)
    utils.maybe_download_and_extract(FLAGS.data_dir, DATA_URL, is_tarfile=True)
    model_data = get_model_data()
    model_params = {}

    mean = model_data['normalization'][0][0][0]
    model_params['mean_pixel'] = np.mean(mean, axis=(0, 1))

    model_params['weights'] = np.squeeze(model_data['layers'])

    style_image = get_image(FLAGS.style_path)
    processed_style = utils.process_image(style_image, model_params['mean_pixel']).astype(np.float32)
    style_net = vgg_net(model_params['weights'], processed_style)
    tf.image_summary("Style_Image", style_image)

    with tf.Session() as sess:
        print "Evaluating style features..."
        style_features = {}
        for layer in STYLE_LAYERS:
            features = style_net[layer].eval()
            features = np.reshape(features, (-1, features.shape[3]))
            style_gram = np.matmul(features.T, features) / features.size
            style_features[layer] = style_gram

        print "Reading image inputs"
        input_image, input_content = read_input(model_params)

        print "Setting up inference"
        output_image = 255 * inference_strided(input_image)

        print "Creating saver.."
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(FLAGS.log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored..."

        if FLAGS.mode == "test":
            test(sess, output_image, model_params['mean_pixel'])
            return

        print "Calculating content loss..."
        image_net = vgg_net(model_params['weights'], output_image)
        content_loss = CONTENT_WEIGHT * tf.nn.l2_loss(image_net[CONTENT_LAYER] - input_content) / utils.get_tensor_size(
            input_content)
        print(content_loss.get_shape())
        tf.scalar_summary("Content_loss", content_loss)

        print "Calculating style loss..."
        style_losses = []
        for layer in STYLE_LAYERS:
            image_layer = image_net[layer]
            _, height, width, number = map(lambda i: i.value, image_layer.get_shape())
            size = height * width * number
            feats = tf.reshape(image_layer, (-1, number))
            image_gram = tf.matmul(tf.transpose(feats), feats) / size
            style_losses.append(0.5 * tf.nn.l2_loss(image_gram - style_features[layer]))

        style_loss = STYLE_WEIGHT * tf.add_n(style_losses)
        print(style_loss.get_shape())
        tf.scalar_summary("Style_loss", style_loss)

        print "Calculating variational loss..."
        tv_y_size = utils.get_tensor_size(output_image[:, 1:, :, :])
        tv_x_size = utils.get_tensor_size(output_image[:, :, 1:, :])
        tv_loss = VARIATION_WEIGHT * (
            (tf.nn.l2_loss(output_image[:, 1:, :, :] - output_image[:, :IMAGE_SIZE - 1, :, :]) /
             tv_y_size) +
            (tf.nn.l2_loss(output_image[:, :, 1:, :] - output_image[:, :, :IMAGE_SIZE - 1, :]) /
             tv_x_size))
        print(tv_loss.get_shape())
        tf.scalar_summary("Variation_loss", tv_loss)

        loss = content_loss + style_loss + tv_loss
        tf.scalar_summary("Total_loss", loss)
        print "Setting up train operation..."
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

        print "Setting up summary write"
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph_def)
        summary_op = tf.merge_all_summaries()

        print "initializing all variables"
        sess.run(tf.initialize_all_variables())

        tf.train.start_queue_runners(sess=sess)
        print "Running training..."

        for step in range(MAX_ITERATIONS):

            if step % 10 == 0:
                this_loss, summary_str = sess.run([loss, summary_op])
                summary_writer.add_summary(summary_str, global_step=step)

                print('%s : Step %d total loss: %g' % (datetime.now(), step, this_loss))

            if step % 100 == 0:
                print ("Step %d" % step),
#.........这里部分代码省略.........
Author: RosieCampbell, Project: TensorflowProjects, Lines: 103, Source: GenerativeNeuralStyle.py
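
The style loss in this example (and in Example 9 below) is built on Gram matrices of VGG feature maps. A small NumPy illustration of the same computation (the shapes are made up for the example):

import numpy as np

# Feature maps of shape (1, H, W, C) are flattened to (H*W, C); the C x C
# Gram matrix captures channel-to-channel correlations while discarding
# spatial layout, which is what makes it a descriptor of style.
features = np.random.rand(1, 7, 7, 64).astype(np.float32)
flat = features.reshape(-1, features.shape[3])   # (H*W, C)
gram = flat.T @ flat / features.size             # (C, C), normalized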

Example 8: read_caltech

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def read_caltech(data_dir):
    pickle_filename = "caltech.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    if not os.path.exists(pickle_filepath):
        utils.maybe_download_and_extract(data_dir)
Author: RosieCampbell, Project: TensorflowProjects, Lines: 7, Source: readCaltech101.py

Example 9: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    content_image = get_image(FLAGS.content_path)
    print(content_image.shape)
    processed_content = utils.process_image(content_image, mean_pixel).astype(np.float32)
    style_image = get_image(FLAGS.style_path)
    processed_style = utils.process_image(style_image, mean_pixel).astype(np.float32)

    content_net = vgg_net(weights, processed_content)

    style_net = vgg_net(weights, processed_style)

    dummy_image = utils.weight_variable(content_image.shape, stddev=np.std(content_image) * 0.1)
    image_net = vgg_net(weights, dummy_image)

    with tf.Session() as sess:
        content_losses = []
        for layer in CONTENT_LAYERS:
            feature = content_net[layer].eval()
            content_losses.append(tf.nn.l2_loss(image_net[layer] - feature))
        content_loss = CONTENT_WEIGHT * tf.add_n(content_losses)

        style_losses = []
        for layer in STYLE_LAYERS:
            features = style_net[layer].eval()
            features = np.reshape(features, (-1, features.shape[3]))
            style_gram = np.matmul(features.T, features) / features.size

            image_layer = image_net[layer]
            _, height, width, number = map(lambda i: i.value, image_layer.get_shape())
            size = height * width * number
            feats = tf.reshape(image_layer, (-1, number))
            image_gram = tf.matmul(tf.transpose(feats), feats) / size
            style_losses.append(0.5 * tf.nn.l2_loss(image_gram - style_gram))
        style_loss = STYLE_WEIGHT * tf.add_n(style_losses)

        tv_y_size = utils.get_tensor_size(dummy_image[:, 1:, :, :])
        tv_x_size = utils.get_tensor_size(dummy_image[:, :, 1:, :])
        tv_loss = VARIATION_WEIGHT * (
            (tf.nn.l2_loss(dummy_image[:, 1:, :, :] - dummy_image[:, :content_image.shape[1] - 1, :, :]) /
             tv_y_size) +
            (tf.nn.l2_loss(dummy_image[:, :, 1:, :] - dummy_image[:, :, :content_image.shape[2] - 1, :]) /
             tv_x_size))

        loss = content_loss + style_loss + tv_loss
        train_step = tf.train.MomentumOptimizer(LEARNING_RATE, MOMENTUM).minimize(loss)

        best_loss = float('inf')
        best = None
        sess.run(tf.initialize_all_variables())

        for i in range(1, MAX_ITERATIONS):
            train_step.run()

            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d    total loss: %g' % (i, this_loss))

                if this_loss < best_loss:
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("output_check.png", output)

            if i % 100 == 0 or i == MAX_ITERATIONS - 1:
                print('  content loss: %g    style loss: %g       tv loss: %g'
                      % (content_loss.eval(), style_loss.eval(), tv_loss.eval()))

    output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
    scipy.misc.imsave("output.png", output)
Author: RosieCampbell, Project: TensorflowProjects, Lines: 81, Source: NeuralStyle.py
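
The total-variation term in this example penalizes differences between neighboring pixels, which smooths the generated image. An equivalent NumPy formulation (shapes illustrative):

import numpy as np

img = np.random.rand(1, 32, 32, 3).astype(np.float32)
dy = img[:, 1:, :, :] - img[:, :-1, :, :]   # vertical neighbor differences
dx = img[:, :, 1:, :] - img[:, :, :-1, :]   # horizontal neighbor differences
# Matches the code above: tf.nn.l2_loss(d) = sum(d ** 2) / 2, normalized
# by the element count of each shifted slice.
tv = np.sum(dy ** 2) / 2 / dy.size + np.sum(dx ** 2) / 2 / dx.size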

Example 10: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import maybe_download_and_extract [as alias]
def main(argv=None):
    global_step = tf.Variable(0, trainable=False)

    img_A = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
    img_B = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
    img_C = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
    img_D = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])

    tf.image_summary("A", img_A, max_images=2)
    tf.image_summary("B", img_B, max_images=2)
    tf.image_summary("C", img_C, max_images=2)
    tf.image_summary("Ground_truth", img_D, max_images=2)

    print "Setting up encoder.."
    with tf.variable_scope("encoder") as scope:
        enc_A = encoder_conv(img_A)
        scope.reuse_variables()
        enc_B = encoder_conv(img_B)
        enc_C = encoder_conv(img_C)
        enc_D = encoder_conv(img_D)

    print "Setting up analogy calc.."
    # analogy calculation
    analogy_input = tf.concat(1, [enc_B - enc_A, enc_C])
    W_analogy1 = utils.weight_variable([1024, 512], name="W_analogy1")
    b_analogy1 = utils.bias_variable([512], name="b_analogy1")
    analogy_fc1 = tf.nn.relu(tf.matmul(analogy_input, W_analogy1) + b_analogy1)

    W_analogy2 = utils.weight_variable([512, 512], name="W_analogy2")
    b_analogy2 = utils.bias_variable([512], name="b_analogy2")
    analogy_fc2 = tf.nn.relu(tf.matmul(analogy_fc1, W_analogy2) + b_analogy2)

    pred = decoder_conv(enc_C + analogy_fc2)
    tf.image_summary("Pred_image", pred, max_images=2)

    print "Setting up regularization/ summary variables..."
    for var in tf.trainable_variables():
        add_to_regularization_and_summary(var)

    print "Loss and train setup..."
    loss1 = tf.sqrt(2*tf.nn.l2_loss(pred - img_D)) / FLAGS.batch_size
    tf.scalar_summary("image_loss", loss1)
    loss2 = tf.sqrt(2* tf.nn.l2_loss(enc_D - enc_C - analogy_fc2)) / FLAGS.batch_size
    tf.scalar_summary("analogy_loss", loss2)
    loss3 = tf.add_n(tf.get_collection("reg_loss"))
    tf.scalar_summary("regularization", loss3)

    total_loss = loss1 + ANALOGY_COEFF * loss2 + REGULARIZER * loss3
    tf.scalar_summary("Total_loss", total_loss)
    train_op = train(total_loss, global_step)

    summary_op = tf.merge_all_summaries()

    utils.maybe_download_and_extract(FLAGS.data_dir, DATA_URL, is_tarfile=True)
    print "Initializing Loader class..."
    loader = AnalogyDataLoader.Loader(FLAGS.data_dir, FLAGS.batch_size)

    eval_A, eval_B, eval_C, eval_D = read_eval_inputs(loader)
    eval_feed = {img_A: eval_A, img_B: eval_B, img_C: eval_C, img_D: eval_D}
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        print "Setting up summary and saver..."
        summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored!"

        for step in range(MAX_ITERATIONS):
            A, B, C, D = read_train_inputs(loader)
            feed_dict = {img_A: A, img_B: B, img_C: C, img_D: D}
            if step % 1000 == 0:
                eval_loss = sess.run([loss1, loss2, loss3, total_loss], feed_dict=eval_feed)
                print "Evaluation: (Image loss %f, Variation loss %f, Reg loss %f) total loss %f" % tuple(eval_loss)

            sess.run(train_op, feed_dict=feed_dict)

            if step % 100 == 0:
                [loss_val, summary_str] = sess.run([total_loss, summary_op], feed_dict=feed_dict)
                print "%s Step %d: Training loss %f" % (datetime.now(), step, loss_val)
                summary_writer.add_summary(summary_str, global_step=step)
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=step)
Author: RosieCampbell, Project: TensorflowProjects, Lines: 86, Source: ImageAnalogy.py
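
A side note on loss1 and loss2 above: sqrt(2 * tf.nn.l2_loss(x)) is simply the Euclidean norm of x, so each term is the L2 norm of a residual divided by the batch size. A quick NumPy check (illustrative):

import numpy as np

x = np.random.randn(8, 4).astype(np.float32)
l2 = np.sum(x ** 2) / 2.0                    # what tf.nn.l2_loss(x) returns
assert np.isclose(np.sqrt(2 * l2), np.linalg.norm(x))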


Note: The TensorflowUtils.maybe_download_and_extract method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various programmers; copyright in the source code remains with the original authors. For distribution and use, consult the license of the corresponding project; do not reproduce without permission.