

Python TensorflowUtils.add_to_regularization_and_summary Method Code Examples

This article collects typical usage examples of the Python method TensorflowUtils.add_to_regularization_and_summary. If you are unsure how to use TensorflowUtils.add_to_regularization_and_summary, or want to see what it looks like in practice, the selected code examples below should help. You can also explore further usage examples from the TensorflowUtils module.


The following presents 6 code examples of the TensorflowUtils.add_to_regularization_and_summary method, sorted by popularity by default.
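The utility itself is not reproduced on this page. Judging from how the examples call it (each trainable variable is passed in, and Example 4 later sums tf.get_collection("reg_loss") into a regularization term), the following is a minimal sketch of what TensorflowUtils.add_to_regularization_and_summary plausibly does; the exact implementation in the referenced projects may differ.

import tensorflow as tf

def add_to_regularization_and_summary(var):
    # Sketch only (assumed behavior): record a histogram summary for the
    # variable and add its L2 norm to the "reg_loss" collection, which the
    # training scripts below combine via tf.add_n(tf.get_collection("reg_loss")).
    if var is not None:
        tf.summary.histogram(var.op.name, var)
        tf.add_to_collection("reg_loss", tf.nn.l2_loss(var))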

Example 1: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import add_to_regularization_and_summary [as alias]
def main(argv=None):
    print "Reading notMNIST data..."
    train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = \
        read_notMNIST.get_notMNISTData(FLAGS.data_dir)

    print "Setting up tf model..."
    dataset = tf.placeholder(tf.float32, shape=(None, IMAGE_SIZE * IMAGE_SIZE))

    labels = tf.placeholder(tf.float32, shape=(None, NUMBER_OF_CLASSES))

    global_step = tf.Variable(0, trainable=False)

    logits = inference_fully_convolutional(dataset)

    for var in tf.trainable_variables():
        utils.add_to_regularization_and_summary(var)

    loss_val = loss(logits, labels)
    train_op = train(loss_val, global_step)
    summary_op = tf.merge_all_summaries()
    with tf.Session() as sess:
        print "Setting up summary and saver..."
        sess.run(tf.initialize_all_variables())
        summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored!"

        if FLAGS.mode == "train":
            for step in xrange(MAX_ITERATIONS):
                offset = (step * BATCH_SIZE) % (train_labels.shape[0] - BATCH_SIZE)

                batch_data = train_dataset[offset:(offset + BATCH_SIZE), :]
                batch_labels = train_labels[offset:(offset + BATCH_SIZE), :]

                feed_dict = {dataset: batch_data, labels: batch_labels}
                if step % 100 == 0:
                    l, summary_str = sess.run([loss_val, summary_op], feed_dict=feed_dict)
                    print "Step: %d Mini batch loss: %g"%(step, l)
                    summary_writer.add_summary(summary_str, step)

                if step % 1000 == 0:
                    valid_loss = sess.run(loss_val, feed_dict={dataset:valid_dataset, labels:valid_labels})
                    print "-- Validation loss %g" % valid_loss
                    saver.save(sess, FLAGS.logs_dir +"model.ckpt", global_step=step)

                sess.run(train_op, feed_dict=feed_dict)

        test_loss = sess.run(loss_val, feed_dict={dataset:test_dataset, labels:test_labels})
        print "Test loss: %g" % test_loss
Developer: RosieCampbell, Project: TensorflowProjects, Lines of code: 54, Source file: notMNISTFullyConvultional.py

Example 2: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import add_to_regularization_and_summary [as alias]
def main(argv=None):
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32, shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name="input_image")
    annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name="annotation")

    pred_annotation, logits = inference(image, keep_probability)
    tf.summary.image("input_image", image, max_outputs=2)
    tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
    tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
    loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                          labels=tf.squeeze(annotation, squeeze_dims=[3]),
                                                                          name="entropy")))
    tf.summary.scalar("entropy", loss)

    trainable_var = tf.trainable_variables()
    if FLAGS.debug:
        for var in trainable_var:
            utils.add_to_regularization_and_summary(var)
    train_op = train(loss, trainable_var)

    print("Setting up summary op...")
    summary_op = tf.summary.merge_all()

    '''
    print("Setting up image reader...")
    train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir)
    print(len(train_records))
    print(len(valid_records))

    print("Setting up dataset reader")
    image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
    if FLAGS.mode == 'train':
        train_dataset_reader = dataset.BatchDatset(train_records, image_options)
    validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)
    '''
    train_dataset_reader = BatchDatset('data/trainlist.mat')

    sess = tf.Session()

    print("Setting up Saver...")
    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph)

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")

    #if FLAGS.mode == "train":
    itr = 0
    train_images, train_annotations = train_dataset_reader.next_batch()
    while len(train_annotations) > 0:
        #train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)
        #print('==> batch data: ', train_images[0][100][100], '===', train_annotations[0][100][100])
        feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.5}

        sess.run(train_op, feed_dict=feed_dict)

        if itr % 100 == 0:
            train_loss, summary_str, rpred = sess.run([loss, summary_op, pred_annotation], feed_dict=feed_dict)
            print("Step: %d, Train_loss:%g" % (itr, train_loss))
            summary_writer.add_summary(summary_str, itr)
            print(np.sum(rpred))
            print('=============')
            print(np.sum(train_annotations))
            print('------------>>>')

        #if itr % 10000 == 0 and itr > 0:
        '''
        valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)
        valid_loss = sess.run(loss, feed_dict={image: valid_images, annotation: valid_annotations,
                                                       keep_probability: 1.0})
        print("%s ---> Validation_loss: %g" % (datetime.datetime.now(), valid_loss))'''

        itr += 1
        train_images, train_annotations = train_dataset_reader.next_batch()

    saver.save(sess, FLAGS.logs_dir + "model.ckpt", itr)

    '''elif FLAGS.mode == "visualize":
Developer: Selimam, Project: AutoPortraitMatting, Lines of code: 83, Source file: FCN.py

Example 3: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import add_to_regularization_and_summary [as alias]
def main(argv=None):
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
    annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation")

    pred_annotation, logits = inference(image, keep_probability)
    tf.summary.image("input_image", image, max_outputs=2)
    tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
    tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
    loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                          labels=tf.squeeze(annotation, squeeze_dims=[3]),
                                                                          name="entropy")))
    loss_summary = tf.summary.scalar("entropy", loss)

    trainable_var = tf.trainable_variables()
    if FLAGS.debug:
        for var in trainable_var:
            utils.add_to_regularization_and_summary(var)
    train_op = train(loss, trainable_var)

    print("Setting up summary op...")
    summary_op = tf.summary.merge_all()

    print("Setting up image reader...")
    train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir)
    print(len(train_records))
    print(len(valid_records))

    print("Setting up dataset reader")
    image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
    if FLAGS.mode == 'train':
        train_dataset_reader = dataset.BatchDatset(train_records, image_options)
    validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)

    sess = tf.Session()

    print("Setting up Saver...")
    saver = tf.train.Saver()

    # create two summary writers to show training loss and validation loss in the same graph
    # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir
    train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)
    validation_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/validation')

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")

    if FLAGS.mode == "train":
        for itr in xrange(MAX_ITERATION):
            train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)
            feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85}

            sess.run(train_op, feed_dict=feed_dict)

            if itr % 10 == 0:
                train_loss, summary_str = sess.run([loss, loss_summary], feed_dict=feed_dict)
                print("Step: %d, Train_loss:%g" % (itr, train_loss))
                train_writer.add_summary(summary_str, itr)

            if itr % 500 == 0:
                valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)
                valid_loss, summary_sva = sess.run([loss, loss_summary], feed_dict={image: valid_images, annotation: valid_annotations,
                                                       keep_probability: 1.0})
                print("%s ---> Validation_loss: %g" % (datetime.datetime.now(), valid_loss))

                # add validation loss to TensorBoard
                validation_writer.add_summary(summary_sva, itr)
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", itr)

    elif FLAGS.mode == "visualize":
        valid_images, valid_annotations = validation_dataset_reader.get_random_batch(FLAGS.batch_size)
        pred = sess.run(pred_annotation, feed_dict={image: valid_images, annotation: valid_annotations,
                                                    keep_probability: 1.0})
        valid_annotations = np.squeeze(valid_annotations, axis=3)
        pred = np.squeeze(pred, axis=3)

        for itr in range(FLAGS.batch_size):
            utils.save_image(valid_images[itr].astype(np.uint8), FLAGS.logs_dir, name="inp_" + str(5+itr))
            utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name="gt_" + str(5+itr))
            utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name="pred_" + str(5+itr))
            print("Saved image: %d" % itr)
Developer: sdjsngs, Project: FCN.tensorflow, Lines of code: 86, Source file: FCN.py

Example 4: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import add_to_regularization_and_summary [as alias]
def main(argv=None):
    data = mnist.input_data.read_data_sets("MNIST_data", one_hot=False)
    images = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE], name="input_image")
    tf.image_summary("Input", tf.reshape(images, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), max_images=2)

    mu, log_var = encoder_fc(images)
    epsilon = tf.random_normal(tf.shape(mu), name="epsilon")
    z = mu + tf.mul(tf.exp(log_var * 0.5), epsilon)

    pred_image = decoder_fc(z)
    entropy_loss = tf.reduce_sum(
        tf.nn.sigmoid_cross_entropy_with_logits(pred_image, images, name="entropy_loss"), reduction_indices=1)
    tf.histogram_summary("Entropy_loss", entropy_loss)
    pred_image_sigmoid = tf.nn.sigmoid(pred_image)
    tf.image_summary("Output", tf.reshape(pred_image_sigmoid, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), max_images=2)

    KL_loss = -0.5 * tf.reduce_sum(1 + log_var - tf.pow(mu, 2) - tf.exp(log_var), reduction_indices=1)
    tf.histogram_summary("KL_Divergence", KL_loss)

    train_variables = tf.trainable_variables()
    for v in train_variables:
        utils.add_to_regularization_and_summary(var=v)

    reg_loss = tf.add_n(tf.get_collection("reg_loss"))
    tf.scalar_summary("Reg_loss", reg_loss)
    total_loss = tf.reduce_mean(KL_loss + entropy_loss) + FLAGS.regularization * reg_loss
    tf.scalar_summary("total_loss", total_loss)
    train_op = train(total_loss, train_variables)

    sess = tf.Session()
    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver()
    summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)

    sess.run(tf.initialize_all_variables())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print ("Model restored...")

    for itr in xrange(MAX_ITERATIONS):
        batch_images, batch_labels = data.train.next_batch(FLAGS.batch_size)
        sess.run(train_op, feed_dict={images: batch_images})

        if itr % 500 == 0:
            entr_loss, KL_div, tot_loss, summary_str = sess.run([entropy_loss, KL_loss, total_loss, summary_op],
                                                                feed_dict={images: batch_images})
            print (
                "Step: %d, Entropy loss: %g, KL Divergence: %g, Total loss: %g" % (itr, np.mean(entr_loss), np.mean(KL_div), tot_loss))
            summary_writer.add_summary(summary_str, itr)

        if itr % 1000 == 0:
            saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=itr)

    def test():
        z_vec = sess.run(z, feed_dict={images: data.test.images})
        write_array = np.hstack((z_vec, np.reshape(data.test.labels, (-1, 1))))
        df = pd.DataFrame(write_array)
        df.to_csv("z_vae_output.csv", header=False, index=False)

    test()
Developer: shekkizh, Project: TensorflowProjects, Lines of code: 63, Source file: MNIST_VAE.py

Example 5: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import add_to_regularization_and_summary [as alias]
def main(argv=None):
    input_data = tf.placeholder(tf.float32, [None, 784])
    truth_labels = tf.placeholder(tf.float32, [None, 10])
    dataset = mnist.input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    pred_labels = inference(input_data)
    entropy = -tf.reduce_sum(truth_labels * tf.log(pred_labels))
    tf.scalar_summary('Cross_entropy', entropy)
    train_vars = tf.trainable_variables()
    for v in train_vars:
        utils.add_to_regularization_and_summary(v)
    train_op = train(entropy, train_vars)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred_labels, 1), tf.argmax(truth_labels, 1)), tf.float32))

    # Session start
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    saver = tf.train.Saver()
    summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)
    summary_op = tf.merge_all_summaries()

    def test():
        test_accuracy = accuracy.eval(feed_dict={input_data: dataset.test.images, truth_labels: dataset.test.labels})
        return test_accuracy

    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        for i in xrange(MAX_ITERATIONS):
            batch = dataset.train.next_batch(FLAGS.batch_size)
            feed_dict = {input_data: batch[0], truth_labels: batch[1]}
            train_op.run(feed_dict=feed_dict)

            if i % 10 == 0:
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, i)

            if i % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict=feed_dict)
                print("step: %d, training accuracy: %g" % (i, train_accuracy))

            if i % 5000 == 0:
                saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)
    print(len(train_vars))
    train_vars_copy = sess.run([tf.identity(var) for var in train_vars])
    print('Variables Perecent: %d, Test accuracy: %g' % (100, test()))

    k = tf.placeholder(tf.int32)

    def scatter_add(variables):
        shape = utils.get_tensor_size(variables)
        values, indices = tf.nn.top_k(-1 * variables, tf.cast(k * shape / 100, tf.int32))
        return tf.scatter_add(variables, indices, values)

    def scatter_subtract(variables1, variables2):
        shape = utils.get_tensor_size(variables2)
        values, indices = tf.nn.top_k(-1 * variables1, tf.cast(k * shape / 100, tf.int32))
        return tf.scatter_sub(variables2, indices, values)

    scatter_add_op = [scatter_add(var) for var in train_vars]
    scatter_sub_op = [scatter_subtract(var1, var2) for var1, var2 in zip(train_vars_copy, train_vars)]
    for count in range(1, 20):
        sess.run(scatter_add_op, feed_dict={k: count})
        print('Variables Perecent: %d, Test accuracy: %g' % ((100 - count), test()))
        sess.run(scatter_sub_op, feed_dict={k: count})
Developer: shekkizh, Project: TensorflowProjects, Lines of code: 68, Source file: MagnitudeBased_2.py

Example 6: main

# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import add_to_regularization_and_summary [as alias]
def main(argv=None):
    print("Setting up image reader...")
    train_images, valid_images, test_images = flowers.read_dataset(FLAGS.data_dir)
    # image_options = {"crop": True, "crop_size": MODEL_IMAGE_SIZE, "resize": True, "resize_size": IMAGE_SIZE}
    # dataset_reader = dataset.BatchDatset(train_images, image_options)
    # images = tf.placeholder(tf.float32, [FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, NUM_OF_CHANNELS])
    filename_queue = tf.train.string_input_producer(train_images)
    images = read_input_queue(filename_queue)

    train_phase = tf.placeholder(tf.bool)
    z_vec = tf.placeholder(tf.float32, [None, FLAGS.z_dim], name="z")

    print("Setting up network model...")
    tf.histogram_summary("z", z_vec)
    tf.image_summary("image_real", images, max_images=1)
    gen_images = generator(z_vec, train_phase)
    tf.image_summary("image_generated", gen_images, max_images=3)

    with tf.variable_scope("discriminator") as scope:
        discriminator_real_prob, logits_real, feature_real = discriminator(images, train_phase)
        utils.add_activation_summary(tf.identity(discriminator_real_prob, name='disc_real_prob'))
        scope.reuse_variables()
        discriminator_fake_prob, logits_fake, feature_fake = discriminator(gen_images, train_phase)
        utils.add_activation_summary(tf.identity(discriminator_fake_prob, name='disc_fake_prob'))

    discriminator_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits_real, tf.ones_like(logits_real)))
    discrimintator_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits_fake, tf.zeros_like(logits_fake)))
    discriminator_loss = discrimintator_loss_fake + discriminator_loss_real
    gen_loss_1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits_fake, tf.ones_like(logits_fake)))
    gen_loss_2 = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (IMAGE_SIZE * IMAGE_SIZE)
    gen_loss = gen_loss_1 + 0.1 * gen_loss_2

    tf.scalar_summary("Discriminator_loss_real", discriminator_loss_real)
    tf.scalar_summary("Discrimintator_loss_fake", discrimintator_loss_fake)
    tf.scalar_summary("Discriminator_loss", discriminator_loss)
    tf.scalar_summary("Generator_loss", gen_loss)

    train_variables = tf.trainable_variables()
    generator_variables = [v for v in train_variables if v.name.startswith("generator")]
    # print(map(lambda x: x.op.name, generator_variables))
    discriminator_variables = [v for v in train_variables if v.name.startswith("discriminator")]
    # print(map(lambda x: x.op.name, discriminator_variables))
    generator_train_op = train(gen_loss, generator_variables)
    discriminator_train_op = train(discriminator_loss, discriminator_variables)

    for v in train_variables:
        utils.add_to_regularization_and_summary(var=v)

    def visualize():
        count = 20
        z_feed = np.random.uniform(-1.0, 1.0, size=(count, FLAGS.z_dim)).astype(np.float32)
        # z_feed = np.tile(np.random.uniform(-1.0, 1.0, size=(1, FLAGS.z_dim)).astype(np.float32), (count, 1))
        # z_feed[:, 25] = sorted(10.0 * np.random.randn(count))
        image = sess.run(gen_images, feed_dict={z_vec: z_feed, train_phase: False})

        for iii in xrange(count):
            print(image.shape)
            utils.save_image(image[iii, :, :, :], IMAGE_SIZE, FLAGS.logs_dir, name=str(iii))
            print("Saving image" + str(iii))

    sess = tf.Session()
    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver()
    summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)

    sess.run(tf.initialize_all_variables())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
        visualize()
        return

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    try:
        for itr in xrange(MAX_ITERATIONS):
            batch_z = np.random.uniform(-1.0, 1.0, size=[FLAGS.batch_size, FLAGS.z_dim]).astype(np.float32)
            # feed_dict = {images: dataset_reader.next_batch(FLAGS.batch_size), z_vec: batch_z, train_phase: True}
            feed_dict = {z_vec: batch_z, train_phase: True}

            sess.run(discriminator_train_op, feed_dict=feed_dict)
            sess.run(generator_train_op, feed_dict=feed_dict)
            sess.run(generator_train_op, feed_dict=feed_dict)

            if itr % 10 == 0:
                g_loss_val, d_loss_val, summary_str = sess.run([gen_loss, discriminator_loss, summary_op],
                                                               feed_dict=feed_dict)
                print("Step: %d, generator loss: %g, discriminator_loss: %g" % (itr, g_loss_val, d_loss_val))
                summary_writer.add_summary(summary_str, itr)

            if itr % 500 == 0:
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=itr)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    except KeyboardInterrupt:
        print("Ending Training...")
#......... part of the code omitted here .........
Developer: shekkizh, Project: TensorflowProjects, Lines of code: 103, Source file: Flowers_GAN.py


Note: The TensorflowUtils.add_to_regularization_and_summary examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.