This page collects typical usage examples of the TensorflowUtils.add_activation_summary method in Python. If you are unsure what TensorflowUtils.add_activation_summary does or how to call it, the curated examples below should help; you can also explore the containing module, TensorflowUtils, for more context.
Nine code examples of TensorflowUtils.add_activation_summary are shown below, ordered by popularity by default. All of them alias the module as utils; they additionally assume import tensorflow as tf and, where used, import numpy as np.
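For orientation: TensorflowUtils is a project-local helper module, not part of TensorFlow itself, and add_activation_summary simply attaches TensorBoard summaries to a tensor. A minimal sketch of what such a helper usually looks like, assuming the common histogram-plus-sparsity convention (the exact body may differ from the module behind these examples; the older TF 0.x code below would call tf.histogram_summary instead):

import tensorflow as tf

def add_activation_summary(var):
    # Sketch, not the module's verbatim source: record the activation values
    # as a histogram and their zero fraction ("sparsity") as a scalar.
    if var is not None:
        tf.summary.histogram(var.op.name + "/activation", var)
        tf.summary.scalar(var.op.name + "/sparsity", tf.nn.zero_fraction(var))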
Example 1: activation_function
# Required import: import TensorflowUtils as utils
# Or: from TensorflowUtils import add_activation_summary
def activation_function(x, name=""):
    # Every entry in this dict is constructed eagerly; only the op selected by
    # FLAGS.activation is returned (see the lazier variant sketched below).
    activation_dict = {'relu': tf.nn.relu(x, name), 'elu': tf.nn.elu(x, name),
                       'lrelu': utils.leaky_relu(x, 0.2, name),
                       'tanh': tf.nn.tanh(x, name),
                       'sigmoid': tf.nn.sigmoid(x, name)}
    act = activation_dict[FLAGS.activation]
    utils.add_activation_summary(act)
    return act
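Note that the dictionary above builds all five activation ops on every call and keeps only the one selected by FLAGS.activation, leaving four unused nodes in the graph. A lazier variant (a sketch with the same observable behavior, reusing utils.leaky_relu and FLAGS.activation from the example):

def activation_function_lazy(x, name=""):
    # Map names to constructors rather than constructed ops, so only the
    # selected activation is added to the graph.
    constructors = {'relu': tf.nn.relu, 'elu': tf.nn.elu,
                    'lrelu': lambda t, n: utils.leaky_relu(t, 0.2, n),
                    'tanh': tf.nn.tanh, 'sigmoid': tf.nn.sigmoid}
    act = constructors[FLAGS.activation](x, name)
    utils.add_activation_summary(act)
    return act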
Example 2: generator
# Required import: import TensorflowUtils as utils
# Or: from TensorflowUtils import add_activation_summary
def generator(z, train_mode):
    with tf.variable_scope("generator") as scope:
        W_0 = utils.weight_variable([FLAGS.z_dim, 64 * GEN_DIMENSION / 2 * IMAGE_SIZE / 16 * IMAGE_SIZE / 16],
                                    name="W_0")
        b_0 = utils.bias_variable([64 * GEN_DIMENSION / 2 * IMAGE_SIZE / 16 * IMAGE_SIZE / 16], name="b_0")
        z_0 = tf.matmul(z, W_0) + b_0
        h_0 = tf.reshape(z_0, [-1, IMAGE_SIZE / 16, IMAGE_SIZE / 16, 64 * GEN_DIMENSION / 2])
        h_bn0 = utils.batch_norm(h_0, 64 * GEN_DIMENSION / 2, train_mode, scope="gen_bn0")
        h_relu0 = tf.nn.relu(h_bn0, name='relu0')
        utils.add_activation_summary(h_relu0)
        # W_1 = utils.weight_variable([5, 5, 64 * GEN_DIMENSION / 2, 64 * GEN_DIMENSION], name="W_1")
        # b_1 = utils.bias_variable([64 * GEN_DIMENSION / 2], name="b_1")
        # deconv_shape = tf.pack([tf.shape(h_relu0)[0], IMAGE_SIZE / 16, IMAGE_SIZE / 16, 64 * GEN_DIMENSION / 2])
        # h_conv_t1 = utils.conv2d_transpose_strided(h_relu0, W_1, b_1, output_shape=deconv_shape)
        # h_bn1 = utils.batch_norm(h_conv_t1, 64 * GEN_DIMENSION / 2, train_mode, scope="gen_bn1")
        # h_relu1 = tf.nn.relu(h_bn1, name='relu1')
        # utils.add_activation_summary(h_relu1)
        W_2 = utils.weight_variable([5, 5, 64 * GEN_DIMENSION / 4, 64 * GEN_DIMENSION / 2], name="W_2")
        b_2 = utils.bias_variable([64 * GEN_DIMENSION / 4], name="b_2")
        deconv_shape = tf.pack([tf.shape(h_relu0)[0], IMAGE_SIZE / 8, IMAGE_SIZE / 8, 64 * GEN_DIMENSION / 4])
        h_conv_t2 = utils.conv2d_transpose_strided(h_relu0, W_2, b_2, output_shape=deconv_shape)
        h_bn2 = utils.batch_norm(h_conv_t2, 64 * GEN_DIMENSION / 4, train_mode, scope="gen_bn2")
        h_relu2 = tf.nn.relu(h_bn2, name='relu2')
        utils.add_activation_summary(h_relu2)
        W_3 = utils.weight_variable([5, 5, 64 * GEN_DIMENSION / 8, 64 * GEN_DIMENSION / 4], name="W_3")
        b_3 = utils.bias_variable([64 * GEN_DIMENSION / 8], name="b_3")
        deconv_shape = tf.pack([tf.shape(h_relu2)[0], IMAGE_SIZE / 4, IMAGE_SIZE / 4, 64 * GEN_DIMENSION / 8])
        h_conv_t3 = utils.conv2d_transpose_strided(h_relu2, W_3, b_3, output_shape=deconv_shape)
        h_bn3 = utils.batch_norm(h_conv_t3, 64 * GEN_DIMENSION / 8, train_mode, scope="gen_bn3")
        h_relu3 = tf.nn.relu(h_bn3, name='relu3')
        utils.add_activation_summary(h_relu3)
        W_4 = utils.weight_variable([5, 5, 64 * GEN_DIMENSION / 16, 64 * GEN_DIMENSION / 8], name="W_4")
        b_4 = utils.bias_variable([64 * GEN_DIMENSION / 16], name="b_4")
        deconv_shape = tf.pack([tf.shape(h_relu3)[0], IMAGE_SIZE / 2, IMAGE_SIZE / 2, 64 * GEN_DIMENSION / 16])
        h_conv_t4 = utils.conv2d_transpose_strided(h_relu3, W_4, b_4, output_shape=deconv_shape)
        h_bn4 = utils.batch_norm(h_conv_t4, 64 * GEN_DIMENSION / 16, train_mode, scope="gen_bn4")
        h_relu4 = tf.nn.relu(h_bn4, name='relu4')
        utils.add_activation_summary(h_relu4)
        W_5 = utils.weight_variable([5, 5, NUM_OF_CHANNELS, 64 * GEN_DIMENSION / 16], name="W_5")
        b_5 = utils.bias_variable([NUM_OF_CHANNELS], name="b_5")
        deconv_shape = tf.pack([tf.shape(h_relu4)[0], IMAGE_SIZE, IMAGE_SIZE, NUM_OF_CHANNELS])
        h_conv_t5 = utils.conv2d_transpose_strided(h_relu4, W_5, b_5, output_shape=deconv_shape)
        pred_image = tf.nn.tanh(h_conv_t5, name='pred_image')
        utils.add_activation_summary(pred_image)
    return pred_image
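A quick sanity check of the generator's shape arithmetic, assuming plausible values IMAGE_SIZE = 64, GEN_DIMENSION = 16 and NUM_OF_CHANNELS = 3 (none of these constants are defined in the excerpt). Note the shape expressions rely on Python 2 integer division; under Python 3 the / would need to become //:

IMAGE_SIZE, GEN_DIMENSION, NUM_OF_CHANNELS = 64, 16, 3  # assumed values

size, depth = IMAGE_SIZE // 16, 64 * GEN_DIMENSION // 2  # h_0 is 4 x 4 x 512
for _ in range(3):  # W_2..W_4: each strided deconv doubles size, halves depth
    size, depth = size * 2, depth // 2
print(size, depth)                # 32 64  -- the input to the final deconv W_5
print(size * 2, NUM_OF_CHANNELS)  # 64 3   -- the pred_image resolution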
Example 3: inference
# Required import: import TensorflowUtils as utils
# Or: from TensorflowUtils import add_activation_summary
def inference(data):
    with tf.variable_scope("inference") as scope:
        W_1 = utils.weight_variable([IMAGE_SIZE * IMAGE_SIZE * 50], name="W_1")
        b_1 = utils.bias_variable([50], name="b_1")
        h_1 = tf.nn.relu(tf.matmul(data, tf.reshape(W_1, [IMAGE_SIZE * IMAGE_SIZE, 50])) + b_1, name='h_1')
        utils.add_activation_summary(h_1)
        W_2 = utils.weight_variable([50 * 50], name="W_2")
        b_2 = utils.bias_variable([50], name="b_2")
        h_2 = tf.nn.relu(tf.matmul(h_1, tf.reshape(W_2, [50, 50])) + b_2, name='h_2')
        utils.add_activation_summary(h_2)
        W_3 = utils.weight_variable([50 * 50], name="W_3")
        b_3 = utils.bias_variable([50], name="b_3")
        h_3 = tf.nn.relu(tf.matmul(h_2, tf.reshape(W_3, [50, 50])) + b_3, name='h_3')
        utils.add_activation_summary(h_3)
        W_4 = utils.weight_variable([50 * 50], name="W_4")
        b_4 = utils.bias_variable([50], name="b_4")
        h_4 = tf.nn.relu(tf.matmul(h_3, tf.reshape(W_4, [50, 50])) + b_4, name='h_4')
        utils.add_activation_summary(h_4)
        W_final = utils.weight_variable([50 * 10], name="W_final")
        b_final = utils.bias_variable([10], name="b_final")
        pred = tf.nn.softmax(tf.matmul(h_4, tf.reshape(W_final, [50, 10])) + b_final, name='h_final')
        # utils.add_activation_summary(pred)
    return pred
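A minimal sketch of wiring this network up, assuming MNIST-style flattened inputs with IMAGE_SIZE = 28 (neither the constant nor the placeholder appears in the excerpt):

IMAGE_SIZE = 28  # assumed
data = tf.placeholder(tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE], name="data")
pred = inference(data)  # [None, 10] softmax probabilities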
Example 4: inference
# Required import: import TensorflowUtils as utils
# Or: from TensorflowUtils import add_activation_summary
def inference(data, keep_prob):
    with tf.variable_scope("inference") as scope:
        # All weights and biases live in one flat variable and are sliced out
        # below, layer by layer, advancing the two offsets as we go.
        weight_variable_size = IMAGE_SIZE * IMAGE_SIZE * 50 + 50 * 50 * 3 + 50 * 10
        bias_variable_size = 4 * 50 + 10
        print(weight_variable_size + bias_variable_size)
        variable = utils.weight_variable([weight_variable_size + bias_variable_size], name="variables")
        weight_variable = tf.slice(variable, [0], [weight_variable_size], name="weights")
        bias_variable = tf.slice(variable, [weight_variable_size], [bias_variable_size], name="biases")
        weight_offset = 0
        bias_offset = 0
        W_1 = tf.slice(weight_variable, [weight_offset], [IMAGE_SIZE * IMAGE_SIZE * 50], name="W_1")
        b_1 = tf.slice(bias_variable, [bias_offset], [50], name="b_1")
        h_1_relu = tf.nn.relu(tf.matmul(data, tf.reshape(W_1, [IMAGE_SIZE * IMAGE_SIZE, 50])) + b_1, name='h_1')
        h_1 = tf.nn.dropout(h_1_relu, keep_prob)
        utils.add_activation_summary(h_1)
        weight_offset += IMAGE_SIZE * IMAGE_SIZE * 50
        bias_offset += 50
        W_2 = tf.slice(weight_variable, [weight_offset], [50 * 50], name="W_2")
        b_2 = tf.slice(bias_variable, [bias_offset], [50], name="b_2")
        h_2_relu = tf.nn.relu(tf.matmul(h_1, tf.reshape(W_2, [50, 50])) + b_2, name='h_2')
        h_2 = tf.nn.dropout(h_2_relu, keep_prob)
        utils.add_activation_summary(h_2)
        weight_offset += 50 * 50
        bias_offset += 50
        W_3 = tf.slice(weight_variable, [weight_offset], [50 * 50], name="W_3")
        b_3 = tf.slice(bias_variable, [bias_offset], [50], name="b_3")
        h_3_relu = tf.nn.relu(tf.matmul(h_2, tf.reshape(W_3, [50, 50])) + b_3, name='h_3')
        h_3 = tf.nn.dropout(h_3_relu, keep_prob)
        utils.add_activation_summary(h_3)
        weight_offset += 50 * 50
        bias_offset += 50
        W_4 = tf.slice(weight_variable, [weight_offset], [50 * 50], name="W_4")
        b_4 = tf.slice(bias_variable, [bias_offset], [50], name="b_4")
        h_4_relu = tf.nn.relu(tf.matmul(h_3, tf.reshape(W_4, [50, 50])) + b_4, name='h_4')
        h_4 = tf.nn.dropout(h_4_relu, keep_prob)
        utils.add_activation_summary(h_4)
        weight_offset += 50 * 50
        bias_offset += 50
        W_final = tf.slice(weight_variable, [weight_offset], [50 * 10], name="W_final")
        b_final = tf.slice(bias_variable, [bias_offset], [10], name="b_final")
        pred = tf.nn.softmax(tf.matmul(h_4, tf.reshape(W_final, [50, 10])) + b_final, name='h_final')
        # utils.add_activation_summary(pred)
    return pred
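The print near the top of example 4 reports the total parameter count. Assuming IMAGE_SIZE = 28 (MNIST-sized inputs, matching the 10-way softmax), the arithmetic works out to:

IMAGE_SIZE = 28  # assumed
weight_variable_size = IMAGE_SIZE * IMAGE_SIZE * 50 + 50 * 50 * 3 + 50 * 10
bias_variable_size = 4 * 50 + 10
print(weight_variable_size)  # 39200 + 7500 + 500 = 47200
print(bias_variable_size)    # 210
print(weight_variable_size + bias_variable_size)  # 47410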
Example 5: encoder_fc
# Required import: import TensorflowUtils as utils
# Or: from TensorflowUtils import add_activation_summary
def encoder_fc(images):
    with tf.variable_scope("encoder") as scope:
        W_fc1 = utils.weight_variable([IMAGE_SIZE * IMAGE_SIZE, 50], name="W_fc1")
        b_fc1 = utils.bias_variable([50], name="b_fc1")
        h_relu1 = activation_function(tf.matmul(images, W_fc1) + b_fc1, name="hfc_1")
        W_fc2 = utils.weight_variable([50, 50], name="W_fc2")
        b_fc2 = utils.bias_variable([50], name="b_fc2")
        h_relu2 = activation_function(tf.matmul(h_relu1, W_fc2) + b_fc2, name="hfc_2")
        W_fc3 = utils.weight_variable([50, FLAGS.z_dim], name="W_fc3")
        b_fc3 = utils.bias_variable([FLAGS.z_dim], name="b_fc3")
        mu = tf.add(tf.matmul(h_relu2, W_fc3), b_fc3, name="mu")
        utils.add_activation_summary(mu)
        W_fc4 = utils.weight_variable([50, FLAGS.z_dim], name="W_fc4")
        b_fc4 = utils.bias_variable([FLAGS.z_dim], name="b_fc4")
        log_var = tf.add(tf.matmul(h_relu2, W_fc4), b_fc4, name="log_var")
        utils.add_activation_summary(log_var)
    return mu, log_var
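The mu and log_var returned here are the usual VAE encoder outputs; downstream code would typically draw a latent sample via the reparameterization trick. A sketch (not part of the excerpt):

mu, log_var = encoder_fc(images)  # images is an assumed input tensor
eps = tf.random_normal(tf.shape(mu), 0.0, 1.0, dtype=tf.float32)
z = mu + tf.exp(0.5 * log_var) * eps  # z ~ N(mu, exp(log_var))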
Example 6: vgg_net
# Required import: import TensorflowUtils as utils
# Or: from TensorflowUtils import add_activation_summary
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )
    net = {}
    current = image
    for i, name in enumerate(layers):
        if name in ['conv3_4', 'relu3_4', 'conv4_4', 'relu4_4', 'conv5_4', 'relu5_4']:
            continue
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current
    return net
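The weights argument comes from the MatConvNet VGG-19 model file; example 8 below obtains it via utils.get_model_data. A sketch of roughly what that loading amounts to (the filename is assumed, and scipy is required):

import numpy as np
import scipy.io

model_data = scipy.io.loadmat("imagenet-vgg-verydeep-19.mat")  # assumed filename
weights = np.squeeze(model_data['layers'])  # one entry per name in `layers` above
mean_pixel = np.mean(model_data['normalization'][0][0][0], axis=(0, 1))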
Example 7: discriminator
# Required import: import TensorflowUtils as utils
# Or: from TensorflowUtils import add_activation_summary
def discriminator(input_images, train_mode):
    # dropout_prob = 1.0
    # if train_mode:
    #     dropout_prob = 0.5
    W_conv0 = utils.weight_variable([5, 5, NUM_OF_CHANNELS, 64 * 1], name="W_conv0")
    b_conv0 = utils.bias_variable([64 * 1], name="b_conv0")
    h_conv0 = utils.conv2d_strided(input_images, W_conv0, b_conv0)
    h_bn0 = h_conv0  # utils.batch_norm(h_conv0, 64 * 1, train_mode, scope="disc_bn0")
    h_relu0 = utils.leaky_relu(h_bn0, 0.2, name="h_relu0")
    utils.add_activation_summary(h_relu0)
    W_conv1 = utils.weight_variable([5, 5, 64 * 1, 64 * 2], name="W_conv1")
    b_conv1 = utils.bias_variable([64 * 2], name="b_conv1")
    h_conv1 = utils.conv2d_strided(h_relu0, W_conv1, b_conv1)
    h_bn1 = utils.batch_norm(h_conv1, 64 * 2, train_mode, scope="disc_bn1")
    h_relu1 = utils.leaky_relu(h_bn1, 0.2, name="h_relu1")
    utils.add_activation_summary(h_relu1)
    W_conv2 = utils.weight_variable([5, 5, 64 * 2, 64 * 4], name="W_conv2")
    b_conv2 = utils.bias_variable([64 * 4], name="b_conv2")
    h_conv2 = utils.conv2d_strided(h_relu1, W_conv2, b_conv2)
    h_bn2 = utils.batch_norm(h_conv2, 64 * 4, train_mode, scope="disc_bn2")
    h_relu2 = utils.leaky_relu(h_bn2, 0.2, name="h_relu2")
    utils.add_activation_summary(h_relu2)
    W_conv3 = utils.weight_variable([5, 5, 64 * 4, 64 * 8], name="W_conv3")
    b_conv3 = utils.bias_variable([64 * 8], name="b_conv3")
    h_conv3 = utils.conv2d_strided(h_relu2, W_conv3, b_conv3)
    h_bn3 = utils.batch_norm(h_conv3, 64 * 8, train_mode, scope="disc_bn3")
    h_relu3 = utils.leaky_relu(h_bn3, 0.2, name="h_relu3")
    utils.add_activation_summary(h_relu3)
    shape = h_relu3.get_shape().as_list()
    h_3 = tf.reshape(h_relu3, [FLAGS.batch_size, (IMAGE_SIZE // 16) * (IMAGE_SIZE // 16) * shape[3]])
    W_4 = utils.weight_variable([h_3.get_shape().as_list()[1], 1], name="W_4")
    b_4 = utils.bias_variable([1], name="b_4")
    h_4 = tf.matmul(h_3, W_4) + b_4
    return tf.nn.sigmoid(h_4), h_4, h_relu3
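In a GAN this discriminator is applied twice with shared weights, once to real images and once to generated ones; example 9 below does exactly this via scope.reuse_variables(). In sketch form (real_images, gen_images and train_phase are assumed tensors):

with tf.variable_scope("discriminator") as scope:
    real_prob, logits_real, feat_real = discriminator(real_images, train_phase)
    scope.reuse_variables()  # the second call reuses W_conv*/b_conv* from the first
    fake_prob, logits_fake, feat_fake = discriminator(gen_images, train_phase)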
Example 8: inference
# Required import: import TensorflowUtils as utils
# Or: from TensorflowUtils import add_activation_summary
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = np.squeeze(model_data['layers'])
    # processed_image = utils.process_image(image, mean_pixel)
    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, image)
        conv_final_layer = image_net["conv5_3"]
        pool5 = utils.max_pool_2x2(conv_final_layer)
        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)
        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")
        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")
        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")
        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)
        annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")
    return tf.expand_dims(annotation_pred, dim=3), conv_t3
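The logits (conv_t3) returned above would typically be trained with per-pixel cross-entropy against the ground-truth annotation. A sketch using TF 1.x names; the annotation placeholder is an assumption, not part of the excerpt:

annotation = tf.placeholder(tf.int32, [None, None, None, 1], name="annotation")
pred, logits = inference(image, keep_prob)  # image, keep_prob as defined above
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.squeeze(annotation, axis=[3])))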
Example 9: main
# Required import: import TensorflowUtils as utils
# Or: from TensorflowUtils import add_activation_summary
def main(argv=None):
    print("Setting up image reader...")
    train_images, valid_images, test_images = flowers.read_dataset(FLAGS.data_dir)
    # image_options = {"crop": True, "crop_size": MODEL_IMAGE_SIZE, "resize": True, "resize_size": IMAGE_SIZE}
    # dataset_reader = dataset.BatchDatset(train_images, image_options)
    # images = tf.placeholder(tf.float32, [FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, NUM_OF_CHANNELS])
    filename_queue = tf.train.string_input_producer(train_images)
    images = read_input_queue(filename_queue)
    train_phase = tf.placeholder(tf.bool)
    z_vec = tf.placeholder(tf.float32, [None, FLAGS.z_dim], name="z")
    print("Setting up network model...")
    tf.histogram_summary("z", z_vec)
    tf.image_summary("image_real", images, max_images=1)
    gen_images = generator(z_vec, train_phase)
    tf.image_summary("image_generated", gen_images, max_images=3)
    with tf.variable_scope("discriminator") as scope:
        discriminator_real_prob, logits_real, feature_real = discriminator(images, train_phase)
        utils.add_activation_summary(tf.identity(discriminator_real_prob, name='disc_real_prob'))
        scope.reuse_variables()
        discriminator_fake_prob, logits_fake, feature_fake = discriminator(gen_images, train_phase)
        utils.add_activation_summary(tf.identity(discriminator_fake_prob, name='disc_fake_prob'))
    discriminator_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits_real, tf.ones_like(logits_real)))
    discriminator_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits_fake, tf.zeros_like(logits_fake)))
    discriminator_loss = discriminator_loss_fake + discriminator_loss_real
    gen_loss_1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits_fake, tf.ones_like(logits_fake)))
    gen_loss_2 = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (IMAGE_SIZE * IMAGE_SIZE)
    gen_loss = gen_loss_1 + 0.1 * gen_loss_2
    tf.scalar_summary("Discriminator_loss_real", discriminator_loss_real)
    tf.scalar_summary("Discriminator_loss_fake", discriminator_loss_fake)
    tf.scalar_summary("Discriminator_loss", discriminator_loss)
    tf.scalar_summary("Generator_loss", gen_loss)
    train_variables = tf.trainable_variables()
    generator_variables = [v for v in train_variables if v.name.startswith("generator")]
    # print(map(lambda x: x.op.name, generator_variables))
    discriminator_variables = [v for v in train_variables if v.name.startswith("discriminator")]
    # print(map(lambda x: x.op.name, discriminator_variables))
    generator_train_op = train(gen_loss, generator_variables)
    discriminator_train_op = train(discriminator_loss, discriminator_variables)
    for v in train_variables:
        utils.add_to_regularization_and_summary(var=v)

    def visualize():
        count = 20
        z_feed = np.random.uniform(-1.0, 1.0, size=(count, FLAGS.z_dim)).astype(np.float32)
        # z_feed = np.tile(np.random.uniform(-1.0, 1.0, size=(1, FLAGS.z_dim)).astype(np.float32), (count, 1))
        # z_feed[:, 25] = sorted(10.0 * np.random.randn(count))
        image = sess.run(gen_images, feed_dict={z_vec: z_feed, train_phase: False})
        for iii in xrange(count):
            print(image.shape)
            utils.save_image(image[iii, :, :, :], IMAGE_SIZE, FLAGS.logs_dir, name=str(iii))
            print("Saving image" + str(iii))

    sess = tf.Session()
    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver()
    summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)
    sess.run(tf.initialize_all_variables())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
        visualize()
        return
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    try:
        for itr in xrange(MAX_ITERATIONS):
            batch_z = np.random.uniform(-1.0, 1.0, size=[FLAGS.batch_size, FLAGS.z_dim]).astype(np.float32)
            # feed_dict = {images: dataset_reader.next_batch(FLAGS.batch_size), z_vec: batch_z, train_phase: True}
            feed_dict = {z_vec: batch_z, train_phase: True}
            sess.run(discriminator_train_op, feed_dict=feed_dict)
            sess.run(generator_train_op, feed_dict=feed_dict)
            sess.run(generator_train_op, feed_dict=feed_dict)
            if itr % 10 == 0:
                g_loss_val, d_loss_val, summary_str = sess.run([gen_loss, discriminator_loss, summary_op],
                                                               feed_dict=feed_dict)
                print("Step: %d, generator loss: %g, discriminator_loss: %g" % (itr, g_loss_val, d_loss_val))
                summary_writer.add_summary(summary_str, itr)
            if itr % 500 == 0:
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=itr)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    except KeyboardInterrupt:
        print("Ending Training...")
    # ... the remainder of this example's code is omitted ...