This page collects typical usage examples of the Python method networks.discriminator: what the method does, how it is called, and how it appears in real code. You can also explore other members of the networks module it belongs to.
The 12 code examples below are drawn from open-source projects and are ordered by popularity by default.
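The snippets are TF 1.x TF-GAN code and omit their surrounding imports. A minimal setup they assume might look like the sketch below; the exact way `tfgan` is imported depends on the TensorFlow version, `networks` is the project-local module these examples come from, and `FLAGS` (used in Example 1) refers to command-line flags defined elsewhere in the same script.

# Assumed common setup for the snippets below (TF 1.x style).
import numpy as np
import tensorflow as tf

# TF-GAN lived in tf.contrib in TF 1.x; newer code imports the standalone
# `tensorflow_gan` package instead.
tfgan = tf.contrib.gan

import networks  # project-local module defining generator/discriminator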
Example 1: _define_model
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def _define_model(images_x, images_y):
  """Defines a CycleGAN model that maps between images_x and images_y.

  Args:
    images_x: A 4D float `Tensor` of NHWC format. Images in set X.
    images_y: A 4D float `Tensor` of NHWC format. Images in set Y.

  Returns:
    A `CycleGANModel` namedtuple.
  """
  cyclegan_model = tfgan.cyclegan_model(
      generator_fn=networks.generator,
      discriminator_fn=networks.discriminator,
      data_x=images_x,
      data_y=images_y)

  # Add summaries for generated images.
  tfgan.eval.add_image_comparison_summaries(
      cyclegan_model, num_comparisons=3, display_diffs=False)
  tfgan.eval.add_gan_model_image_summaries(
      cyclegan_model, grid_size=int(np.sqrt(FLAGS.batch_size)))

  return cyclegan_model
Example 2: _get_optimizer
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def _get_optimizer(gen_lr, dis_lr):
  """Returns generator optimizer and discriminator optimizer.

  Args:
    gen_lr: A scalar float `Tensor` or a Python number. The Generator
      learning rate.
    dis_lr: A scalar float `Tensor` or a Python number. The Discriminator
      learning rate.

  Returns:
    A tuple of generator optimizer and discriminator optimizer.
  """
  # beta1 follows
  # https://github.com/junyanz/CycleGAN/blob/master/options.lua
  gen_opt = tf.train.AdamOptimizer(gen_lr, beta1=0.5, use_locking=True)
  dis_opt = tf.train.AdamOptimizer(dis_lr, beta1=0.5, use_locking=True)
  return gen_opt, dis_opt
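A minimal sketch of how these optimizers could be wired into training, assuming a `cyclegan_model` and `cyclegan_loss` have already been built (for example as in Examples 1 and 3); the learning-rate values are illustrative only:

gen_opt, dis_opt = _get_optimizer(gen_lr=0.0002, dis_lr=0.0001)
train_ops = tfgan.gan_train_ops(
    cyclegan_model,
    cyclegan_loss,
    generator_optimizer=gen_opt,
    discriminator_optimizer=dis_opt)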
Example 3: _define_model
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def _define_model(images_x, images_y):
  """Defines a CycleGAN model that maps between images_x and images_y.

  Args:
    images_x: A 4D float `Tensor` of NHWC format. Images in set X.
    images_y: A 4D float `Tensor` of NHWC format. Images in set Y.

  Returns:
    A `CycleGANModel` namedtuple.
  """
  cyclegan_model = tfgan.cyclegan_model(
      generator_fn=networks.generator,
      discriminator_fn=networks.discriminator,
      data_x=images_x,
      data_y=images_y)

  # Add summaries for generated images.
  tfgan.eval.add_cyclegan_image_summaries(cyclegan_model)

  return cyclegan_model
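For context, a hedged sketch of calling `_define_model` and attaching a CycleGAN loss; the random image batches stand in for a real input pipeline, and the cycle-consistency weight shown is the common default rather than anything prescribed by this code:

# Stand-in image batches; a real pipeline would supply these tensors.
images_x = tf.random_uniform([4, 128, 128, 3], minval=-1.0, maxval=1.0)
images_y = tf.random_uniform([4, 128, 128, 3], minval=-1.0, maxval=1.0)
cyclegan_model = _define_model(images_x, images_y)
# Least-squares GAN losses plus a weighted cycle-consistency term.
cyclegan_loss = tfgan.cyclegan_loss(
    cyclegan_model, cycle_consistency_loss_weight=10.0)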
Example 4: define_train_ops
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def define_train_ops(gan_model, gan_loss, **kwargs):
  """Defines progressive GAN train ops.

  Args:
    gan_model: A `GANModel` namedtuple.
    gan_loss: A `GANLoss` namedtuple.
    **kwargs: A dictionary of
        'adam_beta1': A float of Adam optimizer beta1.
        'adam_beta2': A float of Adam optimizer beta2.
        'generator_learning_rate': A float of generator learning rate.
        'discriminator_learning_rate': A float of discriminator learning rate.

  Returns:
    A tuple of a `GANTrainOps` namedtuple and a list of variables tracking the
    state of the optimizers.
  """
  with tf.variable_scope('progressive_gan_train_ops') as var_scope:
    beta1, beta2 = kwargs['adam_beta1'], kwargs['adam_beta2']
    gen_opt = tf.train.AdamOptimizer(kwargs['generator_learning_rate'], beta1,
                                     beta2)
    dis_opt = tf.train.AdamOptimizer(kwargs['discriminator_learning_rate'],
                                     beta1, beta2)
    gan_train_ops = tfgan.gan_train_ops(gan_model, gan_loss, gen_opt, dis_opt)
  return gan_train_ops, tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope.name)
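A call sketch, assuming `gan_model` and `gan_loss` were constructed elsewhere (e.g. with `tfgan.gan_model` and `tfgan.gan_loss`); the Adam hyperparameters shown are illustrative, not values mandated by this function:

train_ops, optimizer_state_vars = define_train_ops(
    gan_model,
    gan_loss,
    adam_beta1=0.0,
    adam_beta2=0.99,
    generator_learning_rate=0.001,
    discriminator_learning_rate=0.001)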
Example 5: test_discriminator
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def test_discriminator(self):
  batch_size = 5
  image = tf.random_uniform([batch_size, 32, 32, 3], -1, 1)
  dis_output = networks.discriminator(image, None)
  with self.test_session(use_gpu=True) as sess:
    sess.run(tf.global_variables_initializer())
    dis_output_np = dis_output.eval()

  self.assertAllEqual([batch_size, 1], dis_output_np.shape)
Example 6: test_discriminator_run
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def test_discriminator_run(self):
  img_batch = tf.zeros([3, 70, 70, 3])
  disc_output = networks.discriminator(img_batch)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(disc_output)
Example 7: test_discriminator_graph
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def test_discriminator_graph(self):
  # Check graph construction for a number of image size/depths and batch
  # sizes.
  for batch_size, patch_size in zip([3, 6], [70, 128]):
    tf.reset_default_graph()
    img = tf.ones([batch_size, patch_size, patch_size, 3])
    disc_output = networks.discriminator(img)
    self.assertEqual(2, disc_output.shape.ndims)
    self.assertEqual(batch_size, disc_output.shape[0])
Example 8: test_discriminator_invalid_input
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def test_discriminator_invalid_input(self):
  wrong_dim_input = tf.zeros([5, 32, 32])
  with self.assertRaisesRegexp(ValueError, 'Shape must be rank 4'):
    networks.discriminator(wrong_dim_input)

  not_fully_defined = tf.placeholder(tf.float32, [3, None, 32, 3])
  with self.assertRaisesRegexp(ValueError, 'Shape .* is not fully defined'):
    networks.compression_model(not_fully_defined)
Example 9: _optimizer
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def _optimizer(gen_lr, dis_lr):
  # First is generator optimizer, second is discriminator.
  adam_kwargs = {
      'epsilon': 1e-8,
      'beta1': 0.5,
  }
  return (tf.train.AdamOptimizer(gen_lr, **adam_kwargs),
          tf.train.AdamOptimizer(dis_lr, **adam_kwargs))
Example 10: _get_gan_model
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def _get_gan_model(generator_inputs, generated_data, real_data,
                   generator_scope):
  """Manually construct and return a GANModel tuple."""
  generator_vars = tf.contrib.framework.get_trainable_variables(
      generator_scope)

  discriminator_fn = networks.discriminator
  with tf.variable_scope('discriminator') as dis_scope:
    discriminator_gen_outputs = discriminator_fn(generated_data)
  with tf.variable_scope(dis_scope, reuse=True):
    discriminator_real_outputs = discriminator_fn(real_data)
  discriminator_vars = tf.contrib.framework.get_trainable_variables(
      dis_scope)

  # Manually construct GANModel tuple.
  gan_model = tfgan.GANModel(
      generator_inputs=generator_inputs,
      generated_data=generated_data,
      generator_variables=generator_vars,
      generator_scope=generator_scope,
      generator_fn=None,  # not necessary
      real_data=real_data,
      discriminator_real_outputs=discriminator_real_outputs,
      discriminator_gen_outputs=discriminator_gen_outputs,
      discriminator_variables=discriminator_vars,
      discriminator_scope=dis_scope,
      discriminator_fn=discriminator_fn)

  return gan_model
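A usage sketch: the generator is run under its own variable scope first so that scope can be passed in. The input shapes and the `networks.generator` signature below are assumptions for illustration only:

# Hypothetical shapes; adjust to whatever networks.generator expects.
generator_inputs = tf.random_normal([16, 64])
real_data = tf.random_uniform([16, 32, 32, 3], -1, 1)
with tf.variable_scope('generator') as gen_scope:
  generated_data = networks.generator(generator_inputs)
gan_model = _get_gan_model(
    generator_inputs, generated_data, real_data, generator_scope=gen_scope)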
Example 11: test_discriminator_graph
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def test_discriminator_graph(self):
  # Check graph construction for a number of image size/depths and batch
  # sizes.
  for batch_size, patch_size in zip([3, 6], [70, 128]):
    tf.reset_default_graph()
    img = tf.ones([batch_size, patch_size, patch_size, 3])
    disc_output = networks.discriminator(img)
    self.assertEqual(2, disc_output.shape.ndims)
    self.assertEqual(batch_size, disc_output.shape.as_list()[0])
Example 12: _lr
# Required import: import networks [as alias]
# Or: from networks import discriminator [as alias]
def _lr(gen_lr_base, dis_lr_base):
  """Return the generator and discriminator learning rates."""
  gen_lr = tf.train.exponential_decay(
      learning_rate=gen_lr_base,
      global_step=tf.train.get_or_create_global_step(),
      decay_steps=100000,
      decay_rate=0.8,
      staircase=True)
  dis_lr = dis_lr_base
  return gen_lr, dis_lr
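If this helper and the `_optimizer` helper from Example 9 come from the same training script, they compose directly: the (possibly decayed) learning-rate tensors feed straight into the Adam optimizers. A sketch, with illustrative base rates:

gen_lr, dis_lr = _lr(gen_lr_base=0.0001, dis_lr_base=0.0001)
gen_opt, dis_opt = _optimizer(gen_lr, dis_lr)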