This page collects typical usage examples of the Python method models.Discriminator. If you are wondering what models.Discriminator does, how to call it, or where to find real uses of it, the curated code samples below may help. You can also look further into usage examples from the enclosing module, models.
The following sections show five code examples of models.Discriminator, ordered by popularity by default.
Example 1: build_model
# Required module: import models [as alias]
# Or: from models import Discriminator [as alias]
def build_model(self):
    Gen = GeneratorTypes[self.gan_type]
    config = self.config
    self.gen = Gen(config.batch_size, config.gen_hidden_size, config.gen_z_dim)

    # Two discriminator copies share weights: D1 sees real data, D2 sees generated samples.
    with tf.variable_scope('Disc') as scope:
        self.D1 = Discriminator(self.data.X, config.disc_hidden_size)
        scope.reuse_variables()
        self.D2 = Discriminator(self.gen.X, config.disc_hidden_size)
        d_var = tf.contrib.framework.get_variables(scope)

    # Standard GAN losses from sigmoid cross-entropy against real/fake labels.
    d_loss_real = tf.reduce_mean(sxe(self.D1, 1))
    d_loss_fake = tf.reduce_mean(sxe(self.D2, 0))
    self.loss_d = d_loss_real + d_loss_fake
    self.loss_g = tf.reduce_mean(sxe(self.D2, 1))

    # Separate Adam optimizers for the discriminator and generator variables.
    optimizer = tf.train.AdamOptimizer
    g_optimizer = optimizer(self.config.lr_gen)
    d_optimizer = optimizer(self.config.lr_disc)
    self.opt_d = d_optimizer.minimize(self.loss_d, var_list=d_var)
    self.opt_g = g_optimizer.minimize(self.loss_g, var_list=self.gen.tr_var,
                                      global_step=self.gen.step)
    with tf.control_dependencies([self.inc_step]):
        self.train_op = tf.group(self.opt_d, self.opt_g)
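The helper sxe is not defined in this snippet. Below is a minimal sketch of what it plausibly does, assuming it wraps sigmoid cross-entropy against a constant real/fake label; the name and signature are assumptions, not confirmed by the original project:

import tensorflow as tf  # TF 1.x API, consistent with tf.variable_scope / tf.train above

def sxe(logits, label):
    # Compare discriminator logits against a constant target: 1 for real, 0 for fake.
    targets = tf.ones_like(logits) * float(label)
    return tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets)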
Example 2: build_models
# Required module: import models [as alias]
# Or: from models import Discriminator [as alias]
def build_models(hps, current_res_w, use_ema_sampling=False,
                 num_classes=None, label_list=None):  # todo: fix num_classes
    mapping_network = MappingNetwork() if hps.do_mapping_network else None
    gen_model = Generator(current_res_w, hps.res_w, use_pixel_norm=hps.do_pixel_norm,
                          start_shape=(hps.start_res_h, hps.start_res_w),
                          equalized_lr=hps.do_equalized_lr,
                          traditional_input=hps.do_traditional_input,
                          add_noise=hps.do_add_noise,
                          resize_method=hps.resize_method,
                          use_mapping_network=hps.do_mapping_network,
                          cond_layers=hps.cond_layers,
                          map_cond=hps.map_cond)
    dis_model = Discriminator(current_res_w, equalized_lr=hps.do_equalized_lr,
                              do_minibatch_stddev=hps.do_minibatch_stddev,
                              end_shape=(hps.start_res_h, hps.start_res_w),
                              resize_method=hps.resize_method, cgan_nclasses=num_classes,
                              label_list=label_list)
    if use_ema_sampling:
        sampling_model = Generator(current_res_w, hps.res_w, use_pixel_norm=hps.do_pixel_norm,
                                   start_shape=(hps.start_res_h, hps.start_res_w),
                                   equalized_lr=hps.do_equalized_lr,
                                   traditional_input=hps.do_traditional_input,
                                   add_noise=hps.do_add_noise,
                                   resize_method=hps.resize_method,
                                   use_mapping_network=hps.do_mapping_network,
                                   cond_layers=hps.cond_layers,
                                   map_cond=hps.map_cond)
        return gen_model, mapping_network, dis_model, sampling_model
    else:
        return gen_model, mapping_network, dis_model
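A hypothetical call site for build_models, assuming hps is a simple namespace carrying the hps.* hyperparameters read above; every field name follows the snippet, but every value here is illustrative rather than taken from the original project:

from argparse import Namespace

hps = Namespace(res_w=256, start_res_h=4, start_res_w=4,
                do_pixel_norm=True, do_equalized_lr=True,
                do_traditional_input=False, do_add_noise=True,
                resize_method='bilinear', do_mapping_network=True,
                cond_layers=[], map_cond=False, do_minibatch_stddev=True)

# Progressive training typically starts at the smallest resolution.
gen_model, mapping_network, dis_model = build_models(hps, current_res_w=4)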
Example 3: __init__
# Required module: import models [as alias]
# Or: from models import Discriminator [as alias]
def __init__(self, z_dim=32, h_dim=128, filter_num=64, channel_num=3,
             lr=1e-3, cuda=False):
    # Are we cuda'ing it
    self.cuda = cuda

    # Encoder, decoder, discriminator
    self.encoder = self.cudafy_(
        Encoder(z_dim, h_dim=h_dim, filter_num=filter_num,
                channel_num=channel_num)
    )
    self.encoder.apply(weight_init)

    self.decoder = self.cudafy_(
        Decoder(z_dim, filter_num=filter_num, channel_num=channel_num)
    )
    self.decoder.apply(weight_init)

    self.discrim = self.cudafy_(Discriminator(z_dim))
    self.discrim.apply(weight_init)

    # Optimizers
    generator_params = list(self.encoder.parameters()) + \
        list(self.decoder.parameters())
    self.optim_enc = optim.Adam(self.encoder.parameters(), lr=lr)
    self.optim_dec = optim.Adam(self.decoder.parameters(), lr=lr)
    self.optim_dis = optim.Adam(self.discrim.parameters(), lr=lr)
    self.optim_gen = optim.Adam(generator_params, lr=lr)

    self.start_epoch = 0
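Here Discriminator(z_dim) is applied to latent codes, which suggests an adversarial-autoencoder setup. Below is one plausible shape for such a class, written as a sketch of an assumed interface rather than the repository's actual definition:

import torch.nn as nn

class Discriminator(nn.Module):
    # Scores latent codes: output near 1 means "looks drawn from the prior".
    def __init__(self, z_dim, h_dim=128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(z_dim, h_dim),
            nn.ReLU(inplace=True),
            nn.Linear(h_dim, h_dim),
            nn.ReLU(inplace=True),
            nn.Linear(h_dim, 1),
            nn.Sigmoid(),
        )

    def forward(self, z):
        return self.net(z)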
Example 4: build_model
# Required module: import models [as alias]
# Or: from models import Discriminator [as alias]
def build_model(self):
    # Define a generator and a discriminator
    from models import Discriminator
    from models import AdaInGEN as Generator
    self.count = 0

    self.D = Discriminator(
        self.config, debug=self.config.mode == 'train' and self.verbose)
    self.D = to_cuda(self.D)

    self.G = Generator(
        self.config, debug=self.config.mode == 'train' and self.verbose)
    self.G = to_cuda(self.G)

    if self.config.mode == 'train':
        self.d_optimizer = self.set_optimizer(
            self.D, self.config.d_lr, self.config.beta1, self.config.beta2)
        self.g_optimizer = self.set_optimizer(
            self.G, self.config.g_lr, self.config.beta1, self.config.beta2)

    # Start with trained model
    if self.config.pretrained_model and self.verbose:
        self.load_pretrained_model()

    if self.config.mode == 'train' and self.verbose:
        self.print_network(self.D, 'Discriminator')
        self.print_network(self.G, 'Generator')
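The to_cuda helper is not defined in this excerpt. A minimal sketch under the assumption that it simply moves a module to the GPU when one is available (the real project may instead consult its config object):

import torch

def to_cuda(module):
    # Keep the module on CPU when no GPU is present, otherwise move it.
    return module.cuda() if torch.cuda.is_available() else module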
Example 5: __init__
# Required module: import models [as alias]
# Or: from models import Discriminator [as alias]
def __init__(self,
             device,
             model,
             model_num_labels,
             image_nc,
             box_min,
             box_max,
             model_path):
    output_nc = image_nc
    self.device = device
    self.model_num_labels = model_num_labels
    self.model = model
    self.input_nc = image_nc
    self.output_nc = output_nc
    self.box_min = box_min
    self.box_max = box_max
    self.model_path = model_path
    self.gen_input_nc = image_nc

    self.netG = models.Generator(self.gen_input_nc, image_nc).to(device)
    self.netDisc = models.Discriminator(image_nc).to(device)

    # initialize all weights
    self.netG.apply(weights_init)
    self.netDisc.apply(weights_init)

    # initialize optimizers
    self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=0.001)
    self.optimizer_D = torch.optim.Adam(self.netDisc.parameters(), lr=0.001)
Author: PacktPublishing | Project: Hands-On-Generative-Adversarial-Networks-with-PyTorch-1.x | Lines of code: 33 | Source file: advGAN.py
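The weights_init callback passed to apply() is not shown in the excerpt. The common DCGAN-style initializer below is a reasonable guess, though whether advGAN.py uses exactly these constants is an assumption:

import torch.nn as nn

def weights_init(m):
    # Normal(0, 0.02) for conv layers; Normal(1, 0.02) weights and zero bias for batch norm.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)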