This article collects typical usage examples of the Python method tflib.params_with_name. If you are unsure what tflib.params_with_name does, how to call it, or what working usage looks like, the curated examples below may help. You can also explore the tflib module itself for further usage examples.
The following presents 4 code examples of tflib.params_with_name, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
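Before the examples, here is a minimal sketch of the pattern they all rely on: layers built through tflib register their weights under a name prefix, and params_with_name collects every registered parameter whose name contains the given string, ready to be passed as an optimizer var_list. The sketch assumes the tflib layout from the public WGAN-GP reference implementation; the layer names, shapes, and toy loss are illustrative only.

import tensorflow as tf
import tflib as lib
import tflib.ops.linear

# Toy "discriminator" made of two tflib layers; both register their weights
# under the 'Discriminator.' prefix passed as the first argument.
inputs = tf.random_normal([64, 784])
hidden = tf.nn.relu(lib.ops.linear.Linear('Discriminator.Hidden', 784, 128, inputs))
scores = lib.ops.linear.Linear('Discriminator.Output', 128, 1, hidden)

disc_cost = tf.reduce_mean(scores)  # placeholder loss, just for illustration

# Collect every parameter whose name contains 'Discriminator' and hand the
# list straight to the optimizer as var_list.
disc_params = lib.params_with_name('Discriminator')
disc_train_op = tf.train.AdamOptimizer(1e-4).minimize(disc_cost, var_list=disc_params)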
Example 1: _getOptimizer
# Required import: import tflib [aliased as lib in the code below]
# Or: from tflib import params_with_name [under an alias]
def _getOptimizer(self, wgan_gp, gen_cost, disc_cost, G_var, D_var):
    clip_disc_weights = None
    if wgan_gp.MODE == 'wgan':
        # Original WGAN: RMSProp plus weight clipping on the critic.
        gen_train_op = tf.train.RMSPropOptimizer(learning_rate=self.g_lr).minimize(
            gen_cost, var_list=G_var, colocate_gradients_with_ops=True)
        disc_train_op = tf.train.RMSPropOptimizer(learning_rate=self.d_lr).minimize(
            disc_cost, var_list=D_var, colocate_gradients_with_ops=True)
        # Clip every discriminator parameter into [-0.01, 0.01].
        clip_ops = []
        for var in lib.params_with_name('Discriminator'):
            clip_bounds = [-.01, .01]
            clip_ops.append(tf.assign(var, tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])))
        clip_disc_weights = tf.group(*clip_ops)
    elif wgan_gp.MODE == 'wgan-gp':
        # WGAN-GP: Adam with beta1=0.5, beta2=0.9; no weight clipping.
        gen_train_op = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=0.5, beta2=0.9).minimize(
            gen_cost, var_list=G_var, colocate_gradients_with_ops=True)
        disc_train_op = tf.train.AdamOptimizer(learning_rate=self.d_lr, beta1=0.5, beta2=0.9).minimize(
            disc_cost, var_list=D_var, colocate_gradients_with_ops=True)
    elif wgan_gp.MODE == 'dcgan':
        gen_train_op = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=0.5).minimize(
            gen_cost, var_list=G_var, colocate_gradients_with_ops=True)
        disc_train_op = tf.train.AdamOptimizer(learning_rate=self.d_lr, beta1=0.5).minimize(
            disc_cost, var_list=D_var, colocate_gradients_with_ops=True)
    elif wgan_gp.MODE == 'lsgan':
        gen_train_op = tf.train.RMSPropOptimizer(learning_rate=self.g_lr).minimize(
            gen_cost, var_list=G_var, colocate_gradients_with_ops=True)
        disc_train_op = tf.train.RMSPropOptimizer(learning_rate=self.d_lr).minimize(
            disc_cost, var_list=D_var, colocate_gradients_with_ops=True)
    else:
        raise Exception('Unsupported MODE: %s' % wgan_gp.MODE)
    return gen_train_op, disc_train_op, clip_disc_weights
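Note that in the 'wgan' branch the returned clip_disc_weights op still has to be run explicitly after each discriminator step. A hedged sketch of such a driver loop follows; ITERS, CRITIC_ITERS, session, and feed_dict are placeholder names, not defined in the example above.

# Hypothetical driver loop for the ops returned by _getOptimizer.
for iteration in range(ITERS):
    session.run(gen_train_op, feed_dict=feed_dict)        # one generator step
    for _ in range(CRITIC_ITERS):                         # several critic steps
        session.run(disc_train_op, feed_dict=feed_dict)
        if clip_disc_weights is not None:                 # only set in 'wgan' mode
            session.run(clip_disc_weights)                # re-clip critic weights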
Example 2: _getOptimizer
# Required import: import tflib [aliased as lib in the code below]
# Or: from tflib import params_with_name [under an alias]
def _getOptimizer(self, wgan_gp, gen_cost1, gen_cost2, disc_cost, G_var1, G_var2, D_var):
    # Same structure as Example 1, but with two generator losses/variable lists
    # and per-mode hard-coded learning rates.
    clip_disc_weights = None
    if wgan_gp.MODE == 'wgan':
        gen_train_op1 = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(
            gen_cost1, var_list=G_var1, colocate_gradients_with_ops=True)
        gen_train_op2 = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(
            gen_cost2, var_list=G_var2, colocate_gradients_with_ops=True)
        disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(
            disc_cost, var_list=D_var, colocate_gradients_with_ops=True)
        # Weight clipping for the original WGAN critic.
        clip_ops = []
        for var in lib.params_with_name('Discriminator'):
            clip_bounds = [-.01, .01]
            clip_ops.append(tf.assign(var, tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])))
        clip_disc_weights = tf.group(*clip_ops)
    elif wgan_gp.MODE == 'wgan-gp':
        gen_train_op1 = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(
            gen_cost1, var_list=G_var1, colocate_gradients_with_ops=True)
        gen_train_op2 = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(
            gen_cost2, var_list=G_var2, colocate_gradients_with_ops=True)
        disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(
            disc_cost, var_list=D_var, colocate_gradients_with_ops=True)
    elif wgan_gp.MODE == 'dcgan':
        gen_train_op1 = tf.train.AdamOptimizer(learning_rate=2e-5, beta1=0.5).minimize(
            gen_cost1, var_list=G_var1, colocate_gradients_with_ops=True)
        gen_train_op2 = tf.train.AdamOptimizer(learning_rate=2e-5, beta1=0.5).minimize(
            gen_cost2, var_list=G_var2, colocate_gradients_with_ops=True)
        disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-5, beta1=0.5).minimize(
            disc_cost, var_list=D_var, colocate_gradients_with_ops=True)
    elif wgan_gp.MODE == 'lsgan':
        gen_train_op1 = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(
            gen_cost1, var_list=G_var1, colocate_gradients_with_ops=True)
        gen_train_op2 = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(
            gen_cost2, var_list=G_var2, colocate_gradients_with_ops=True)
        disc_train_op = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(
            disc_cost, var_list=D_var, colocate_gradients_with_ops=True)
    else:
        raise Exception('Unsupported MODE: %s' % wgan_gp.MODE)
    return gen_train_op1, gen_train_op2, disc_train_op, clip_disc_weights
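In other respects this variant behaves like Example 1: the 'wgan' branch again builds its weight-clipping ops from lib.params_with_name('Discriminator'), and the caller is expected to run clip_disc_weights after every discriminator step (see the sketch after Example 1). The variable lists it optimizes over are produced in Example 3 below, where D_var comes from lib.params_with_name('Discriminator.') while the generator lists are returned directly by the generator construction function.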
Example 3: build_model
# Required import: import tflib [aliased as lib in the code below]
# Or: from tflib import params_with_name [under an alias]
def build_model(self):
    # Two-stage generator: G1 is the coarse result, DiffMap is a learned residual.
    G1, DiffMap, self.G_var1, self.G_var2 = GeneratorCNN_Pose_UAEAfterResidual_UAEnoFCAfter2Noise(
        self.x, self.pose_target,
        self.channel, self.z_num, self.repeat_num, self.conv_hidden_num, self.data_format,
        activation_fn=tf.nn.relu, noise_dim=0, reuse=False)
    G2 = G1 + DiffMap
    self.G1 = denorm_img(G1, self.data_format)
    self.G2 = denorm_img(G2, self.data_format)
    self.G = self.G2
    self.DiffMap = denorm_img(DiffMap, self.data_format)

    self.wgan_gp = WGAN_GP(DATA_DIR='', MODE='dcgan', DIM=64, BATCH_SIZE=self.batch_size,
                           ITERS=200000, LAMBDA=10, G_OUTPUT_DIM=128*64*3)
    Dis = self._getDiscriminator(self.wgan_gp, arch=self.D_arch)

    # Despite the name, this stacks four batches: target, condition, G1, G2.
    triplet = tf.concat([self.x_target, self.x, G1, G2], 0)
    ## WGAN-GP code uses NCHW
    self.D_z = Dis(tf.transpose(triplet, [0, 3, 1, 2]), input_dim=3)
    # All discriminator weights share the 'Discriminator.' name prefix.
    self.D_var = lib.params_with_name('Discriminator.')
    # Split order matches the concat order above.
    D_z_pos_x_target, D_z_neg_x, D_z_neg_g1, D_z_neg_g2 = tf.split(self.D_z, 4)

    self.PoseMaskLoss1 = tf.reduce_mean(tf.abs(G1 - self.x_target) * (self.mask_target))
    self.g_loss1 = tf.reduce_mean(tf.abs(G1 - self.x_target)) + self.PoseMaskLoss1

    self.g_loss2, self.d_loss, self.g2_g1_loss = self._gan_loss(self.wgan_gp, Dis,
        D_z_pos_x_target, D_z_neg_x, D_z_neg_g1, D_z_neg_g2, arch=self.D_arch)
    self.PoseMaskLoss2 = tf.reduce_mean(tf.abs(G2 - self.x_target) * (self.mask_target))
    self.L1Loss2 = tf.reduce_mean(tf.abs(G2 - self.x_target)) + self.PoseMaskLoss2
    self.g_loss2 += self.L1Loss2 * 10

    self.g_optim1, self.g_optim2, self.d_optim, self.clip_disc_weights = self._getOptimizer(
        self.wgan_gp, self.g_loss1, self.g_loss2, self.d_loss, self.G_var1, self.G_var2, self.D_var)

    self.summary_op = tf.summary.merge([
        tf.summary.image("G1", self.G1),
        tf.summary.image("G2", self.G2),
        tf.summary.image("DiffMap", self.DiffMap),
        tf.summary.scalar("loss/PoseMaskLoss1", self.PoseMaskLoss1),
        tf.summary.scalar("loss/PoseMaskLoss2", self.PoseMaskLoss2),
        tf.summary.scalar("loss/L1Loss2", self.L1Loss2),
        tf.summary.scalar("loss/g_loss1", self.g_loss1),
        tf.summary.scalar("loss/g_loss2", self.g_loss2),
        tf.summary.scalar("loss/d_loss", self.d_loss),
        tf.summary.scalar("loss/g2_g1_loss", self.g2_g1_loss),
        tf.summary.scalar("misc/d_lr", self.d_lr),
        tf.summary.scalar("misc/g_lr", self.g_lr),
    ])
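The merged summary_op at the end is evaluated like any other tensor. A hedged sketch of how it would typically be written out during training follows; log_dir, sess, feed_dict, and step are placeholders, not taken from the example.

# Hypothetical snippet: periodically evaluate and persist the merged summaries.
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
summary_str = sess.run(self.summary_op, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, global_step=step)
summary_writer.flush()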
Example 4: build_model
# Required import: import tflib [aliased as lib in the code below]
# Or: from tflib import params_with_name [under an alias]
def build_model(self):
    self._define_input()
    with tf.variable_scope("Encoder") as vs:
        pb_list = tf.split(self.part_bbox, self.part_num, axis=1)
        pv_list = tf.split(self.part_vis, self.part_num, axis=1)
        ## Part 1-3 (totally 3)
        # self.embs, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROI(self.x, self.part_bbox, len(indices), 64,
        #     self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, reuse=False)
        ## Part 1-7 (totally 7)
        indices = range(7)
        select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
        self.embs, _, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROI(self.x, select_part_bbox, len(indices), 32,
            self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
        # self.embs, _, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROI2(self.x, select_part_bbox, len(indices), 32,
        #     self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=0.9, reuse=False)
        ## Part 1,4-8 (totally 6)
        # indices = [1] + range(4,9)
        # select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
        # self.embs, _, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROI(self.x, select_part_bbox, len(indices), 32,
        #     self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)
        ## Part 1,8-16 (totally 10)
        # indices = [0] + range(7,16)
        # select_part_bbox = tf.concat([pb_list[i] for i in indices], axis=1)
        # select_part_vis = tf.cast(tf.concat([pv_list[i] for i in indices], axis=1), tf.float32)
        # self.embs, _, self.Encoder_var = GeneratorCNN_ID_Encoder_BodyROIVis(self.x, select_part_bbox, select_part_vis, len(indices), 32,
        #     self.repeat_num, self.conv_hidden_num, self.data_format, activation_fn=tf.nn.relu, keep_part_prob=1.0, reuse=False)

    # Tile the identity embedding over the spatial grid so the generator can consume it.
    self.embs_rep = tf.tile(tf.expand_dims(self.embs, -1), [1, 1, self.img_H*self.img_W])
    self.embs_rep = tf.reshape(self.embs_rep, [self.batch_size, -1, self.img_H, self.img_W])
    self.embs_rep = nchw_to_nhwc(self.embs_rep)

    with tf.variable_scope("ID_AE") as vs:
        G, _, self.G_var = self.Generator_fn(
            self.embs_rep, self.pose,
            self.channel, self.z_num, self.repeat_num, self.conv_hidden_num, self.data_format,
            activation_fn=tf.nn.relu, reuse=False)

    # The encoder is trained jointly with the generator.
    self.G_var += self.Encoder_var

    self.G = denorm_img(G, self.data_format)

    pair = tf.concat([self.x, G], 0)
    self.D_z = self.Discriminator_fn(tf.transpose(pair, [0, 3, 1, 2]), input_dim=3)  # WGAN-GP code uses NCHW
    # Discriminator weights are collected by their shared name prefix.
    self.D_var = lib.params_with_name('Discriminator.')
    D_z_pos, D_z_neg = tf.split(self.D_z, 2)

    self.g_loss, self.d_loss = self._gan_loss(self.wgan_gp, self.Discriminator_fn, D_z_pos, D_z_neg, arch=self.D_arch)
    self.PoseMaskLoss = tf.reduce_mean(tf.abs(G - self.x) * (self.mask_r6))
    self.L1Loss = tf.reduce_mean(tf.abs(G - self.x))
    self.g_loss_only = self.g_loss

    self._define_loss_optim()
    self.summary_op = tf.summary.merge([
        tf.summary.image("G", self.G),
        tf.summary.scalar("loss/PoseMaskLoss", self.PoseMaskLoss),
        tf.summary.scalar("loss/L1Loss", self.L1Loss),
        tf.summary.scalar("loss/g_loss", self.g_loss),
        tf.summary.scalar("loss/g_loss_only", self.g_loss_only),
        tf.summary.scalar("loss/d_loss", self.d_loss),
        tf.summary.scalar("misc/d_lr", self.d_lr),
        tf.summary.scalar("misc/g_lr", self.g_lr),
    ])
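As a closing note, a name-filtered parameter list like self.D_var is also convenient for checkpointing only part of a model. The sketch below is illustrative only; sess, the checkpoint path, and step are assumptions, not part of the example.

# Hypothetical snippet: save/restore only the discriminator variables
# gathered by params_with_name.
disc_saver = tf.train.Saver(var_list=lib.params_with_name('Discriminator.'))
save_path = disc_saver.save(sess, 'checkpoints/discriminator.ckpt', global_step=step)
# ... later, e.g. to warm-start a new run ...
disc_saver.restore(sess, save_path)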