This article collects typical usage examples of the Python method tensorflow.contrib.data.shuffle_and_repeat. If you are wondering what data.shuffle_and_repeat does, or how to use it in your own code, the curated examples below should help. You can also explore further usage examples for the module the method lives in, tensorflow.contrib.data.
Three code examples of data.shuffle_and_repeat are shown below, sorted by popularity by default.
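Before diving into the full examples, here is a minimal self-contained sketch of the transform itself. It assumes TensorFlow 1.x (where tf.contrib still exists); the toy data and the buffer_size/count values are illustrative, not taken from the examples below:

import numpy as np
import tensorflow as tf
from tensorflow.contrib.data import shuffle_and_repeat

# Toy dataset: six scalar elements.
data = np.arange(6)
dataset = tf.data.Dataset.from_tensor_slices(data)

# shuffle_and_repeat(buffer_size, count) fuses Dataset.shuffle(buffer_size)
# with Dataset.repeat(count); count=None would repeat indefinitely.
dataset = dataset.apply(shuffle_and_repeat(buffer_size=6, count=2))

iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()

with tf.Session() as sess:
    for _ in range(12):  # 6 elements x 2 epochs
        print(sess.run(next_element))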
Example 1: build_model
# Required import: from tensorflow.contrib import data [as alias]
# Or: from tensorflow.contrib.data import shuffle_and_repeat [as alias]
# (the code below also uses map_and_batch and prefetch_to_device from the same module, plus import tensorflow as tf)
def build_model(self):
    """ Graph Input """
    # images
    Image_Data_Class = ImageData(self.img_size, self.c_dim, self.custom_dataset)
    inputs = tf.data.Dataset.from_tensor_slices(self.data)

    gpu_device = '/gpu:0'
    inputs = inputs.\
        apply(shuffle_and_repeat(self.dataset_num)).\
        apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).\
        apply(prefetch_to_device(gpu_device, self.batch_size))

    inputs_iterator = inputs.make_one_shot_iterator()
    self.inputs = inputs_iterator.get_next()

    # noises
    self.z = tf.random_normal(shape=[self.batch_size, 1, 1, self.z_dim], name='random_z')

    """ Loss Function """
    # output of D for real images
    real_logits = self.discriminator(self.inputs)

    # output of D for fake images
    fake_images = self.generator(self.z)
    fake_logits = self.discriminator(fake_images, reuse=True)

    if 'wgan' in self.gan_type or self.gan_type == 'dragan':
        GP = self.gradient_penalty(real=self.inputs, fake=fake_images)
    else:
        GP = 0

    # get loss for discriminator
    self.d_loss = discriminator_loss(self.gan_type, real=real_logits, fake=fake_logits, moment=self.moment) + GP

    # get loss for generator
    self.g_loss = generator_loss(self.gan_type, fake=fake_logits, moment=self.moment)

    """ Training """
    # divide trainable variables into a group for D and a group for G
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'discriminator' in var.name]
    g_vars = [var for var in t_vars if 'generator' in var.name]

    # optimizers
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        self.d_optim = tf.train.AdamOptimizer(self.d_learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.d_loss, var_list=d_vars)
        self.g_optim = tf.train.AdamOptimizer(self.g_learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.g_loss, var_list=g_vars)

    """ Testing """
    # for test
    self.fake_images = self.generator(self.z, is_training=False, reuse=True)

    """ Summary """
    self.d_sum = tf.summary.scalar("d_loss", self.d_loss)
    self.g_sum = tf.summary.scalar("g_loss", self.g_loss)
##################################################################################
# Train
##################################################################################
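A note on the input pipeline in this example: passing self.dataset_num (the number of elements in the dataset) as the buffer size gives shuffle_and_repeat a uniform shuffle over the whole dataset, at the cost of buffering that many elements, and leaving count unset repeats the data indefinitely. map_and_batch then fuses per-image preprocessing with batching, and prefetch_to_device overlaps input preparation with computation on the GPU.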
Example 2: build_model
# Required import: from tensorflow.contrib import data [as alias]
# Or: from tensorflow.contrib.data import shuffle_and_repeat [as alias]
def build_model(self):
    """ Graph Input """
    # images
    if self.custom_dataset:
        Image_Data_Class = ImageData(self.img_size, self.c_dim)
        inputs = tf.data.Dataset.from_tensor_slices(self.data)

        gpu_device = '/gpu:0'
        inputs = inputs.\
            apply(shuffle_and_repeat(self.dataset_num)).\
            apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).\
            apply(prefetch_to_device(gpu_device, self.batch_size))

        inputs_iterator = inputs.make_one_shot_iterator()
        self.inputs = inputs_iterator.get_next()
    else:
        self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.img_size, self.img_size, self.c_dim], name='real_images')

    # noises
    self.z = tf.placeholder(tf.float32, [self.batch_size, 1, 1, self.z_dim], name='z')

    """ Loss Function """
    # output of D for real images
    real_logits = self.discriminator(self.inputs)

    # output of D for fake images
    fake_images = self.generator(self.z)
    fake_logits = self.discriminator(fake_images, reuse=True)

    if 'wgan' in self.gan_type or self.gan_type == 'dragan':
        GP = self.gradient_penalty(real=self.inputs, fake=fake_images)
    else:
        GP = 0

    # get loss for discriminator
    self.d_loss = discriminator_loss(self.gan_type, real=real_logits, fake=fake_logits) + GP

    # get loss for generator
    self.g_loss = generator_loss(self.gan_type, fake=fake_logits)

    """ Training """
    # divide trainable variables into a group for D and a group for G
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'discriminator' in var.name]
    g_vars = [var for var in t_vars if 'generator' in var.name]

    # optimizers
    self.d_optim = tf.train.AdamOptimizer(self.d_learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.d_loss, var_list=d_vars)
    self.g_optim = tf.train.AdamOptimizer(self.g_learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.g_loss, var_list=g_vars)

    """ Testing """
    # for test
    self.fake_images = self.generator(self.z, is_training=False, reuse=True)

    """ Summary """
    self.d_sum = tf.summary.scalar("d_loss", self.d_loss)
    self.g_sum = tf.summary.scalar("g_loss", self.g_loss)
##################################################################################
# Train
##################################################################################
Example 3: build_model
# Required import: from tensorflow.contrib import data [as alias]
# Or: from tensorflow.contrib.data import shuffle_and_repeat [as alias]
def build_model(self):
    """ Graph Input """
    # images
    if self.custom_dataset:
        Image_Data_Class = ImageData(self.img_size, self.c_dim)
        inputs = tf.data.Dataset.from_tensor_slices(self.data)

        gpu_device = '/gpu:0'
        inputs = inputs.\
            apply(shuffle_and_repeat(self.dataset_num)).\
            apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).\
            apply(prefetch_to_device(gpu_device, self.batch_size))

        inputs_iterator = inputs.make_one_shot_iterator()
        self.inputs = inputs_iterator.get_next()
    else:
        self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.img_size, self.img_size, self.c_dim], name='real_images')

    # noises
    self.z = tf.placeholder(tf.float32, [self.batch_size, 1, 1, self.z_dim], name='z')

    """ Loss Function """
    # output of D for real images
    real_logits = self.discriminator(self.inputs)

    # output of D for fake images
    fake_images = self.generator(self.z)
    fake_logits = self.discriminator(fake_images, reuse=True)

    if 'gp' in self.gan_type or 'lp' in self.gan_type or 'dragan' in self.gan_type:
        GP = self.gradient_penalty(real=self.inputs, fake=fake_images)
    else:
        GP = 0

    # get loss for discriminator
    self.d_loss = discriminator_loss(self.Ra, self.gan_type, real=real_logits, fake=fake_logits) + GP

    # get loss for generator
    self.g_loss = generator_loss(self.Ra, self.gan_type, real=real_logits, fake=fake_logits)

    """ Training """
    # divide trainable variables into a group for D and a group for G
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'discriminator' in var.name]
    g_vars = [var for var in t_vars if 'generator' in var.name]

    # optimizers
    self.d_optim = tf.train.AdamOptimizer(self.d_learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.d_loss, var_list=d_vars)
    self.g_optim = tf.train.AdamOptimizer(self.g_learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.g_loss, var_list=g_vars)

    """ Testing """
    # for test
    self.fake_images = self.generator(self.z, is_training=False, reuse=True)

    """ Summary """
    self.d_sum = tf.summary.scalar("d_loss", self.d_loss)
    self.g_sum = tf.summary.scalar("g_loss", self.g_loss)
##################################################################################
# Train
##################################################################################
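As a closing note: tf.contrib was removed in TensorFlow 2.x, where shuffle_and_repeat is expressed by chaining the corresponding Dataset methods directly. Below is a rough TF 2.x sketch of the same pipeline shape; the toy data and the identity image_processing function stand in for the attributes used in the examples above:

import numpy as np
import tensorflow as tf

data = np.random.rand(100, 4).astype(np.float32)  # toy stand-in for self.data
batch_size = 16

def image_processing(x):
    return x  # placeholder for the real decode/resize/normalize step

# tf.data.AUTOTUNE requires TF 2.4+; use tf.data.experimental.AUTOTUNE before that.
dataset = (tf.data.Dataset.from_tensor_slices(data)
           .shuffle(buffer_size=len(data))          # shuffle_and_repeat, part 1
           .repeat()                                # shuffle_and_repeat, part 2
           .map(image_processing, num_parallel_calls=tf.data.AUTOTUNE)
           .batch(batch_size, drop_remainder=True)  # together ~ map_and_batch
           .prefetch(tf.data.AUTOTUNE))             # ~ prefetch_to_device

for batch in dataset.take(2):
    print(batch.shape)  # (16, 4)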