This article collects typical usage examples of the Python method tensorflow.keras.backend.random_normal. If you are struggling with questions like how exactly to use backend.random_normal in Python, or what it does, the curated code examples below may help. You can also explore other usage examples of the tensorflow.keras.backend module.
The sections below present 9 code examples of backend.random_normal, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
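For reference, the backend method's signature is random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None); a minimal standalone call:

from tensorflow.keras import backend as K

# a (2, 3) tensor of samples drawn from N(0, 1)
x = K.random_normal(shape=(2, 3), mean=0.0, stddev=1.0)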
Example 1: _sample
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def _sample(self, args):
    """
    Sample from a normal distribution.

    args should be [mu, log_var], where log_var is the log of sigma squared.
    This is equivalent to
        K.random_normal(tf.shape(mu), mu, tf.exp(log_var / 2.0))
    """
    mu, log_var = args
    # sample epsilon from N(0, 1); tf.random_normal was renamed to
    # tf.random.normal in TensorFlow 2.x
    noise = tf.random.normal(tf.shape(mu), 0, 1, dtype=tf.float32)
    # shift and scale it into a sample from N(mu, sigma^2)
    z = mu + tf.exp(log_var / 2.0) * noise
    return z
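As the docstring notes, the two-step draw above collapses into a single backend call; a minimal sketch, assuming mu and log_var are tensors of the same shape (tf.random.normal, which backs K.random_normal, accepts tensor-valued mean and stddev):

import tensorflow as tf
from tensorflow.keras import backend as K

def _sample_direct(args):
    # draw z ~ N(mu, sigma^2) in one call; exp(log_var / 2) is sigma
    mu, log_var = args
    return K.random_normal(shape=tf.shape(mu), mean=mu, stddev=tf.exp(log_var / 2.0))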
Example 2: sampling
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def sampling(args):
    """Reparameterization trick by sampling
    from an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    # K is the keras backend
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
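In a Keras VAE this function is typically wrapped in a Lambda layer that takes the encoder's outputs; a minimal sketch, assuming z_mean and z_log_var are encoder output tensors and latent_dim is defined:

from tensorflow.keras.layers import Lambda

# draw z from Q(z|X) as a layer in the model graph
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])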
Example 3: sampling
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def sampling(args):
    """Implements the reparameterization trick by sampling
    from a Gaussian with zero mean and std=1.

    Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    Returns:
        sampled latent vector (tensor)
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example 4: sampling
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def sampling(args):
    """Reparameterization trick by sampling
    from an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example 5: call
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def call(self, x, mode):
    # Possible modes for reconstruction:
    # 0: img -> img + gate
    # 1: img -> img
    # 2: img -> gate
    x = self.q_img(x)
    means = self.mean_params(x)
    # stddev_params predicts log-variance; exp(0.5 * log_var) is sigma
    stddev = tf.math.exp(0.5 * self.stddev_params(x))
    eps = random_normal(tf.shape(stddev))
    # reparameterization trick: z = mu + sigma * eps
    z = means + eps * stddev
    if mode == 0:
        img_recon = self.p_img(z)
        gate_recon = self.p_gate(z)
        return img_recon, gate_recon, means, stddev, z
    elif mode == 1:
        img_recon = self.p_img(z)
        gate_recon = False
        return img_recon, gate_recon, means, stddev, z
    elif mode == 2:
        img_recon = False
        gate_recon = self.p_gate(z)
        return img_recon, gate_recon, means, stddev, z
Example 6: call
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def call(self, x):
    if self.random_gain:
        noise_x = x + K.random_normal(
            shape=K.shape(x), mean=0.0, stddev=np.random.uniform(0.0, self.power)
        )
    else:
        noise_x = x + K.random_normal(shape=K.shape(x), mean=0.0, stddev=self.power)
    return K.in_train_phase(noise_x, x)
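The call above references self.power and self.random_gain; for context, a minimal self-contained sketch of a noise layer around it (the class name and constructor arguments here are illustrative, not taken from the original repo):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

class RandomGaussianNoise(tf.keras.layers.Layer):  # hypothetical wrapper
    def __init__(self, power=0.1, random_gain=False, **kwargs):
        super().__init__(**kwargs)
        self.power = power              # fixed (or maximum) noise stddev
        self.random_gain = random_gain  # if True, resample the stddev each call

    def call(self, x):
        if self.random_gain:
            stddev = np.random.uniform(0.0, self.power)
        else:
            stddev = self.power
        noise_x = x + K.random_normal(shape=K.shape(x), mean=0.0, stddev=stddev)
        # inject noise only during training; pass through at inference
        return K.in_train_phase(noise_x, x)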
Example 7: call
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def call(self, inputs):
    if self.n_dims == 2:
        rand_flow = K.random_normal(
            shape=tf.convert_to_tensor(
                [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2], self.n_dims]),
            mean=0., stddev=1., dtype='float32')
        rand_flow = tf.nn.depthwise_conv2d(rand_flow, self.blur_kernel,
                                           strides=[1] * (self.n_dims + 2), padding='SAME')
    elif self.n_dims == 3:
        rand_flow = K.random_normal(
            shape=tf.convert_to_tensor(
                [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2],
                 tf.shape(inputs)[3], self.n_dims]),
            mean=0., stddev=1., dtype='float32')
        if self.blur_kernel is not None:
            # blur each flow channel separately with a 3D convolution
            rand_flow_list = tf.unstack(rand_flow, num=3, axis=-1)
            flow_chans = []
            for c in range(self.n_dims):
                flow_chan = tf.nn.conv3d(tf.expand_dims(rand_flow_list[c], axis=-1),
                                         self.blur_kernel, strides=[1] * (self.n_dims + 2),
                                         padding='SAME')
                flow_chans.append(flow_chan[:, :, :, :, 0])
            rand_flow = tf.stack(flow_chans, axis=-1)

    if self.normalize_max:
        rand_flow = K.cast(
            tf.add_n([rand_flow * 0, rand_flow / tf.reduce_max(tf.abs(rand_flow)) * self.flow_sigma]),
            dtype='float32')
    else:
        rand_flow = K.cast(rand_flow * self.flow_sigma, dtype='float32')
    return rand_flow
Example 8: encode
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def encode(self, x):
    x = self.q_img(x)
    means = self.mean_params(x)
    # stddev_params predicts log-variance; exp(0.5 * log_var) is sigma
    stddev = tf.math.exp(0.5 * self.stddev_params(x))
    eps = random_normal(tf.shape(stddev))
    # reparameterization trick: z = mu + sigma * eps
    z = means + eps * stddev
    return z, means, stddev
Example 9: train_step
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def train_step(self, images, style, noise, perform_gp=True, perform_pl=False):
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Get style information
        w_space = []
        pl_lengths = self.pl_mean
        for i in range(len(style)):
            w_space.append(self.GAN.S(style[i]))

        # Generate images
        generated_images = self.GAN.G(w_space + [noise])

        # Discriminate
        real_output = self.GAN.D(images, training=True)
        fake_output = self.GAN.D(generated_images, training=True)

        # Hinge loss function
        gen_loss = K.mean(fake_output)
        divergence = K.mean(K.relu(1 + real_output) + K.relu(1 - fake_output))
        disc_loss = divergence

        if perform_gp:
            # R1 gradient penalty
            disc_loss += gradient_penalty(images, real_output, 10)

        if perform_pl:
            # Slightly adjust W space
            w_space_2 = []
            for i in range(len(style)):
                std = 0.1 / (K.std(w_space[i], axis=0, keepdims=True) + 1e-8)
                w_space_2.append(w_space[i] + K.random_normal(tf.shape(w_space[i])) / (std + 1e-8))

            # Generate from slightly adjusted W space
            pl_images = self.GAN.G(w_space_2 + [noise])

            # Get distance after adjustment (path length)
            delta_g = K.mean(K.square(pl_images - generated_images), axis=[1, 2, 3])
            pl_lengths = delta_g

            if self.pl_mean > 0:
                gen_loss += K.mean(K.square(pl_lengths - self.pl_mean))

    # Get gradients for respective areas
    gradients_of_generator = gen_tape.gradient(gen_loss, self.GAN.GM.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, self.GAN.D.trainable_variables)

    # Apply gradients
    self.GAN.GMO.apply_gradients(zip(gradients_of_generator, self.GAN.GM.trainable_variables))
    self.GAN.DMO.apply_gradients(zip(gradients_of_discriminator, self.GAN.D.trainable_variables))

    return disc_loss, gen_loss, divergence, pl_lengths
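The gradient_penalty helper called above is not shown in this snippet; a minimal sketch of an R1-style penalty consistent with the call gradient_penalty(images, real_output, 10), assuming the training step runs in graph mode (e.g. under @tf.function) so that K.gradients is valid:

from tensorflow.keras import backend as K

def gradient_penalty(samples, output, weight):
    # gradient of the discriminator output w.r.t. the (real) input images
    gradients = K.gradients(output, samples)[0]
    # squared L2 norm per sample, summed over all non-batch axes
    gradients_sqr = K.square(gradients)
    penalty = K.sum(gradients_sqr, axis=list(range(1, len(gradients_sqr.shape))))
    # weighted batch mean
    return weight * K.mean(penalty)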