

Python backend.random_normal Method Code Examples

This article collects typical usage examples of the Python method tensorflow.keras.backend.random_normal. If you are wondering how backend.random_normal is used in practice, how to call it, or what real-world examples look like, the curated code examples below should help. You can also explore further usage examples from the tensorflow.keras.backend module.


The following presents 9 code examples of the backend.random_normal method, sorted by popularity by default.
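Before the examples, here is a minimal, illustrative sketch of a direct call to K.random_normal; the shape, mean, stddev, and seed values below are made up for demonstration and are not taken from any of the projects listed:

import tensorflow as tf
from tensorflow.keras import backend as K

# Draw a (batch, dim)-shaped tensor of samples from N(mean, stddev^2).
# mean=0.0 and stddev=1.0 are the defaults; seed is optional.
eps = K.random_normal(shape=(32, 16), mean=0.0, stddev=1.0, seed=42)
print(eps.shape)  # (32, 16)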

Example 1: _sample

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def _sample(self, args):
        """
        sample from a normal distribution

        args should be [mu, log_var], where log_var is the log of the squared sigma

        This is probably equivalent to 
            K.random_normal(shape, args[0], exp(args[1]/2.0))
        """
        mu, log_var = args

        # sample from N(0, 1)
        noise = tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)

        # make it a sample from N(mu, sigma^2)
        z = mu + tf.exp(log_var/2.0) * noise
        return z 
Developer ID: adalca, Project: neuron, Lines of code: 19, Source: layers.py
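The docstring above notes that the noise-and-shift could probably be collapsed into a single K.random_normal call (tf.random_normal is the TF1-era name of tf.random.normal). A rough, untested sketch of that equivalent form, assuming mu and log_var are tensors as in _sample:

# Sketch only: sample directly from N(mu, sigma^2), where sigma = exp(log_var / 2).
# tf.random.normal (and hence K.random_normal) broadcasts tensor-valued mean/stddev.
z = K.random_normal(shape=tf.shape(mu), mean=mu, stddev=tf.exp(log_var / 2.0))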

Example 2: sampling

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def sampling(args):
    """Reparameterization trick by sampling 
        fr an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    # K is the keras backend
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Developer ID: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines of code: 20, Source: vae-mlp-mnist-8.1.1.py
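In the VAE scripts this sampling function is typically wrapped in a Lambda layer to produce the latent tensor z. A hedged sketch of that wiring follows; latent_dim and the layer name are illustrative, and z_mean / z_log_var are assumed to be the encoder's output tensors:

from tensorflow.keras.layers import Lambda

latent_dim = 2  # illustrative value
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])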

Example 3: sampling

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def sampling(args):
    """Implements reparameterization trick by sampling
    from a gaussian with zero mean and std=1.

    Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    Returns:
        sampled latent vector (tensor)
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Developer ID: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines of code: 19, Source: cvae-cnn-mnist-8.2.1.py

Example 4: sampling

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def sampling(args):
    """Reparameterization trick by sampling 
        fr an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Developer ID: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines of code: 19, Source: vae-cnn-mnist-8.1.2.py

Example 5: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def call(self, x, mode):
        # Possible modes for reconstruction:
        # 0: img -> img + gate
        # 1: img -> img
        # 2: img -> gate
        x = self.q_img(x)
        means = self.mean_params(x)
        stddev = tf.math.exp(0.5 * self.stddev_params(x))
        eps = random_normal(tf.shape(stddev))
        z = means + eps * stddev
        if mode == 0:
            img_recon = self.p_img(z)
            gate_recon = self.p_gate(z)
            return img_recon, gate_recon, means, stddev, z
        elif mode == 1:
            img_recon = self.p_img(z)
            gate_recon = False
            return img_recon, gate_recon, means, stddev, z
        elif mode == 2:
            img_recon = False
            gate_recon = self.p_gate(z)
            return img_recon, gate_recon, means, stddev, z 
Developer ID: microsoft, Project: AirSim-Drone-Racing-VAE-Imitation, Lines of code: 24, Source: cmvae.py

Example 6: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def call(self, x):
        if self.random_gain:
            noise_x = x + K.random_normal(
                shape=K.shape(x), mean=0.0, stddev=np.random.uniform(0.0, self.power)
            )
        else:
            noise_x = x + K.random_normal(shape=K.shape(x), mean=0.0, stddev=self.power)

        return K.in_train_phase(noise_x, x) 
Developer ID: keunwoochoi, Project: kapre, Lines of code: 11, Source: augmentation.py
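Example 6 pairs K.random_normal with K.in_train_phase so the noise is only injected during training and inference stays deterministic. A self-contained sketch of a minimal custom layer built on that same pattern (an illustrative class, not kapre's actual implementation):

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer

class SimpleAdditiveNoise(Layer):
    """Adds zero-mean Gaussian noise with stddev=power, but only at training time."""

    def __init__(self, power=0.1, **kwargs):
        super().__init__(**kwargs)
        self.power = power

    def call(self, x):
        noise_x = x + K.random_normal(shape=K.shape(x), mean=0.0, stddev=self.power)
        return K.in_train_phase(noise_x, x)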

Example 7: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def call(self, inputs):
        if self.n_dims == 2:
            rand_flow = K.random_normal(
                shape=tf.convert_to_tensor(
                    [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2], self.n_dims]),
                mean=0., stddev=1., dtype='float32')
            rand_flow = tf.nn.depthwise_conv2d(rand_flow, self.blur_kernel, strides=[1] * (self.n_dims + 2),
                                               padding='SAME')
        elif self.n_dims == 3:
            rand_flow = K.random_normal(
                shape=tf.convert_to_tensor(
                    [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2], tf.shape(inputs)[3], self.n_dims]),
                mean=0., stddev=1., dtype='float32')
            if self.blur_kernel is not None:
                rand_flow_list = tf.unstack(rand_flow, num=3, axis=-1)
                flow_chans = []
                for c in range(self.n_dims):
                    flow_chan = tf.nn.conv3d(tf.expand_dims(rand_flow_list[c], axis=-1),
                                             self.blur_kernel, strides=[1] * (self.n_dims + 2), padding='SAME')
                    flow_chans.append(flow_chan[:, :, :, :, 0])
                rand_flow = tf.stack(flow_chans, axis=-1)

        if self.normalize_max:
            rand_flow = K.cast(
                tf.add_n([rand_flow * 0, rand_flow / tf.reduce_max(tf.abs(rand_flow)) * self.flow_sigma]),
                dtype='float32')
        else:
            rand_flow = K.cast(rand_flow * self.flow_sigma, dtype='float32')
        return rand_flow 
Developer ID: xamyzhao, Project: brainstorm, Lines of code: 31, Source: networks.py

Example 8: encode

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def encode(self, x):
        x = self.q_img(x)
        means = self.mean_params(x)
        stddev = tf.math.exp(0.5 * self.stddev_params(x))
        eps = random_normal(tf.shape(stddev))
        z = means + eps * stddev
        return z, means, stddev 
Developer ID: microsoft, Project: AirSim-Drone-Racing-VAE-Imitation, Lines of code: 9, Source: cmvae.py

Example 9: train_step

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import random_normal [as alias]
def train_step(self, images, style, noise, perform_gp = True, perform_pl = False):

        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            #Get style information
            w_space = []
            pl_lengths = self.pl_mean
            for i in range(len(style)):
                w_space.append(self.GAN.S(style[i]))

            #Generate images
            generated_images = self.GAN.G(w_space + [noise])

            #Discriminate
            real_output = self.GAN.D(images, training=True)
            fake_output = self.GAN.D(generated_images, training=True)

            #Hinge loss function
            gen_loss = K.mean(fake_output)
            divergence = K.mean(K.relu(1 + real_output) + K.relu(1 - fake_output))
            disc_loss = divergence

            if perform_gp:
                #R1 gradient penalty
                disc_loss += gradient_penalty(images, real_output, 10)

            if perform_pl:
                #Slightly adjust W space
                w_space_2 = []
                for i in range(len(style)):
                    std = 0.1 / (K.std(w_space[i], axis = 0, keepdims = True) + 1e-8)
                    w_space_2.append(w_space[i] + K.random_normal(tf.shape(w_space[i])) / (std + 1e-8))

                #Generate from slightly adjusted W space
                pl_images = self.GAN.G(w_space_2 + [noise])

                #Get distance after adjustment (path length)
                delta_g = K.mean(K.square(pl_images - generated_images), axis = [1, 2, 3])
                pl_lengths = delta_g

                if self.pl_mean > 0:
                    gen_loss += K.mean(K.square(pl_lengths - self.pl_mean))

        #Get gradients for respective areas
        gradients_of_generator = gen_tape.gradient(gen_loss, self.GAN.GM.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, self.GAN.D.trainable_variables)

        #Apply gradients
        self.GAN.GMO.apply_gradients(zip(gradients_of_generator, self.GAN.GM.trainable_variables))
        self.GAN.DMO.apply_gradients(zip(gradients_of_discriminator, self.GAN.D.trainable_variables))

        return disc_loss, gen_loss, divergence, pl_lengths 
Developer ID: manicman1999, Project: StyleGAN2-Tensorflow-2.0, Lines of code: 53, Source: stylegan_two.py


Note: The tensorflow.keras.backend.random_normal examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.