

Python backend.random_uniform Method Code Examples

This article collects typical usage examples of the keras.backend.random_uniform method in Python. If you are unsure how to call backend.random_uniform, what its concrete usage looks like, or simply want to see it in context, the curated examples below may help. You can also explore further usage examples for the keras.backend module to which the method belongs.


The following presents 15 code examples of the backend.random_uniform method, sorted by popularity by default.
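Before the examples, here is a minimal standalone sketch of the method itself. In Keras 2.x the documented signature is keras.backend.random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None), returning a tensor of the given shape filled with samples drawn uniformly from [minval, maxval). The K.eval call below assumes a TensorFlow backend.

from keras import backend as K

# Draw a (3, 4) tensor of uniform samples in [-1, 1).
samples = K.random_uniform((3, 4), minval=-1.0, maxval=1.0)

# Evaluate the tensor to a NumPy array (TensorFlow backend).
print(K.eval(samples))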

Example 1: loss

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def loss(self, y_true, y_pred):

        # get the value for the true and fake images
        disc_true = self.disc(y_true)
        disc_pred = self.disc(y_pred)

        # sample a x_hat by sampling along the line between true and pred
        # z = tf.placeholder(tf.float32, shape=[None, 1])
        # shp = y_true.get_shape()[0]
        # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
        # self.batch_size does not work, since it's not None!!!
        alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
        diff = y_pred - y_true
        interp = y_true + alpha * diff

        # take gradient of D(x_hat)
        gradients = K.gradients(self.disc(interp), [interp])[0]
        grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1))-1))

        # compute loss
        return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen 
Developer: voxelmorph, Project: voxelmorph, Lines: 23, Source: metrics.py

Example 2: softmax_activation

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def softmax_activation(self, mem):
        """Softmax activation."""

        # spiking_samples = k.less_equal(k.random_uniform([self.config.getint(
        #     'simulation', 'batch_size'), 1]), 300 * self.dt / 1000.)
        # spiking_neurons = k.T.repeat(spiking_samples, 10, axis=1)
        # activ = k.T.nnet.softmax(mem)
        # max_activ = k.max(activ, axis=1, keepdims=True)
        # output_spikes = k.equal(activ, max_activ).astype(k.floatx())
        # output_spikes = k.T.set_subtensor(output_spikes[k.equal(
        #     spiking_neurons, 0).nonzero()], 0.)
        # new_and_reset_mem = k.T.set_subtensor(mem[spiking_neurons.nonzero()],
        #                                       0.)
        # self.add_update([(self.mem, new_and_reset_mem)])
        # return output_spikes

        return k.T.mul(k.less_equal(k.random_uniform(mem.shape),
                                    k.softmax(mem)), self.v_thresh) 
Developer: NeuromorphicProcessorProject, Project: snn_toolbox, Lines: 20, Source: temporal_mean_rate_theano.py

Example 3: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def call(self, x, mask=None):
        sims = []
        for n, sim in zip(self.n, self.similarities):
            for _ in range(n):
                batch_size = K.shape(x)[0]
                idx = K.random_uniform((batch_size,), low=0, high=batch_size,
                                       dtype='int32')
                x_shuffled = K.gather(x, idx)
                pair_sim = sim(x, x_shuffled)
                for _ in range(K.ndim(x) - 1):
                    pair_sim = K.expand_dims(pair_sim, dim=1)
                sims.append(pair_sim)

        return K.concatenate(sims, axis=-1) 
Developer: codekansas, Project: gandlf, Lines: 16, Source: core.py

Example 4: _merge_function

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def _merge_function(self, inputs):
        alpha = K.random_uniform((32, 1, 1, 1))
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1]) 
Developer: eriklindernoren, Project: Keras-GAN, Lines: 5, Source: wgan_gp.py
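In the Keras-GAN source this _merge_function belongs to a layer that subclasses the private keras.layers.merge._Merge base class, producing the interpolated samples used for the WGAN-GP gradient penalty. A hedged sketch of that wrapping (the class name and the usage line are assumptions, not copied from the repository):

from keras import backend as K
from keras.layers.merge import _Merge

class RandomWeightedAverage(_Merge):
    """Random weighted average of two tensors (real and generated samples)."""

    def _merge_function(self, inputs):
        # Note: the batch size (32) is hard-coded, exactly as in the snippet above.
        alpha = K.random_uniform((32, 1, 1, 1))
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])

# Typical usage inside a WGAN-GP training graph:
# interpolated_img = RandomWeightedAverage()([real_img, fake_img])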

Example 5: __call__

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def __call__(self, shape, dtype=None):
        dtype = dtype or K.floatx()

        init_range = 1.0 / np.sqrt(shape[1])
        return K.random_uniform(shape, -init_range, init_range, dtype=dtype)


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
Developer: titu1994, Project: keras-efficientnets, Lines: 10, Source: custom_objects.py
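The __call__ above comes from a custom initializer class in keras-efficientnets. A hedged sketch (the class name UniformScalingInitializer is an assumption, not the name used in the repository) of how such an initializer is defined and passed to a layer:

import numpy as np
from keras import backend as K
from keras.initializers import Initializer
from keras.layers import Dense

class UniformScalingInitializer(Initializer):
    """Uniform initializer over [-1/sqrt(shape[1]), 1/sqrt(shape[1])]."""

    def __call__(self, shape, dtype=None):
        dtype = dtype or K.floatx()
        init_range = 1.0 / np.sqrt(shape[1])
        return K.random_uniform(shape, -init_range, init_range, dtype=dtype)

# Any layer that accepts an initializer can use it:
dense = Dense(128, kernel_initializer=UniformScalingInitializer())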

Example 6: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def call(self, inputs, training=None):

        def drop_connect():
            keep_prob = 1.0 - self.drop_connect_rate

            # Compute drop_connect tensor
            batch_size = tf.shape(inputs)[0]
            random_tensor = keep_prob
            random_tensor += K.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
            binary_tensor = tf.floor(random_tensor)
            output = (inputs / keep_prob) * binary_tensor
            return output

        return K.in_train_phase(drop_connect, inputs, training=training) 
Developer: titu1994, Project: keras-efficientnets, Lines: 16, Source: custom_objects.py
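The core trick in drop_connect is that floor(keep_prob + U[0, 1)) equals 1 with probability keep_prob and 0 otherwise, giving a per-example Bernoulli mask; dividing by keep_prob keeps the expected activation unchanged. A quick standalone sanity check of that trick (assuming a TensorFlow backend so that K.eval can run the random op):

import numpy as np
from keras import backend as K

keep_prob = 0.8
u = K.eval(K.random_uniform([100000]))   # uniform samples in [0, 1)
mask = np.floor(keep_prob + u)           # 1 with probability keep_prob, else 0
print(mask.mean())                       # expected to be close to 0.8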

Example 7: _merge_function

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def _merge_function(self, inputs):
        alpha = K.random_uniform((self.bs, 1, 1, 1))
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1]) 
Developer: hoangthang1607, Project: StarGAN-Keras, Lines: 5, Source: StarGAN.py

Example 8: _merge_function

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Developer: deepakbaby, Project: se_relativisticgan, Lines: 5, Source: run_rsgan-gp_se.py

Example 9: _merge_function

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Developer: keras-team, Project: keras-contrib, Lines: 5, Source: improved_wgan.py

Example 10: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def call(self, inputs, training=None):
        def dropped_inputs():
            keep_prob = 1. - self.rate
            tile_shape = tf.expand_dims(tf.shape(inputs)[-1], axis=0)
            tiled_keep_prob = K.tile(keep_prob, tile_shape)
            keep_prob = tf.transpose(K.reshape(tiled_keep_prob, [tile_shape[0], tf.shape(keep_prob)[0]]))
            binary_tensor = tf.floor(keep_prob + K.random_uniform(shape=tf.shape(inputs)))
            return inputs * binary_tensor
        return K.in_train_phase(dropped_inputs, inputs,
                                training=training) 
Developer: d909b, Project: perfect_match, Lines: 12, Source: per_sample_dropout.py

Example 11: _merge_function

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def _merge_function(self, inputs):
    weights = K.random_uniform((1, 1, 1, 1))
    return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Developer: tlatkowski, Project: inpainting-gmcnn-keras, Lines: 5, Source: custom_layers.py

Example 12: __init__

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def __init__(self,mode='mul', strength=0.4, axes=(0,3), normalize=False,**kwargs):
        super(GDropLayer,self).__init__(**kwargs)
        assert mode in ('drop', 'mul', 'prop')
        #self.random     = K.random_uniform(1, minval=1, maxval=2147462579, dtype=tf.float32, seed=None, name=None)
        self.mode       = mode
        self.strength   = strength
        self.axes       = [axes] if isinstance(axes, int) else list(axes)
        self.normalize  = normalize # If true, retain overall signal variance.
        self.gain       = None      # For experimentation. 
Developer: MSC-BUAA, Project: Keras-progressive_growing_of_gans, Lines: 11, Source: layers.py

Example 13: uniform_latent_sampling

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def uniform_latent_sampling(latent_shape, low=0.0, high=1.0):
    """
    Sample from a uniform distribution.
    :param latent_shape: shape of a single latent sample (the batch axis is taken from the input)
    :param low: lower bound of the uniform distribution
    :param high: upper bound of the uniform distribution
    :return: uniform samples, shape=(batch_size,) + latent_shape
    """
    return Lambda(lambda x: K.random_uniform((K.shape(x)[0],) + latent_shape, low, high),
                  output_shape=lambda x: ((x[0],) + latent_shape)) 
Developer: bstriner, Project: keras-adversarial, Lines: 10, Source: adversarial_utils.py
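A hedged usage sketch (the variable names are assumptions): the Lambda layer returned by uniform_latent_sampling ignores the values of its input tensor and only reads its batch dimension, so it can be attached to any existing input to draw a matching batch of latent vectors.

from keras.layers import Input

x = Input(shape=(28, 28, 1))
# z has shape (batch_size, 100), with entries drawn uniformly from [0, 1)
z = uniform_latent_sampling((100,))(x)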

Example 14: _dense_kernel_initializer

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def _dense_kernel_initializer(shape, dtype=None):
    fan_in, fan_out = _compute_fans(shape)
    stddev = 1. / np.sqrt(fan_in)
    return K.random_uniform(shape, -stddev, stddev, dtype) 
Developer: izikgo, Project: AnomalyDetectionTransformations, Lines: 6, Source: wide_residual_network.py

Example 15: random_laplace

# Required import: from keras import backend [as alias]
# Or: from keras.backend import random_uniform [as alias]
def random_laplace(shape, mu=0., b=1.):
    '''
    Draw random samples from a Laplace distribution.

    See: https://en.wikipedia.org/wiki/Laplace_distribution#Generating_random_variables_according_to_the_Laplace_distribution
    '''
    U = K.random_uniform(shape, -0.5, 0.5)
    return mu - b * K.sign(U) * K.log(1 - 2 * K.abs(U)) 
Developer: jhartford, Project: DeepIV, Lines: 10, Source: samplers.py
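As a quick sanity check (assuming a TensorFlow backend), the empirical moments of the samples should approach those of a Laplace distribution, namely mean mu and variance 2 * b**2:

import numpy as np
from keras import backend as K

samples = K.eval(random_laplace((100000,), mu=0., b=1.))
print(np.mean(samples))   # expected to be close to 0.0
print(np.var(samples))    # expected to be close to 2.0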


Note: The keras.backend.random_uniform method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.