當前位置: 首頁>>代碼示例>>Python>>正文


Python backend.random_uniform方法代碼示例

本文整理匯總了Python中keras.backend.random_uniform方法的典型用法代碼示例。如果您正苦於以下問題:Python backend.random_uniform方法的具體用法?Python backend.random_uniform怎麽用?Python backend.random_uniform使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在keras.backend的用法示例。


在下文中一共展示了backend.random_uniform方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: loss

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def loss(self, y_true, y_pred):
        """WGAN-GP critic loss (Wasserstein estimate + gradient penalty).

        Scores the real batch ``y_true`` and the generated batch ``y_pred``
        with the critic ``self.disc``, then adds a gradient penalty computed
        at random points on the line between each real/fake pair, weighted
        by ``self.lambda_gp``.

        :param y_true: batch of real samples (4-D image tensor assumed from
            the ``[batch, 1, 1, 1]`` alpha shape — TODO confirm).
        :param y_pred: batch of generated samples, same shape as ``y_true``.
        :return: scalar critic-loss tensor.
        """

        # critic scores for the true and fake images
        disc_true = self.disc(y_true)
        disc_pred = self.disc(y_pred)

        # sample x_hat along the line between true and pred: one alpha per
        # batch element, broadcast across the remaining axes
        # z = tf.placeholder(tf.float32, shape=[None, 1])
        # shp = y_true.get_shape()[0]
        # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
        # self.batch_size does not work, since it's not None!!!
        alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
        diff = y_pred - y_true
        interp = y_true + alpha * diff

        # gradient of D(x_hat) w.r.t. x_hat; the penalty drives its norm to 1
        # NOTE(review): the norm is reduced over axis=1 only, not over all
        # non-batch axes — confirm this is intended for 4-D inputs.
        gradients = K.gradients(self.disc(interp), [interp])[0]
        grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1))-1))

        # Wasserstein distance estimate plus weighted gradient penalty
        return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen 
開發者ID:voxelmorph,項目名稱:voxelmorph,代碼行數:23,代碼來源:metrics.py

示例2: softmax_activation

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def softmax_activation(self, mem):
        """Stochastic spiking softmax activation.

        Converts membrane potentials ``mem`` into spikes: each neuron emits
        a spike of height ``self.v_thresh`` with probability equal to its
        softmax value (a uniform draw at or below ``softmax(mem)`` gates the
        output).

        NOTE(review): uses the Theano-specific ``k.T`` namespace, so this
        implementation only works with the Theano backend.
        """

        # spiking_samples = k.less_equal(k.random_uniform([self.config.getint(
        #     'simulation', 'batch_size'), 1]), 300 * self.dt / 1000.)
        # spiking_neurons = k.T.repeat(spiking_samples, 10, axis=1)
        # activ = k.T.nnet.softmax(mem)
        # max_activ = k.max(activ, axis=1, keepdims=True)
        # output_spikes = k.equal(activ, max_activ).astype(k.floatx())
        # output_spikes = k.T.set_subtensor(output_spikes[k.equal(
        #     spiking_neurons, 0).nonzero()], 0.)
        # new_and_reset_mem = k.T.set_subtensor(mem[spiking_neurons.nonzero()],
        #                                       0.)
        # self.add_update([(self.mem, new_and_reset_mem)])
        # return output_spikes

        # spike iff U(0,1) <= softmax(mem); scale the boolean mask by v_thresh
        return k.T.mul(k.less_equal(k.random_uniform(mem.shape),
                                    k.softmax(mem)), self.v_thresh) 
開發者ID:NeuromorphicProcessorProject,項目名稱:snn_toolbox,代碼行數:20,代碼來源:temporal_mean_rate_theano.py

示例3: call

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def call(self, x, mask=None):
        """Concatenate similarity scores between x and randomly shuffled copies of itself.

        For each (count, similarity) pair configured on the layer, draws
        `count` random batch permutations (with replacement), scores each
        pairing, and concatenates all scores along the last axis.
        """
        num_samples = K.shape(x)[0]
        scores = []
        for count, similarity in zip(self.n, self.similarities):
            for _ in range(count):
                # random partner index for every sample in the batch
                partner_idx = K.random_uniform((num_samples,), low=0,
                                               high=num_samples, dtype='int32')
                partner = K.gather(x, partner_idx)
                score = similarity(x, partner)
                # expand the per-sample score back up to the rank of x
                for _ in range(K.ndim(x) - 1):
                    score = K.expand_dims(score, dim=1)
                scores.append(score)

        return K.concatenate(scores, axis=-1) 
開發者ID:codekansas,項目名稱:gandlf,代碼行數:16,代碼來源:core.py

示例4: _merge_function

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def _merge_function(self, inputs):
        """Return a random per-sample convex combination of the two input batches.

        Used for WGAN-GP: the gradient penalty is evaluated at points sampled
        uniformly on the line between real and fake samples.

        :param inputs: list of two 4-D tensors (batch, H, W, C) to interpolate.
        :return: tensor of the same shape, alpha*inputs[0] + (1-alpha)*inputs[1].
        """
        # Draw one interpolation coefficient per sample. The original code
        # hard-coded the batch size to 32, which breaks for any other batch
        # size (e.g. the final, smaller batch of an epoch); derive it from
        # the input tensor instead. Shape (batch, 1, 1, 1) broadcasts over
        # the spatial and channel axes.
        batch_size = K.shape(inputs[0])[0]
        alpha = K.random_uniform((batch_size, 1, 1, 1))
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1]) 
開發者ID:eriklindernoren,項目名稱:Keras-GAN,代碼行數:5,代碼來源:wgan_gp.py

示例5: __call__

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def __call__(self, shape, dtype=None):
        """Sample initial weights uniformly from [-1/sqrt(shape[1]), 1/sqrt(shape[1])].

        NOTE(review): uses shape[1] as the fan measure — for a Dense kernel
        of shape (input_dim, units) that is the number of units; confirm
        this matches the intended scheme.
        """
        if dtype is None:
            dtype = K.floatx()

        limit = 1.0 / np.sqrt(shape[1])
        return K.random_uniform(shape, minval=-limit, maxval=limit, dtype=dtype)


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
開發者ID:titu1994,項目名稱:keras-efficientnets,代碼行數:10,代碼來源:custom_objects.py

示例6: call

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def call(self, inputs, training=None):
        """Apply drop-connect to `inputs` during training; identity at inference.

        Each sample in the batch is either kept (and rescaled by
        1/keep_prob so the expected value is unchanged) or zeroed entirely
        with probability `self.drop_connect_rate`.
        """

        def _drop():
            survival_prob = 1.0 - self.drop_connect_rate

            # floor(p + U[0,1)) yields 1 with probability p, 0 otherwise:
            # one Bernoulli draw per sample, broadcast over H, W, C.
            num_samples = tf.shape(inputs)[0]
            noise = survival_prob + K.random_uniform([num_samples, 1, 1, 1],
                                                     dtype=inputs.dtype)
            keep_mask = tf.floor(noise)
            # rescale survivors so the activation's expectation is preserved
            return (inputs / survival_prob) * keep_mask

        return K.in_train_phase(_drop, inputs, training=training) 
開發者ID:titu1994,項目名稱:keras-efficientnets,代碼行數:16,代碼來源:custom_objects.py

示例7: _merge_function

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def _merge_function(self, inputs):
        """Random per-sample interpolation between the two input batches.

        NOTE(review): relies on `self.bs` matching the actual batch size.
        """
        # one mixing coefficient per sample; broadcasts over the image axes
        mix = K.random_uniform((self.bs, 1, 1, 1))
        first, second = inputs[0], inputs[1]
        return (mix * first) + ((1 - mix) * second) 
開發者ID:hoangthang1607,項目名稱:StarGAN-Keras,代碼行數:5,代碼來源:StarGAN.py

示例8: _merge_function

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def _merge_function(self, inputs):
        """Random per-sample convex combination of the two 3-D input batches.

        NOTE(review): relies on the module-level BATCH_SIZE constant, so the
        layer only works when the actual batch matches it.
        """
        # one coefficient per sample, broadcast over the remaining two axes
        coeff = K.random_uniform((BATCH_SIZE, 1, 1))
        first, second = inputs[0], inputs[1]
        return (coeff * first) + ((1 - coeff) * second) 
開發者ID:deepakbaby,項目名稱:se_relativisticgan,代碼行數:5,代碼來源:run_rsgan-gp_se.py

示例9: _merge_function

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def _merge_function(self, inputs):
        """Random per-sample convex combination of the two 4-D input batches.

        NOTE(review): relies on the module-level BATCH_SIZE constant, so the
        layer only works when the actual batch matches it.
        """
        # one coefficient per sample, broadcast over H, W and C
        coeff = K.random_uniform((BATCH_SIZE, 1, 1, 1))
        first, second = inputs[0], inputs[1]
        return (coeff * first) + ((1 - coeff) * second) 
開發者ID:keras-team,項目名稱:keras-contrib,代碼行數:5,代碼來源:improved_wgan.py

示例10: call

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def call(self, inputs, training=None):
        """Dropout with a per-sample drop rate.

        During training each entry of ``inputs`` is zeroed with probability
        ``self.rate``; at inference the inputs pass through unchanged.
        NOTE(review): ``self.rate`` is tiled and reshaped like a tensor
        below, so it appears to hold one rate per sample rather than a
        scalar — confirm against the layer's constructor.  Unlike standard
        dropout, surviving values are NOT rescaled by 1/keep_prob.
        """
        def dropped_inputs():
            keep_prob = 1. - self.rate
            # broadcast keep_prob across the feature axis: tile it once per
            # feature, then reshape/transpose to align with `inputs`
            tile_shape = tf.expand_dims(tf.shape(inputs)[-1], axis=0)
            tiled_keep_prob = K.tile(keep_prob, tile_shape)
            keep_prob = tf.transpose(K.reshape(tiled_keep_prob, [tile_shape[0], tf.shape(keep_prob)[0]]))
            # floor(p + U[0,1)) is 1 with probability p, 0 otherwise
            binary_tensor = tf.floor(keep_prob + K.random_uniform(shape=tf.shape(inputs)))
            return inputs * binary_tensor
        return K.in_train_phase(dropped_inputs, inputs,
                                training=training) 
開發者ID:d909b,項目名稱:perfect_match,代碼行數:12,代碼來源:per_sample_dropout.py

示例11: _merge_function

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def _merge_function(self, inputs):
    """Blend the two input batches with a single shared random coefficient.

    Unlike the per-sample variants, one scalar drawn from U[0, 1) is shared
    by every sample in the batch (shape (1, 1, 1, 1) broadcasts over batch,
    height, width and channels alike).
    """
    blend = K.random_uniform((1, 1, 1, 1))
    return (blend * inputs[0]) + ((1 - blend) * inputs[1]) 
開發者ID:tlatkowski,項目名稱:inpainting-gmcnn-keras,代碼行數:5,代碼來源:custom_layers.py

示例12: __init__

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def __init__(self, mode='mul', strength=0.4, axes=(0, 3), normalize=False, **kwargs):
        """Configure the generalized-dropout layer.

        :param mode: one of 'drop', 'mul' or 'prop' — selects the noise type.
        :param strength: magnitude of the applied noise.
        :param axes: axis index or iterable of axes the noise is shared over.
        :param normalize: if True, retain overall signal variance.
        """
        super(GDropLayer, self).__init__(**kwargs)
        assert mode in ('drop', 'mul', 'prop')
        self.mode = mode
        self.strength = strength
        # accept either a single axis or any iterable of axes
        self.axes = list(axes) if not isinstance(axes, int) else [axes]
        self.normalize = normalize
        self.gain = None  # for experimentation
開發者ID:MSC-BUAA,項目名稱:Keras-progressive_growing_of_gans,代碼行數:11,代碼來源:layers.py

示例13: uniform_latent_sampling

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def uniform_latent_sampling(latent_shape, low=0.0, high=1.0):
    """
    Build a Lambda layer that samples latent vectors from a uniform
    distribution, one per sample of whatever batch it is called on.

    :param latent_shape: shape of a single latent sample (without the batch axis)
    :param low: lower bound of the uniform distribution
    :param high: upper bound of the uniform distribution
    :return: Lambda layer producing uniform samples, shape=(batch,)+latent_shape
    """
    return Lambda(lambda x: K.random_uniform((K.shape(x)[0],) + latent_shape, low, high),
                  output_shape=lambda x: ((x[0],) + latent_shape))
開發者ID:bstriner,項目名稱:keras-adversarial,代碼行數:10,代碼來源:adversarial_utils.py

示例14: _dense_kernel_initializer

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def _dense_kernel_initializer(shape, dtype=None):
    """Uniform kernel init in [-1/sqrt(fan_in), 1/sqrt(fan_in)] for Dense layers."""
    fan_in, _ = _compute_fans(shape)
    limit = 1. / np.sqrt(fan_in)
    return K.random_uniform(shape, -limit, limit, dtype) 
開發者ID:izikgo,項目名稱:AnomalyDetectionTransformations,代碼行數:6,代碼來源:wide_residual_network.py

示例15: random_laplace

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import random_uniform [as 別名]
def random_laplace(shape, mu=0., b=1.):
    '''
    Draw random samples from a Laplace distribution with location `mu` and
    scale `b`, via inverse-transform sampling of a uniform variable.

    See: https://en.wikipedia.org/wiki/Laplace_distribution#Generating_random_variables_according_to_the_Laplace_distribution
    '''
    u = K.random_uniform(shape, -0.5, 0.5)
    # Laplace inverse CDF applied to u ~ U(-0.5, 0.5)
    return mu - b * K.sign(u) * K.log(1 - 2 * K.abs(u)) 
開發者ID:jhartford,項目名稱:DeepIV,代碼行數:10,代碼來源:samplers.py


注:本文中的keras.backend.random_uniform方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。