

Python backend.relu Method Code Examples

This article collects typical usage examples of the Python method tensorflow.keras.backend.relu. If you have been wondering how backend.relu is used in practice, how to call it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.keras.backend.


A total of 9 code examples of the backend.relu method are shown below, sorted by popularity by default.
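For reference in the examples below, here is a minimal sketch of the method itself. In TensorFlow 2.x the signature is tf.keras.backend.relu(x, alpha=0.0, max_value=None, threshold=0.0): element-wise max(x, 0), with an optional leaky slope alpha for values below the threshold and an optional saturation cap max_value:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([-2.0, -0.5, 0.0, 1.0, 8.0])
print(K.relu(x).numpy())                 # [0.   0.   0.   1.   8.]   plain ReLU
print(K.relu(x, alpha=0.1).numpy())      # [-0.2 -0.05 0.   1.   8.]  leaky variant
print(K.relu(x, max_value=6.0).numpy())  # [0.   0.   0.   1.   6.]   capped (ReLU6)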

Example 1: hinge_d

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import relu [as alias]
def hinge_d(y_true, y_pred):
    # Hinge loss for the discriminator: linear penalty past the unit margin, 0 otherwise.
    return K.mean(K.relu(1.0 + (y_true * y_pred)))
Developer ID: manicman1999, Project: StyleGAN2-Tensorflow-2.0, Lines: 4, Source file: stylegan_two.py
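A usage sketch for hinge_d, assuming the common hinge-GAN convention of labeling real samples -1 (the labeling used in the original project is not shown in this excerpt):

import tensorflow as tf
from tensorflow.keras import backend as K  # alias used by hinge_d above

scores = tf.constant([1.5, -0.2, 0.8])  # discriminator outputs for real images
y_real = -tf.ones_like(scores)          # assumed convention: real labeled -1
print(float(hinge_d(y_real, scores)))   # mean(relu(1 - score)) = 0.4667 approx.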

Example 2: relu6

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import relu [as alias]
def relu6(x):
    # ReLU capped at a maximum value of 6, as used in MobileNet-family networks.
    return K.relu(x, max_value=6)
Developer ID: titu1994, Project: keras-squeeze-excite-network, Lines: 4, Source file: se_mobilenets.py
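A usage sketch (the layer wiring is illustrative, not taken from the original project): a helper like relu6 is typically passed as a custom activation, and must be registered via custom_objects when reloading a saved model:

from tensorflow.keras import layers, models

inputs = layers.Input(shape=(32,))
outputs = layers.Dense(64, activation=relu6)(inputs)  # relu6 as defined above
model = models.Model(inputs, outputs)
# Reloading later requires: models.load_model(path, custom_objects={'relu6': relu6})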

Example 3: odd_shifted_relu

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import relu [as alias]
def odd_shifted_relu(x, shift=-0.5, scale=2.0):
    """
    Odd shifted ReLU.
    For x > 0 it is a shifted ReLU; for x < 0 it is the negative mirror image,
    so the function is odd: f(-x) = -f(x).
    """
    shift = float(shift)
    scale = float(scale)
    return scale * K.relu(x - shift) - scale * K.relu(-x - shift)
Developer ID: adalca, Project: neuron, Lines: 11, Source file: utils.py
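A quick illustrative check of the defaults (shift=-0.5, scale=2.0): since x - shift = x + 0.5, positive inputs are shifted up and negative inputs are mirrored:

import tensorflow as tf  # odd_shifted_relu and the K alias as defined above

x = tf.constant([-1.0, 0.0, 1.0])
print(odd_shifted_relu(x).numpy())  # [-3.  0.  3.], odd symmetry: f(-x) == -f(x)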

Example 4: __call__

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import relu [as alias]
def __call__(self, x):
  # Quantized ReLU (qkeras): clamp the input above at 2**integer, then round
  # to the available fractional bits. _sigmoid and _round_through are helpers
  # defined elsewhere in quantizers.py.
  non_sign_bits = self.bits - (self.negative_slope != 0)
  m = K.cast_to_floatx(pow(2, non_sign_bits))
  m_i = K.cast_to_floatx(pow(2, self.integer))
  x_uq = tf.where(
      x <= m_i, K.relu(x, alpha=self.negative_slope), tf.ones_like(x) * m_i)

  if self.use_sigmoid:
    p = _sigmoid(x / m_i) * m
    xq = m_i * tf.keras.backend.clip(
        2.0 * (_round_through(p, self.use_stochastic_rounding) / m) - 1.0,
        0.0, 1.0 - 1.0 / m)
    if self.negative_slope > 0:
      neg_factor = 1 / (self.negative_slope * m)
      xq = xq + m_i * self.negative_slope * tf.keras.backend.clip(
          2.0 * (_round_through(p * self.negative_slope,
                                self.use_stochastic_rounding) * neg_factor) - 1.0,
          -1.0, 0.0)
  else:
    p = x * m / m_i
    xq = m_i * tf.keras.backend.clip(
        _round_through(p, self.use_stochastic_rounding) / m, 0.0,
        1.0 - 1.0 / m)
    if self.negative_slope > 0:
      neg_factor = 1 / (self.negative_slope * m)
      xq = xq + m_i * self.negative_slope * (tf.keras.backend.clip(
          _round_through(p * self.negative_slope,
                         self.use_stochastic_rounding) * neg_factor, -1.0, 0.0))
  # Straight-through estimator: forward value is xq, but gradients flow
  # through the unquantized x_uq.
  return x_uq + tf.stop_gradient(-x_uq + xq)
Developer ID: google, Project: qkeras, Lines: 31, Source file: quantizers.py
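A hedged usage sketch: in qkeras this __call__ belongs to the quantized_relu quantizer, which is usually applied through a QActivation layer. The parameters below (4 bits, 0 integer bits) are illustrative only:

from qkeras import QActivation, quantized_relu
from tensorflow.keras import layers, models

inputs = layers.Input(shape=(16,))
x = layers.Dense(16)(inputs)
x = QActivation(quantized_relu(4, 0))(x)  # 4-bit quantized ReLU, 0 integer bits
model = models.Model(inputs, x)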

Example 5: softplus2

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import relu [as alias]
def softplus2(x):
    """
    out = log(exp(x) + 1) - log(2)

    A softplus variant that equals 0 at x = 0; the formulation below avoids
    overflow for large |x|.

    Args:
        x: (Tensor) input tensor

    Returns:
        (Tensor) output tensor
    """
    return kb.relu(x) + kb.log(0.5 * kb.exp(-kb.abs(x)) + 0.5)
Developer ID: materialsvirtuallab, Project: megnet, Lines: 14, Source file: activations.py
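An illustrative check that softplus2(0) = 0 and that the overflow-safe form matches the naive formula log(exp(x) + 1) - log(2) for moderate x (kb is assumed to be the tensorflow.keras.backend alias used in the megnet source):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as kb

x = tf.constant([0.0, 1.0, -1.0])
print(softplus2(x).numpy())                                # [ 0.    0.62  -0.38] approx.
print(np.log(np.exp([0.0, 1.0, -1.0]) + 1) - np.log(2.0))  # naive formula, same values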

Example 6: _relu6

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import relu [as alias]
def _relu6(self, x):
    """ReLU capped at a maximum value of 6."""
    return K.relu(x, max_value=6.0)
Developer ID: 1044197988, Project: TF.Keras-Commonly-used-models, Lines: 6, Source file: mobilenet_base.py

Example 7: _hard_swish

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import relu [as alias]
def _hard_swish(self, x):
    """Hard swish: x * ReLU6(x + 3) / 6, a cheap piecewise approximation of swish."""
    return x * K.relu(x + 3.0, max_value=6.0) / 6.0
Developer ID: 1044197988, Project: TF.Keras-Commonly-used-models, Lines: 6, Source file: mobilenet_base.py
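Illustrative values only: hard swish is exactly 0 for x <= -3, equals the identity for x >= 3, and interpolates smoothly in between:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([-4.0, -3.0, 0.0, 1.0, 3.0, 4.0])
print((x * K.relu(x + 3.0, max_value=6.0) / 6.0).numpy())
# [-0.  0.  0.  0.6666667  3.  4.]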

Example 8: _squeeze

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import relu [as alias]
def _squeeze(self, inputs):
    """Squeeze-and-Excitation.

    This function defines the squeeze step, producing per-channel excitation
    weights. Requires:
    from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape

    # Arguments
        inputs: Tensor, input tensor of a conv layer.
    """
    input_channels = int(inputs.shape[-1])

    x = GlobalAveragePooling2D()(inputs)
    x = Dense(input_channels, activation='relu')(x)
    x = Dense(input_channels, activation='hard_sigmoid')(x)
    x = Reshape((1, 1, input_channels))(x)

    return x
Developer ID: 1044197988, Project: TF.Keras-Commonly-used-models, Lines: 16, Source file: mobilenet_base.py
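A hedged sketch of how the returned excitation weights are typically applied; the call site in the original class is not shown in this excerpt. The (1, 1, C) tensor is broadcast-multiplied against the feature map, for example:

import tensorflow as tf
from tensorflow.keras.layers import (Dense, GlobalAveragePooling2D, Multiply,
                                     Reshape)

inputs = tf.keras.Input(shape=(32, 32, 16))
c = inputs.shape[-1]
se = GlobalAveragePooling2D()(inputs)
se = Dense(c, activation='relu')(se)
se = Dense(c, activation='hard_sigmoid')(se)
se = Reshape((1, 1, c))(se)
outputs = Multiply()([inputs, se])  # channel-wise reweighting by broadcasting
model = tf.keras.Model(inputs, outputs)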

Example 9: train_step

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import relu [as alias]
def train_step(self, images, style, noise, perform_gp=True, perform_pl=False):

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Get style information
        w_space = []
        pl_lengths = self.pl_mean
        for i in range(len(style)):
            w_space.append(self.GAN.S(style[i]))

        # Generate images
        generated_images = self.GAN.G(w_space + [noise])

        # Discriminate
        real_output = self.GAN.D(images, training=True)
        fake_output = self.GAN.D(generated_images, training=True)

        # Hinge loss function
        gen_loss = K.mean(fake_output)
        divergence = K.mean(K.relu(1 + real_output) + K.relu(1 - fake_output))
        disc_loss = divergence

        if perform_gp:
            # R1 gradient penalty
            disc_loss += gradient_penalty(images, real_output, 10)

        if perform_pl:
            # Slightly adjust W space
            w_space_2 = []
            for i in range(len(style)):
                std = 0.1 / (K.std(w_space[i], axis=0, keepdims=True) + 1e-8)
                w_space_2.append(w_space[i] + K.random_normal(tf.shape(w_space[i])) / (std + 1e-8))

            # Generate from slightly adjusted W space
            pl_images = self.GAN.G(w_space_2 + [noise])

            # Get distance after adjustment (path length)
            delta_g = K.mean(K.square(pl_images - generated_images), axis=[1, 2, 3])
            pl_lengths = delta_g

            if self.pl_mean > 0:
                gen_loss += K.mean(K.square(pl_lengths - self.pl_mean))

    # Get gradients for respective areas
    gradients_of_generator = gen_tape.gradient(gen_loss, self.GAN.GM.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, self.GAN.D.trainable_variables)

    # Apply gradients
    self.GAN.GMO.apply_gradients(zip(gradients_of_generator, self.GAN.GM.trainable_variables))
    self.GAN.DMO.apply_gradients(zip(gradients_of_discriminator, self.GAN.D.trainable_variables))

    return disc_loss, gen_loss, divergence, pl_lengths
Developer ID: manicman1999, Project: StyleGAN2-Tensorflow-2.0, Lines: 53, Source file: stylegan_two.py
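The gradient_penalty helper is not included in this excerpt. A minimal reconstruction of an R1-style penalty consistent with the call above (my sketch, not the project's code; tf.gradients requires graph mode, e.g. a tf.function-wrapped train step):

import tensorflow as tf

def gradient_penalty(samples, output, weight):
    # R1 penalty: mean squared gradient norm of D's outputs at real samples.
    gradients = tf.gradients(output, samples)[0]
    penalty = tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3])
    return weight * tf.reduce_mean(penalty)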


Note: The tensorflow.keras.backend.relu method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.