

Python backend.mean Method Code Examples

This article collects typical usage examples of the tensorflow.python.keras.backend.mean method in Python. If you are wondering what backend.mean does, how to call it, or what real code that uses it looks like, the selected examples below should help. You can also explore further usage examples from the containing module, tensorflow.python.keras.backend.


The following presents 11 code examples of the backend.mean method, sorted by popularity by default.

Example 1: call

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def call(self, x):
    # Layer normalization: standardize x along self.axis, then rescale with
    # the learned gamma and beta parameters.
    mean = K.mean(x, axis=self.axis, keepdims=True)
    std = K.std(x, axis=self.axis, keepdims=True)
    return self.gamma * (x - mean) / (std + self.eps) + self.beta
Author: ShenDezhou, Project: icme2019, Lines: 6, Source: normalization.py
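
As a standalone illustration (not part of the icme2019 project), the sketch below applies the same mean/std normalization to a constant tensor in eager TensorFlow 2, assuming a TF version where tensorflow.python.keras is still importable; gamma, beta, and eps are hypothetical scalars standing in for the layer's trained weights:

import tensorflow as tf
from tensorflow.python.keras import backend as K

x = tf.constant([[1., 2., 3., 4.],
                 [2., 4., 6., 8.]])
gamma, beta, eps = 1.0, 0.0, 1e-8           # stand-ins for the layer attributes

mean = K.mean(x, axis=-1, keepdims=True)    # per-row mean, shape (2, 1)
std = K.std(x, axis=-1, keepdims=True)      # per-row standard deviation
y = gamma * (x - mean) / (std + eps) + beta
print(y.numpy())                            # each row now has ~zero mean and unit variance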

Example 2: l_2nd

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def l_2nd(beta):
    def loss_2nd(y_true, y_pred):
        # SDNE second-order loss: reconstruction errors on non-zero entries of
        # y_true are penalized beta times more heavily than those on zeros.
        b_ = np.ones_like(y_true)
        b_[y_true != 0] = beta
        x = K.square((y_true - y_pred) * b_)
        t = K.sum(x, axis=-1)
        return K.mean(t)

    return loss_2nd
Author: shenweichen, Project: GraphEmbedding, Lines: 11, Source: sdne.py
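
To see what the beta weighting does, the hedged sketch below evaluates the loss_2nd closure from Example 2 on small NumPy arrays in eager TensorFlow (toy values invented for illustration). Note that the np.ones_like / boolean-indexing step expects y_true to behave like a NumPy array, so this runs eagerly rather than inside a compiled graph:

import numpy as np
import tensorflow as tf  # eager TF 2.x
from tensorflow.python.keras import backend as K  # used inside loss_2nd

loss_fn = l_2nd(beta=10.)
y_true = np.array([[0., 1., 0., 1.]], dtype=np.float32)      # one adjacency row
y_pred = np.array([[0.1, 0.6, 0.2, 0.9]], dtype=np.float32)
# errors on the two non-zero entries are weighted 10x more heavily
print(float(loss_fn(y_true, y_pred)))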

Example 3: line_loss

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def line_loss(y_true, y_pred):
    # Negative mean log-sigmoid of y_true * y_pred (the LINE edge objective).
    return -K.mean(K.log(K.sigmoid(y_true * y_pred)))
Author: shenweichen, Project: GraphEmbedding, Lines: 4, Source: line.py
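
A brief usage sketch (toy values, eager TF 2.x, not from the GraphEmbedding project): in a LINE-style setup, y_true is typically +1 for observed edges and -1 for negative samples, while y_pred is the inner product of the two node embeddings, so the loss is a small positive value when the sign of y_pred agrees with y_true:

import tensorflow as tf
from tensorflow.python.keras import backend as K

y_true = tf.constant([1., 1., -1.])        # +1: observed edge, -1: negative sample
y_pred = tf.constant([2.3, 0.7, -1.5])     # embedding inner products
print(float(line_loss(y_true, y_pred)))    # small when the signs agree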

Example 4: call

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def call(self, x, mask=None):
    """1. mask is a bool tensor and must be cast before computing.
       2. mask is two-dimensional with shape (batch_size, feature_dimension).
    """
    if mask is not None:
        # Expand the 2-D mask to the shape of x, zero out masked positions,
        # and average only over the positions that remain.
        mask = K.repeat(mask, x.shape[-1])
        mask = tf.transpose(mask, [0, 2, 1])
        mask = tf.cast(mask, tf.float32)
        x = x * mask
        return K.sum(x, axis=1) / K.sum(mask, axis=1)
    else:
        return K.mean(x, axis=1)
Author: FederatedAI, Project: FATE, Lines: 14, Source: backend.py
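
The same masked average can be checked with explicit tensors. This sketch (not from the FATE project) reproduces the computation using broadcasting instead of K.repeat/transpose, which yields the same result with less reshaping; the shapes are hypothetical:

import tensorflow as tf
from tensorflow.python.keras import backend as K

x = tf.random.normal([2, 3, 4])                    # (batch, seq_len, feature_dim)
mask = tf.constant([[True, True, False],
                    [True, False, False]])         # (batch, seq_len)

mask_f = tf.cast(mask, tf.float32)[:, :, None]     # (batch, seq_len, 1), broadcasts over features
masked_mean = K.sum(x * mask_f, axis=1) / K.sum(mask_f, axis=1)
plain_mean = K.mean(x, axis=1)                     # what the layer returns when mask is None
print(masked_mean.shape, plain_mean.shape)         # (2, 4) (2, 4)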

Example 5: __init__

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def __init__(self, latent_regularizer='bvae', beta=100., **kwargs):
    '''
    Args:
    ------
    latent_regularizer : str
        Either 'bvae', 'vae', or 'no'.
        Determines whether regularization is applied
        to the latent space representation.
    beta : float
        beta > 1, used with the 'bvae' latent_regularizer
        (ignored unless 'bvae' is selected).
    ------
    Example:
        sample = SampleLayer('bvae', 16)([mean, logvar])
    '''
    if latent_regularizer.lower() in ['bvae', 'vae']:
        self.reg = latent_regularizer
    else:
        self.reg = None

    if self.reg == 'bvae':
        self.beta = beta
    elif self.reg == 'vae':
        self.beta = 1.

    super(SampleLayer, self).__init__(**kwargs)
Author: alecGraves, Project: BVAE-tf, Lines: 28, Source: sample_layer.py

Example 6: call

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def call(self, x, training=None):
    if len(x) != 2:
        raise Exception('input layers must be a list: mean and logvar')
    if len(x[0].shape) != 2 or len(x[1].shape) != 2:
        raise Exception('input shape is not a vector [batchSize, latentSize]')

    mean = x[0]
    logvar = x[1]

    # trick to allow setting the batch size at train/eval time
    if mean.shape[0].value is None or logvar.shape[0].value is None:
        return mean + 0 * logvar  # Keras needs the *0 so the gradient is not None

    if self.reg is not None:
        # KL divergence between N(mean, exp(logvar)) and the standard normal prior
        latent_loss = -0.5 * (1 + logvar
                              - K.square(mean)
                              - K.exp(logvar))
        latent_loss = K.sum(latent_loss, axis=-1)  # sum over the latent dimension
        latent_loss = K.mean(latent_loss, axis=0)  # average over the batch

        # use beta to force less usage of the vector space:
        latent_loss = self.beta * latent_loss
        self.add_loss(latent_loss, x)

    def reparameterization_trick():
        epsilon = K.random_normal(shape=logvar.shape,
                                  mean=0., stddev=1.)
        stddev = K.exp(logvar * 0.5)
        return mean + stddev * epsilon

    # sample during training, pass the mean through at inference time
    return K.in_train_phase(reparameterization_trick, mean + 0 * logvar,
                            training=training)  # TODO: figure out why this is not working in the specified tf version
Author: alecGraves, Project: BVAE-tf, Lines: 34, Source: sample_layer.py
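
For reference, the regularizer above is the closed-form KL divergence KL(N(mu, exp(logvar)) || N(0, 1)) = -0.5 * sum(1 + logvar - mu^2 - exp(logvar)). The standalone sketch below (hypothetical tensors, eager TF 2.x, not part of BVAE-tf) computes that term and draws one reparameterized sample:

import tensorflow as tf
from tensorflow.python.keras import backend as K

mean = tf.constant([[0.0, 1.0], [0.5, -0.5]])     # (batch, latent)
logvar = tf.constant([[0.0, -1.0], [0.2, 0.1]])

kl = -0.5 * (1 + logvar - K.square(mean) - K.exp(logvar))
kl = K.mean(K.sum(kl, axis=-1), axis=0)           # sum over latent dims, average over the batch
print(float(kl))

epsilon = K.random_normal(shape=K.shape(logvar))  # reparameterization trick
sample = mean + K.exp(0.5 * logvar) * epsilon
print(sample.shape)                               # (2, 2)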

Example 7: compute_cmvn

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def compute_cmvn(sums, square, count):
  ''' Compute the global feature mean and variance.
      var = E(x^2) - (E(x))^2
  '''
  mean = sums / count
  var = (square / count) - np.square(mean)
  return mean, var
Author: didi, Project: delta, Lines: 9, Source: cmvn.py
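
A small usage sketch (hypothetical data, plain NumPy): accumulate the sufficient statistics batch by batch and call compute_cmvn once at the end:

import numpy as np

batches = [np.random.randn(100, 40), np.random.randn(80, 40)]  # (frames, nbins)

sums = np.zeros(40)
square = np.zeros(40)
count = 0
for feats in batches:
    sums += feats.sum(axis=0)
    square += np.square(feats).sum(axis=0)
    count += feats.shape[0]

mean, var = compute_cmvn(sums, square, count)
print(mean.shape, var.shape)   # (40,) (40,)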

Example 8: load_cmvn

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def load_cmvn(path):
  ''' Load the mean and variance from cmvn.npy,
      then convert them to TF tensors.
  '''
  # [1, nbins, nchannels]
  mean, variance = np.load(path)
  # [1, 1, nbins, nchannels]
  mean = np.expand_dims(mean, axis=0)
  variance = np.expand_dims(variance, axis=0)
  mean = tf.convert_to_tensor(mean, dtype=tf.float32, name='cmvn_mean')
  variance = tf.convert_to_tensor(
      variance, dtype=tf.float32, name='cmvn_variance')
  return mean, variance
Author: didi, Project: delta, Lines: 15, Source: cmvn.py
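
For np.load(path) to unpack into a (mean, variance) pair as above, the two statistics have to be stored stacked along a leading axis. The sketch below is an assumption about that file layout (not code from the delta project), showing how such a cmvn.npy could be produced:

import numpy as np

mean = np.zeros((1, 40, 1))        # [1, nbins, nchannels]
variance = np.ones((1, 40, 1))
np.save('cmvn.npy', np.stack([mean, variance]))   # stored shape: (2, 1, 40, 1)

m, v = np.load('cmvn.npy')         # unpacks along the first axis, as load_cmvn does
print(m.shape, v.shape)            # (1, 40, 1) (1, 40, 1)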

Example 9: apply_cmvn

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def apply_cmvn(feats, mean, variance, epsilon=1e-9):
  ''' TF: apply CMVN (cepstral mean and variance normalization) to features. '''
  return (feats - mean) * tf.rsqrt(variance + epsilon)
Author: didi, Project: delta, Lines: 5, Source: cmvn.py
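
A minimal application sketch (hypothetical shapes, eager TF 2.x): statistics of shape [1, 1, nbins, nchannels], as produced by load_cmvn above, broadcast over the batch and time axes of the features:

import numpy as np
import tensorflow as tf

feats = tf.random.normal([8, 200, 40, 1])                       # (batch, time, nbins, nchannels)
mean = tf.constant(np.random.randn(1, 1, 40, 1), tf.float32)    # broadcastable global stats
variance = tf.constant(np.abs(np.random.randn(1, 1, 40, 1)) + 1.0, tf.float32)

normalized = apply_cmvn(feats, mean, variance)
print(normalized.shape)                                         # (8, 200, 40, 1)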

Example 10: apply_local_cmvn

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def apply_local_cmvn(feats, epsilon=1e-9):
  ''' feats: (NHWC) -- normalize each example with its own mean and
      variance computed along axis 1. '''
  mean = tf.expand_dims(keras_backend.mean(feats, axis=1), axis=1)
  var = tf.expand_dims(keras_backend.var(feats, axis=1), axis=1)
  feats = (feats - mean) * tf.rsqrt(var + epsilon)
  return feats
Author: didi, Project: delta, Lines: 8, Source: cmvn.py

Example 11: call

# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import mean [as alias]
def call(self, inputs):
    # Layer normalization: standardize the inputs, then optionally
    # rescale (gamma) and re-center (beta).
    mean = K.mean(inputs, axis=self.axis, keepdims=True)
    variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
    std = K.sqrt(variance + self.eps)
    outputs = (inputs - mean) / std
    if self.scale:
        outputs *= self.gamma
    if self.center:
        outputs += self.beta
    return outputs
Author: shenweichen, Project: DeepCTR, Lines: 12, Source: normalization.py


Note: The tensorflow.python.keras.backend.mean examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs, with snippets selected from open-source projects contributed by their respective authors. The copyright of the source code remains with the original authors; for redistribution and use, please follow the license of the corresponding project. Do not republish without permission.