

Python backend.sqrt Method Code Examples

This article collects typical usage examples of the Python method tensorflow.keras.backend.sqrt. If you are wondering how exactly to use backend.sqrt, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples for the containing module, tensorflow.keras.backend.


The following presents 15 code examples of the backend.sqrt method, sorted by popularity by default.
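Before diving into the examples, here is a minimal sketch (assuming an eager TensorFlow 2.x environment) of what backend.sqrt does on its own: it takes the element-wise square root of a tensor.

import tensorflow as tf
from tensorflow.keras import backend as K

# Element-wise square root of a small constant tensor.
x = tf.constant([[1.0, 4.0], [9.0, 16.0]])
print(K.sqrt(x))  # [[1. 2.] [3. 4.]]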

Example 1: distance_matrix

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def distance_matrix(self, D):
    """Calcuates the distance matrix from the distance tensor

    B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features

    Parameters
    ----------
    D: tf.Tensor of shape (B, N, M, d)
      Distance tensor.

    Returns
    -------
    R: tf.Tensor of shape (B, N, M)
       Distance matrix.
    """
    R = tf.reduce_sum(tf.multiply(D, D), 3)
    R = tf.sqrt(R)
    return R 
Developer ID: deepchem, Project: deepchem, Lines of code: 20, Source file: layers.py
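Since distance_matrix is a method of a DeepChem layer, a standalone sketch of the same reduce-sum-then-sqrt pattern (with hypothetical shapes) may help clarify what it computes:

import tensorflow as tf

# Hypothetical shapes: B=2, N=4 atoms, M=3 neighbors, d=3 features.
D = tf.random.uniform([2, 4, 3, 3])
R = tf.sqrt(tf.reduce_sum(tf.multiply(D, D), 3))
print(R.shape)  # (2, 4, 3): one distance per (atom, neighbor) pair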

Example 2: build

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def build(self, input_shape):
    no_features = int(input_shape[0][2])
    no_A = int(input_shape[1][2])
    self.W = tf.Variable(
        tf.random.truncated_normal(
            [no_features * no_A, self.num_filters],
            stddev=np.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
        name='weights',
        dtype=tf.float32)
    self.W_I = tf.Variable(
        tf.random.truncated_normal(
            [no_features, self.num_filters],
            stddev=np.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
        name='weights_I',
        dtype=tf.float32)
    self.b = tf.Variable(tf.constant(0.1), name='bias', dtype=tf.float32)
    self.built = True 
Developer ID: deepchem, Project: deepchem, Lines of code: 19, Source file: layers.py

Example 3: loss

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def loss(self, y_true, y_pred):

        # get the value for the true and fake images
        disc_true = self.disc(y_true)
        disc_pred = self.disc(y_pred)

        # sample a x_hat by sampling along the line between true and pred
        # z = tf.placeholder(tf.float32, shape=[None, 1])
        # shp = y_true.get_shape()[0]
        # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
        # self.batch_size does not work, since it's not None!!!
        alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
        diff = y_pred - y_true
        interp = y_true + alpha * diff

        # take gradient of D(x_hat)
        gradients = K.gradients(self.disc(interp), [interp])[0]
        grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1))-1))

        # compute loss
        return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen 
Developer ID: adalca, Project: neuron, Lines of code: 23, Source file: metrics.py

Example 4: mcor

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def mcor(y_true, y_pred):
    # matthews_correlation
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

    return numerator / (denominator + K.epsilon()) 
Developer ID: RichardBJ, Project: Deep-Channel, Lines of code: 20, Source file: predictor.py
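A hedged usage sketch: assuming the mcor function defined above, it can be passed to model.compile as a custom metric for a binary classifier (the architecture here is hypothetical):

from tensorflow import keras

# Hypothetical binary classifier; `mcor` is the function defined above.
model = keras.Sequential([
    keras.layers.Dense(16, activation='relu', input_shape=(20,)),
    keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[mcor])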

Example 5: mcor

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def mcor(y_true, y_pred):
    # Matthews correlation
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

    return numerator / (denominator + K.epsilon()) 
Developer ID: RichardBJ, Project: Deep-Channel, Lines of code: 20, Source file: deepchannel_train.py

Example 6: convert_sqrt

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def convert_sqrt(node, params, layers, lambda_func, node_name, keras_name):
    """
    Convert Sqrt layer
    :param node: current operation node
    :param params: operation attributes
    :param layers: available keras layers
    :param lambda_func: function for keras Lambda layer
    :param node_name: internal converter name
    :param keras_name: resulting layer name
    :return: None
    """
    if len(node.input) != 1:
        raise AttributeError('More than 1 input for sqrt layer.')

    input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

    def target_layer(x):
        import tensorflow.keras.backend as K
        return K.sqrt(x)

    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
    layers[node_name] = lambda_layer(input_0)
    lambda_func[keras_name] = target_layer 
Developer ID: nerox8664, Project: onnx2keras, Lines of code: 25, Source file: operation_layers.py
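The key pattern here, wrapping K.sqrt in a keras.layers.Lambda so that it becomes a layer of the converted graph, can be sketched standalone (shapes and names hypothetical):

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K

# A tiny model whose only layer applies K.sqrt element-wise.
inp = keras.Input(shape=(8,))
out = keras.layers.Lambda(lambda t: K.sqrt(t), name='sqrt')(inp)
model = keras.Model(inp, out)
print(model(tf.fill([1, 8], 9.0)))  # a row of 3.0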

Example 7: _cosine_dist

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def _cosine_dist(x, y):
  """Computes the inner product (cosine distance) between two tensors.

  Parameters
  ----------
  x: tf.Tensor
    Input Tensor
  y: tf.Tensor
    Input Tensor
  """
  denom = (backend.sqrt(backend.sum(tf.square(x)) * backend.sum(tf.square(y))) +
           backend.epsilon())
  return backend.dot(x, tf.transpose(y)) / denom 
Developer ID: deepchem, Project: deepchem, Lines of code: 15, Source file: layers.py
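A hedged call sketch with hypothetical shapes: the function returns one score per (row of x, row of y) pair, and note that it normalizes by a single global denominator rather than by per-row norms:

import tensorflow as tf
from tensorflow.keras import backend

# Hypothetical inputs: 3 and 5 feature vectors of dimension 8.
x = tf.random.normal([3, 8])
y = tf.random.normal([5, 8])
print(_cosine_dist(x, y).shape)  # (3, 5)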

Example 8: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def call(self, inputs):

        #To channels last
        x = tf.transpose(inputs[0], [0, 3, 1, 2])

        #Get weight and bias modulations
        #Make sure w's shape is compatible with self.kernel
        w = K.expand_dims(K.expand_dims(K.expand_dims(inputs[1], axis = 1), axis = 1), axis = -1)

        #Add minibatch layer to weights
        wo = K.expand_dims(self.kernel, axis = 0)

        #Modulate
        weights = wo * (w+1)

        #Demodulate
        if self.demod:
            d = K.sqrt(K.sum(K.square(weights), axis=[1,2,3], keepdims = True) + 1e-8)
            weights = weights / d

        #Reshape/scale input
        x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]]) # Fused => reshape minibatch to convolution groups.
        w = tf.reshape(tf.transpose(weights, [1, 2, 3, 0, 4]), [weights.shape[1], weights.shape[2], weights.shape[3], -1])

        x = tf.nn.conv2d(x, w,
                strides=self.strides,
                padding="SAME",
                data_format="NCHW")

        # Reshape/scale output.
        x = tf.reshape(x, [-1, self.filters, x.shape[2], x.shape[3]]) # Fused => reshape convolution groups back to minibatch.
        x = tf.transpose(x, [0, 2, 3, 1])

        return x 
Developer ID: manicman1999, Project: StyleGAN2-Tensorflow-2.0, Lines of code: 36, Source file: conv_mod.py

Example 9: earth_movers_distance

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def earth_movers_distance(y_true, y_pred):
    cdf_true = K.cumsum(y_true, axis=-1)
    cdf_pred = K.cumsum(y_pred, axis=-1)
    emd = K.sqrt(K.mean(K.square(cdf_true - cdf_pred), axis=-1))
    return K.mean(emd) 
Developer ID: idealo, Project: image-quality-assessment, Lines of code: 7, Source file: losses.py
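A hedged usage sketch: this loss expects y_true and y_pred to be probability distributions over ordered bins (for example score histograms), so a softmax output head is assumed here; the architecture is hypothetical:

from tensorflow import keras

# Hypothetical 10-bin distribution model; earth_movers_distance is defined above.
model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(128,)),
    keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam', loss=earth_movers_distance)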

Example 10: _euclidean_distance

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def _euclidean_distance(x, y):
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=-1, keepdims=True), K.epsilon())) 
Developer ID: beringresearch, Project: ivis, Lines of code: 4, Source file: losses.py
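A helper like this can be wired between two embedding branches through a Lambda layer; a hedged sketch (embedding size and branch structure hypothetical):

from tensorflow import keras

# Distance head between two embedding inputs; _euclidean_distance is defined above.
emb_a = keras.Input(shape=(32,))
emb_b = keras.Input(shape=(32,))
dist = keras.layers.Lambda(lambda t: _euclidean_distance(t[0], t[1]))([emb_a, emb_b])
model = keras.Model([emb_a, emb_b], dist)  # output shape (None, 1)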

Example 11: correlation_coefficient_loss

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def correlation_coefficient_loss(y_true, y_pred):
    x = y_true
    y = y_pred
    mx = K.mean(x)

    my = K.mean(y)
    xm, ym = x-mx, y-my
    r_num = K.sum(tf.multiply(xm,ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / r_den
    r = K.maximum(K.minimum(r, 1.0), -1.0)

    return 1 - K.square(r) 
Developer ID: xulabs, Project: aitom, Lines of code: 15, Source file: utils.py
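A quick hedged sanity check in eager mode (the sample values are made up): the loss should agree with 1 - r² computed by NumPy:

import numpy as np
import tensorflow as tf

y_true = tf.constant([1.0, 2.0, 3.0, 4.0])
y_pred = tf.constant([1.1, 1.9, 3.2, 3.8])
r = np.corrcoef(y_true.numpy(), y_pred.numpy())[0, 1]
print(float(correlation_coefficient_loss(y_true, y_pred)), 1.0 - r ** 2)  # both close to 0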

Example 12: angle_zyz_difference

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def angle_zyz_difference(ang1=np.zeros(3), ang2=np.zeros(3)):
    loc1_r = np.zeros(ang1.shape)
    loc2_r = np.zeros(ang2.shape)

    rm1 = rotation_matrix_zyz(ang1)
    rm2 = rotation_matrix_zyz(ang2)
    loc1_r_t = np.array([loc1_r, loc1_r, loc1_r])
    loc2_r_t = np.array([loc2_r, loc2_r, loc2_r])

    dif_m = (rm1.dot(np.eye(3) - loc1_r_t)).transpose() - (rm2.dot(np.eye(3) - loc2_r_t)).transpose()
    dif_d = math.sqrt(np.square(dif_m).sum())


    return dif_d 
Developer ID: xulabs, Project: aitom, Lines of code: 16, Source file: utils.py

Example 13: _euclidian_dist

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def _euclidian_dist(self, x_pair: List[Tensor]) -> Tensor:
        x1_norm = K.l2_normalize(x_pair[0], axis=1)
        x2_norm = K.l2_normalize(x_pair[1], axis=1)
        diff = x1_norm - x2_norm
        square = K.square(diff)
        _sum = K.sum(square, axis=1)
        _sum = K.clip(_sum, min_value=1e-12, max_value=None)
        dist = K.sqrt(_sum) / 2.
        return dist 
Developer ID: deepmipt, Project: DeepPavlov, Lines of code: 11, Source file: bilstm_siamese_network.py

Example 14: _pairwise_distances

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def _pairwise_distances(self, inputs: List[Tensor]) -> Tensor:
        emb_c, emb_r = inputs
        bs = K.shape(emb_c)[0]
        embeddings = K.concatenate([emb_c, emb_r], 0)
        dot_product = K.dot(embeddings, K.transpose(embeddings))
        square_norm = K.batch_dot(embeddings, embeddings, axes=1)
        distances = K.transpose(square_norm) - 2.0 * dot_product + square_norm
        distances = distances[0:bs, bs:bs+bs]
        distances = K.clip(distances, 0.0, None)
        mask = K.cast(K.equal(distances, 0.0), K.dtype(distances))
        distances = distances + mask * 1e-16
        distances = K.sqrt(distances)
        distances = distances * (1.0 - mask)
        return distances 
Developer ID: deepmipt, Project: DeepPavlov, Lines of code: 16, Source file: bilstm_siamese_network.py

Example 15: frn_layer_keras

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sqrt [as alias]
def frn_layer_keras(x, tau, beta, gamma, epsilon=1e-6):
    # x: Input tensor of shape [BxHxWxC].
    # tau, beta, gamma: Variables of shape [1, 1, 1, C].
    # eps: A scalar constant or learnable variable.
    # Compute the mean norm of activations per channel.
    nu2 = K.mean(K.square(x), axis=[1, 2], keepdims=True)
    # Perform FRN.
    x = x * 1 / K.sqrt(nu2 + K.abs(epsilon))
    # Return after applying the Offset-ReLU non-linearity.
    return K.maximum(gamma * x + beta, tau) 
Developer ID: 1044197988, Project: TF.Keras-Commonly-used-models, Lines of code: 12, Source file: FRN.py
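A hedged call sketch with hypothetical NHWC activations and per-channel parameters (in practice tau, beta, and gamma would be learnable variables):

import tensorflow as tf

# Hypothetical batch of activations: [B, H, W, C] = [2, 8, 8, 16].
x = tf.random.normal([2, 8, 8, 16])
tau = tf.zeros([1, 1, 1, 16])
beta = tf.zeros([1, 1, 1, 16])
gamma = tf.ones([1, 1, 1, 16])
y = frn_layer_keras(x, tau, beta, gamma)
print(y.shape)  # (2, 8, 8, 16)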


Note: The tensorflow.keras.backend.sqrt method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding projects' licenses; do not reproduce without permission.