

Python tensorflow.diag Method Code Examples

This article compiles typical usage examples of the tensorflow.diag method in Python. If you are wondering what tensorflow.diag does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow module in which this method lives.


The sections below present 15 code examples of the tensorflow.diag method, ordered by popularity by default.
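
As a quick orientation before the examples, here is a minimal sketch of what the method itself does, assuming a TensorFlow 1.x environment (tf.diag was removed from the top level in TensorFlow 2.x; equivalent functionality lives under tf.linalg):

import tensorflow as tf  # assumes TensorFlow 1.x, where tf.diag is a top-level function

# tf.diag takes a tensor of diagonal values and returns a tensor with those
# values on the main diagonal and zeros everywhere else.
values = tf.constant([1., 2., 3.])
diag_matrix = tf.diag(values)

with tf.Session() as sess:
    print(sess.run(diag_matrix))
    # [[1. 0. 0.]
    #  [0. 2. 0.]
    #  [0. 0. 3.]]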

Example 1: transition

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def transition(h,share=None):
  # compute A,B,o linearization matrices
  with tf.variable_scope("trans",reuse=share):
    for l in range(2):
      h=ReLU(h,100,"l"+str(l))
    with tf.variable_scope("A"):
      v,r=tf.split(1,2,linear(h,z_dim*2))
      v1=tf.expand_dims(v,-1) # (batch, z_dim, 1)
      rT=tf.expand_dims(r,1) # batch, 1, z_dim
      I=tf.diag([1.]*z_dim)
      A=(I+tf.batch_matmul(v1,rT)) # (z_dim, z_dim) + (batch, z_dim, 1)*(batch, 1, z_dim) (I is broadcasted) 
    with tf.variable_scope("B"):
      B=linear(h,z_dim*u_dim)
      B=tf.reshape(B,[-1,z_dim,u_dim])
    with tf.variable_scope("o"):
      o=linear(h,z_dim)
    return A,B,o,v,r 
Author: ericjang, Project: e2c, Lines: 19, Source: e2c_seq.py
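
The tf.diag call here builds the z_dim × z_dim identity, which is then broadcast against the batched rank-one product v·rᵀ. A standalone sketch of that pattern, assuming TensorFlow 1.x (tf.matmul is used as a stand-in for the older tf.batch_matmul, and v, r, z_dim are made-up values):

import tensorflow as tf  # assumes TensorFlow 1.x

z_dim = 3
v = tf.constant([[1., 2., 3.]])        # hypothetical (batch=1, z_dim)
r = tf.constant([[0.1, 0.2, 0.3]])     # hypothetical (batch=1, z_dim)
v1 = tf.expand_dims(v, -1)             # (1, z_dim, 1)
rT = tf.expand_dims(r, 1)              # (1, 1, z_dim)
I = tf.diag([1.] * z_dim)              # (z_dim, z_dim) identity built with tf.diag
A = I + tf.matmul(v1, rT)              # identity broadcasts over the batch dimension

with tf.Session() as sess:
    print(sess.run(A))                 # each A[i] equals I + v[i] r[i]^T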

Example 2: transition

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def transition(h):
  # compute A,B,o linearization matrices
  with tf.variable_scope("trans"):
    for l in range(2):
      h=ReLU(h,100,"l"+str(l))
    with tf.variable_scope("A"):
      v,r=tf.split(1,2,linear(h,z_dim*2))
      v1=tf.expand_dims(v,-1) # (batch, z_dim, 1)
      rT=tf.expand_dims(r,1) # batch, 1, z_dim
      I=tf.diag([1.]*z_dim)
      A=(I+tf.batch_matmul(v1,rT)) # (z_dim, z_dim) + (batch, z_dim, 1)*(batch, 1, z_dim) (I is broadcasted) 
    with tf.variable_scope("B"):
      B=linear(h,z_dim*u_dim)
      B=tf.reshape(B,[-1,z_dim,u_dim])
    with tf.variable_scope("o"):
      o=linear(h,z_dim)
    return A,B,o,v,r 
Author: ericjang, Project: e2c, Lines: 19, Source: e2c_plane.py

Example 3: _symmetric_matrix_square_root

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def _symmetric_matrix_square_root(mat, eps=1e-10):
    """Compute square root of a symmetric matrix.
    Note that this is different from an elementwise square root. We want to
    compute M' where M' = sqrt(mat) such that M' * M' = mat.
    Also note that this method **only** works for symmetric matrices.
    Args:
      mat: Matrix to take the square root of.
      eps: Small epsilon such that any element less than eps will not be square
        rooted to guard against numerical instability.
    Returns:
      Matrix square root of mat.
    """
    # Unlike numpy, tensorflow's return order is (s, u, v)
    s, u, v = tf.svd(mat)
    # sqrt is unstable around 0; values below eps are kept as-is instead of square-rooted
    si = tf.where(tf.less(s, eps), s, tf.sqrt(s))
    # Note that the v returned by Tensorflow is v = V
    # (when referencing the equation A = U S V^T)
    # This is unlike Numpy which returns v = V^T
    return tf.matmul(
        tf.matmul(u, tf.diag(si)), v, transpose_b=True) 
Author: openai, Project: glow, Lines: 23, Source: tfops.py
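
A quick way to sanity-check this helper (a sketch assuming a TensorFlow 1.x session and a symmetric positive semi-definite input): squaring the returned matrix should recover the original.

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x, where tf.svd/tf.diag are top-level

x = np.random.randn(4, 4).astype("float32")
mat = tf.constant(x.dot(x.T))                     # symmetric PSD by construction
sqrt_mat = _symmetric_matrix_square_root(mat)
recovered = tf.matmul(sqrt_mat, sqrt_mat)

with tf.Session() as sess:
    m, r = sess.run([mat, recovered])
    print(np.max(np.abs(m - r)))                  # should be close to 0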

Example 4: build

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def build(self, input_shape):
        self.spatial_ker_weights = self.add_weight(name='spatial_ker_weights',
                                                   shape=(self.num_classes,),
                                                   initializer=tf.initializers.truncated_normal(mean=0, stddev=0.1),
                                                   trainable=True)

        self.spatial_ker_weights = tf.diag(self.spatial_ker_weights)

        self.bilateral_ker_weights = self.add_weight(name='bilateral_ker_weights',
                                                     shape=(self.num_classes,),
                                                     initializer=tf.initializers.truncated_normal(mean=0, stddev=0.1),
                                                     trainable=True)
        self.bilateral_ker_weights = tf.diag(self.bilateral_ker_weights)

        self.compatibility_matrix = self.add_weight(name='compatibility_matrix',
                                                    shape=(self.num_classes, self.num_classes),
                                                    initializer=tf.initializers.truncated_normal(mean=0, stddev=0.1),
                                                    trainable=True)

        super(CRF_RNN_Layer, self).build(input_shape) 
Author: MiguelMonteiro, Project: CRFasRNNLayer, Lines: 22, Source: crf_as_rnn_keras_layer.py

Example 5: starting_point

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def starting_point(self, random=False):
        """Heuristic to find a starting point candidate

        Parameters
        ----------
        random : `bool`
            Use a random orthogonal matrix instead of identity

        Returns
        -------
        starting_point : `np.ndarray`, shape=(n_nodes, n_nodes)
            A starting point candidate
        """
        sqrt_C = sqrtm(self.covariance)
        sqrt_L = np.sqrt(self.mean_intensity)
        if random:
            random_matrix = np.random.rand(self.n_nodes, self.n_nodes)
            M, _ = qr(random_matrix)
        else:
            M = np.eye(self.n_nodes)
        initial = np.dot(np.dot(sqrt_C, M), np.diag(1. / sqrt_L))
        return initial 
Author: X-DataInitiative, Project: tick, Lines: 24, Source: hawkes_cumulant_matching.py

Example 6: solve_ridge

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def solve_ridge(x, y, ridge_factor):
  with tf.name_scope("solve_ridge"):
    # Added a column of ones to the end of the feature matrix for bias
    A = tf.concat([x, tf.ones((x.shape.as_list()[0], 1))], axis=1)

    # Analytic solution for the ridge regression loss
    inv_target = tf.matmul(A, A, transpose_a=True)
    np_diag_penalty = ridge_factor * np.ones(
        A.shape.as_list()[1], dtype="float32")
    # Remove penalty on bias component of weights
    np_diag_penalty[-1] = 0.
    diag_penalty = tf.constant(np_diag_penalty)
    inv_target += tf.diag(diag_penalty)

    inv = tf.matrix_inverse(inv_target)
    w = tf.matmul(inv, tf.matmul(A, y, transpose_a=True))
    return w 
Author: itsamitgoel, Project: Gun-Detector, Lines: 19, Source: linear_regression.py
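
In other words, the function returns the closed-form ridge solution w = (AᵀA + λI)⁻¹Aᵀy, with the penalty removed for the bias column. A hypothetical usage sketch with synthetic data, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x

x = tf.constant(np.random.randn(50, 1).astype("float32"))
y = 2.0 * x + 1.0                         # synthetic targets: weight 2, bias 1
w = solve_ridge(x, y, ridge_factor=1e-3)

with tf.Session() as sess:
    print(sess.run(w))                    # roughly [[2.], [1.]] (weight, then bias)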

Example 7: _covariance

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def _covariance(x, diag):
  """Defines the covariance operation of a matrix.

  Args:
    x: a matrix Tensor. Dimension 0 should contain the number of examples.
    diag: if True, it computes the diagonal covariance.

  Returns:
    A Tensor representing the covariance of x. If diag is True, only the
    diagonal of the covariance matrix is returned.
  """
  num_points = tf.to_float(tf.shape(x)[0])
  x -= tf.reduce_mean(x, 0, keep_dims=True)
  if diag:
    cov = tf.reduce_sum(
        tf.square(x), 0, keep_dims=True) / (num_points - 1)
  else:
    cov = tf.matmul(x, x, transpose_a=True)  / (num_points - 1)
  return cov 
Author: tobegit3hub, Project: deep_image_model, Lines: 21, Source: gmm_ops.py
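
A hypothetical check of this helper against NumPy, assuming TensorFlow 1.x; both use the N−1 divisor, so the results should agree up to float32 precision:

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x

data = np.random.randn(100, 3).astype("float32")
cov_full = _covariance(tf.constant(data), diag=False)

with tf.Session() as sess:
    diff = np.abs(sess.run(cov_full) - np.cov(data, rowvar=False))
    print(diff.max())                     # should be close to 0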

Example 8: transition

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def transition(h,share=None):
  # compute A,B,o linearization matrices
  with tf.variable_scope("trans",reuse=share):
    for l in range(2):
      h=ReLU(h,100,"aggregate_loss"+str(l))
    with tf.variable_scope("A"):
      v,r=tf.split(1,2,linear(h,z_dim*2))
      v1=tf.expand_dims(v,-1) # (batch, z_dim, 1)
      rT=tf.expand_dims(r,1) # batch, 1, z_dim
      I=tf.diag([1.]*z_dim)
      A=(I+tf.batch_matmul(v1,rT)) # (z_dim, z_dim) + (batch, z_dim, 1)*(batch, 1, z_dim) (I is broadcasted) 
    with tf.variable_scope("B"):
      B=linear(h,z_dim*u_dim)
      B=tf.reshape(B,[-1,z_dim,u_dim])
    with tf.variable_scope("o"):
      o=linear(h,z_dim)
    return A,B,o,v,r 
Author: ethanluoyc, Project: e2c-pytorch, Lines: 19, Source: e2c_seq.py

Example 9: transition

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def transition(h):
    # compute A,B,o linearization matrices
    with tf.variable_scope("trans"):
        for l in range(2):
            h = ReLU(h, 100, "aggregate_loss" + str(l))
        with tf.variable_scope("A"):
            v, r = tf.split(1, 2, linear(h, z_dim * 2))
            v1 = tf.expand_dims(v, -1)  # (batch, z_dim, 1)
            rT = tf.expand_dims(r, 1)  # batch, 1, z_dim
            I = tf.diag([1.] * z_dim)
            A = (
                I + tf.batch_matmul(v1, rT)
            )  # (z_dim, z_dim) + (batch, z_dim, 1)*(batch, 1, z_dim) (I is broadcasted) 
        with tf.variable_scope("B"):
            B = linear(h, z_dim * u_dim)
            B = tf.reshape(B, [-1, z_dim, u_dim])
        with tf.variable_scope("o"):
            o = linear(h, z_dim)
        return A, B, o, v, r 
Author: ethanluoyc, Project: e2c-pytorch, Lines: 21, Source: e2c_plane.py

Example 10: regularize_diag_off_diag_dip

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def regularize_diag_off_diag_dip(covariance_matrix, lambda_od, lambda_d):
  """Compute on and off diagonal regularizers for DIP-VAE models.

  Penalize deviations of covariance_matrix from the identity matrix. Uses
  different weights for the deviations of the diagonal and off diagonal entries.

  Args:
    covariance_matrix: Tensor of size [num_latent, num_latent] to regularize.
    lambda_od: Weight of penalty for off diagonal elements.
    lambda_d: Weight of penalty for diagonal elements.

  Returns:
    dip_regularizer: Regularized deviation from diagonal of covariance_matrix.
  """
  covariance_matrix_diagonal = tf.diag_part(covariance_matrix)
  covariance_matrix_off_diagonal = covariance_matrix - tf.diag(
      covariance_matrix_diagonal)
  dip_regularizer = tf.add(
      lambda_od * tf.reduce_sum(covariance_matrix_off_diagonal**2),
      lambda_d * tf.reduce_sum((covariance_matrix_diagonal - 1)**2))
  return dip_regularizer 
Author: google-research, Project: disentanglement_lib, Lines: 23, Source: vae.py
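
A minimal sanity check (a sketch assuming TensorFlow 1.x): a covariance matrix that is exactly the identity incurs zero penalty, since both the off-diagonal entries and the deviation of the diagonal from 1 vanish.

import tensorflow as tf  # assumes TensorFlow 1.x, where tf.diag/tf.diag_part are top-level

penalty = regularize_diag_off_diag_dip(tf.eye(4), lambda_od=1.0, lambda_d=1.0)

with tf.Session() as sess:
    print(sess.run(penalty))   # 0.0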

Example 11: BatchedSparseToDense

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def BatchedSparseToDense(sparse_indices, output_size):
  """Batch compatible sparse to dense conversion.

  This is useful for one-hot coded target labels.

  Args:
    sparse_indices: [batch_size] tensor containing one index per batch
    output_size: needed in order to generate the correct dense output

  Returns:
    A [batch_size, output_size] dense tensor.
  """
  eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
  return tf.nn.embedding_lookup(eye, sparse_indices) 
Author: ringringyi, Project: DOTA_models, Lines: 16, Source: graph_builder.py
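
A hypothetical usage, assuming TensorFlow 1.x; the result is the same one-hot encoding that tf.one_hot would produce for these indices:

import tensorflow as tf  # assumes TensorFlow 1.x

indices = tf.constant([0, 2, 1])
dense = BatchedSparseToDense(indices, output_size=4)

with tf.Session() as sess:
    print(sess.run(dense))
    # [[1. 0. 0. 0.]
    #  [0. 0. 1. 0.]
    #  [0. 1. 0. 0.]]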

Example 12: pullaway_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def pullaway_loss(embeddings):
    """
    Pull Away loss calculation
    :param embeddings: The embeddings to be orthogonalized for varied faces. Shape [batch_size, embeddings_dim]
    :return: pull away term loss
    """
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    similarity = tf.matmul(
        normalized_embeddings, normalized_embeddings, transpose_b=True)
    similarity -= tf.diag(tf.diag_part(similarity))
    batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)
    pt_loss = tf.reduce_sum(similarity) / (batch_size * (batch_size - 1))
    return pt_loss 
Author: cs-chan, Project: ArtGAN, Lines: 16, Source: layers.py
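
A small sanity check (a sketch assuming TensorFlow 1.x): mutually orthogonal embeddings have zero cosine similarity with one another, so the pull-away loss is 0.

import tensorflow as tf  # assumes TensorFlow 1.x

orthogonal = tf.constant([[1., 0., 0.],
                          [0., 1., 0.],
                          [0., 0., 1.]])

with tf.Session() as sess:
    print(sess.run(pullaway_loss(orthogonal)))   # 0.0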

Example 13: get_matrix

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def get_matrix(self):
        return tf.diag(tf.fill([self.dims], self.tf_variance_scalar)) 
Author: aakhundov, Project: tf-example-models, Lines: 4, Source: isotropic_covariance.py

Example 14: get_matrix

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def get_matrix(self):
        tf_base_times_eye = tf.diag(tf.fill([self.dims], self.tf_baseline))
        tf_eig_vec_val = tf.matmul(tf.transpose(self.tf_eigvecs), tf.diag(self.tf_eigvals))
        tf_eig_vec_val_vec = tf.matmul(tf_eig_vec_val, self.tf_eigvecs)

        return tf_base_times_eye + tf_eig_vec_val_vec 
Author: aakhundov, Project: tf-example-models, Lines: 8, Source: sparse_covariance.py

Example 15: get_prior_adjustment

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag [as alias]
def get_prior_adjustment(self, original, gamma_sum):
        tf_adjusted = original
        tf_adjusted *= gamma_sum
        tf_adjusted += tf.diag(tf.fill([self.dims], 2.0 * self.tf_beta))
        tf_adjusted /= gamma_sum + (2.0 * (self.tf_alpha + 1.0))

        return tf_adjusted 
Author: aakhundov, Project: tf-example-models, Lines: 9, Source: sparse_covariance.py


Note: The tensorflow.diag examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to the corresponding projects' licenses before distributing or using the code, and do not reproduce this article without permission.