

Python tensorflow.diag_part Method Code Examples

This article collects typical usage examples of the tensorflow.diag_part method in Python. If you are unsure what tensorflow.diag_part does, how to call it, or what real usage looks like, the curated code examples below should help. You can also explore further usage examples of this method elsewhere in tensorflow.


The following presents 15 code examples of tensorflow.diag_part, ordered by popularity.
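Before the examples, here is a minimal sketch of the op itself (assuming TensorFlow 1.x; in TensorFlow 2.x the same op lives at tf.linalg.diag_part):

import tensorflow as tf  # assumes TensorFlow 1.x

# tf.diag_part extracts the main diagonal of a square matrix.
m = tf.constant([[1.0, 2.0],
                 [3.0, 4.0]])
d = tf.diag_part(m)  # shape [2]

with tf.Session() as sess:
    print(sess.run(d))  # [1. 4.]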

Example 1: rank_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im) 
Author: akzaidi, Project: fine-lm, Lines: 20, Source: slicenet.py
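A minimal way to exercise rank_loss (a sketch; the shapes and random inputs below are illustrative, not taken from the fine-lm project):

import numpy as np
import tensorflow as tf

batch, depth = 4, 8
sentence_emb = tf.constant(np.random.randn(batch, depth), dtype=tf.float32)
image_emb = tf.constant(np.random.randn(batch, depth), dtype=tf.float32)
loss = rank_loss(sentence_emb, image_emb, margin=0.2)  # scalar tensor

with tf.Session() as sess:
    print(sess.run(loss))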

Example 2: _mix_rbf_kernel

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def _mix_rbf_kernel(X, Y, sigmas, wts=None):
    if wts is None:
        wts = [1] * len(sigmas)

    XX = tf.matmul(X, X, transpose_b=True)
    XY = tf.matmul(X, Y, transpose_b=True)
    YY = tf.matmul(Y, Y, transpose_b=True)

    X_sqnorms = tf.diag_part(XX)
    Y_sqnorms = tf.diag_part(YY)

    r = lambda x: tf.expand_dims(x, 0)
    c = lambda x: tf.expand_dims(x, 1)

    K_XX, K_XY, K_YY = 0, 0, 0
    for sigma, wt in zip(sigmas, wts):
        gamma = 1 / (2 * sigma**2)
        K_XX += wt * tf.exp(-gamma * (-2 * XX + c(X_sqnorms) + r(X_sqnorms)))
        K_XY += wt * tf.exp(-gamma * (-2 * XY + c(X_sqnorms) + r(Y_sqnorms)))
        K_YY += wt * tf.exp(-gamma * (-2 * YY + c(Y_sqnorms) + r(Y_sqnorms)))

    return K_XX, K_XY, K_YY, tf.reduce_sum(wts) 
Author: djsutherland, Project: opt-mmd, Lines: 24, Source: mmd.py
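The three kernel blocks returned above plug directly into an MMD estimate. A rough sketch of the biased estimator (the helper mmd2_biased and its sigma values are illustrative, not necessarily how the opt-mmd project combines the blocks):

import tensorflow as tf

def mmd2_biased(X, Y, sigmas=(1.0, 5.0)):
    # Biased MMD^2 estimate built from the mixed-RBF kernel blocks.
    K_XX, K_XY, K_YY, _ = _mix_rbf_kernel(X, Y, list(sigmas))
    return (tf.reduce_mean(K_XX)
            - 2.0 * tf.reduce_mean(K_XY)
            + tf.reduce_mean(K_YY))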

Example 3: build_graph_with_hess

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def build_graph_with_hess(images, labels, loss_function, inference_function, learning_rate, global_step):
    optimizer_net = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.95)
    tf.summary.scalar('learning_rate', learning_rate)
    with tf.variable_scope(tf.get_variable_scope()) as scope:
        logits_stoch = inference_function(images, stochastic=True, reuse=False)
        probs_stoch = tf.nn.softmax(logits_stoch)
        tf.get_variable_scope().reuse_variables()
        logits_det = inference_function(images, stochastic=False, reuse=True)
        probs_det = tf.nn.softmax(logits_det)
        train_loss = loss_function(logits_stoch, labels)
        # weights = get_weights()
        # for v in weights:
        #     hess = tf.diag_part(tf.squeeze(tf.hessians(logits_det, v)))
        #     tf.summary.histogram(v.name + 'hessian', hess)
        #     print v.name, v.get_shape(), hess.get_shape()
    train_op = optimizer_net.minimize(train_loss, global_step=global_step)
    return train_op, probs_det, probs_stoch 
Author: da-molchanov, Project: variance-networks, Lines: 19, Source: utils.py
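The commented-out block hints at logging the diagonal of a per-variable Hessian. A standalone sketch of that idea, for a scalar loss and a 1-D variable so that tf.hessians returns a square matrix (assuming TensorFlow 1.x):

import tensorflow as tf

w = tf.get_variable('w', shape=[3], initializer=tf.ones_initializer())
loss = tf.reduce_sum(w ** 2)      # scalar loss; Hessian is 2 * I

hess = tf.hessians(loss, w)[0]    # [3, 3] Hessian matrix
hess_diag = tf.diag_part(hess)    # [3] per-weight second derivatives

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(hess_diag))    # [2. 2. 2.]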

Example 4: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def call(self, inputs):
    if self.coeffs_mean is None and self.coeffs_precision_tril_op is None:
      # p(mean(ynew) | xnew) = Normal(ynew | mean = 0, variance = xnew xnew^T)
      predictive_mean = 0.
      predictive_variance = tf.reduce_sum(tf.square(inputs), -1)
    else:
      # p(mean(ynew) | xnew, x, y) = Normal(ynew |
      #   mean = xnew (1/noise_variance) (1/noise_variance x^T x + I)^{-1}x^T y,
      #   variance = xnew (1/noise_variance x^T x + I)^{-1} xnew^T)
      predictive_mean = tf.einsum('nm,m->n', inputs, self.coeffs_mean)
      predictive_covariance = tf.matmul(
          inputs,
          self.coeffs_precision_tril_op.solve(
              self.coeffs_precision_tril_op.solve(inputs, adjoint_arg=True),
              adjoint=True))
      predictive_variance = tf.diag_part(predictive_covariance)
    return ed.Normal(loc=predictive_mean, scale=tf.sqrt(predictive_variance)) 
Author: yyht, Project: BERT, Lines: 19, Source: gaussian_process.py

Example 5: sparse_mean_fg_f1

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def sparse_mean_fg_f1(y_true, y_pred):
    y_pred = tf.argmax(y_pred, axis=-1)

    # Get confusion matrix
    cm = tf.confusion_matrix(tf.reshape(y_true, [-1]),
                             tf.reshape(y_pred, [-1]))

    # Get precisions
    TP = tf.diag_part(cm)
    precisions = TP / tf.reduce_sum(cm, axis=0)

    # Get recalls
    TP = tf.diag_part(cm)
    recalls = TP / tf.reduce_sum(cm, axis=1)

    # Get F1s
    f1s = (2 * precisions * recalls) / (precisions + recalls)

    return tf.reduce_mean(f1s[1:]) 
Author: perslev, Project: MultiPlanarUNet, Lines: 21, Source: metrics.py
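A quick way to sanity-check the metric (illustrative inputs; y_true holds sparse integer labels, y_pred holds per-class scores, and the mean is taken over the foreground classes 1..K):

import tensorflow as tf

y_true = tf.constant([0, 1, 1, 2, 2, 2])          # sparse integer labels
y_pred = tf.one_hot([0, 1, 2, 2, 2, 2], depth=3)  # per-class scores

f1 = sparse_mean_fg_f1(y_true, y_pred)
with tf.Session() as sess:
    print(sess.run(f1))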

Example 6: regularize_diag_off_diag_dip

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def regularize_diag_off_diag_dip(covariance_matrix, lambda_od, lambda_d):
  """Compute on and off diagonal regularizers for DIP-VAE models.

  Penalize deviations of covariance_matrix from the identity matrix. Uses
  different weights for the deviations of the diagonal and off diagonal entries.

  Args:
    covariance_matrix: Tensor of size [num_latent, num_latent] to regularize.
    lambda_od: Weight of penalty for off diagonal elements.
    lambda_d: Weight of penalty for diagonal elements.

  Returns:
    dip_regularizer: Regularized deviation from diagonal of covariance_matrix.
  """
  covariance_matrix_diagonal = tf.diag_part(covariance_matrix)
  covariance_matrix_off_diagonal = covariance_matrix - tf.diag(
      covariance_matrix_diagonal)
  dip_regularizer = tf.add(
      lambda_od * tf.reduce_sum(covariance_matrix_off_diagonal**2),
      lambda_d * tf.reduce_sum((covariance_matrix_diagonal - 1)**2))
  return dip_regularizer 
Author: google-research, Project: disentanglement_lib, Lines: 23, Source: vae.py
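A quick check on a covariance matrix estimated from latent samples (a sketch; the lambda_od and lambda_d weights below are illustrative, not values from disentanglement_lib):

import numpy as np
import tensorflow as tf

z = tf.constant(np.random.randn(128, 10), dtype=tf.float32)  # latent samples
z_centered = z - tf.reduce_mean(z, axis=0, keepdims=True)
cov = tf.matmul(z_centered, z_centered, transpose_a=True) / 128.0  # [10, 10]

reg = regularize_diag_off_diag_dip(cov, lambda_od=10.0, lambda_d=5.0)
with tf.Session() as sess:
    print(sess.run(reg))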

Example 7: _mix_rbf_kernel

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def _mix_rbf_kernel(X, Y, sigmas=[1.], wts=None):
    if wts is None:
        wts = [1] * len(sigmas)

    XX = tf.matmul(X, X, transpose_b=True)
    XY = tf.matmul(X, Y, transpose_b=True)
    YY = tf.matmul(Y, Y, transpose_b=True)

    X_sqnorms = tf.diag_part(XX)
    Y_sqnorms = tf.diag_part(YY)

    r = lambda x: tf.expand_dims(x, 0)
    c = lambda x: tf.expand_dims(x, 1)

    K_XX, K_XY, K_YY = 0, 0, 0
    for sigma, wt in zip(sigmas, wts):
        gamma = 1 / (2 * sigma**2)
        K_XX += wt * tf.exp(-gamma * (-2 * XX + c(X_sqnorms) + r(X_sqnorms)))
        K_XY += wt * tf.exp(-gamma * (-2 * XY + c(X_sqnorms) + r(Y_sqnorms)))
        K_YY += wt * tf.exp(-gamma * (-2 * YY + c(Y_sqnorms) + r(Y_sqnorms)))

    return K_XX, K_XY, K_YY, tf.reduce_sum(wts) 
Author: ruidan, Project: DAS, Lines: 24, Source: my_layers.py

Example 8: matrix_mean_wo_diagonal

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def matrix_mean_wo_diagonal(matrix, num_row, num_col=None, name='mu_wo_diag'):
    """ This function calculates the mean of the matrix elements not in the diagonal

    2018.4.9 - replace tf.diag_part with tf.matrix_diag_part
    tf.matrix_diag_part can be used for rectangle matrix while tf.diag_part can only be used for square matrix

    :param matrix:
    :param num_row:
    :type num_row: float
    :param num_col:
    :type num_col: float
    :param name:
    :return:
    """
    with tf.name_scope(name):
        if num_col is None:
            mu = (tf.reduce_sum(matrix) - tf.reduce_sum(tf.matrix_diag_part(matrix))) / (num_row * (num_row - 1.0))
        else:
            mu = (tf.reduce_sum(matrix) - tf.reduce_sum(tf.matrix_diag_part(matrix))) \
                 / (num_row * num_col - tf.minimum(num_col, num_row))

    return mu


######################################################################## 
Author: richardwth, Project: MMD-GAN, Lines: 27, Source: math_func.py
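The docstring's point is easy to verify: tf.matrix_diag_part accepts a rectangular input where tf.diag_part would raise a shape error.

import tensorflow as tf

rect = tf.constant([[1.0, 2.0, 3.0],
                    [4.0, 5.0, 6.0]])  # [2, 3], not square

d = tf.matrix_diag_part(rect)          # -> [1.0, 5.0]
# tf.diag_part(rect) would fail here: it requires a square matrix.

with tf.Session() as sess:
    print(sess.run(d))  # [1. 5.]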

Example 9: gradStep

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def gradStep(cls, data, mu, tau, v, e):
        swidths = tf.constant(np.array([0., 1e-9, 1e-8, 1e-7, 1e-6, 1e-4, 1e-3,
                                        1e-2, 1e-1, 1e0, 1e1, 1e2,
                                        1e3])[..., None, None],
                              dtype=data.dtype)
        for i in range(5):
            llhs = cls.llh(parameters={"mu": mu,
                                       "tau": 1./(v+e**2-e*mu)},
                           data=data)
            llhs = tf.reduce_mean(llhs, axis=0)
            signGradMu = tf.sign(tf.gradients(llhs, [mu])[0])
            mus = mu+signGradMu*swidths
            taus = 1./(v+e**2-e*mus)
            tauIsNonPositive = tf.less_equal(taus, 0.)
            mus = tf.where(tauIsNonPositive, tf.zeros_like(mus), mus)
            taus = tf.where(tauIsNonPositive, 1./tf.reduce_mean(data, axis=0)*tf.ones_like(taus), taus)
            newLlhs = cls.llh(parameters={"mu": mus,
                                          "tau": taus},
                              data=data[None])
            newLlhs = tf.reduce_mean(newLlhs, axis=-2, keepdims=True)
            argmax = tf.cast(tf.argmax(newLlhs, axis=0)[0], dtype=tf.int32)
            mu = tf.gather(mus[:, 0], argmax)
            mu = tf.diag_part(mu)
            tau = tf.gather(taus[:, 0], argmax)
            tau = tf.diag_part(tau)
        return(mu, tau) 
Author: bethgelab, Project: decompose, Lines: 28, Source: nnNormalAlgorithms.py

Example 10: _pairwise_distances

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def _pairwise_distances(embeddings, squared=False):
	"""Compute the 2D matrix of distances between all the embeddings.
	Args:
		embeddings: tensor of shape (batch_size, embed_dim)
		squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
				 If false, output is the pairwise euclidean distance matrix.
	Returns:
		pairwise_distances: tensor of shape (batch_size, batch_size)
	"""
	# Get the dot product between all embeddings
	# shape (batch_size, batch_size)
	dot_product = tf.matmul(embeddings, tf.transpose(embeddings))

	# Get squared L2 norm for each embedding. We can just take the diagonal of `dot_product`.
	# This also provides more numerical stability (the diagonal of the result will be exactly 0).
	# shape (batch_size,)
	square_norm = tf.diag_part(dot_product)

	# Compute the pairwise distance matrix as we have:
	# ||a - b||^2 = ||a||^2  - 2 <a, b> + ||b||^2
	# shape (batch_size, batch_size)
	distances = tf.expand_dims(square_norm, 1) - 2.0 * dot_product + tf.expand_dims(square_norm, 0)

	# Because of computation errors, some distances might be negative so we put everything >= 0.0
	distances = tf.maximum(distances, 0.0)

	if not squared:
		# Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)
		# we need to add a small epsilon where distances == 0.0
		mask = tf.to_float(tf.equal(distances, 0.0))
		distances = distances + mask * 1e-16

		distances = tf.sqrt(distances)

		# Correct the epsilon added: set the distances on the mask to be exactly 0.0
		distances = distances * (1.0 - mask)

	return distances 
Author: yyht, Project: BERT, Lines: 40, Source: triplet_loss_utils.py
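A small smoke test (illustrative shapes; by construction the diagonal of the result is exactly zero):

import numpy as np
import tensorflow as tf

embeddings = tf.constant(np.random.randn(5, 16), dtype=tf.float32)
dists = _pairwise_distances(embeddings)  # [5, 5]

with tf.Session() as sess:
    d = sess.run(dists)
    print(d.shape, d.diagonal())         # (5, 5) [0. 0. 0. 0. 0.]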

Example 11: test_DiagPart

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def test_DiagPart(self):
        t = tf.diag_part(self.random(3, 3))
        self.check(t) 
Author: riga, Project: tfdeploy, Lines: 5, Source: ops.py

Example 12: compute_pairwise_squared_distance

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def compute_pairwise_squared_distance(matrix):
    pairwise_dot = tf.matmul(matrix, matrix, transpose_b=True)
    squared_norm = tf.diag_part(pairwise_dot)
    sq_dist = squared_norm[:, None] + squared_norm[None, :] - 2 * pairwise_dot

    return sq_dist 
Author: songlab-cal, Project: tape-neurips2019, Lines: 8, Source: proteinnet_serializer.py

Example 13: bag_attention

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def bag_attention(x, scope, query, rel_tot, is_training, var_scope=None, dropout_before=False, keep_prob=1.0):
    with tf.variable_scope(var_scope or "attention", reuse=tf.AUTO_REUSE):
        if is_training: # training
            if dropout_before:
                x = __dropout__(x, keep_prob)
            bag_repre = []
            attention_logit = __attention_train_logit__(x, query, rel_tot)
            for i in range(scope.shape[0]):
                bag_hidden_mat = x[scope[i][0]:scope[i][1]]
                attention_score = tf.nn.softmax(attention_logit[scope[i][0]:scope[i][1]], -1)
                bag_repre.append(tf.squeeze(tf.matmul(tf.expand_dims(attention_score, 0), bag_hidden_mat))) # (1, n') x (n', hidden_size) = (1, hidden_size) -> (hidden_size)
            bag_repre = tf.stack(bag_repre)
            if not dropout_before:
                bag_repre = __dropout__(bag_repre, keep_prob)
            return __logit__(bag_repre, rel_tot), bag_repre
        else: # testing
            attention_logit = __attention_test_logit__(x, rel_tot) # (n, rel_tot)
            bag_repre = [] 
            bag_logit = []
            for i in range(scope.shape[0]):
                bag_hidden_mat = x[scope[i][0]:scope[i][1]]
                attention_score = tf.nn.softmax(tf.transpose(attention_logit[scope[i][0]:scope[i][1], :]), -1) # softmax of (rel_tot, n')
                bag_repre_for_each_rel = tf.matmul(attention_score, bag_hidden_mat) # (rel_tot, n') \dot (n', hidden_size) = (rel_tot, hidden_size)
                bag_logit_for_each_rel = __logit__(bag_repre_for_each_rel, rel_tot) # -> (rel_tot, rel_tot)
                bag_repre.append(bag_repre_for_each_rel)
                bag_logit.append(tf.diag_part(tf.nn.softmax(bag_logit_for_each_rel, -1))) # could be improved by sigmoid?
            bag_repre = tf.stack(bag_repre)
            bag_logit = tf.stack(bag_logit)
            return bag_logit, bag_repre 
Author: xiaolalala, Project: Distant-Supervised-Chinese-Relation-Extraction, Lines: 31, Source: selector.py

Example 14: decov_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def decov_loss(xs):
    """Decov loss as described in https://arxiv.org/pdf/1511.06068.pdf
    'Reducing Overfitting In Deep Networks by Decorrelating Representation'
    """
    x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
    m = tf.reduce_mean(x, 0, True)
    z = tf.expand_dims(x-m, 2)
    corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0,2,1])), 0)
    corr_frob_sqr = tf.reduce_sum(tf.square(corr))
    corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
    loss = 0.5*(corr_frob_sqr - corr_diag_sqr)
    return loss 
Author: bearsprogrammer, Project: real-time-deep-face-recognition, Lines: 14, Source: facenet.py
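Applying the loss to a batch of activations is straightforward (a sketch with illustrative shapes; the loss reaches zero only when the off-diagonal covariances vanish):

import numpy as np
import tensorflow as tf

activations = tf.constant(np.random.randn(32, 64), dtype=tf.float32)
loss = decov_loss(activations)

with tf.Session() as sess:
    print(sess.run(loss))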

Example 15: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import diag_part [as alias]
def __init__(self, is_training, word_embeddings, simple_position = False):
		NN.__init__(self, is_training, word_embeddings, simple_position)

		with tf.name_scope("conv-maxpool"):
			input_sentence = tf.expand_dims(self.input_embedding, axis=1)
			x = tf.layers.conv2d(inputs = input_sentence, filters=FLAGS.hidden_size, kernel_size=[1,3], strides=[1, 1], padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d()) 
			x = tf.reduce_max(x, axis=2)
			x = tf.nn.relu(tf.squeeze(x))

		if FLAGS.katt_flag != 0:
			stack_repre = self.katt(x, is_training)
		else:
			stack_repre = self.att(x, is_training)

		with tf.name_scope("loss"):
			logits = tf.matmul(stack_repre, tf.transpose(self.relation_matrix)) + self.bias
			self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.label,logits=logits))
			self.loss = tf.losses.softmax_cross_entropy(onehot_labels = self.label, logits = logits, weights = self.weights)
			self.output = tf.nn.softmax(logits)
			tf.summary.scalar('loss',self.loss)
			self.predictions = tf.argmax(logits, 1, name="predictions")
			self.correct_predictions = tf.equal(self.predictions, tf.argmax(self.label, 1))
			self.accuracy = tf.reduce_mean(tf.cast(self.correct_predictions, "float"), name="accuracy")

		if not is_training:
			with tf.name_scope("test"):
				if FLAGS.katt_flag != 0:
					test_attention_logit = self.katt_test(x)
				else:
					test_attention_logit = self.att_test(x)
				test_tower_output = []
				for i in range(FLAGS.test_batch_size):
					test_attention_score = tf.nn.softmax(tf.transpose(test_attention_logit[self.scope[i]:self.scope[i+1],:]))
					final_repre = tf.matmul(test_attention_score, x[self.scope[i]:self.scope[i+1]])
					logits = tf.matmul(final_repre, tf.transpose(self.relation_matrix)) + self.bias
					output = tf.diag_part(tf.nn.softmax(logits))
					test_tower_output.append(output)
				test_stack_output = tf.reshape(tf.stack(test_tower_output),[FLAGS.test_batch_size, self.num_classes])
				self.test_output = test_stack_output 
Author: thunlp, Project: JointNRE, Lines: 41, Source: network.py


Note: The tensorflow.diag_part examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.