

Python tensorflow.eye Method Code Examples

This article collects and summarizes typical code examples of the tensorflow.eye method in Python. If you have been wondering what tensorflow.eye does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the tensorflow module that provides this method.


The following presents 15 code examples of the tensorflow.eye method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
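
Before going through the examples, here is a minimal sketch of the tf.eye API itself (a hedged illustration assuming TensorFlow 1.x, which the snippets below all use): it builds an identity matrix, optionally rectangular or batched, with a configurable dtype.

import tensorflow as tf

# 3x3 identity matrix, float32 by default
eye3 = tf.eye(3)                                          # shape [3, 3]
# Rectangular variant: ones on the main diagonal, zeros elsewhere
eye_rect = tf.eye(2, num_columns=4)                       # shape [2, 4]
# A batch of identity matrices, one per leading batch entry
eye_batch = tf.eye(3, batch_shape=[5], dtype=tf.float64)  # shape [5, 3, 3]

with tf.Session() as sess:
    print(sess.run(eye3))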

Example 1: rank_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im) 
Author: akzaidi | Project: fine-lm | Lines: 20 | Source: slicenet.py
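
A minimal sketch of how rank_loss above might be exercised (the shapes, placeholder names, and random inputs are illustrative, not from the original project):

import numpy as np
import tensorflow as tf

batch, depth = 8, 128
sentence_emb = tf.placeholder(tf.float32, [None, depth])
image_emb = tf.placeholder(tf.float32, [None, depth])
loss = rank_loss(sentence_emb, image_emb, margin=0.2)

with tf.Session() as sess:
    value = sess.run(loss, feed_dict={
        sentence_emb: np.random.randn(batch, depth).astype(np.float32),
        image_emb: np.random.randn(batch, depth).astype(np.float32),
    })
    print(value)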

Example 2: gather_indices_2d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def gather_indices_2d(x, block_shape, block_stride):
  """Getting gather indices."""
  # making an identity matrix kernel
  kernel = tf.eye(block_shape[0] * block_shape[1])
  kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
  # making indices [1, h, w, 1] to apply convs
  x_shape = common_layers.shape_list(x)
  indices = tf.range(x_shape[2] * x_shape[3])
  indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
  indices = tf.nn.conv2d(
      tf.cast(indices, tf.float32),
      kernel,
      strides=[1, block_stride[0], block_stride[1], 1],
      padding="VALID")
  # making indices [num_blocks, dim] to gather
  dims = common_layers.shape_list(indices)[:3]
  if all([isinstance(dim, int) for dim in dims]):
    num_blocks = functools.reduce(operator.mul, dims, 1)
  else:
    num_blocks = tf.reduce_prod(dims)
  indices = tf.reshape(indices, [num_blocks, -1])
  return tf.cast(indices, tf.int32) 
Author: akzaidi | Project: fine-lm | Lines: 24 | Source: common_attention.py
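
The identity-kernel trick above deserves a note: reshaping tf.eye(k*k) into a [k, k, 1, k*k] convolution kernel turns tf.nn.conv2d into a patch extractor, because each output channel is a one-hot over the k x k window positions. A self-contained sketch of just that trick (independent of the reshape_range / common_layers helpers used above):

import tensorflow as tf

block_h, block_w = 2, 2
# Each of the 4 output channels picks exactly one position of the 2x2 window.
kernel = tf.reshape(tf.eye(block_h * block_w), [block_h, block_w, 1, block_h * block_w])

# A [1, 4, 4, 1] map of flat indices 0..15.
indices = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
# Non-overlapping 2x2 blocks; each output pixel holds the 4 indices of one block.
blocks = tf.nn.conv2d(indices, kernel, strides=[1, 2, 2, 1], padding="VALID")

with tf.Session() as sess:
    print(sess.run(tf.reshape(blocks, [-1, block_h * block_w])))
    # [[ 0.  1.  4.  5.]
    #  [ 2.  3.  6.  7.]
    #  [ 8.  9. 12. 13.]
    #  [10. 11. 14. 15.]]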

Example 3: _build_relation_feature

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def _build_relation_feature(self):
        if self.feature_type == 'id':
            self.relation_dim = self.n_relations
            self.relation_features = tf.eye(self.n_relations, dtype=tf.float64)
        elif self.feature_type == 'bow':
            bow = np.load('../data/' + self.dataset + '/bow.npy')
            self.relation_dim = bow.shape[1]
            self.relation_features = tf.constant(bow, tf.float64)
        elif self.feature_type == 'bert':
            bert = np.load('../data/' + self.dataset + '/bert.npy')
            self.relation_dim = bert.shape[1]
            self.relation_features = tf.constant(bert, tf.float64)

        # the feature of the last relation (the null relation) is a zero vector
        self.relation_features = tf.concat([self.relation_features, tf.zeros([1, self.relation_dim], tf.float64)],
                                           axis=0, name='relation_features') 
Author: hwwang55 | Project: PathCon | Lines: 18 | Source: model.py

Example 4: radial_symmetry

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def radial_symmetry(self, d_cutoff, d, atom_numbers):
    """ Radial Symmetry Function """
    embedding = tf.eye(np.max(self.atom_cases) + 1)
    atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)

    Rs = np.linspace(0., self.radial_cutoff, self.radial_length)
    ita = np.ones_like(Rs) * 3 / (Rs[1] - Rs[0])**2
    Rs = tf.cast(np.reshape(Rs, (1, 1, 1, -1)), tf.float32)
    ita = tf.cast(np.reshape(ita, (1, 1, 1, -1)), tf.float32)
    length = ita.get_shape().as_list()[-1]

    d_cutoff = tf.stack([d_cutoff] * length, axis=3)
    d = tf.stack([d] * length, axis=3)

    out = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
    if self.atomic_number_differentiated:
      out_tensors = []
      for atom_type in self.atom_cases:
        selected_atoms = tf.expand_dims(
            tf.expand_dims(atom_numbers_embedded[:, :, atom_type], axis=1),
            axis=3)
        out_tensors.append(tf.reduce_sum(out * selected_atoms, axis=2))
      return tf.concat(out_tensors, axis=2)
    else:
      return tf.reduce_sum(out, axis=2) 
Author: deepchem | Project: deepchem | Lines: 27 | Source: layers.py
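
The embedding lookup at the top of radial_symmetry is a common pattern: looking up rows of tf.eye(N) by integer id is equivalent to one-hot encoding. A minimal sketch with made-up atom numbers (not tied to the class above):

import tensorflow as tf

num_classes = 9                     # e.g. max atomic number + 1
embedding = tf.eye(num_classes)     # row i is the one-hot vector for id i
atom_numbers = tf.constant([[1, 6, 8], [8, 1, 1]])          # [batch, n_atoms]
one_hot = tf.nn.embedding_lookup(embedding, atom_numbers)   # [batch, n_atoms, num_classes]
# Equivalent to tf.one_hot(atom_numbers, num_classes)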

Example 5: log_coral_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def log_coral_loss(self, h_src, h_trg, gamma=1e-3):
    # regularized covariances result in inf or nan
    # First: subtract the mean from the data matrix
    batch_size = tf.to_float(tf.shape(h_src)[0])
    h_src = h_src - tf.reduce_mean(h_src, axis=0)
    h_trg = h_trg - tf.reduce_mean(h_trg, axis=0)
    cov_source = (1. / (batch_size - 1)) * tf.matmul(h_src, h_src, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    cov_target = (1. / (batch_size - 1)) * tf.matmul(h_trg, h_trg, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    # eigen decomposition
    eig_source = tf.self_adjoint_eig(cov_source)
    eig_target = tf.self_adjoint_eig(cov_target)
    log_cov_source = tf.matmul(eig_source[1], tf.matmul(tf.diag(tf.log(eig_source[0])), eig_source[1], transpose_b=True))
    log_cov_target = tf.matmul(eig_target[1], tf.matmul(tf.diag(tf.log(eig_target[0])), eig_target[1], transpose_b=True))

    # Returns the Frobenius norm
    return tf.reduce_mean(tf.square(tf.subtract(log_cov_source, log_cov_target)))
    # ~ return tf.reduce_mean(tf.reduce_max(eig_target[0]))
    # ~ return tf.to_float(tf.equal(tf.count_nonzero(h_src), tf.count_nonzero(h_src)))
Author: pmorerio | Project: minimal-entropy-correlation-alignment | Lines: 20 | Source: model.py
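
Note the gamma * tf.eye(self.hidden_repr_size) terms commented out above: adding a small multiple of the identity to each covariance shifts its eigenvalues away from zero, so the tf.log applied to the eigenvalues cannot produce -inf. A hedged, standalone sketch of that regularization step (the helper name is illustrative):

import tensorflow as tf

def regularize_cov(cov, gamma=1e-3):
    # Add gamma * I so every eigenvalue is at least gamma and tf.log stays finite.
    dim = cov.shape.as_list()[0]
    return cov + gamma * tf.eye(dim, dtype=cov.dtype)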

Example 6: orthogonal_regularizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def orthogonal_regularizer(scale) :
    """ Defining the Orthogonal regularizer and return the function at last to be used in Conv layer as kernel regularizer"""

    def ortho_reg(w) :
        """ Reshaping the matrxi in to 2D tensor for enforcing orthogonality"""
        _, _, _, c = w.get_shape().as_list()

        w = tf.reshape(w, [-1, c])

        """ Declaring a Identity Tensor of appropriate size"""
        identity = tf.eye(c)

        """ Regularizer Wt*W - I """
        w_transpose = tf.transpose(w)
        w_mul = tf.matmul(w_transpose, w)
        reg = tf.subtract(w_mul, identity)

        """Calculating the Loss Obtained"""
        ortho_loss = tf.nn.l2_loss(reg)

        return scale * ortho_loss

    return ortho_reg 
Author: taki0112 | Project: Tensorflow-Cookbook | Lines: 25 | Source: utils.py
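
A sketch of how the returned ortho_reg function might be wired into a layer (the layer call and scale value are illustrative, not taken from the original repository):

import tensorflow as tf

orth_reg = orthogonal_regularizer(scale=1e-4)
x = tf.placeholder(tf.float32, [None, 64, 64, 3])
y = tf.layers.conv2d(x, filters=32, kernel_size=3,
                     kernel_regularizer=orth_reg, name="conv_ortho")
# The penalty is collected under tf.GraphKeys.REGULARIZATION_LOSSES.
reg_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))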

Example 7: orthogonal_regularizer_fully

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def orthogonal_regularizer_fully(scale) :
    """ Defining the Orthogonal regularizer and return the function at last to be used in Fully Connected Layer """

    def ortho_reg_fully(w) :
        """ Reshaping the matrix in to 2D tensor for enforcing orthogonality"""
        _, c = w.get_shape().as_list()

        """Declaring a Identity Tensor of appropriate size"""
        identity = tf.eye(c)
        w_transpose = tf.transpose(w)
        w_mul = tf.matmul(w_transpose, w)
        reg = tf.subtract(w_mul, identity)

        """ Calculating the Loss """
        ortho_loss = tf.nn.l2_loss(reg)

        return scale * ortho_loss

    return ortho_reg_fully 
Author: taki0112 | Project: Tensorflow-Cookbook | Lines: 21 | Source: utils.py

Example 8: batch_rodrigues

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def batch_rodrigues(theta, name=None):
    """
    Theta is N x 3
    """
    with tf.variable_scope(name, "batch_rodrigues", [theta]):
        batch_size = tf.shape(theta)[0]

        angle = tf.expand_dims(tf.norm(theta + 1e-8, axis=1), -1)
        r = tf.expand_dims(tf.div(theta, angle), -1)

        angle = tf.expand_dims(angle, -1)
        cos = tf.cos(angle)
        sin = tf.sin(angle)

        outer = tf.matmul(r, r, transpose_b=True, name="outer")

        eyes = tf.tile(tf.expand_dims(tf.eye(3), 0), [batch_size, 1, 1])
        R = cos * eyes + (1 - cos) * outer + sin * batch_skew(
            r, batch_size=batch_size)
        return R 
Author: blzq | Project: tf_smpl | Lines: 22 | Source: batch_lbs.py
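
For reference, the last two statements implement the Rodrigues rotation formula R = cos(θ)·I + (1 − cos θ)·r·rᵀ + sin(θ)·[r]ₓ, where r is the unit rotation axis, θ the rotation angle, and [r]ₓ the skew-symmetric cross-product matrix built by batch_skew; tf.eye(3), tiled across the batch, supplies the identity term.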

Example 9: batch_lrotmin

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def batch_lrotmin(theta, name=None):
    """ NOTE: not used bc I want to reuse R and this is simple.
    Output of this is used to compute joint-to-pose blend shape mapping.
    Equation 9 in SMPL paper.


    Args:
      theta: `Tensor`, N x 72 vector holding the axis-angle rep of K joints.
            This includes the global rotation so K=24

    Returns:
      diff_vec: `Tensor`, N x 207 flattened rotation matrices of the 23 = (K - 1) joints, with the identity subtracted.
    """
    with tf.variable_scope(name, "batch_lrotmin", [theta]):
        with tf.variable_scope("ignore_global"):
            theta = theta[:, 3:]

        # N*23 x 3 x 3
        Rs = batch_rodrigues(tf.reshape(theta, [-1, 3]))
        lrotmin = tf.reshape(Rs - tf.eye(3), [-1, 207])

        return lrotmin 
Author: blzq | Project: tf_smpl | Lines: 24 | Source: batch_lbs.py

Example 10: radial_symmetry

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def radial_symmetry(self, d_cutoff, d, atom_numbers):
    """ Radial Symmetry Function """
    embedding = tf.eye(np.max(self.atom_cases) + 1)
    atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)

    Rs = np.linspace(0., self.radial_cutoff, self.radial_length)
    ita = np.ones_like(Rs) * 3 / (Rs[1] - Rs[0])**2
    Rs = tf.to_float(np.reshape(Rs, (1, 1, 1, -1)))
    ita = tf.to_float(np.reshape(ita, (1, 1, 1, -1)))
    length = ita.get_shape().as_list()[-1]

    d_cutoff = tf.stack([d_cutoff] * length, axis=3)
    d = tf.stack([d] * length, axis=3)

    out = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
    if self.atomic_number_differentiated:
      out_tensors = []
      for atom_type in self.atom_cases:
        selected_atoms = tf.expand_dims(
            tf.expand_dims(atom_numbers_embedded[:, :, atom_type], axis=1),
            axis=3)
        out_tensors.append(tf.reduce_sum(out * selected_atoms, axis=2))
      return tf.concat(out_tensors, axis=2)
    else:
      return tf.reduce_sum(out, axis=2) 
Author: simonfqy | Project: PADME | Lines: 27 | Source: layers.py

Example 11: test_softmax_masking

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def test_softmax_masking(self):

        max_len = 3
        axis = 1
        logits = tf.eye(max_len)
        seq_len = [1,2,2]
        mask = tf.sequence_mask(seq_len, max_len)

        r = softmax_with_masking(logits, mask, axis)
        r = np.array(r)

        d = math.exp(1) + math.exp(0)

        expected = np.array([
            [1,0,0],
            [math.exp(0)/d, math.exp(1)/d,0],
            [0.5, 0.5, 0],
        ])

        np.testing.assert_almost_equal(r, expected) 
Author: Octavian-ai | Project: shortest-path | Lines: 22 | Source: attention_test.py
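
For context, a minimal masked-softmax sketch that would satisfy this test (an illustrative implementation only, not the softmax_with_masking from the original repository):

import tensorflow as tf

def masked_softmax(logits, mask, axis):
    # Push masked positions towards -inf so they receive (numerically) zero
    # probability, then normalize over the remaining entries.
    mask = tf.cast(mask, logits.dtype)
    masked_logits = logits + (1.0 - mask) * -1e9
    return tf.nn.softmax(masked_logits, axis=axis)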

Example 12: execute_reasoning

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def execute_reasoning(args, features, **kwargs):

	d_eye = tf.eye(args["max_decode_iterations"])

	iteration_id = [
		tf.tile(tf.expand_dims(d_eye[i], 0), [features["d_batch_size"], 1])
		for i in range(args["max_decode_iterations"])
	]

	inputs = [iteration_id]

	final_output, out_taps = static_decode(args, features, inputs, **kwargs)


	final_output = dynamic_assert_shape(final_output, [features["d_batch_size"], args["output_width"]])


	return final_output, out_taps 
Author: Octavian-ai | Project: shortest-path | Lines: 20 | Source: decode.py

Example 13: testLossFunctionByName

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def testLossFunctionByName(self):
    """Ensure loss functions can be identified by name."""
    with tf.Graph().as_default():
      logits = tf.eye(2)
      lc = layer_collection.LayerCollection()

      # Create a new loss function by name.
      lc.register_categorical_predictive_distribution(logits, name='loss1')
      self.assertEqual(1, len(lc.towers_by_loss))

      # Add logits to same loss function.
      lc.register_categorical_predictive_distribution(
          logits, name='loss1', reuse=True)
      self.assertEqual(1, len(lc.towers_by_loss))

      # Add another new loss function.
      lc.register_categorical_predictive_distribution(logits, name='loss2')
      self.assertEqual(2, len(lc.towers_by_loss)) 
Author: tensorflow | Project: kfac | Lines: 20 | Source: layer_collection_test.py

Example 14: get_matpower

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def get_matpower(self, exp, damping_func):
    # Note that this function returns a variable which gets updated by the
    # inverse ops.  It may be stale / inconsistent with the latest value of
    # self.cov (except when exp == 1).
    if exp != 1:
      damping_id = graph_func_to_id(damping_func)
      matpower = self._matpower_by_exp_and_damping[(exp, damping_id)]
    else:
      cov = self.cov
      identity = tf.eye(cov.shape.as_list()[0], dtype=cov.dtype)
      matpower = cov + tf.cast(damping_func(), dtype=self.cov.dtype)*identity

    assert matpower.shape.ndims == 2
    return lo.LinearOperatorFullMatrix(matpower,
                                       is_non_singular=True,
                                       is_self_adjoint=True,
                                       is_positive_definite=True,
                                       is_square=True) 
Author: tensorflow | Project: kfac | Lines: 20 | Source: fisher_factors.py
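
A short sketch of what the returned linear operator can be used for afterwards (illustrative; lo.LinearOperatorFullMatrix above appears to be the class exposed publicly as tf.linalg.LinearOperatorFullMatrix):

import tensorflow as tf

damped = tf.constant([[2.0, 0.0], [0.0, 3.0]]) + 1e-3 * tf.eye(2)
op = tf.linalg.LinearOperatorFullMatrix(
    damped, is_self_adjoint=True, is_positive_definite=True, is_square=True)

rhs = tf.ones([2, 1])
y = op.matmul(rhs)   # damped @ rhs
x = op.solve(rhs)    # solves damped @ x = rhs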

Example 15: _add_orthogonal_constraint

# Required import: import tensorflow [as alias]
# Or: from tensorflow import eye [as alias]
def _add_orthogonal_constraint(self, filt, n_filt):
        
        filt = tf.reshape(filt, [-1, n_filt])
        inner_pro = tf.matmul(tf.transpose(filt), filt)

        loss = 2e-4*tf.nn.l2_loss(inner_pro-tf.eye(n_filt))
        tf.add_to_collection('orth_constraint', loss) 
Author: wy1iu | Project: SphereNet | Lines: 9 | Source: spherenet_linear_sphereconv_wsoftmax.py


Note: The tensorflow.eye examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project. Do not reproduce this compilation without permission.