

Python tensorflow.tensordot Method Code Examples

This article compiles typical usage examples of the Python method tensorflow.tensordot, drawn from open-source projects. If you are unsure what tensorflow.tensordot does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples from the tensorflow module.


The following presents 15 code examples of tensorflow.tensordot, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
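
Before diving into the project excerpts, here is a minimal, self-contained sketch of tf.tensordot itself (shapes chosen purely for illustration). The axes argument names which dimension of each operand is contracted; contracting the last axis of the input with the first axis of a weight matrix is the pattern almost every example below relies on.

import tensorflow as tf

# Contract the last axis of a [batch, time, in_dim] tensor with the first
# axis of an [in_dim, out_dim] matrix; the result keeps the leading axes
# of x and appends the trailing axis of w: [batch, time, out_dim].
x = tf.ones([2, 5, 8])
w = tf.ones([8, 3])
y = tf.tensordot(x, w, axes=[[2], [0]])  # equivalent to axes=1 here
print(y.shape)  # (2, 5, 3)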

Example 1: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def call(self, inputs, **kwargs):

        query, keys = inputs

        keys_len = keys.get_shape()[1]
        queries = K.repeat_elements(query, keys_len, 1)

        att_input = tf.concat(
            [queries, keys, queries - keys, queries * keys], axis=-1)

        att_out = MLP(self.hidden_size, self.activation, self.l2_reg,
                      self.keep_prob, self.use_bn, seed=self.seed)(att_input)
        attention_score = tf.nn.bias_add(tf.tensordot(
            att_out, self.kernel, axes=(-1, 0)), self.bias)

        return attention_score 
Author: ShenDezhou | Project: icme2019 | Lines: 18 | Source: core.py
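
The last two ops of this layer form a dense projection along the final axis: a tensordot against a 2-D kernel, then a bias_add. A standalone sketch of just that pattern, with toy shapes and hypothetical kernel/bias stand-ins (not the layer's trained weights):

import tensorflow as tf

att_out = tf.ones([2, 7, 16])  # e.g. MLP output: batch x keys x hidden
kernel = tf.ones([16, 1])      # projects hidden units down to one logit
bias = tf.zeros([1])
score = tf.nn.bias_add(tf.tensordot(att_out, kernel, axes=(-1, 0)), bias)
print(score.shape)  # (2, 7, 1): one attention logit per key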

Example 2: soft_embedding_lookup

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def soft_embedding_lookup(embedding, soft_ids):
    """Transforms soft ids (e.g., probability distribution over ids) into
    embeddings, by mixing the embedding vectors with the soft weights.

    Args:
        embedding: A Tensor of shape `[num_classes] + embedding-dim` containing
            the embedding vectors. Embedding can have dimensionality > 1, i.e.,
            :attr:`embedding` can be of shape
            `[num_classes, emb_dim_1, emb_dim_2, ...]`
        soft_ids: A Tensor of weights (probabilities) used to mix the
            embedding vectors.

    Returns:
        A Tensor of shape `shape(soft_ids)[:-1] + shape(embedding)[1:]`. For
        example, if `shape(soft_ids) = [batch_size, max_time, vocab_size]`
        and `shape(embedding) = [vocab_size, emb_dim]`, then the return tensor
        has shape `[batch_size, max_time, emb_dim]`.

    Example::

        decoder_outputs, ... = decoder(...)
        soft_seq_emb = soft_embedding_lookup(
            embedding, tf.nn.softmax(decoder_outputs.logits))
    """
    return tf.tensordot(tf.to_float(soft_ids), embedding, [-1, 0]) 
Author: qkaren | Project: Counterfactual-StoryRW | Lines: 27 | Source: embedder_utils.py
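
A quick shape check for the lookup, as a sketch with made-up sizes (vocab_size=4, emb_dim=3): soft weights over the vocabulary produce a weighted average of embedding rows.

import tensorflow as tf

embedding = tf.constant([[0., 0., 1.],
                         [0., 1., 0.],
                         [1., 0., 0.],
                         [1., 1., 1.]])        # [vocab_size=4, emb_dim=3]
soft_ids = tf.constant([[0.5, 0.5, 0., 0.]])   # [batch=1, vocab_size=4]
mixed = tf.tensordot(soft_ids, embedding, [-1, 0])
print(mixed)  # [[0.0, 0.5, 0.5]]: the average of embedding rows 0 and 1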

Example 3: create_tensor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    act_fn = activations.get('sigmoid')
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    self._build()

    A_tilda_k = in_layers[0].out_tensor
    X = in_layers[1].out_tensor

    if self.combine_method == "linear":
      concatenated = tf.concat([A_tilda_k, X], axis=2)
      adp_fn_val = act_fn(
          tf.tensordot(concatenated, self.trainable_weights[0], axes=1))
    else:
      adp_fn_val = act_fn(tf.matmul(A_tilda_k, tf.tensordot(X, self.Q, axes=1)))
    out_tensor = adp_fn_val
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor

    return out_tensor 
Author: deepchem | Project: deepchem | Lines: 24 | Source: hagcn_layers.py

Example 4: bilinear_attention

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def bilinear_attention(query, context, 
                query_mask, context_mask, dropout_ratio,
                scope, reuse=None):
    with tf.variable_scope(scope+"_Context_to_Query_Attention_Layer", reuse=reuse):
        context_ = tf.transpose(context, [0,2,1])
        hidden_dim = query.get_shape()[-1]

        attn_W = tf.get_variable("AttnW", dtype=tf.float32,
                                    shape=[hidden_dim, hidden_dim],
                                    initializer=initializer)

        weighted_query = tf.tensordot(query, attn_W, axes=[[2], [0]])

        S = tf.matmul(weighted_query, context_)  # batch x q_len x c_len

        mask_q = tf.expand_dims(query_mask, 1)
        mask_c = tf.expand_dims(context_mask, 1)

        S_ = tf.nn.softmax(qanet_layers.mask_logits(S, mask = mask_c))
        c2q = tf.matmul(S_, context)

        S_T = tf.nn.softmax(qanet_layers.mask_logits(tf.transpose(S, [0,2,1]), mask = mask_q))
        q2c = tf.matmul(S_T, query)

        return c2q, q2c 
Author: yyht | Project: BERT | Lines: 27 | Source: man_utils.py
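
The core of this function is the bilinear score S[b, i, j] = q_i^T W c_j, built from a tensordot followed by a batched matmul. A standalone sketch with toy shapes (all names illustrative):

import tensorflow as tf

query = tf.random.uniform([2, 4, 8])    # batch x q_len x dim
context = tf.random.uniform([2, 6, 8])  # batch x c_len x dim
W = tf.random.uniform([8, 8])

weighted_query = tf.tensordot(query, W, axes=[[2], [0]])  # batch x q_len x dim
S = tf.matmul(weighted_query, tf.transpose(context, [0, 2, 1]))
print(S.shape)  # (2, 4, 6): one bilinear score per (query, context) pair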

Example 5: quantizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def quantizer(w, config, reuse=False, temperature=1, L=5, scope='image'):
        """
        Quantize feature map over L centers to obtain discrete $\hat{w}$
         + Centers: {-2,-1,0,1,2}
         + TODO:    Toggle learnable centers?
        """
        with tf.variable_scope('quantizer_{}'.format(scope), reuse=reuse):

            centers = tf.cast(tf.range(-2,3), tf.float32)
            # Partition w into the Voronoi tessellation over the centers
            w_stack = tf.stack([w for _ in range(L)], axis=-1)
            w_hard = tf.cast(tf.argmin(tf.abs(w_stack - centers), axis=-1), tf.float32) + tf.reduce_min(centers)

            smx = tf.nn.softmax(-1.0/temperature * tf.abs(w_stack - centers), dim=-1)
            # Contract last dimension
            w_soft = tf.einsum('ijklm,m->ijkl', smx, centers)  # w_soft = tf.tensordot(smx, centers, axes=((-1),(0)))

            # Treat quantization as differentiable for optimization
            w_bar = tf.round(tf.stop_gradient(w_hard - w_soft) + w_soft)

            return w_bar 
Author: Justin-Tan | Project: generative-compression | Lines: 23 | Source: network.py
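
The inline comment points out that the einsum contraction equals a tensordot over the centers axis. A sketch verifying that equivalence on toy data (shapes illustrative only):

import tensorflow as tf

smx = tf.random.uniform([2, 4, 4, 3, 5])        # soft assignments over L=5 centers
centers = tf.cast(tf.range(-2, 3), tf.float32)  # {-2, -1, 0, 1, 2}
w_einsum = tf.einsum('ijklm,m->ijkl', smx, centers)
w_tensordot = tf.tensordot(smx, centers, axes=[[-1], [0]])
# Both contract the last (center) axis and agree elementwise.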

Example 6: attention

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def attention(inputs):
    # Trainable parameters
    hidden_size = inputs.shape[2].value
    u_omega = tf.get_variable("u_omega", [hidden_size], initializer=tf.keras.initializers.glorot_normal())

    with tf.name_scope('v'):
        v = tf.tanh(inputs)

    # For each timestep, its size-A vector in `v` is reduced against the `u_omega` vector
    vu = tf.tensordot(v, u_omega, axes=1, name='vu')  # (B,T) shape
    alphas = tf.nn.softmax(vu, name='alphas')  # (B,T) shape

    # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
    output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)

    # Final output with tanh
    output = tf.tanh(output)

    return output, alphas 
Author: SeoSangwoo | Project: Attention-Based-BiLSTM-relation-extraction | Lines: 21 | Source: attention.py

Example 7: node_attention

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def node_attention(inputs, adj, return_weights=False):
        hidden_size = inputs.shape[-1].value
        H_v = tf.Variable(tf.random_normal([hidden_size, 1], stddev=0.1))

        # convert adj to sparse tensor
        zero = tf.constant(0, dtype=tf.float32)
        where = tf.not_equal(adj, zero)
        indices = tf.where(where)
        values = tf.gather_nd(adj, indices)
        adj = tf.SparseTensor(indices=indices,
                              values=values,
                              dense_shape=adj.shape)

        with tf.name_scope('v'):
            v = adj * tf.squeeze(tf.tensordot(inputs, H_v, axes=1))

        weights = tf.sparse_softmax(v, name='alphas')  # [nodes,nodes]
        output = tf.sparse_tensor_dense_matmul(weights, inputs)

        if not return_weights:
            return output
        else:
            return output, weights

    # view-level attention (equation (4) in SemiGNN) 
Author: safe-graph | Project: DGFraud | Lines: 27 | Source: layers.py

Example 8: _call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def _call(self, _inp, output_size, is_training):
        batch_size = tf.shape(_inp)[0]
        H, W, B, A = tuple(int(i) for i in _inp.shape[1:])

        if self.embedding is None:
            self.embedding = tf.get_variable(
                "embedding", shape=(int(A/2), self.n_objects), dtype=tf.float32)

        inp = tf.reshape(_inp, (batch_size, H * W * B, A))
        key, value = tf.split(inp, 2, axis=2)
        raw_attention = tf.tensordot(key, self.embedding, [[2], [0]])
        attention = tf.nn.softmax(raw_attention, axis=1)

        attention_t = tf.transpose(attention, (0, 2, 1))
        weighted_value = tf.matmul(attention_t, value)

        flat_weighted_value = tf.reshape(
            weighted_value, (batch_size, self.n_objects * int(A/2)))

        if self.output_network is None:
            self.output_network = cfg.build_math_output(scope="math_output")

        return self.output_network(flat_weighted_value, output_size, is_training) 
Author: e2crawfo | Project: auto_yolo | Lines: 25 | Source: networks.py

Example 9: _create_corpus_embed

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def _create_corpus_embed(self):
        """
            msg_embed: batch_size * max_n_days * max_n_msgs * msg_embed_size

            => corpus_embed: batch_size * max_n_days * corpus_embed_size
        """
        with tf.name_scope('corpus_embed'):
            with tf.variable_scope('u_t'):
                proj_u = self._linear(self.msg_embed, self.msg_embed_size, 'tanh', use_bias=False)
                w_u = tf.get_variable('w_u', shape=(self.msg_embed_size, 1), initializer=self.initializer)
            u = tf.reduce_mean(tf.tensordot(proj_u, w_u, axes=1), axis=-1)  # batch_size * max_n_days * max_n_msgs

            mask_msgs = tf.sequence_mask(self.n_msgs_ph, maxlen=self.max_n_msgs, dtype=tf.bool, name='mask_msgs')
            ninf = tf.fill(tf.shape(mask_msgs), np.NINF)
            masked_score = tf.where(mask_msgs, u, ninf)
            u = neural.softmax(masked_score)  # batch_size * max_n_days * max_n_msgs
            u = tf.where(tf.is_nan(u), tf.zeros_like(u), u)  # replace nan with 0.0

            u = tf.expand_dims(u, axis=-2)  # batch_size * max_n_days * 1 * max_n_msgs
            corpus_embed = tf.matmul(u, self.msg_embed)  # batch_size * max_n_days * 1 * msg_embed_size
            corpus_embed = tf.reduce_mean(corpus_embed, axis=-2)  # batch_size * max_n_days * msg_embed_size
            self.corpus_embed = tf.nn.dropout(corpus_embed, keep_prob=1-self.dropout_ce, name='corpus_embed') 
Author: yumoxu | Project: stocknet-code | Lines: 24 | Source: Model.py

Example 10: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def call(self, inputs, **kwargs):

        if K.ndim(inputs[0]) != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (K.ndim(inputs)))

        embeds_vec_list = inputs
        row = []
        col = []

        for r, c in itertools.combinations(embeds_vec_list, 2):
            row.append(r)
            col.append(c)

        p = tf.concat(row, axis=1)
        q = tf.concat(col, axis=1)
        inner_product = p * q

        bi_interaction = inner_product
        attention_temp = tf.nn.relu(tf.nn.bias_add(tf.tensordot(
            bi_interaction, self.attention_W, axes=(-1, 0)), self.attention_b))
        #  Dense(self.attention_factor,'relu',kernel_regularizer=l2(self.l2_reg_w))(bi_interaction)
        self.normalized_att_score = tf.nn.softmax(tf.tensordot(
            attention_temp, self.projection_h, axes=(-1, 0)), dim=1)
        attention_output = tf.reduce_sum(
            self.normalized_att_score*bi_interaction, axis=1)

        attention_output = tf.nn.dropout(
            attention_output, self.keep_prob, seed=1024)
        # Dropout(1-self.keep_prob)(attention_output)
        afm_out = tf.tensordot(
            attention_output, self.projection_p, axes=(-1, 0))

        return afm_out 
Author: ShenDezhou | Project: icme2019 | Lines: 36 | Source: interaction.py

Example 11: cumsum

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def cumsum(x, axis=0, exclusive=False):
  """TPU hack for tf.cumsum.

  This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
  the axis dimension is very large.

  Args:
    x: a Tensor
    axis: an integer
    exclusive: a boolean

  Returns:
    Tensor of the same shape as x.
  """
  if not is_on_tpu():
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  x_shape = shape_list(x)
  rank = len(x_shape)
  length = x_shape[axis]
  my_range = tf.range(length)
  comparator = tf.less if exclusive else tf.less_equal
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  ret = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != rank - 1:
    ret = tf.transpose(
        ret,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return ret 
Author: akzaidi | Project: fine-lm | Lines: 32 | Source: common_layers.py
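
The trick contracts x against a lower-triangular 0/1 mask, so each output column sums a prefix of the input. A small check against tf.cumsum (runnable anywhere, not just on TPU):

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
r = tf.range(3)
# mask[i, j] = 1 where i <= j, so output column j sums x[..., :j + 1]
mask = tf.cast(tf.less_equal(tf.expand_dims(r, 1), tf.expand_dims(r, 0)),
               x.dtype)
via_tensordot = tf.tensordot(x, mask, axes=[[1], [0]])
# via_tensordot == tf.cumsum(x, axis=1) == [[1, 3, 6], [4, 9, 15]]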

Example 12: compute_attention_component

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def compute_attention_component(antecedent,
                                total_depth,
                                filter_width=1,
                                padding="VALID",
                                name="c",
                                vars_3d_num_heads=0):
  """Computes attention compoenent (query, key or value).

  Args:
    antecedent: a Tensor with shape [batch, length, channels]
    total_depth: an integer
    filter_width: An integer specifying how wide you want the attention
      component to be.
    padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    name: a string specifying scope name.
    vars_3d_num_heads: an optional integer (if we want to use 3d variables)

  Returns:
    c : [batch, length, depth] tensor
  """
  if vars_3d_num_heads > 0:
    assert filter_width == 1
    input_depth = antecedent.get_shape().as_list()[-1]
    depth_per_head = total_depth // vars_3d_num_heads
    initializer_stddev = input_depth ** -0.5
    if "q" in name:
      initializer_stddev *= depth_per_head ** -0.5
    var = tf.get_variable(
        name, [input_depth,
               vars_3d_num_heads,
               total_depth // vars_3d_num_heads],
        initializer=tf.random_normal_initializer(stddev=initializer_stddev))
    var = tf.cast(var, antecedent.dtype)
    var = tf.reshape(var, [input_depth, total_depth])
    return tf.tensordot(antecedent, var, axes=1)
  if filter_width == 1:
    return common_layers.dense(
        antecedent, total_depth, use_bias=False, name=name)
  else:
    return common_layers.conv1d(
        antecedent, total_depth, filter_width, padding, name=name) 
Author: akzaidi | Project: fine-lm | Lines: 43 | Source: common_attention.py

Example 13: rotate_point_cloud_by_angle_y_tensor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def rotate_point_cloud_by_angle_y_tensor(data, rotation_angle):
	""" Rotate the point cloud along up direction with certain angle.
		Input:
		  Nx3 array, original batch of point clouds
		Return:
		  Nx3 array, rotated batch of point clouds
	"""
	cosval = tf.cos(rotation_angle)
	sinval = tf.sin(rotation_angle)
	rotation_matrix = tf.reshape([[cosval, 0, sinval],[0, 1, 0],[-sinval, 0, cosval]], [3,3])
	data = tf.reshape(data, [-1, 3])
	rotated_data = tf.transpose(tf.tensordot(rotation_matrix, tf.transpose(data), [1,0]))
	return rotated_data 
Author: vinits5 | Project: pointnet-registration-framework | Lines: 15 | Source: helper.py

Example 14: rotate_point_cloud_by_angle_x_tensor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def rotate_point_cloud_by_angle_x_tensor(data, rotation_angle):
	""" Rotate the point cloud along up direction with certain angle.
		Input:
		  Nx3 array, original batch of point clouds
		Return:
		  Nx3 array, rotated batch of point clouds
	"""
	cosval = tf.cos(rotation_angle)
	sinval = tf.sin(rotation_angle)
	rotation_matrix = tf.reshape([[1, 0, 0],[0, cosval, -sinval],[0, sinval, cosval]], [3,3])
	data = tf.reshape(data, [-1, 3])
	rotated_data = tf.transpose(tf.tensordot(rotation_matrix, tf.transpose(data), [1,0]))
	return rotated_data 
Author: vinits5 | Project: pointnet-registration-framework | Lines: 15 | Source: helper.py

Example 15: rotate_point_cloud_by_angle_z_tensor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import tensordot [as alias]
def rotate_point_cloud_by_angle_z_tensor(data, rotation_angle):
	""" Rotate the point cloud along up direction with certain angle.
		Input:
		  Nx3 array, original batch of point clouds
		Return:
		  Nx3 array, rotated batch of point clouds
	"""
	cosval = tf.cos(rotation_angle)
	sinval = tf.sin(rotation_angle)
	rotation_matrix = tf.reshape([[cosval, -sinval, 0],[sinval, cosval, 0],[0, 0, 1]], [3,3])
	data = tf.reshape(data, [-1, 3])
	rotated_data = tf.transpose(tf.tensordot(rotation_matrix, tf.transpose(data), [1,0]))
	return rotated_data 
Author: vinits5 | Project: pointnet-registration-framework | Lines: 15 | Source: helper.py
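
A short usage sketch for the three rotation helpers above (the angle and point cloud are illustrative; under TF 1.x graph mode the results would still need a session run):

import numpy as np
import tensorflow as tf

cloud = tf.constant(np.random.rand(100, 3), dtype=tf.float32)  # Nx3 points
angle = tf.constant(np.pi / 4, dtype=tf.float32)               # 45 degrees

rotated_y = rotate_point_cloud_by_angle_y_tensor(cloud, angle)
rotated_x = rotate_point_cloud_by_angle_x_tensor(cloud, angle)
rotated_z = rotate_point_cloud_by_angle_z_tensor(cloud, angle)
# Each result is again an Nx3 tensor of rotated points.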


Note: The tensorflow.tensordot method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors; copyright belongs to those authors, and any use or distribution must comply with the corresponding project's license. Do not republish without permission.