

Python tensorflow.sparse_softmax Method Code Examples

This article collects typical usage examples of the tensorflow.sparse_softmax method in Python. If you are wondering how to use tensorflow.sparse_softmax, how it is called in practice, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the tensorflow module in which this method is defined.


A total of 13 code examples of the tensorflow.sparse_softmax method are shown below, sorted by popularity by default.
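
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what tf.sparse_softmax does in TensorFlow 1.x: it applies a softmax along the last dimension of a SparseTensor of rank 2 or higher, normalizing only the explicitly stored values and treating implicit zeros as missing rather than as zero-valued logits.

import tensorflow as tf

# A 2x3 sparse matrix with three explicit entries; all other entries are
# implicit zeros and are simply ignored by sparse_softmax.
sp = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                     values=[1.0, 2.0, 3.0],
                     dense_shape=[2, 3])
sp_sm = tf.sparse_softmax(sp)  # softmax over each row's explicit values

with tf.Session() as sess:
    print(sess.run(sp_sm.values))
    # row 0: softmax([1., 2.]) ~= [0.27, 0.73]; row 1: softmax([3.]) = [1.]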

Example 1: testEquivalentToDensified

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def testEquivalentToDensified(self):
    np.random.seed(1618)
    n, m = np.random.choice(20, size=2)

    for dtype in [np.float32, np.float64]:
      sp_vals_np = np.random.rand(n, m).astype(dtype)

      batched_sp_t, unused_nnz1 = _sparsify(
          sp_vals_np.reshape((1, n, m)), thresh=0.)  # No masking.

      with self.test_session(use_gpu=False):
        densified = tf.constant(sp_vals_np)

        sp_result = sparse_ops.sparse_softmax(
            batched_sp_t).eval().values.reshape((n, m))
        dense_result = tf.nn.softmax(densified)

        self.assertAllClose(dense_result.eval(), sp_result) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 20, Source: sparse_ops_test.py

Example 2: node_attention

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def node_attention(inputs, adj, return_weights=False):
    hidden_size = inputs.shape[-1].value
    H_v = tf.Variable(tf.random_normal([hidden_size, 1], stddev=0.1))

    # convert adj to sparse tensor
    zero = tf.constant(0, dtype=tf.float32)
    where = tf.not_equal(adj, zero)
    indices = tf.where(where)
    values = tf.gather_nd(adj, indices)
    adj = tf.SparseTensor(indices=indices,
                          values=values,
                          dense_shape=adj.shape)

    with tf.name_scope('v'):
        v = adj * tf.squeeze(tf.tensordot(inputs, H_v, axes=1))

    weights = tf.sparse_softmax(v, name='alphas')  # [nodes,nodes]
    output = tf.sparse_tensor_dense_matmul(weights, inputs)

    if not return_weights:
        return output
    else:
        return output, weights

# view-level attention (equation (4) in SemiGNN)
Developer: safe-graph, Project: DGFraud, Lines: 27, Source: layers.py
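
The dense-to-sparse conversion above is what restricts the softmax to actual edges: only positions with a non-zero adjacency entry become explicit values, so each row of the result is normalized over a node's neighbours only. A stripped-down sketch of that pattern follows; the placeholder names and shapes are illustrative and not part of the DGFraud code.

import tensorflow as tf

adj = tf.placeholder(tf.float32, shape=[4, 4])     # dense adjacency matrix
scores = tf.placeholder(tf.float32, shape=[4, 4])  # dense attention scores

# Keep only positions where an edge exists, then normalize each row so the
# attention weights over a node's neighbours sum to one.
edge_indices = tf.where(tf.not_equal(adj, 0.0))
sparse_scores = tf.SparseTensor(indices=edge_indices,
                                values=tf.gather_nd(scores, edge_indices),
                                dense_shape=[4, 4])
alphas = tf.sparse_softmax(sparse_scores)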

Example 3: compute_inference

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def compute_inference(self, node_features_in, sp_adj_matrix, is_training):
    """Forward pass for GAT model."""
    adj_matrix_pred = self.edge_model.compute_inference(
        node_features_in, sp_adj_matrix, is_training)
    sp_adj_mask = tf.SparseTensor(
        indices=sp_adj_matrix.indices,
        values=tf.ones_like(sp_adj_matrix.values),
        dense_shape=sp_adj_matrix.dense_shape)
    sp_adj_att = sp_adj_mask * adj_matrix_pred
    sp_adj_att = tf.SparseTensor(
        indices=sp_adj_att.indices,
        values=tf.nn.leaky_relu(sp_adj_att.values),
        dense_shape=sp_adj_att.dense_shape)
    sp_adj_att = tf.sparse_softmax(sp_adj_att)
    logits = self.node_model.compute_inference(node_features_in, sp_adj_att,
                                               is_training)
    return logits, adj_matrix_pred 
Developer: google, Project: gcnn-survey-paper, Lines: 19, Source: node_edge_models.py

Example 4: testHigherRanks

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def testHigherRanks(self):
    # For the first shape:
    # First batch:
    # [?   e.]
    # [1.  ? ]
    # Second batch:
    # [e   ? ]
    # [e   e ]
    #
    # The softmax results should be:
    # [?   1.]     [1    ?]
    # [1.  ? ] and [.5  .5]
    # where ? means implicitly zero.
    #
    # The second shape: same input data, but with a higher-rank shape.
    shapes = [[2, 2, 2], [2, 1, 2, 2]]
    for shape in shapes:
      values = np.asarray(
          [0., np.e, 1., 0., np.e, 0., np.e, np.e]).reshape(shape)
      sp_t, unused_nnz = _sparsify(values, thresh=1e-2)
      expected_values = [1., 1., 1., .5, .5]

      with self.test_session(use_gpu=False):
        result = sparse_ops.sparse_softmax(sp_t).eval()

        self.assertAllEqual(expected_values, result.values)
        self.assertAllEqual(sp_t.indices.eval(), result.indices)
        self.assertAllEqual(shape, result.shape) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 30, Source: sparse_ops_test.py

Example 5: testGradient

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def testGradient(self):
    x_shape = [2, 5, 10]
    with self.test_session(use_gpu=False):
      for dtype in [np.float32, np.float64]:
        x_np = np.random.randn(*x_shape).astype(dtype)
        x_tf, nnz = _sparsify(x_np)
        y_tf = tf.sparse_softmax(x_tf)
        err = tf.test.compute_gradient_error(x_tf.values, (nnz,), y_tf.values,
                                             (nnz,))
        self.assertLess(err, 1e-4) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 12, Source: sparse_ops_test.py

Example 6: build_sparse_matrix_softmax

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def build_sparse_matrix_softmax(self, idx_non_zero_values, X, dense_shape_A):
        A = tf.SparseTensorValue(idx_non_zero_values, tf.squeeze(X), dense_shape_A)
        A = tf.sparse_reorder(A)  # n_edges x n_edges
        A = tf.sparse_softmax(A)

        return A 
Developer: LPDI-EPFL, Project: masif, Lines: 8, Source: MaSIF_ligand.py
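
The sparse_reorder call above is presumably there because TensorFlow's sparse ops, tf.sparse_softmax included, expect indices in canonical row-major order, and a SparseTensorValue assembled from arbitrary index lists may not satisfy that. A toy sketch of the same reorder-then-softmax sequence (values and shapes made up):

import tensorflow as tf

# Indices deliberately listed out of row-major order.
sp = tf.SparseTensor(indices=[[1, 0], [0, 1], [0, 0]],
                     values=[1.0, 2.0, 3.0],
                     dense_shape=[2, 2])
sp = tf.sparse_reorder(sp)   # restore canonical row-major ordering
sm = tf.sparse_softmax(sp)   # then normalize each row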

Example 7: forward_incidence_matrix

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def forward_incidence_matrix(self, normalization):
        if normalization[0] == "none":
            mtr_values = tf.to_float(tf.ones_like(self.receiver_indices))
            message_indices = tf.range(self.edge_count)

            mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.receiver_indices, message_indices])))
            mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))

            tensor = tf.SparseTensor(indices=mtr_indices,
                                     values=mtr_values,
                                     dense_shape=mtr_shape)

            return tensor
        elif normalization[0] == "global":
            mtr_values = tf.to_float(tf.ones_like(self.receiver_indices))
            message_indices = tf.range(self.edge_count)

            mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.receiver_indices, message_indices])))
            mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))

            tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
                                                       values=mtr_values,
                                                       dense_shape=mtr_shape))

            return tensor
        elif normalization[0] == "local":
            mtr_values = tf.to_float(tf.ones_like(self.receiver_indices))
            message_indices = tf.range(self.edge_count)

            mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.message_types, self.receiver_indices, message_indices])))
            mtr_shape = tf.to_int64(tf.stack([self.label_count*2, self.vertex_count, self.edge_count]))

            tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
                                                       values=mtr_values,
                                                       dense_shape=mtr_shape))

            tensor = tf.sparse_reduce_sum_sparse(tensor, 0)

            return tensor 
Developer: MichSchli, Project: RelationPrediction, Lines: 41, Source: graph_representations.py

Example 8: backward_incidence_matrix

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def backward_incidence_matrix(self, normalization):
        if normalization[0] == "none":
            mtr_values = tf.to_float(tf.ones_like(self.sender_indices))
            message_indices = tf.range(self.edge_count)

            mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.sender_indices, message_indices])))
            mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))

            tensor = tf.SparseTensor(indices=mtr_indices,
                                     values=mtr_values,
                                     dense_shape=mtr_shape)

            return tensor
        elif normalization[0] == "global":
            mtr_values = tf.to_float(tf.ones_like(self.sender_indices))
            message_indices = tf.range(self.edge_count)

            mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.sender_indices, message_indices])))
            mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))

            tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
                                                       values=mtr_values,
                                                       dense_shape=mtr_shape))

            return tensor
        elif normalization[0] == "local":
            mtr_values = tf.to_float(tf.ones_like(self.sender_indices))
            message_indices = tf.range(self.edge_count)

            mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.message_types, self.sender_indices, message_indices])))
            mtr_shape = tf.to_int64(tf.stack([self.label_count*2, self.vertex_count, self.edge_count]))

            tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
                                                       values=mtr_values,
                                                       dense_shape=mtr_shape))

            tensor = tf.sparse_reduce_sum_sparse(tensor, 0)

            return tensor 
Developer: MichSchli, Project: RelationPrediction, Lines: 41, Source: graph_representations.py

Example 9: sp_gat_layer

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def sp_gat_layer(node_features, adj_matrix, in_dim, out_dim, p_drop,
                 is_training, sparse):
  """Single graph attention layer using sparse tensors.

  Args:
    node_features: Sparse Tensor of shape (nb_nodes, in_dim) or SparseTensor.
    adj_matrix: Sparse Tensor.
    in_dim: integer specifying the input feature dimension.
    out_dim: integer specifying the output feature dimension.
    p_drop: dropout probability.
    is_training: boolean, True if the model is being trained, False otherwise
    sparse: True if node features are sparse.

  Returns:
    node_features: tensor of shape (nb_nodes, out_dim). New node
        features obtained from applying one head of attention to input.

  Raises:
  """
  # Linear transform
  node_features = dense(node_features, in_dim, out_dim, p_drop, is_training,
                        sparse)
  # Attention scores
  alpha = sp_compute_adj_att(node_features, adj_matrix)
  alpha = tf.SparseTensor(
      indices=alpha.indices,
      values=tf.nn.leaky_relu(alpha.values),
      dense_shape=alpha.dense_shape)
  alpha = tf.sparse_softmax(alpha)
  alpha = sparse_dropout(alpha, p_drop, is_training)
  node_features = tf.layers.dropout(
      inputs=node_features, rate=p_drop, training=is_training)
  # Compute self-attention features
  node_features = tf.sparse_tensor_dense_matmul(alpha, node_features)
  node_features = tf.contrib.layers.bias_add(node_features)
  return node_features 
Developer: google, Project: gcnn-survey-paper, Lines: 38, Source: model_utils.py
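
The three sparse operations above, a LeakyReLU applied to the .values, tf.sparse_softmax, and tf.sparse_tensor_dense_matmul, form the core of a single sparse attention head. Below is a minimal, self-contained sketch of that sequence with made-up shapes; it deliberately omits the project's helper functions (dense, sp_compute_adj_att, sparse_dropout).

import tensorflow as tf

num_nodes, out_dim = 5, 8
features = tf.random_normal([num_nodes, out_dim])

# One raw attention logit per edge of a fixed sparsity pattern.
att = tf.SparseTensor(indices=[[0, 1], [1, 0], [1, 2], [2, 1]],
                      values=tf.random_normal([4]),
                      dense_shape=[num_nodes, num_nodes])

att = tf.SparseTensor(indices=att.indices,
                      values=tf.nn.leaky_relu(att.values),
                      dense_shape=att.dense_shape)
alphas = tf.sparse_softmax(att)                        # normalize per row
out = tf.sparse_tensor_dense_matmul(alphas, features)  # weighted neighbour sum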

Example 10: sp_egat_layer

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def sp_egat_layer(node_features, adj_matrix, in_dim, out_dim, p_drop,
                  is_training, sparse):
  """Single graph attention layer using sparse tensors.

  Args:
    node_features: Tensor of shape (nb_nodes, in_dim) or SparseTensor.
    adj_matrix: Sparse Tensor.
    in_dim: integer specifying the input feature dimension.
    out_dim: integer specifying the output feature dimension.
    p_drop: dropout probability.
    is_training: boolean, True if the model is being trained, False otherwise
    sparse: True if node features are sparse.

  Returns:
    node_features: tensor of shape (nb_nodes, out_dim). New node
        features obtained from applying one head of attention to input.

  Raises:
  """
  # Linear transform
  node_features = dense(node_features, in_dim, out_dim, p_drop, is_training,
                        sparse)
  # Attention scores
  alpha = sp_compute_adj_att(node_features, adj_matrix)
  alpha = tf.SparseTensor(
      indices=alpha.indices,
      values=tf.nn.leaky_relu(alpha.values),
      dense_shape=alpha.dense_shape)
  alpha = tf.sparse_softmax(alpha)
  alpha = sparse_dropout(alpha, p_drop, is_training)
  node_features = tf.layers.dropout(
      inputs=node_features, rate=p_drop, training=is_training)
  # Compute self-attention features
  node_features = tf.sparse_tensor_dense_matmul(alpha, node_features)
  node_features = tf.contrib.layers.bias_add(node_features)
  return node_features


############################## MULTI LAYERS ############################# 
Developer: google, Project: gcnn-survey-paper, Lines: 41, Source: model_utils.py

Example 11: attention_mechanism

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def attention_mechanism(features, graph_adj, adj_with_self_loops_indices, coefficient_dropout_prob, weight_decay, name):
    # apply a feedforward network parametrized with a weight vector to the transformed features.
    input_dim = int(features.get_shape()[1])
    a_i = tf.get_variable(f"{name}-att_i", [input_dim, 1], dtype=tf.float32,
                          initializer=tf.glorot_uniform_initializer(),
                          regularizer=slim.l2_regularizer(weight_decay))
    a_j = tf.get_variable(f"{name}-att_j", [input_dim, 1], dtype=tf.float32,
                          initializer=tf.glorot_uniform_initializer(),
                          regularizer=slim.l2_regularizer(weight_decay))
    tf.add_to_collection(ATTENTION_WEIGHTS, a_i)
    tf.add_to_collection(ATTENTION_WEIGHTS, a_j)

    # dims: num_nodes x input_dim, input_dim, 1 -> num_nodes x 1
    att_i = tf.matmul(features, a_i)
    att_i = tf.contrib.layers.bias_add(att_i)
    # dims: num_nodes x input_dim, input_dim, 1 -> num_nodes x 1
    att_j = tf.matmul(features, a_j)
    att_j = tf.contrib.layers.bias_add(att_j)

    # Extracts the relevant attention coefficients with respect to the 1-hop neighbours of each node
    # Method: first extract all the attention coefficients of the left nodes of each edge, then those
    # of the right nodes and add them up.
    # The result is a list of relevant attention weights ordered in the same way as the edges in the
    # sparse adjacency matrix.
    # dims: num_nodes x 1, num_edges, num_nodes x 1, num_edges -> 1 x num_edges x 1
    attention_weights_of_edges = tf.gather(att_i, adj_with_self_loops_indices[0], axis=0) + \
                                 tf.gather(att_j, adj_with_self_loops_indices[1], axis=0)
    # dims: 1 x num_edges x 1 -> num_edges
    attention_weights_of_edges = tf.squeeze(attention_weights_of_edges)

    # blow list of attention weights up into a sparse matrix. Use the coordinates from the original
    # adjacency matrix to specify which attention weight belongs to which edge.
    # Simultaneously applies the LeakyReLU as given in the paper.
    # dims: num_nodes x num_nodes, num_edges -> num_nodes x num_nodes
    attention_weight_matrix = tf.SparseTensor(
        indices=graph_adj.indices,
        values=tf.nn.leaky_relu(attention_weights_of_edges, alpha=0.2),
        dense_shape=graph_adj.dense_shape
    )

    # finish the attention by normalizing coefficients using softmax
    attention_coefficients = tf.sparse_softmax(attention_weight_matrix)

    # apply dropout to attention coefficients, meaning that in every epoch a single node is only exposed to a
    # sampled subset of its neighbours
    attention_coefficients = tf.cond(
        tf.cast(coefficient_dropout_prob, tf.bool),
        true_fn=(lambda: dropout_supporting_sparse_tensors(attention_coefficients, 1.0 - coefficient_dropout_prob)),
        false_fn=(lambda: attention_coefficients)
    )

    return attention_coefficients 
Developer: shchur, Project: gnn-benchmark, Lines: 54, Source: gat.py
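
The gather-and-add step described in the comments above, where per-node source and target scores are gathered at each edge's endpoints and summed, can be shown in isolation. The edge lists and shapes below are illustrative only and do not come from the gnn-benchmark code:

import tensorflow as tf

num_nodes = 4
att_i = tf.random_normal([num_nodes, 1])  # per-node score as an edge source
att_j = tf.random_normal([num_nodes, 1])  # per-node score as an edge target

# Edge list as parallel (source, target) index vectors.
src = tf.constant([0, 1, 1, 2, 3])
dst = tf.constant([1, 0, 2, 3, 2])

# One unnormalized attention logit per edge, in the same order as the edges,
# ready to become the values of a SparseTensor passed to tf.sparse_softmax.
edge_logits = tf.squeeze(tf.gather(att_i, src) + tf.gather(att_j, dst))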

Example 12: sp_attn_head

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def sp_attn_head(seq, out_sz, adj_mat, activation, nb_nodes, in_drop=0.0, coef_drop=0.0, residual=False):
    with tf.name_scope('sp_attn'):
        if in_drop != 0.0:
            seq = tf.nn.dropout(seq, 1.0 - in_drop)

        seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)

        # simplest self-attention possible
        f_1 = tf.layers.conv1d(seq_fts, 1, 1)
        f_2 = tf.layers.conv1d(seq_fts, 1, 1)
        
        f_1 = tf.reshape(f_1, (nb_nodes, 1))
        f_2 = tf.reshape(f_2, (nb_nodes, 1))

        f_1 = adj_mat*f_1
        f_2 = adj_mat * tf.transpose(f_2, [1,0])

        logits = tf.sparse_add(f_1, f_2)
        lrelu = tf.SparseTensor(indices=logits.indices, 
                values=tf.nn.leaky_relu(logits.values), 
                dense_shape=logits.dense_shape)
        coefs = tf.sparse_softmax(lrelu)

        if coef_drop != 0.0:
            coefs = tf.SparseTensor(indices=coefs.indices,
                    values=tf.nn.dropout(coefs.values, 1.0 - coef_drop),
                    dense_shape=coefs.dense_shape)
        if in_drop != 0.0:
            seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)

        # As tf.sparse_tensor_dense_matmul expects its arguments to have rank-2,
        # here we make an assumption that our input is of batch size 1, and reshape appropriately.
        # The method will fail in all other cases!
        coefs = tf.sparse_reshape(coefs, [nb_nodes, nb_nodes])
        seq_fts = tf.squeeze(seq_fts)
        vals = tf.sparse_tensor_dense_matmul(coefs, seq_fts)
        vals = tf.expand_dims(vals, axis=0)
        vals.set_shape([1, nb_nodes, out_sz])
        ret = tf.contrib.layers.bias_add(vals)

        # residual connection
        if residual:
            if seq.shape[-1] != ret.shape[-1]:
                ret = ret + conv1d(seq, ret.shape[-1], 1) # activation
            else:
                ret = ret + seq

        return activation(ret)  # activation 
Developer: didi, Project: hetsann, Lines: 50, Source: layers.py

Example 13: sp_attn_head

# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_softmax [as alias]
def sp_attn_head(seq, out_sz, adj_mat, activation, nb_nodes, in_drop=0.0, coef_drop=0.0, residual=False):
    with tf.name_scope('sp_attn'):
        if in_drop != 0.0:
            seq = tf.nn.dropout(seq, 1.0 - in_drop)

        seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)

        # simplest self-attention possible
        f_1 = tf.layers.conv1d(seq_fts, 1, 1)
        f_2 = tf.layers.conv1d(seq_fts, 1, 1)
        logits = tf.sparse_add(adj_mat * f_1, adj_mat *
                               tf.transpose(f_2, [0, 2, 1]))
        lrelu = tf.SparseTensor(indices=logits.indices,
                                values=tf.nn.leaky_relu(logits.values),
                                dense_shape=logits.dense_shape)
        coefs = tf.sparse_softmax(lrelu)

        if coef_drop != 0.0:
            coefs = tf.SparseTensor(indices=coefs.indices,
                                    values=tf.nn.dropout(
                                        coefs.values, 1.0 - coef_drop),
                                    dense_shape=coefs.dense_shape)
        if in_drop != 0.0:
            seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)

        # As tf.sparse_tensor_dense_matmul expects its arguments to have rank-2,
        # here we make an assumption that our input is of batch size 1, and reshape appropriately.
        # The method will fail in all other cases!
        coefs = tf.sparse_reshape(coefs, [nb_nodes, nb_nodes])
        seq_fts = tf.squeeze(seq_fts)
        vals = tf.sparse_tensor_dense_matmul(coefs, seq_fts)
        vals = tf.expand_dims(vals, axis=0)
        vals.set_shape([1, nb_nodes, out_sz])
        ret = tf.contrib.layers.bias_add(vals)

        # residual connection
        if residual:
            if seq.shape[-1] != ret.shape[-1]:
                ret = ret + conv1d(seq, ret.shape[-1], 1)  # activation
            else:
                ret = ret + seq

        return activation(ret)  # activation


# final_embed, att_val = layers.SimpleAttLayer(multi_embed, mp_att_size,
#                                                      time_major=False,
#                                                      return_alphas=True) 
Developer: BUPTDM, Project: OpenHINE, Lines: 50, Source: GAT.py


Note: The tensorflow.sparse_softmax examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code. Do not reproduce this article without permission.