This page collects typical usage examples of the Python method tensorflow.sparse_tensor_dense_matmul. If you are wondering what exactly tensorflow.sparse_tensor_dense_matmul does and how to use it in Python, the curated code examples below may help. You can also explore further usage examples from the tensorflow module that provides this method.
The following presents 15 code examples of the tensorflow.sparse_tensor_dense_matmul method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
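Before diving into the examples, here is a minimal, self-contained sketch of the method itself (TF 1.x graph mode; the toy values are invented for illustration). tf.sparse_tensor_dense_matmul multiplies a tf.SparseTensor (left operand) by a dense tensor (right operand) and returns a dense tensor:

import numpy as np
import tensorflow as tf

# A [2, 3] sparse matrix with two non-zero entries.
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=[2, 3])
dense = tf.constant(np.ones((3, 4), dtype=np.float32))

# [2, 3] sparse x [3, 4] dense -> [2, 4] dense
product = tf.sparse_tensor_dense_matmul(sp, dense)

with tf.Session() as sess:
    print(sess.run(product))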
Example 1: _build_model

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def _build_model(self):
    # define initial relation features
    if self.use_context or (self.use_path and self.path_type == 'rnn'):
        self._build_relation_feature()

    self.scores = 0.0

    if self.use_context:
        edges_list, mask_list = self._get_neighbors_and_masks(self.labels, self.entity_pairs, self.train_edges)
        self.aggregators = self._get_neighbor_aggregators()  # define aggregators for each layer
        self.aggregated_neighbors = self._aggregate_neighbors(edges_list, mask_list)  # [batch_size, n_relations]
        self.scores += self.aggregated_neighbors

    if self.use_path:
        if self.path_type == 'embedding':
            self.W, self.b = self._get_weight_and_bias(self.n_paths, self.n_relations)  # [batch_size, n_relations]
            self.scores += tf.sparse_tensor_dense_matmul(self.path_features, self.W) + self.b
        elif self.path_type == 'rnn':
            rnn_output = self._rnn(self.path_ids)  # [batch_size, path_samples, n_relations]
            self.scores += self._aggregate_paths(rnn_output)

    # narrow the range of scores to [0, 1] for the ease of calculating ranking-based metrics
    self.scores_normalized = tf.sigmoid(self.scores)
Example 2: matmul_wrapper

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def matmul_wrapper(A, B, optype):
    """Wrapper for handling sparse and dense versions of the `tf.matmul` operation.

    Parameters
    ----------
    A : tf.Tensor
    B : tf.Tensor
    optype : str, {'dense', 'sparse'}

    Returns
    -------
    tf.Tensor
    """
    with tf.name_scope('matmul_wrapper') as scope:
        if optype == 'dense':
            return tf.matmul(A, B)
        elif optype == 'sparse':
            return tf.sparse_tensor_dense_matmul(A, B)
        else:
            raise NameError('Unknown input type in matmul_wrapper')
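A quick usage sketch for the wrapper above (hypothetical toy tensors; on the 'sparse' path, A must be a tf.SparseTensor even though the docstring types both arguments as tf.Tensor):

dense_a = tf.ones([2, 3])
dense_b = tf.ones([3, 4])
out_dense = matmul_wrapper(dense_a, dense_b, optype='dense')     # shape [2, 4]

sparse_a = tf.SparseTensor(indices=[[0, 1]], values=[1.0], dense_shape=[2, 3])
out_sparse = matmul_wrapper(sparse_a, dense_b, optype='sparse')  # shape [2, 4]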
Example 3: __build

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def __build(self):
    w_init = tf.contrib.layers.xavier_initializer
    sizes = [self.D] + self.n_hidden

    for i in range(1, len(sizes)):
        W = tf.get_variable(name='W{}'.format(i), shape=[sizes[i - 1], sizes[i]], dtype=tf.float32,
                            initializer=w_init())
        b = tf.get_variable(name='b{}'.format(i), shape=[sizes[i]], dtype=tf.float32, initializer=w_init())

        if i == 1:
            encoded = tf.sparse_tensor_dense_matmul(self.X, W) + b
        else:
            encoded = tf.matmul(encoded, W) + b

        encoded = tf.nn.relu(encoded)

    W_mu = tf.get_variable(name='W_mu', shape=[sizes[-1], self.L], dtype=tf.float32, initializer=w_init())
    b_mu = tf.get_variable(name='b_mu', shape=[self.L], dtype=tf.float32, initializer=w_init())
    self.mu = tf.matmul(encoded, W_mu) + b_mu

    W_sigma = tf.get_variable(name='W_sigma', shape=[sizes[-1], self.L], dtype=tf.float32, initializer=w_init())
    b_sigma = tf.get_variable(name='b_sigma', shape=[self.L], dtype=tf.float32, initializer=w_init())
    log_sigma = tf.matmul(encoded, W_sigma) + b_sigma
    self.sigma = tf.nn.elu(log_sigma) + 1 + 1e-14
Example 4: aggregate_maxpool

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def aggregate_maxpool(features, agg_transform_size, adj_with_self_loops_indices, num_features, name):
    with tf.name_scope(name):
        fc_weights = tf.get_variable(f"{name}-fc_weights",
                                     shape=[num_features, agg_transform_size],
                                     dtype=tf.float32,
                                     initializer=tf.glorot_uniform_initializer(),
                                     )
        # dims: num_nodes x num_features, num_features x agg_transform_size -> num_nodes x agg_transform_size
        if isinstance(features, tf.SparseTensor):
            transformed_features = tf.sparse_tensor_dense_matmul(features, fc_weights)
        else:
            transformed_features = tf.matmul(features, fc_weights)
        transformed_features = tf.nn.relu(transformed_features)

        # Spread out the transformed features to neighbours.
        # dims: num_nodes x agg_transform_size gathered by num_nodes x max_degree -> num_nodes x max_degree x agg_transform_size
        neighbours_features = tf.gather(transformed_features, adj_with_self_loops_indices)

        # employ the max aggregator over the neighbour axis
        output = tf.reduce_max(neighbours_features, axis=1)
        return output
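To make the shapes concrete, here is a hypothetical call on a 3-node toy graph (all names and values invented for illustration; adj_with_self_loops_indices is a [num_nodes, max_degree] matrix of neighbour ids, and the created variable still needs initializing before evaluation):

features = tf.constant([[1.0, 0.0], [0.0, 2.0], [3.0, 1.0]])           # 3 nodes x 2 features
neighbour_ids = tf.constant([[0, 1], [1, 2], [2, 0]], dtype=tf.int32)  # each node: itself + 1 neighbour
pooled = aggregate_maxpool(features, agg_transform_size=4,
                           adj_with_self_loops_indices=neighbour_ids,
                           num_features=2, name="maxpool_agg")          # -> shape [3, 4]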
Example 5: optimize

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def optimize(self, learning_rate, global_step):
    if self.prop_type == 'vanilla':
        # dims: num_nodes x num_nodes, num_nodes x num_labels, num_nodes -> num_nodes x num_labels
        new_predicted_labels = tf.sparse_tensor_dense_matmul(self.graph_adj, self.predicted_labels) / self.degrees
        # set entries where we have a label to zero...
        new_predicted_labels *= self._get_labelled_nodes_mask()
        # ... and add already known labels
        new_predicted_labels += self.initial_predicted_labels
    else:
        new_predicted_labels = (1 - self.return_prob) * tf.sparse_tensor_dense_matmul(self.graph_adj,
                                                                                      self.predicted_labels) \
                               + self.return_prob * self.initial_predicted_labels

    # update predictions variable
    predicted_labels_update_op = self.predicted_labels.assign(new_predicted_labels)
    return predicted_labels_update_op, global_step.assign_add(1)
Example 6: fully_connected_layer

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def fully_connected_layer(inputs, output_dim, activation_fn, dropout_prob, weight_decay, name):
    with tf.name_scope(name):
        input_dim = int(inputs.get_shape()[1])
        weights = tf.get_variable("%s-weights" % name, [input_dim, output_dim], dtype=tf.float32,
                                  initializer=tf.glorot_uniform_initializer(),
                                  regularizer=slim.l2_regularizer(weight_decay))

        # Apply dropout to inputs if required
        inputs = tf.cond(
            tf.cast(dropout_prob, tf.bool),
            true_fn=(lambda: dropout_supporting_sparse_tensors(inputs, 1 - dropout_prob)),
            false_fn=(lambda: inputs),
        )

        if isinstance(inputs, tf.SparseTensor):
            output = tf.sparse_tensor_dense_matmul(inputs, weights)
        else:
            output = tf.matmul(inputs, weights)
        output = tf.contrib.layers.bias_add(output)
        return activation_fn(output) if activation_fn else output
Example 7: testShapeInference

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def testShapeInference(self):
    x = np.random.rand(10, 10)
    x[np.abs(x) < 0.5] = 0  # Make it sparse
    y = np.random.randn(10, 20)
    x_indices = np.vstack(np.where(x)).astype(np.int64).T
    x_values = x[np.where(x)]
    x_shape = x.shape

    x_st = tf.SparseTensor(x_indices, x_values, x_shape)
    result = tf.sparse_tensor_dense_matmul(x_st, y)
    self.assertEqual(result.get_shape(), (10, 20))

    x_shape_unknown = tf.placeholder(dtype=tf.int64, shape=None)
    x_st_shape_unknown = tf.SparseTensor(x_indices, x_values, x_shape_unknown)
    result_left_shape_unknown = tf.sparse_tensor_dense_matmul(
        x_st_shape_unknown, y)
    self.assertEqual(
        result_left_shape_unknown.get_shape().as_list(), [None, 20])

    x_shape_inconsistent = [10, 15]
    x_st_shape_inconsistent = tf.SparseTensor(
        x_indices, x_values, x_shape_inconsistent)
    with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
        tf.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)
Example 8: _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
        x_ind, x_val, x_shape, y, adjoint_a, adjoint_b):
    sp_x = tf.SparseTensor(indices=x_ind, values=x_val, shape=x_shape)

    def body(t, prev):
        with tf.control_dependencies([prev]):
            return (t + 1,
                    sparse_ops.sparse_tensor_dense_matmul(
                        sp_x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b))

    t0 = tf.constant(0)
    v0 = tf.constant(0.0)

    def _timeit(iterations, _):
        (_, final) = tf.while_loop(
            lambda t, _: t < iterations, body, (t0, v0),
            parallel_iterations=1, back_prop=False)
        return [final]

    return _timeit
Example 9: _testGradients

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def _testGradients(self, adjoint_a, adjoint_b, name, np_dtype):
    n, k, m = np.random.randint(1, 10, size=3)
    sp_t, nnz = self._randomTensor(
        [n, k], np_dtype, adjoint=adjoint_a, sparse=True)
    dense_t = self._randomTensor([k, m], np_dtype, adjoint=adjoint_b)
    matmul = tf.sparse_tensor_dense_matmul(
        sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)

    with self.test_session(use_gpu=True):
        dense_t_shape = [m, k] if adjoint_b else [k, m]
        sp_t_val_shape = [nnz]
        err = tf.test.compute_gradient_error([dense_t, sp_t.values],
                                             [dense_t_shape, sp_t_val_shape],
                                             matmul, [n, m])
        print("%s gradient err = %s" % (name, err))
        self.assertLess(err, 1e-3)
Example 10: node_attention

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def node_attention(inputs, adj, return_weights=False):
    hidden_size = inputs.shape[-1].value
    H_v = tf.Variable(tf.random_normal([hidden_size, 1], stddev=0.1))

    # convert adj to sparse tensor
    zero = tf.constant(0, dtype=tf.float32)
    where = tf.not_equal(adj, zero)
    indices = tf.where(where)
    values = tf.gather_nd(adj, indices)
    adj = tf.SparseTensor(indices=indices,
                          values=values,
                          dense_shape=adj.shape)

    with tf.name_scope('v'):
        v = adj * tf.squeeze(tf.tensordot(inputs, H_v, axes=1))

    weights = tf.sparse_softmax(v, name='alphas')  # [nodes, nodes]
    output = tf.sparse_tensor_dense_matmul(weights, inputs)

    if not return_weights:
        return output
    else:
        return output, weights
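A toy invocation of node_attention (hypothetical 3-node graph with 2-dim features; TF 1.x graph mode, so the variable H_v must be initialized before the output can be evaluated):

inputs = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])   # 3 nodes x 2 features
adj = tf.constant([[1.0, 1.0, 0.0],
                   [1.0, 1.0, 1.0],
                   [0.0, 1.0, 1.0]])                          # dense adjacency, converted to sparse inside
output = node_attention(inputs, adj)                          # -> shape [3, 2]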
Example 11: _create_gcn_embed

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def _create_gcn_embed(self):
    A_fold_hat = self._split_A_hat(self.norm_adj)
    embeddings = tf.concat([self.weights['user_embedding'], self.weights['item_embedding']], axis=0)

    all_embeddings = [embeddings]

    for k in range(0, self.n_layers):
        temp_embed = []
        for f in range(self.n_fold):
            temp_embed.append(tf.sparse_tensor_dense_matmul(A_fold_hat[f], embeddings))

        embeddings = tf.concat(temp_embed, 0)
        embeddings = tf.nn.leaky_relu(
            tf.matmul(embeddings, self.weights['W_gc_%d' % k]) + self.weights['b_gc_%d' % k])
        embeddings = tf.nn.dropout(embeddings, 1 - self.mess_dropout[k])

        all_embeddings += [embeddings]

    all_embeddings = tf.concat(all_embeddings, 1)
    u_g_embeddings, i_g_embeddings = tf.split(all_embeddings, [self.n_users, self.n_items], 0)
    return u_g_embeddings, i_g_embeddings
Example 12: _create_gcmc_embed

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def _create_gcmc_embed(self):
    A_fold_hat = self._split_A_hat(self.norm_adj)
    embeddings = tf.concat([self.weights['user_embedding'], self.weights['item_embedding']], axis=0)

    all_embeddings = []

    for k in range(0, self.n_layers):
        temp_embed = []
        for f in range(self.n_fold):
            temp_embed.append(tf.sparse_tensor_dense_matmul(A_fold_hat[f], embeddings))
        embeddings = tf.concat(temp_embed, 0)

        # convolutional layer.
        embeddings = tf.nn.leaky_relu(
            tf.matmul(embeddings, self.weights['W_gc_%d' % k]) + self.weights['b_gc_%d' % k])
        # dense layer.
        mlp_embeddings = tf.matmul(embeddings, self.weights['W_mlp_%d' % k]) + self.weights['b_mlp_%d' % k]
        mlp_embeddings = tf.nn.dropout(mlp_embeddings, 1 - self.mess_dropout[k])

        all_embeddings += [mlp_embeddings]

    all_embeddings = tf.concat(all_embeddings, 1)
    u_g_embeddings, i_g_embeddings = tf.split(all_embeddings, [self.n_users, self.n_items], 0)
    return u_g_embeddings, i_g_embeddings
Example 13: connect_representation_graph

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def connect_representation_graph(self, tf_features, n_components, n_features, node_name_ending):
    # Infer ReLU layer size if necessary
    if self.relu_size is None:
        relu_size = 4 * n_components
    else:
        relu_size = self.relu_size

    # Create variable nodes
    tf_relu_weights = tf.Variable(tf.random_normal([n_features, relu_size], stddev=.5),
                                  name='relu_weights_{}'.format(node_name_ending))
    tf_relu_biases = tf.Variable(tf.zeros([1, relu_size]),
                                 name='relu_biases_{}'.format(node_name_ending))
    tf_linear_weights = tf.Variable(tf.random_normal([relu_size, n_components], stddev=.5),
                                    name='linear_weights_{}'.format(node_name_ending))

    # Create ReLU layer
    tf_relu = tf.nn.relu(tf.add(tf.sparse_tensor_dense_matmul(tf_features, tf_relu_weights),
                                tf_relu_biases))
    tf_repr = tf.matmul(tf_relu, tf_linear_weights)

    # Return repr layer and variables
    return tf_repr, [tf_relu_weights, tf_linear_weights, tf_relu_biases]
Example 14: project_biases

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def project_biases(tf_features, n_features):
    """
    Projects the biases from the feature space to calculate bias per actor.
    :param tf_features: SparseTensor of feature values, one row per actor
    :param n_features: number of feature columns in tf_features
    :return: the bias variable and the per-actor projected biases
    """
    tf_feature_biases = tf.Variable(tf.zeros([n_features, 1]))

    # The reduce_sum performs a rank reduction: [n_actors, 1] -> [n_actors]
    tf_projected_biases = tf.reduce_sum(
        tf.sparse_tensor_dense_matmul(tf_features, tf_feature_biases),
        axis=1
    )

    return tf_feature_biases, tf_projected_biases
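A hypothetical call, treating each row of a one-hot SparseTensor as an actor's feature vector (toy values invented for illustration):

features = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 1.0], dense_shape=[2, 3])
bias_var, per_actor_bias = project_biases(features, n_features=3)  # per_actor_bias: shape [2]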
Example 15: _call

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_dense_matmul [as alias]
def _call(self, inputs):
    # vecs: input features of the current layer
    # adj_partition_list: the row partitions of the full graph adj
    #     (only used in full-batch evaluation on the val/test sets)
    vecs, adj_norm, len_feat, adj_partition_list, _ = inputs
    vecs = tf.nn.dropout(vecs, 1 - self.dropout)
    vecs_hop = [tf.identity(vecs) for o in range(self.order + 1)]

    for o in range(self.order):
        for a in range(o + 1):
            ans1 = tf.sparse_tensor_dense_matmul(adj_norm, vecs_hop[o + 1])
            ans_partition = [tf.sparse_tensor_dense_matmul(adj, vecs_hop[o + 1]) for adj in adj_partition_list]
            ans2 = tf.concat(ans_partition, 0)
            vecs_hop[o + 1] = tf.cond(self.is_train, lambda: tf.identity(ans1), lambda: tf.identity(ans2))

    vecs_hop = [self._F_nonlinear(v, o) for o, v in enumerate(vecs_hop)]

    if self.aggr == 'mean':
        ret = vecs_hop[0]
        for o in range(len(vecs_hop) - 1):
            ret += vecs_hop[o + 1]
    elif self.aggr == 'concat':
        ret = tf.concat(vecs_hop, axis=1)
    else:
        raise NotImplementedError
    return ret