This article collects typical usage examples of the tensorflow.sparse_reorder method in Python. If you have been wondering what exactly tensorflow.sparse_reorder does, how to call it, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples from the enclosing tensorflow module.
Below, 14 code examples of tensorflow.sparse_reorder are presented, sorted by popularity by default.
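Before the examples, here is a minimal standalone sketch (TensorFlow 1.x, where tf.sparse_reorder is available) of what the method does: it returns a SparseTensor with the same values but with the indices sorted into canonical row-major order.

import numpy as np
import tensorflow as tf

# Indices below are deliberately out of row-major order.
sp = tf.SparseTensor(indices=[[0, 3], [0, 1], [2, 0]],
                     values=[10.0, 20.0, 30.0],
                     dense_shape=[3, 4])
sp_ordered = tf.sparse_reorder(sp)

with tf.Session() as sess:
    print(sess.run(sp_ordered.indices))  # [[0 1] [0 3] [2 0]]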
Example 1: get_architecture
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def get_architecture():
    inputs_ph = tf.placeholder(
        dtype=tf.float32, shape=[None, FLAGS.features_dim], name="features_")
    support_ph = tf.sparse_placeholder(
        dtype=tf.float32, shape=[None, None], name="support_")
    tf.logging.info("Reordering indices of support - this is extremely "
                    "important as sparse operations assume sparse indices have "
                    "been ordered.")
    support_reorder = tf.sparse_reorder(support_ph)
    rgat_layer = RGAT(units=FLAGS.units, relations=FLAGS.relations)
    outputs = rgat_layer(inputs=inputs_ph, support=support_reorder)
    return inputs_ph, support_ph, outputs
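For context, a sparse placeholder such as support_ph above is fed at session-run time with a tf.SparseTensorValue. A minimal sketch of such a feed (the values, and the assumption FLAGS.features_dim == 16, are purely illustrative):

import numpy as np

feed = {
    inputs_ph: np.zeros((4, 16), dtype=np.float32),
    support_ph: tf.SparseTensorValue(
        indices=np.array([[2, 3], [0, 1]], dtype=np.int64),  # may be unordered
        values=np.array([1.0, 1.0], dtype=np.float32),
        dense_shape=np.array([4, 4], dtype=np.int64)),
}
# outputs_val = sess.run(outputs, feed_dict=feed)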
Example 2: init_placeholders
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def init_placeholders(self):
    if self.input_type == 'dense':
        self.train_x = tf.placeholder(tf.float32, shape=[None, self.n_features], name='x')
    else:
        with tf.name_scope('sparse_placeholders') as scope:
            self.raw_indices = tf.placeholder(tf.int64, shape=[None, 2], name='raw_indices')
            self.raw_values = tf.placeholder(tf.float32, shape=[None], name='raw_data')
            self.raw_shape = tf.placeholder(tf.int64, shape=[2], name='raw_shape')
        # tf.sparse_reorder is not needed since scipy returns COO in canonical order
        self.train_x = tf.SparseTensor(self.raw_indices, self.raw_values, self.raw_shape)
    self.train_y = tf.placeholder(tf.float32, shape=[None], name='Y')
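The comment about scipy's COO order corresponds to feeding code along these lines (a sketch: model stands in for an instance of the class above, and the canonical ordering only holds for a COO matrix derived from a CSR one, e.g. via tocsr().tocoo()):

import numpy as np
import scipy.sparse

X_batch = scipy.sparse.random(8, 10, density=0.2, format='csr').tocoo()
feed = {
    model.raw_indices: np.column_stack((X_batch.row, X_batch.col)).astype(np.int64),
    model.raw_values: X_batch.data.astype(np.float32),
    model.raw_shape: np.array(X_batch.shape, dtype=np.int64),
}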
Example 3: _generate_sketch_matrix
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def _generate_sketch_matrix(rand_h, rand_s, output_dim):
    """
    Return a sparse matrix used for the tensor sketch operation in compact
    bilinear pooling.

    Args:
        rand_h: a 1D numpy array containing indices in the interval `[0, output_dim)`.
        rand_s: a 1D numpy array of 1 and -1, with the same shape as `rand_h`.
        output_dim: the output dimension of compact bilinear pooling.

    Returns:
        a sparse matrix of shape [input_dim, output_dim] for tensor sketch.
    """
    # Generate a sparse matrix for tensor count sketch
    rand_h = rand_h.astype(np.int64)
    rand_s = rand_s.astype(np.float32)
    assert(rand_h.ndim == 1 and rand_s.ndim == 1 and len(rand_h) == len(rand_s))
    assert(np.all(rand_h >= 0) and np.all(rand_h < output_dim))
    input_dim = len(rand_h)
    indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],
                              rand_h[..., np.newaxis]), axis=1)
    sparse_sketch_matrix = tf.sparse_reorder(
        tf.SparseTensor(indices, rand_s, [input_dim, output_dim]))
    return sparse_sketch_matrix
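In compact bilinear pooling the sketch parameters are drawn once and then held fixed. A usage sketch (the dimensions are arbitrary, and the commented matmul reflects how such a sketch matrix is typically applied):

input_dim, output_dim = 512, 8000
rand_h = np.random.randint(output_dim, size=input_dim)  # one hash bucket per input unit
rand_s = 2 * np.random.randint(2, size=input_dim) - 1   # random signs in {-1, +1}
sketch_matrix = _generate_sketch_matrix(rand_h, rand_s, output_dim)
# For x of shape [batch, input_dim], the count sketch is then:
# y = tf.transpose(tf.sparse_tensor_dense_matmul(
#     sketch_matrix, x, adjoint_a=True, adjoint_b=True))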
Example 4: testAlreadyInOrder
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def testAlreadyInOrder(self):
    with self.test_session(use_gpu=False) as sess:
        input_val = self._SparseTensorValue_5x6(np.arange(6))
        sp_output = tf.sparse_reorder(input_val)
        output_val = sess.run(sp_output)
        self.assertAllEqual(output_val.indices, input_val.indices)
        self.assertAllEqual(output_val.values, input_val.values)
        self.assertAllEqual(output_val.shape, input_val.shape)
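Examples 4 through 8 come from TensorFlow's own test suite and rely on two helpers the excerpt omits. A plausible reconstruction, so the tests read standalone (the exact index/value constants in the real test file may differ; numpy as np and tensorflow as tf are assumed imported):

def _SparseTensorPlaceholder(self):
    return tf.SparseTensor(
        tf.placeholder(tf.int64),      # indices
        tf.placeholder(tf.float32),    # values
        tf.placeholder(tf.int64))      # dense shape

def _SparseTensorValue_5x6(self, permutation):
    # Six entries of a 5x6 matrix, listed in canonical order, then permuted.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]).astype(np.int64)
    val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float32)
    return tf.SparseTensorValue(ind[permutation], val[permutation],
                                np.array([5, 6]).astype(np.int64))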
Example 5: testFeedAlreadyInOrder
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def testFeedAlreadyInOrder(self):
    with self.test_session(use_gpu=False) as sess:
        sp_input = self._SparseTensorPlaceholder()
        input_val = self._SparseTensorValue_5x6(np.arange(6))
        sp_output = tf.sparse_reorder(sp_input)
        output_val = sess.run(sp_output, {sp_input: input_val})
        self.assertAllEqual(output_val.indices, input_val.indices)
        self.assertAllEqual(output_val.values, input_val.values)
        self.assertAllEqual(output_val.shape, input_val.shape)
Example 6: testOutOfOrder
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def testOutOfOrder(self):
    expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
    with self.test_session(use_gpu=False) as sess:
        for _ in range(5):  # To test various random permutations
            input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
            sp_output = tf.sparse_reorder(input_val)
            output_val = sess.run(sp_output)
            self.assertAllEqual(output_val.indices, expected_output_val.indices)
            self.assertAllEqual(output_val.values, expected_output_val.values)
            self.assertAllEqual(output_val.shape, expected_output_val.shape)
Example 7: testFeedOutOfOrder
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def testFeedOutOfOrder(self):
    expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
    with self.test_session(use_gpu=False) as sess:
        for _ in range(5):  # To test various random permutations
            sp_input = self._SparseTensorPlaceholder()
            input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
            sp_output = tf.sparse_reorder(sp_input)
            output_val = sess.run(sp_output, {sp_input: input_val})
            self.assertAllEqual(output_val.indices, expected_output_val.indices)
            self.assertAllEqual(output_val.values, expected_output_val.values)
            self.assertAllEqual(output_val.shape, expected_output_val.shape)
Example 8: testGradients
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def testGradients(self):
    with self.test_session(use_gpu=False):
        for _ in range(5):  # To test various random permutations
            input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
            sp_input = tf.SparseTensor(
                input_val.indices, input_val.values, input_val.shape)
            sp_output = tf.sparse_reorder(sp_input)
            err = tf.test.compute_gradient_error(
                sp_input.values,
                input_val.values.shape,
                sp_output.values,
                input_val.values.shape,
                x_init_value=input_val.values)
            self.assertLess(err, 1e-11)
Example 9: build_sparse_matrix_softmax
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def build_sparse_matrix_softmax(self, idx_non_zero_values, X, dense_shape_A):
    A = tf.SparseTensorValue(idx_non_zero_values, tf.squeeze(X), dense_shape_A)
    A = tf.sparse_reorder(A)  # n_edges x n_edges
    A = tf.sparse_softmax(A)
    return A
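tf.sparse_softmax, like the other sparse ops on this page, assumes canonically ordered indices, which is exactly why the reorder precedes it here. A self-contained sketch of the same pattern (the indices and logits are illustrative):

idx = np.array([[1, 1], [0, 0], [0, 1]], dtype=np.int64)  # deliberately unordered
logits = tf.constant([0.5, 1.0, 2.0])
att = tf.sparse_softmax(tf.sparse_reorder(
    tf.SparseTensor(idx, logits, dense_shape=[2, 2])))
# Row 0 softmaxes over its two entries; row 1 has a single entry of 1.0.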
Example 10: _build_sparse_matrix
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def _build_sparse_matrix(L):
    L = L.tocoo()
    indices = np.column_stack((L.row, L.col))
    L = tf.SparseTensor(indices, L.data, L.shape)
    return tf.sparse_reorder(L)
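A usage sketch for the helper above (any scipy sparse format works, since tocoo() handles the conversion):

import scipy.sparse

adj = scipy.sparse.random(5, 5, density=0.3, format='csr')
adj_tf = _build_sparse_matrix(adj)  # SparseTensor with canonically ordered indices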
Example 11: chebyshev5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def chebyshev5(self, x, L, Fout, K):
    N, M, Fin = x.get_shape()
    N, M, Fin = int(N), int(M), int(Fin)
    # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
    L = scipy.sparse.csr_matrix(L)
    L = graph.rescale_L(L, lmax=2)
    L = L.tocoo()
    indices = np.column_stack((L.row, L.col))
    L = tf.SparseTensor(indices, L.data, L.shape)
    L = tf.sparse_reorder(L)
    # Transform to Chebyshev basis
    x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
    x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
    x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
    def concat(x, x_):
        x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
        return tf.concat([x, x_], axis=0)  # K x M x Fin*N
    if K > 1:
        x1 = tf.sparse_tensor_dense_matmul(L, x0)
        x = concat(x, x1)
    for k in range(2, K):
        x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
        x = concat(x, x2)
        x0, x1 = x1, x2
    x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
    x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
    x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
    # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
    W = self._weight_variable([Fin*K, Fout], regularization=False)
    x = tf.matmul(x, W)  # N*M x Fout
    return tf.reshape(x, [N, M, Fout])  # N x M x Fout
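For reference, the loop in chebyshev5 implements the standard Chebyshev recurrence on the rescaled Laplacian, so the K polynomial terms are obtained with K-1 sparse matmuls and no dense matrix powers:

\bar{x}_0 = x, \qquad \bar{x}_1 = L\,\bar{x}_0, \qquad \bar{x}_k = 2L\,\bar{x}_{k-1} - \bar{x}_{k-2}, \quad k = 2, \dots, K-1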
Example 12: sparse_transpose
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def sparse_transpose(sp_input):
    transposed_indices = tf.reverse(tf.cast(sp_input.indices, tf.int32), axis=[1])
    transposed_values = sp_input.values
    transposed_shape = tf.reverse(tf.cast(sp_input.dense_shape, tf.int32), axis=[0])
    sp_output = tf.SparseTensor(tf.cast(transposed_indices, tf.int64),
                                transposed_values,
                                tf.cast(transposed_shape, tf.int64))
    sp_output = tf.sparse_reorder(sp_output)
    return sp_output
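The reorder is the essential step here: reversing each (row, col) pair transposes the matrix but generally destroys row-major ordering. An illustrative check (values made up):

sp = tf.SparseTensor([[0, 2], [1, 0]], [1.0, 2.0], [2, 3])
sp_t = sparse_transpose(sp)
# Raw reversed indices would be [[2, 0], [0, 1]]; after tf.sparse_reorder
# they come out as [[0, 1], [2, 0]] with values [2.0, 1.0].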
Example 13: cheby_conv
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def cheby_conv(x, L, lmax, feat_out, K, W):
    '''
    x : [batch_size, N_node, feat_in] - input of each time step
    nSample : number of samples = batch_size
    nNode : number of nodes in the graph
    feat_in : number of input features
    feat_out : number of output features
    L : Laplacian
    lmax : largest eigenvalue of L, used to rescale the Laplacian
    K : kernel size (number of Chebyshev coefficients)
    W : cheby_conv weight [K * feat_in, feat_out]
    '''
    nSample, nNode, feat_in = x.get_shape()
    nSample, nNode, feat_in = int(nSample), int(nNode), int(feat_in)
    L = graph.rescale_L(L, lmax)  # rescale the Laplacian
    L = L.tocoo()
    indices = np.column_stack((L.row, L.col))
    L = tf.SparseTensor(indices, L.data, L.shape)
    L = tf.sparse_reorder(L)
    x0 = tf.transpose(x, perm=[1, 2, 0])  # change it to [nNode, feat_in, nSample]
    x0 = tf.reshape(x0, [nNode, feat_in*nSample])
    x = tf.expand_dims(x0, 0)  # make it [1, nNode, feat_in*nSample]
    def concat(x, x_):
        x_ = tf.expand_dims(x_, 0)
        return tf.concat([x, x_], axis=0)
    if K > 1:
        x1 = tf.sparse_tensor_dense_matmul(L, x0)
        x = concat(x, x1)
    for k in range(2, K):
        x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0
        x = concat(x, x2)
        x0, x1 = x1, x2
    x = tf.reshape(x, [K, nNode, feat_in, nSample])
    x = tf.transpose(x, perm=[3, 1, 2, 0])
    x = tf.reshape(x, [nSample*nNode, feat_in*K])
    x = tf.matmul(x, W)  # no bias term
    out = tf.reshape(x, [nSample, nNode, feat_out])
    return out
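A usage sketch for cheby_conv (shapes are illustrative; the only hard requirement is that W has shape [K * feat_in, feat_out] to match the reshape before the matmul):

K, feat_in, feat_out = 3, 8, 16
W = tf.get_variable("cheby_W", shape=[K * feat_in, feat_out],
                    initializer=tf.truncated_normal_initializer(stddev=0.1))
# y = cheby_conv(x, L, lmax=2.0, feat_out=feat_out, K=K, W=W)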
Example 14: graph_conv_chebyshev
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sparse_reorder [as alias]
def graph_conv_chebyshev(self, x, L, K, F_out):
    """
    Graph convolutional operation.
    """
    # K = Chebyshev polynomial order & support size
    # F_out = No. of output features (per vertex)
    # B = Batch size
    # V = No. of vertices
    # F_in = No. of input features (per vertex)
    B, V, F_in = x.get_shape()
    B, V, F_in = int(B), int(V), int(F_in)
    # Rescale Laplacian and store as a TF sparse tensor (copy to not modify the shared L)
    L = scipy.sparse.csr_matrix(L)
    L = graph.rescale_L(L, lmax=2)
    L = L.tocoo()
    indices = np.column_stack((L.row, L.col))
    L = tf.SparseTensor(indices, L.data, L.shape)
    L = tf.sparse_reorder(L)
    L = tf.cast(L, tf.float32)
    # Transform to Chebyshev basis
    x0 = tf.transpose(x, perm=[1, 2, 0])  # V x F_in x B
    x0 = tf.reshape(x0, [V, F_in * B])  # V x F_in*B
    x = tf.expand_dims(x0, 0)  # 1 x V x F_in*B
    def concat(x, x_):
        x_ = tf.expand_dims(x_, 0)  # 1 x V x F_in*B
        return tf.concat([x, x_], axis=0)  # K x V x F_in*B
    if K > 1:
        x1 = tf.sparse_tensor_dense_matmul(L, x0)
        x = concat(x, x1)
    for k in range(2, K):
        x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # V x F_in*B
        x = concat(x, x2)
        x0, x1 = x1, x2
    x = tf.reshape(x, [K, V, F_in, B])  # K x V x F_in x B
    x = tf.transpose(x, perm=[3, 1, 2, 0])  # B x V x F_in x K
    x = tf.reshape(x, [B * V, F_in * K])  # B*V x F_in*K
    # Compose linearly F_in features to get F_out features
    W = tf.Variable(tf.truncated_normal([F_in * K, F_out], stddev=0.1), name="W")
    x = tf.matmul(x, W)  # B*V x F_out
    x = tf.reshape(x, [B, V, F_out])  # B x V x F_out
    return x
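A closing note: in TensorFlow 2.x this op lives on as tf.sparse.reorder, and the 1.x name tf.sparse_reorder used throughout these examples is a deprecated alias for it:

# TensorFlow 2.x equivalent of the calls above:
sp_ordered = tf.sparse.reorder(sp)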