This article collects typical usage examples of the Python method tensorflow.batch_matmul. If you are looking for how tensorflow.batch_matmul is used in practice, the curated code examples below may help. You can also browse further usage examples from the enclosing module, tensorflow.

The following shows 15 code examples of tensorflow.batch_matmul, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
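Note: tf.batch_matmul exists only in the TensorFlow 0.x releases; it was removed in TensorFlow 1.0, where batched multiplication was folded into tf.matmul and the adj_x/adj_y arguments became adjoint_a/adjoint_b. As a rough porting sketch (the tensor names and shapes here are invented for illustration, not taken from any example below):

import tensorflow as tf

# Hypothetical batched operands: (batch, m, k) x (batch, k, n).
x = tf.random_normal([8, 4, 3])
y = tf.random_normal([8, 3, 5])

# TensorFlow 0.x style, as used throughout the examples below:
#   z = tf.batch_matmul(x, y, adj_x=False, adj_y=False)
# TensorFlow >= 1.0 equivalent:
z = tf.matmul(x, y, adjoint_a=False, adjoint_b=False)  # shape (8, 4, 5)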
Example 1: transition
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def transition(h, share=None):
    # Compute the A, B, o linearization matrices.
    with tf.variable_scope("trans", reuse=share):
        for l in range(2):
            h = ReLU(h, 100, "l" + str(l))
        with tf.variable_scope("A"):
            v, r = tf.split(1, 2, linear(h, z_dim * 2))
            v1 = tf.expand_dims(v, -1)  # (batch, z_dim, 1)
            rT = tf.expand_dims(r, 1)   # (batch, 1, z_dim)
            I = tf.diag([1.] * z_dim)
            A = (I + tf.batch_matmul(v1, rT))  # (z_dim, z_dim) + (batch, z_dim, 1) * (batch, 1, z_dim); I is broadcast
        with tf.variable_scope("B"):
            B = linear(h, z_dim * u_dim)
            B = tf.reshape(B, [-1, z_dim, u_dim])
        with tf.variable_scope("o"):
            o = linear(h, z_dim)
    return A, B, o, v, r
Example 2: transition
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def transition(h):
    # Compute the A, B, o linearization matrices.
    with tf.variable_scope("trans"):
        for l in range(2):
            h = ReLU(h, 100, "l" + str(l))
        with tf.variable_scope("A"):
            v, r = tf.split(1, 2, linear(h, z_dim * 2))
            v1 = tf.expand_dims(v, -1)  # (batch, z_dim, 1)
            rT = tf.expand_dims(r, 1)   # (batch, 1, z_dim)
            I = tf.diag([1.] * z_dim)
            A = (I + tf.batch_matmul(v1, rT))  # (z_dim, z_dim) + (batch, z_dim, 1) * (batch, 1, z_dim); I is broadcast
        with tf.variable_scope("B"):
            B = linear(h, z_dim * u_dim)
            B = tf.reshape(B, [-1, z_dim, u_dim])
        with tf.variable_scope("o"):
            o = linear(h, z_dim)
    return A, B, o, v, r
Example 3: gesd_similarity
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def gesd_similarity(a, b):
    a = tf.nn.l2_normalize(a, dim=1)
    b = tf.nn.l2_normalize(b, dim=1)
    euclidean = tf.sqrt(tf.reduce_sum((a - b) ** 2, 1))
    mm = tf.reshape(
        tf.batch_matmul(
            tf.reshape(a, [-1, 1, tf.shape(a)[1]]),
            tf.transpose(
                tf.reshape(b, [-1, 1, tf.shape(a)[1]]),
                [0, 2, 1]
            )
        ),
        [-1]
    )
    sigmoid_dot = tf.exp(-1 * (mm + 1))
    return 1.0 / (1.0 + euclidean) * 1.0 / (1.0 + sigmoid_dot)
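For reference, the reshape/batch_matmul combination above just computes a row-wise dot product between the two l2-normalized matrices; a minimal equivalent sketch (not taken from the original project) is:

mm = tf.reduce_sum(a * b, 1)  # row-wise dot product of the normalized a and b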
Example 4: __call__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def __call__(self, input_data):
    batch_size = input_data.get_shape()[0].value
    num_steps = input_data.get_shape()[1].value
    memory_matrix = []
    for step in range(num_steps):
        left_num = tf.maximum(0, step + 1 - self._memory_size)
        right_num = num_steps - step - 1
        mem = self._memory_weights[tf.minimum(step, self._memory_size)::-1]
        d_batch = tf.pad(mem, [[left_num, right_num]])
        memory_matrix.append([d_batch])
    memory_matrix = tf.concat(0, memory_matrix)
    h_hatt = tf.batch_matmul([memory_matrix] * batch_size, input_data)
    h = tf.batch_matmul(input_data, [self._W1] * batch_size)
    h += tf.batch_matmul(h_hatt, [self._W2] * batch_size) + self._bias
    return h
Example 5: channel_wise_fc_layer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def channel_wise_fc_layer(bottom, name, bias=True):
    """
    channel-wise fully connected layer
    """
    _, width, height, n_feat_map = bottom.get_shape().as_list()
    input_reshape = tf.reshape(bottom, [-1, width * height, n_feat_map])  # order='C'
    input_transpose = tf.transpose(input_reshape, [2, 0, 1])  # n_feat_map * batch * d

    with tf.variable_scope(name):
        W = tf.get_variable(
            "W",
            shape=[n_feat_map, width * height, width * height],  # n_feat_map * d * d_filter
            initializer=tf.truncated_normal_initializer(0., 0.005))
        output = tf.batch_matmul(input_transpose, W)  # n_feat_map * batch * d_filter

        if bias == True:
            b = tf.get_variable(
                "b",
                shape=width * height,
                initializer=tf.constant_initializer(0.))
            output = tf.nn.bias_add(output, b)

    output_transpose = tf.transpose(output, [1, 2, 0])  # batch * d_filter * n_feat_map
    output_reshape = tf.reshape(output_transpose, [-1, width, height, n_feat_map])
    return output_reshape
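As a hedged usage sketch (the feature map below is invented for illustration; the original project applies this layer inside a larger network), the layer would be called on a 4-D activation tensor:

feat = tf.random_normal([16, 7, 7, 512])  # hypothetical (batch, width, height, channels) activations
out = channel_wise_fc_layer(feat, "channel_fc", bias=True)  # output has the same shape as feat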
Example 6: channel_wise_fc_layer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def channel_wise_fc_layer(bottom, name, bias=True):
    """
    channel-wise fully connected layer
    """
    _, width, height, n_feat_map = bottom.get_shape().as_list()
    input_reshape = tf.reshape(bottom, [-1, width * height, n_feat_map])  # order='C'
    input_transpose = tf.transpose(input_reshape, [2, 0, 1])  # n_feat_map * batch * d

    with tf.variable_scope(name):
        W = tf.get_variable(
            "W",
            shape=[n_feat_map, width * height, width * height],  # n_feat_map * d * d_filter
            initializer=tf.truncated_normal_initializer(0., 0.005))
        output = tf.batch_matmul(input_transpose, W)  # n_feat_map * batch * d_filter

        if bias == True:
            b = tf.get_variable(
                "b",
                shape=width * height,
                initializer=tf.constant_initializer(0.))
            output = tf.nn.bias_add(output, b)

    output_transpose = tf.transpose(output, [1, 2, 0])  # batch * d_filter * n_feat_map
    output_reshape = tf.reshape(output_transpose, [-1, width, height, n_feat_map])
    return output_reshape
Example 7: test_MatMul
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def test_MatMul(self):
    t = tf.matmul(*self.random((4, 3), (3, 5)), transpose_a=False, transpose_b=False)
    self.check(t)
    t = tf.matmul(*self.random((3, 4), (3, 5)), transpose_a=True, transpose_b=False)
    self.check(t)
    t = tf.matmul(*self.random((4, 3), (5, 3)), transpose_a=False, transpose_b=True)
    self.check(t)
    t = tf.matmul(*self.random((3, 4), (5, 3)), transpose_a=True, transpose_b=True)
    self.check(t)

# def test_BatchMatMul(self):
#     t = tf.batch_matmul(*self.random((2, 4, 4, 3), (2, 4, 3, 5)), adj_x=False, adj_y=False)
#     self.check(t)
#     t = tf.batch_matmul(*self.random((2, 4, 3, 4), (2, 4, 3, 5)), adj_x=True, adj_y=False)
#     self.check(t)
#     t = tf.batch_matmul(*self.random((2, 4, 4, 3), (2, 4, 5, 3)), adj_x=False, adj_y=True)
#     self.check(t)
#     t = tf.batch_matmul(*self.random((2, 4, 3, 4), (2, 4, 5, 3)), adj_x=True, adj_y=True)
#     self.check(t)
Example 8: __call__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def __call__(self, u_t, a, b, scope=None):
    """
    :param u_t: [N, M, d]
    :param a: [N, M, 1]
    :param b: [N, M, 1]
    :param mask: [N, M]
    :return:
    """
    N, M, d = self.batch_size, self.mem_size, self.hidden_size
    L, sL = self.L, self.sL
    with tf.name_scope(scope or self.__class__.__name__):
        L = tf.tile(tf.expand_dims(L, 0), [N, 1, 1])
        sL = tf.tile(tf.expand_dims(sL, 0), [N, 1, 1])
        logb = tf.log(b + 1e-9)
        logb = tf.concat(1, [tf.zeros([N, 1, 1]), tf.slice(logb, [0, 1, 0], [-1, -1, -1])])
        left = L * tf.exp(tf.batch_matmul(L, logb * sL))  # [N, M, M]
        right = a * u_t  # [N, M, d]
        u = tf.batch_matmul(left, right)  # [N, M, d]
    return u
Example 9: channel_wise_fc_layer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def channel_wise_fc_layer(self, input, name):  # bottom: (7x7x512)
    _, width, height, n_feat_map = input.get_shape().as_list()
    input_reshape = tf.reshape(input, [-1, width * height, n_feat_map])
    input_transpose = tf.transpose(input_reshape, [2, 0, 1])

    with tf.variable_scope(name):
        W = tf.get_variable(
            "W",
            shape=[n_feat_map, width * height, width * height],  # (512, 49, 49)
            initializer=tf.random_normal_initializer(0., 0.005))
        output = tf.batch_matmul(input_transpose, W)

    output_transpose = tf.transpose(output, [1, 2, 0])
    output_reshape = tf.reshape(output_transpose, [-1, height, width, n_feat_map])
    return output_reshape
Example 10: _verifyInverse
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def _verifyInverse(self, x):
    for np_type in [np.float32, np.float64]:
        for adjoint in False, True:
            y = x.astype(np_type)
            with self.test_session():
                # Verify that x^{-1} * x == Identity matrix.
                inv = tf.matrix_inverse(y, adjoint=adjoint)
                tf_ans = tf.batch_matmul(inv, y, adj_y=adjoint)
                np_ans = np.identity(y.shape[-1])
                if x.ndim > 2:
                    tiling = list(y.shape)
                    tiling[-2:] = [1, 1]
                    np_ans = np.tile(np_ans, tiling)
                out = tf_ans.eval()
                self.assertAllClose(np_ans, out)
                self.assertShapeEqual(y, tf_ans)
Example 11: _define_diag_covariance_probs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def _define_diag_covariance_probs(self, shard_id, shard):
    """Defines the diagonal covariance probabilities per example in a class.

    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.

    Returns a matrix num_examples * num_classes.
    """
    # num_classes X 1
    # TODO(xavigonzalvo): look into alternatives to log for
    # reparametrization of variance parameters.
    det_expanded = tf.reduce_sum(tf.log(self._covs + 1e-3),
                                 1, keep_dims=True)
    diff = shard - self._means
    x2 = tf.square(diff)
    cov_expanded = tf.expand_dims(1.0 / (self._covs + 1e-3), 2)
    # num_classes X num_examples
    x2_cov = tf.batch_matmul(x2, cov_expanded)
    x2_cov = tf.transpose(tf.squeeze(x2_cov, [2]))
    self._probs[shard_id] = -0.5 * (
        tf.to_float(self._dimensions) * tf.log(2.0 * np.pi) +
        tf.transpose(det_expanded) + x2_cov)
Example 12: _define_partial_maximization_operation
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def _define_partial_maximization_operation(self, shard_id, shard):
    """Computes the partial statistics of the means and covariances.

    Args:
      shard_id: current shard id.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    # Soft assignment of each data point to each of the two clusters.
    self._points_in_k[shard_id] = tf.reduce_sum(self._w[shard_id], 0,
                                                keep_dims=True)
    # Partial means.
    w_mul_x = tf.expand_dims(
        tf.matmul(self._w[shard_id],
                  tf.squeeze(shard, [0]), transpose_a=True), 1)
    self._w_mul_x.append(w_mul_x)
    # Partial covariances.
    x = tf.concat(0, [shard for _ in range(self._num_classes)])
    x_trans = tf.transpose(x, perm=[0, 2, 1])
    x_mul_w = tf.concat(0, [
        tf.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
        for k in range(self._num_classes)])
    self._w_mul_x2.append(tf.batch_matmul(x_mul_w, x))
Example 13: _updated_mat
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def _updated_mat(self, mat, v, diag):
    # Get the dense matrix defined by its square root, which is an update of `mat`:
    #   A = (mat + v D v^T) (mat + v D v^T)^T
    # D is the diagonal matrix with `diag` on the diagonal.
    # If diag is None, it defaults to the identity matrix, so D v^T = v^T.
    if diag is None:
        diag_vt = tf.matrix_transpose(v)
    else:
        diag_mat = tf.matrix_diag(diag)
        diag_vt = tf.batch_matmul(diag_mat, v, adj_y=True)
    v_diag_vt = tf.batch_matmul(v, diag_vt)
    sqrt = mat + v_diag_vt
    a = tf.batch_matmul(sqrt, sqrt, adj_y=True)
    return a.eval()
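The quantity being built here is A = (mat + v D v^T)(mat + v D v^T)^T. A small single-matrix NumPy sketch of the same computation (shapes invented for illustration; the TensorFlow version above additionally supports batch dimensions):

import numpy as np

mat = np.random.rand(4, 4)   # hypothetical base matrix
v = np.random.rand(4, 2)     # hypothetical update vectors
diag = np.random.rand(2)     # hypothetical diagonal of D

sqrt = mat + v.dot(np.diag(diag)).dot(v.T)  # mat + v D v^T
a = sqrt.dot(sqrt.T)                        # A = sqrt sqrt^T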
Example 14: testSqrtMatmulSingleMatrix
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def testSqrtMatmulSingleMatrix(self):
    with self.test_session():
        batch_shape = ()
        for k in [1, 4]:
            x_shape = batch_shape + (k, 3)
            x = self._rng.rand(*x_shape)
            chol_shape = batch_shape + (k, k)
            chol = self._random_cholesky_array(chol_shape)
            operator = operator_pd_cholesky.OperatorPDCholesky(chol)
            sqrt_operator_times_x = operator.sqrt_matmul(x)
            expected = tf.batch_matmul(chol, x)
            self.assertEqual(expected.get_shape(),
                             sqrt_operator_times_x.get_shape())
            self.assertAllClose(expected.eval(), sqrt_operator_times_x.eval())
Example 15: testSqrtMatmulBatchMatrixWithTranspose
# Required import: import tensorflow [as alias]
# Or: from tensorflow import batch_matmul [as alias]
def testSqrtMatmulBatchMatrixWithTranspose(self):
    with self.test_session():
        batch_shape = (2, 3)
        for k in [1, 4]:
            x_shape = batch_shape + (5, k)
            x = self._rng.rand(*x_shape)
            chol_shape = batch_shape + (k, k)
            chol = self._random_cholesky_array(chol_shape)
            operator = operator_pd_cholesky.OperatorPDCholesky(chol)
            sqrt_operator_times_x = operator.sqrt_matmul(x, transpose_x=True)
            # tf.batch_matmul is defined as x * y, so "y" is on the right, not "x".
            expected = tf.batch_matmul(chol, x, adj_y=True)
            self.assertEqual(expected.get_shape(),
                             sqrt_operator_times_x.get_shape())
            self.assertAllClose(expected.eval(), sqrt_operator_times_x.eval())