This article collects typical usage examples of the Python method keras.backend.batch_dot. If you are unsure what backend.batch_dot does, how to call it, or what real uses look like, the curated code samples below should help. You can also explore further usage examples of keras.backend, the module this method belongs to.
The following shows 15 code examples of backend.batch_dot, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
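Before the examples, here is a minimal, self-contained sketch of what batch_dot computes: for each element of the batch it contracts one axis of the first tensor against one axis of the second. The shapes below are arbitrary illustrations, not taken from any of the examples.

import numpy as np
from keras import backend as K

# Contract axis 2 of x with axis 2 of y, independently for each of the 4 batch items.
x = K.variable(np.random.rand(4, 2, 3))
y = K.variable(np.random.rand(4, 5, 3))
z = K.batch_dot(x, y, axes=(2, 2))
print(K.int_shape(z))  # (4, 2, 5): the batch axis stays, the contracted axes disappear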
Example 1: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def call(self, inputs, **kwargs):
    # (batch_size, 1, input_num_capsule, input_dim_capsule)
    expand_inputs = K.expand_dims(inputs, axis=1)
    # (batch_size, num_capsule, input_num_capsule, input_dim_capsule)
    expand_inputs = K.tile(expand_inputs, (1, self.num_capsule, 1, 1))
    # (batch_size, num_capsule, input_num_capsule, dim_capsule)
    u_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, axes=[2, 3]), expand_inputs)

    if self.num_routing <= 0:
        self.num_routing = 3
    # (batch_size, num_capsule, input_num_capsule)
    # Note: K.zeros with a symbolic batch size may fail on some backends;
    # K.zeros_like on a slice of u_hat (as in Example 4) is safer.
    b = K.zeros((K.shape(u_hat)[0], self.num_capsule, self.input_num_capsule))
    for i in range(self.num_routing):  # the original used Python 2's xrange
        # (batch_size, num_capsule, input_num_capsule)
        c = softmax(b, axis=1)
        # (batch_size, num_capsule, dim_capsule)
        s = K.batch_dot(c, u_hat, axes=[2, 2])
        squashed_s = squash(s)
        if i < self.num_routing - 1:
            # (batch_size, num_capsule, input_num_capsule)
            b += K.batch_dot(squashed_s, u_hat, axes=[2, 3])
    return squashed_s
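Example 1 calls two helpers, squash and an axis-aware softmax, that are defined elsewhere in its repository. As a hedged sketch, typical definitions (squash follows Eq. 1 of the CapsNet paper by Sabour et al.) look like this:

from keras import backend as K

def squash(vectors, axis=-1):
    # Shrink vector norms into [0, 1) while keeping direction.
    s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
    return scale * vectors

def softmax(x, axis=-1):
    # Axis-aware softmax; older Keras K.softmax had no axis argument.
    ex = K.exp(x - K.max(x, axis=axis, keepdims=True))
    return ex / K.sum(ex, axis=axis, keepdims=True)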
Example 2: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def call(self, x, mask=None):
    # Note: the .T attribute exists on Theano tensors but not TensorFlow ones,
    # so this layer appears to assume the Theano backend.
    e1 = x[0].T
    e2 = x[1].T
    batch_size = K.shape(x[0])[0]
    sim = []
    V_out = K.dot(self.V, K.concatenate([e1, e2], axis=0))
    for i in range(self.k):
        temp = K.batch_dot(K.dot(e1.T, self.W[i, :, :]), e2.T, axes=1)
        sim.append(temp)
    sim = K.reshape(sim, (self.k, batch_size))
    tensor_bi_product = self.activation(V_out + sim)
    tensor_bi_product = K.dot(self.U.T, tensor_bi_product).T
    return tensor_bi_product
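Example 2 looks like a Neural Tensor Network bilinear layer: slice i of the score is e1ᵀ W[i] e2. A NumPy sanity check of that contraction (the shapes are assumptions, not taken from the layer):

import numpy as np

batch, dim, k = 32, 100, 4          # hypothetical sizes
e1 = np.random.rand(batch, dim)
e2 = np.random.rand(batch, dim)
W = np.random.rand(k, dim, dim)

# One bilinear form per slice: sim[b, i] = e1[b] @ W[i] @ e2[b]
sim = np.einsum('bd,ide,be->bi', e1, W, e2)
print(sim.shape)  # (32, 4)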
Example 3: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
# This example also requires: import tensorflow as tf
def call(self, inputs, training=None):
    inputs_expand = K.expand_dims(inputs, 1)
    inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])
    if self.channels != 0:
        # Share each weight matrix across input_num_capsule / channels consecutive capsules.
        W2 = K.repeat_elements(self.W, int(self.input_num_capsule / self.channels), 1)
    else:
        W2 = self.W
    inputs_hat = K.map_fn(lambda x: K.batch_dot(x, W2, [2, 3]), elems=inputs_tiled)
    b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])

    assert self.routings > 0, 'The routings should be > 0.'
    for i in range(self.routings):
        c = tf.nn.softmax(b, axis=1)  # the original used `dim=1`, the deprecated spelling of `axis=1`
        outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]) + self.B)
        if i < self.routings - 1:
            b += K.batch_dot(outputs, inputs_hat, [2, 3])
    return outputs
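The channel trick in Example 3 reuses one transformation matrix for a whole group of input capsules. A tiny NumPy illustration of what the K.repeat_elements call does (the numbers are made up):

import numpy as np

input_num_capsule, channels = 6, 3                 # hypothetical sizes
W = np.arange(channels)                            # stand-in: one "weight" per channel
W2 = np.repeat(W, input_num_capsule // channels)   # each weight reused for 2 capsules
print(W2)  # [0 0 1 1 2 2]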
Example 4: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])
    return outputs
Example 5: routing

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def routing(u_hat_vecs, beta_a, iterations, output_capsule_num, i_activations):
    b = keras.backend.zeros_like(u_hat_vecs[:, :, :, 0])
    if i_activations is not None:
        i_activations = i_activations[..., tf.newaxis]
    for i in range(iterations):
        if False:  # disabled "leaky" routing: an extra zero logit lets capsules route nowhere
            leak = tf.zeros_like(b, optimize=True)
            leak = tf.reduce_sum(leak, axis=1, keepdims=True)  # the original used deprecated `keep_dims=`
            leaky_logits = tf.concat([leak, b], axis=1)
            leaky_routing = tf.nn.softmax(leaky_logits, axis=1)  # the original used deprecated `dim=`
            c = tf.split(leaky_routing, [1, output_capsule_num], axis=1)[1]
        else:
            c = softmax(b, 1)
        # if i_activations is not None:
        #     tf.transpose(tf.transpose(c, perm=[0, 2, 1]) * i_activations, perm=[0, 2, 1])
        outputs = squash_v1(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < iterations - 1:
            b = b + K.batch_dot(outputs, u_hat_vecs, [2, 3])
    poses = outputs
    activations = K.sqrt(K.sum(K.square(poses), 2))
    return poses, activations
Example 6: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def call(self, inputs, mask=None, **kwargs):
    if isinstance(inputs, list):
        query, key, value = inputs
    else:
        query = key = value = inputs
    if isinstance(mask, list):
        mask = mask[1]
    feature_dim = K.shape(query)[-1]
    # Scaled dot-product attention scores, with a numerically stable softmax.
    e = K.batch_dot(query, key, axes=2) / K.sqrt(K.cast(feature_dim, dtype=K.floatx()))
    e = K.exp(e - K.max(e, axis=-1, keepdims=True))
    if self.history_only:
        # Causal mask: position i may only attend to positions j <= i.
        query_len, key_len = K.shape(query)[1], K.shape(key)[1]
        indices = K.tile(K.expand_dims(K.arange(key_len), axis=0), [query_len, 1])
        upper = K.expand_dims(K.arange(key_len), axis=-1)
        e *= K.expand_dims(K.cast(indices <= upper, K.floatx()), axis=0)
    if mask is not None:
        e *= K.cast(K.expand_dims(mask, axis=-2), K.floatx())
    a = e / (K.sum(e, axis=-1, keepdims=True) + K.epsilon())
    v = K.batch_dot(a, value)
    if self.return_attention:
        return [v, a]
    return v
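The two batch_dot calls above are the whole of scaled dot-product attention. A standalone sketch with hypothetical shapes (no masking):

import numpy as np
from keras import backend as K

q = K.variable(np.random.rand(2, 5, 8))   # (batch, query_len, dim)
k = K.variable(np.random.rand(2, 7, 8))   # (batch, key_len, dim)
v = K.variable(np.random.rand(2, 7, 8))

e = K.batch_dot(q, k, axes=2) / K.sqrt(K.cast(8, K.floatx()))  # (2, 5, 7) scores
a = K.softmax(e)                                               # each row sums to 1
out = K.batch_dot(a, v)                                        # (2, 5, 8)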
Example 7: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    outputs = None
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])
    return outputs
Example 8: _get_weight_vector

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def _get_weight_vector(self, M, w_tm1, k, beta, g, s, gamma):
    # M = tf.Print(M, [M, w_tm1, k], message='get weights beg1: ')
    # M = tf.Print(M, [beta, g, s, gamma], message='get weights beg2: ')
    # Content addressing, see Chapter 3.3.1:
    num = beta * _cosine_distance(M, k)
    w_c = K.softmax(num)  # It turns out that equation (5) is just softmax.
    # Location addressing, see Chapter 3.3.2:
    # Equation 7:
    w_g = (g * w_c) + (1 - g) * w_tm1
    # Equation 8: C_s is the circular convolution with the shift weighting s
    # C_w = K.sum((self.C[None, :, :, :] * w_g[:, None, None, :]), axis=3)
    C_s = K.sum(K.repeat_elements(self.C[None, :, :, :], self.batch_size, axis=0) * s[:, :, None, None], axis=1)
    w_tilda = K.batch_dot(C_s, w_g)
    # Equation 9: sharpen with gamma, then renormalise
    w_out = _renorm(w_tilda ** gamma)
    return w_out
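The _renorm helper is not shown in this snippet. A plausible definition, assuming it implements the normalisation in Equation 9 of the NTM paper (a guess about the repository's helper, not its verified source; it assumes `from keras import backend as K`):

def _renorm(x):
    # Rescale the sharpened weighting so it sums to 1 over memory locations.
    return x / (K.sum(x, axis=1, keepdims=True) + K.epsilon())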
Example 9: test_linear_operations

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def test_linear_operations(self):
    check_two_tensor_operation('dot', (4, 2), (2, 4), BACKENDS)
    check_two_tensor_operation('dot', (4, 2), (5, 2, 3), BACKENDS)

    check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 5, 3),
                               BACKENDS, cntk_two_dynamicity=True, axes=(2, 2))
    check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 3),
                               BACKENDS, cntk_two_dynamicity=True, axes=(2, 1))
    check_two_tensor_operation('batch_dot', (4, 2), (4, 2, 3),
                               BACKENDS, cntk_two_dynamicity=True, axes=(1, 1))
    check_two_tensor_operation('batch_dot', (32, 20), (32, 20),
                               BACKENDS, cntk_two_dynamicity=True, axes=1)
    check_two_tensor_operation('batch_dot', (32, 20), (32, 20),
                               BACKENDS, cntk_two_dynamicity=True, axes=(1, 1))

    check_single_tensor_operation('transpose', (4, 2), BACKENDS)
    check_single_tensor_operation('reverse', (4, 3, 2), BACKENDS, axes=1)
    check_single_tensor_operation('reverse', (4, 3, 2), [KTH, KTF], axes=(1, 2))
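The first batch_dot case in this test can be checked against NumPy; einsum makes the contraction explicit (a sketch, independent of the test harness):

import numpy as np

a = np.random.rand(4, 2, 3)
b = np.random.rand(4, 5, 3)
# Same contraction as K.batch_dot(a, b, axes=(2, 2)): shared batch axis,
# last axes contracted, giving shape (4, 2, 5).
ref = np.einsum('bij,bkj->bik', a, b)
print(ref.shape)  # (4, 2, 5)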
Example 10: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def call(self, x, mask=None):
    uit = K.tanh(K.dot(x, self.Ws1))
    ait = K.dot(uit, self.Ws2)
    ait = K.permute_dimensions(ait, (0, 2, 1))
    A = K.softmax(ait, axis=1)
    M = K.batch_dot(A, x)
    if self.punish:
        # Penalisation term on A A^T - I: encourages the attention hops to differ.
        A_T = K.permute_dimensions(A, (0, 2, 1))
        tile_eye = K.tile(K.eye(self.weight_ws2), [self.batch_size, 1])
        tile_eye = K.reshape(
            tile_eye, shape=[-1, self.weight_ws2, self.weight_ws2])
        AA_T = K.batch_dot(A, A_T) - tile_eye
        P = K.l2_normalize(AA_T, axis=(1, 2))
        return M, P
    else:
        return M
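The punish branch follows the penalisation term of structured self-attention: it pushes A·Aᵀ toward the identity so that different attention hops focus on different positions. A NumPy illustration of the quantity being penalised (the shapes are made up):

import numpy as np

A = np.random.rand(2, 4, 10)            # hypothetical (batch, hops, timesteps)
A /= A.sum(axis=2, keepdims=True)       # each hop is a distribution over timesteps
residual = A @ A.transpose(0, 2, 1) - np.eye(4)
penalty = np.square(residual).sum(axis=(1, 2))  # squared Frobenius norm per sample
print(penalty.shape)  # (2,)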
Example 11: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
# This example also requires: import numpy as np
def call(self, x, mask=None):
    q, k, v = x
    d_k = q.shape.as_list()[2]

    # in pure tensorflow:
    # weights = tf.matmul(x_batch, tf.transpose(y_batch, perm=[0, 2, 1]))
    # normalized_weights = tf.nn.softmax(weights / scaling)
    # output = tf.matmul(normalized_weights, x_batch)
    weights = K.batch_dot(q, k, axes=[2, 2])

    if mask is not None:
        # add mask weights
        if isinstance(mask, (list, tuple)):
            if len(mask) > 1:  # the original checked > 0, which would reject even a length-1 list
                raise ValueError("mask can only be a Tensor or a list of length 1 containing a tensor.")
            mask = mask[0]
        weights += -1e10 * (1 - mask)

    normalized_weights = K.softmax(weights / np.sqrt(d_k))
    output = K.batch_dot(normalized_weights, v)
    if self._return_attention:
        return [output, normalized_weights]
    else:
        return output
Example 12: _additive_similarity

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def _additive_similarity(self, source, query):
    concatenation = K.concatenate([source, query], axis=2)
    nonlinearity = K.tanh(K.dot(concatenation, self._weights["w_a"]))
    # tile the weight vector (1, 1, dim) for each time step and each element of the batch -> (bs, T, dim)
    source_shape = K.shape(source)
    vaeff = K.tile(K.expand_dims(self._weights["v_a"], 0), [source_shape[0], source_shape[1], 1])
    similarity = K.batch_dot(K.permute_dimensions(vaeff, [0, 2, 1]), nonlinearity, axes=[1, 2])
    return similarity
Example 13: _multiplicative_similarity

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def _multiplicative_similarity(self, source, query):
    qp = K.dot(query, self._weights["w_a"])
    similarity = K.batch_dot(K.permute_dimensions(qp, [0, 2, 1]), source, axes=[1, 2])
    return similarity
Example 14: _transform_trans

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
# This example also assumes the Theano backend: import theano.tensor as T
def _transform_trans(theta, input):
    batch1, step1, dim1 = input.shape
    # Split the flat (x, y, z, x, y, z, ...) coordinates into dim1 // 3 points of 3.
    input = K.reshape(input, (batch1, step1, dim1 // 3, 3))
    input = K.reshape(input, (batch1 * step1, dim1 // 3, 3))
    input = K.permute_dimensions(input, [0, 2, 1])
    # Append a row of ones: homogeneous coordinates for the affine transform theta.
    add = T.ones((batch1 * step1, 1, dim1 // 3))
    input = K.concatenate([input, add], axis=1)
    output = K.batch_dot(theta, input)
    output = K.permute_dimensions(output, [0, 2, 1])
    output = K.reshape(output, (output.shape[0], dim1))
    output = K.reshape(output, (batch1, step1, output.shape[1]))
    return output

Developer: microsoft | Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition | Lines of code: 16 | Source file: transform_rnn.py
Example 15: _fusion

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_dot [as alias]
def _fusion(theta):
    # Compose per-sample rotations about z, y, then x into one rotation matrix.
    rotation_x = _rotation_x(theta)
    rotation_y = _rotation_y(theta)
    rotation_z = _rotation_z(theta)
    rot = K.batch_dot(rotation_z, rotation_y)
    rot = K.batch_dot(rot, rotation_x)
    return rot

Developer: microsoft | Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition | Lines of code: 12 | Source file: transform_rnn.py
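The _rotation_* helpers are not shown here. As a hedged sketch of what one might look like, this builds a standard rotation matrix about the z-axis per batch element (the real helpers in transform_rnn.py may differ):

import numpy as np
from keras import backend as K

def rotation_z(theta_z):
    # theta_z: (batch,) angles in radians; returns (batch, 3, 3) rotation matrices.
    c, s = K.cos(theta_z), K.sin(theta_z)
    zero, one = K.zeros_like(theta_z), K.ones_like(theta_z)
    row0 = K.stack([c, -s, zero], axis=1)
    row1 = K.stack([s, c, zero], axis=1)
    row2 = K.stack([zero, zero, one], axis=1)
    return K.stack([row0, row1, row2], axis=1)

rot = rotation_z(K.variable(np.array([0.0, np.pi / 2])))
print(K.int_shape(rot))  # (2, 3, 3)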