This article collects typical usage examples of the Python method keras.backend.local_conv1d. If you have been asking yourself how exactly backend.local_conv1d is used in Python, or what working examples look like, the curated code examples below may help. You can also explore further usage examples of the containing module, keras.backend.
The following shows 5 code examples of backend.local_conv1d, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
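Before the examples, here is a minimal standalone sketch of the backend call itself, assuming a graph-mode Keras 2 backend; all sizes are illustrative assumptions, not taken from the examples. K.local_conv1d applies a 1D convolution whose weights are not shared across positions: inputs has shape (batch, steps, input_dim) and kernel holds one (kernel_size * input_dim, filters) matrix per output step.

import numpy as np
from keras import backend as K

# Illustrative sizes (assumptions for this sketch).
batch, steps, input_dim, filters = 2, 7, 4, 6
kernel_size, stride = 1, 1
output_length = (steps - kernel_size) // stride + 1  # 7 here

x = K.variable(np.random.rand(batch, steps, input_dim))
# Unshared weights: a separate matrix for every output position.
w = K.variable(np.random.rand(output_length, kernel_size * input_dim, filters))

y = K.local_conv1d(x, w, (kernel_size,), (stride,))
print(K.eval(y).shape)  # (2, 7, 6) = (batch, output_length, filters)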
Example 1: call
# Required module: from keras import backend [as alias]
# Or: from keras.backend import local_conv1d [as alias]
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    for i in range(self.routings):
        # Transpose so softmax normalizes over num_capsule (older K.softmax
        # has no axis argument); the second permute restores b unchanged.
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])
    return outputs
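The branch between K.conv1d and K.local_conv1d implies two different shapes for self.W. The build() below is a sketch of how such a layer typically creates that weight, assumed from common Capsule implementations rather than taken from this example (the class name Capsule is hypothetical):

def build(self, input_shape):
    # input_shape: (batch, input_num_capsule, input_dim_capsule)
    input_dim_capsule = input_shape[-1]
    if self.share_weights:
        # One kernel shared across all positions -> plain K.conv1d.
        self.W = self.add_weight(
            name='capsule_kernel',
            shape=(1, input_dim_capsule,
                   self.num_capsule * self.dim_capsule),
            initializer='glorot_uniform',
            trainable=True)
    else:
        # One kernel per input position -> K.local_conv1d.
        input_num_capsule = input_shape[-2]
        self.W = self.add_weight(
            name='capsule_kernel',
            shape=(input_num_capsule, input_dim_capsule,
                   self.num_capsule * self.dim_capsule),
            initializer='glorot_uniform',
            trainable=True)
    super(Capsule, self).build(input_shape)  # Capsule: hypothetical class name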
Example 2: call
# Required module: from keras import backend [as alias]
# Or: from keras.backend import local_conv1d [as alias]
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    outputs = None
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])
    return outputs
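All five examples apply self.activation to the routed sum. In the capsule literature this activation is usually the squash non-linearity of Sabour et al. (2017); the helper below is a sketch of that common choice, not code taken from these examples:

import keras.backend as K

def squash(x, axis=-1):
    # v = (|s|^2 / (1 + |s|^2)) * s / |s|: short vectors shrink toward zero,
    # long vectors approach unit length.
    s_squared_norm = K.sum(K.square(x), axis=axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
    return scale * x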
Example 3: call
# Required module: from keras import backend [as alias]
# Or: from keras.backend import local_conv1d [as alias]
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))  # noqa
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]  # noqa

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]  # noqa
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]  # noqa
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        # This variant routes batch_dot through tf.keras.backend, so it also
        # needs `import tensorflow as tf` next to the keras backend import.
        outputs = self.activation(tf.keras.backend.batch_dot(c, u_hat_vecs, [2, 2]))  # noqa
        if i < self.routings - 1:
            b = tf.keras.backend.batch_dot(outputs, u_hat_vecs, [2, 3])
    return outputs
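Each call above belongs to a custom Capsule layer whose class definition is not shown on this page. A typical text-classification usage, assuming a Capsule class with the constructor arguments implied by the attributes above (num_capsule, dim_capsule, routings, share_weights; all names hypothetical), might look like:

from keras import layers, models

# `Capsule` is the hypothetical layer class whose call() is shown above.
inputs = layers.Input(shape=(100,), dtype='int32')
x = layers.Embedding(20000, 128)(inputs)
x = layers.Bidirectional(layers.GRU(64, return_sequences=True))(x)
x = Capsule(num_capsule=10, dim_capsule=16,
            routings=3, share_weights=True)(x)
x = layers.Flatten()(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model = models.Model(inputs, outputs)
model.compile(loss='binary_crossentropy', optimizer='adam')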
Example 4: call
# Required module: from keras import backend [as alias]
# Or: from keras.backend import local_conv1d [as alias]
def call(self, inputs):
    if self.share_weights:
        u_hat_vectors = K.conv1d(inputs, self.W)
    else:
        u_hat_vectors = K.local_conv1d(inputs, self.W, [1], [1])
    # u_hat_vectors: the spatially transformed input vectors (via local_conv1d)

    batch_size = K.shape(inputs)[0]
    input_num_capsule = K.shape(inputs)[1]
    u_hat_vectors = K.reshape(u_hat_vectors, (batch_size,
                                              input_num_capsule,
                                              self.num_capsule,
                                              self.dim_capsule))
    u_hat_vectors = K.permute_dimensions(u_hat_vectors, (0, 2, 1, 3))

    routing_weights = K.zeros_like(u_hat_vectors[:, :, :, 0])
    for i in range(self.routings):
        capsule_weights = K.softmax(routing_weights, 1)
        outputs = K.batch_dot(capsule_weights, u_hat_vectors, [2, 2])
        # Newer Keras versions broadcast batch_dot differently and may
        # return a 4-D tensor here; the ndim guards fold that case back.
        if K.ndim(outputs) == 4:
            outputs = K.sum(outputs, axis=1)
        if i < self.routings - 1:
            outputs = K.l2_normalize(outputs, -1)
            routing_weights = K.batch_dot(outputs, u_hat_vectors, [2, 3])
            if K.ndim(routing_weights) == 4:
                routing_weights = K.sum(routing_weights, axis=1)
    return self.activation(outputs)
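The ndim guards above exist because K.batch_dot changed behavior around Keras 2.3 when its two arguments have different ranks. A standalone shape check, with illustrative (assumed) sizes, for the routing contraction used in every example:

import numpy as np
from keras import backend as K

c = K.variable(np.random.rand(2, 10, 50))          # (batch, num_capsule, input_num_capsule)
u_hat = K.variable(np.random.rand(2, 10, 50, 16))  # (..., dim_capsule)
s = K.batch_dot(c, u_hat, [2, 2])
# Prints (2, 10, 16) on Keras 2.2-era backends; later versions may yield a
# 4-D shape instead, which is what the K.ndim(...) == 4 guards handle.
print(K.eval(s).shape)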
Example 5: call
# Required module: from keras import backend [as alias]
# Or: from keras.backend import local_conv1d [as alias]
def call(self, inputs):
    """Following the routing algorithm from Hinton's paper,
    but replace b = b + <u,v> with b = <u,v>.
    This change can improve the feature representation of the Capsule.

    However, you can replace
        b = K.batch_dot(outputs, hat_inputs, [2, 3])
    with
        b += K.batch_dot(outputs, hat_inputs, [2, 3])
    to realize a standard routing.
    """
    if self.share_weights:
        hat_inputs = K.conv1d(inputs, self.kernel)
    else:
        hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

    batch_size = K.shape(inputs)[0]
    input_num_capsule = K.shape(inputs)[1]
    hat_inputs = K.reshape(hat_inputs,
                           (batch_size, input_num_capsule,
                            self.num_capsule, self.dim_capsule))
    hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

    b = K.zeros_like(hat_inputs[:, :, :, 0])
    for i in range(self.routings):
        # `softmax` is an axis-aware helper, not K.softmax (see sketch below);
        # `o` must be computed before the theano guard can sum it.
        c = softmax(b, 1)
        o = self.activation(K.batch_dot(c, hat_inputs, [2, 2]))
        if K.backend() == 'theano':
            o = K.sum(o, axis=1)
        if i < self.routings - 1:
            b = K.batch_dot(o, hat_inputs, [2, 3])
            if K.backend() == 'theano':
                o = K.sum(o, axis=1)
    return o
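The unqualified softmax above is not keras.backend.softmax: the source example defines its own axis-aware helper, since older K.softmax only normalized over the last axis. A sketch of that helper:

import keras.backend as K

def softmax(x, axis=-1):
    # Numerically stable softmax along an arbitrary axis.
    ex = K.exp(x - K.max(x, axis=axis, keepdims=True))
    return ex / K.sum(ex, axis=axis, keepdims=True)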