This page collects typical usage examples of the Python method keras.backend.conv1d. If you are unsure what backend.conv1d does or how to call it, the curated code examples below should help. You can also browse further usage examples from the containing module, keras.backend.
The following presents 10 code examples of backend.conv1d, ordered by popularity.
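Before the examples, here is a minimal, self-contained sketch of calling K.conv1d directly; the shapes and values are illustrative assumptions, not taken from the examples below. The input uses the default channels_last layout (batch, steps, channels), and the kernel is a 3D tensor of shape (kernel_size, input_channels, filters):

import numpy as np
from keras import backend as K

# Illustrative input: a batch of 2 sequences, 10 steps, 8 channels.
x = K.constant(np.random.rand(2, 10, 8))
# K.conv1d kernels have shape (kernel_size, input_channels, filters).
kernel = K.constant(np.random.rand(3, 8, 16))

y = K.conv1d(x, kernel, strides=1, padding='same')
print(K.int_shape(y))  # (2, 10, 16): 'same' padding preserves the 10 steps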
Example 1: call

# Required import: from keras import backend as K
# Alternatively: from keras.backend import conv1d
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])
    return outputs
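The call() above relies on a weight self.W created in the layer's build(), which the source does not show. As a point of reference, here is a hedged sketch of what that build() typically looks like in the common Capsule implementations this example follows; the exact shapes and initializer are assumptions:

def build(self, input_shape):
    input_dim_capsule = input_shape[-1]
    if self.share_weights:
        # K.conv1d expects a kernel of shape (kernel_size, input_dim, filters).
        self.W = self.add_weight(name='capsule_kernel',
                                 shape=(1, input_dim_capsule,
                                        self.num_capsule * self.dim_capsule),
                                 initializer='glorot_uniform',
                                 trainable=True)
    else:
        # K.local_conv1d expects one kernel per output position:
        # (output_length, kernel_size * input_dim, filters).
        input_num_capsule = input_shape[-2]
        self.W = self.add_weight(name='capsule_kernel',
                                 shape=(input_num_capsule, input_dim_capsule,
                                        self.num_capsule * self.dim_capsule),
                                 initializer='glorot_uniform',
                                 trainable=True)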
Example 2: call

# Required import: from keras import backend as K
# Alternatively: from keras.backend import conv1d
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    outputs = None
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])
    return outputs
Example 3: call

# Required imports: from keras import backend as K
#                   import tensorflow as tf
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(tf.keras.backend.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = tf.keras.backend.batch_dot(outputs, u_hat_vecs, [2, 3])
    return outputs
Example 4: call

# Required imports: import numpy as np
#                   from keras import backend as K
def call(self, inputs):
    # Use the static shape: the None check and kernel-shape tuple need Python ints.
    input_shape = K.int_shape(inputs)
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis]

    # Weight normalization: rescale each kernel column to unit L2 norm, then scale by gamma.
    ker_shape = self.kernel_size + (input_dim, self.filters)
    nb_kernels = ker_shape[-2] * ker_shape[-1]
    kernel_shape_4_norm = (np.prod(self.kernel_size), nb_kernels)
    reshaped_kernel = K.reshape(self.kernel, kernel_shape_4_norm)
    normalized_weight = K.l2_normalize(reshaped_kernel, axis=0, epsilon=self.epsilon)
    normalized_weight = K.reshape(self.gamma, (1, nb_kernels)) * normalized_weight
    shaped_kernel = K.reshape(normalized_weight, ker_shape)
    shaped_kernel._keras_shape = ker_shape

    convArgs = {"strides": self.strides[0] if self.rank == 1 else self.strides,
                "padding": self.padding,
                "data_format": self.data_format,
                "dilation_rate": self.dilation_rate[0] if self.rank == 1 else self.dilation_rate}
    convFunc = {1: K.conv1d,
                2: K.conv2d,
                3: K.conv3d}[self.rank]
    output = convFunc(inputs, shaped_kernel, **convArgs)

    if self.use_bias:
        output = K.bias_add(output, self.bias, data_format=self.data_format)
    if self.activation is not None:
        output = self.activation(output)
    return output
Example 5: call

# Required imports: import tensorflow as tf
#                   from keras import backend as K
def call(self, inputs, training=None):
    def _l2normalize(v, eps=1e-12):
        return v / (K.sum(v ** 2) ** 0.5 + eps)

    def power_iteration(W, u):
        # According to the paper, a single power-iteration step is enough.
        _u = u
        _v = _l2normalize(K.dot(_u, K.transpose(W)))
        _u = _l2normalize(K.dot(_v, W))
        return _u, _v

    # Spectral normalization
    W_shape = self.kernel.shape.as_list()
    # Flatten the kernel into a matrix
    W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
    _u, _v = power_iteration(W_reshaped, self.u)
    # Estimate the largest singular value sigma
    sigma = K.dot(_v, W_reshaped)
    sigma = K.dot(sigma, K.transpose(_u))
    # Normalize the kernel by sigma
    W_bar = W_reshaped / sigma
    # Reshape the weight tensor back; during training, also persist the updated u
    if training in {0, False}:
        W_bar = K.reshape(W_bar, W_shape)
    else:
        with tf.control_dependencies([self.u.assign(_u)]):
            W_bar = K.reshape(W_bar, W_shape)

    outputs = K.conv1d(
        inputs,
        W_bar,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate)
    if self.use_bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
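This call() also assumes a persistent, non-trainable power-iteration vector self.u. A plausible build-time definition, assuming the layer defines self.filters (an assumption; the source's build() is not shown):

# Hypothetical: create the power-iteration state in build(); shape (1, filters)
# matches W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]]) above.
self.u = self.add_weight(shape=(1, self.filters),
                         initializer='random_normal',
                         name='sn_u',
                         trainable=False)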
Example 6: call

# Required import: from keras import backend as K
# Alternatively: from keras.backend import conv1d
def call(self, inputs):
    if self.share_weights:
        u_hat_vectors = K.conv1d(inputs, self.W)
    else:
        u_hat_vectors = K.local_conv1d(inputs, self.W, [1], [1])
    # u_hat_vectors: the spatially transformed input vectors (via (local_)conv1d)

    batch_size = K.shape(inputs)[0]
    input_num_capsule = K.shape(inputs)[1]
    u_hat_vectors = K.reshape(u_hat_vectors, (batch_size,
                                              input_num_capsule,
                                              self.num_capsule,
                                              self.dim_capsule))
    u_hat_vectors = K.permute_dimensions(u_hat_vectors, (0, 2, 1, 3))

    routing_weights = K.zeros_like(u_hat_vectors[:, :, :, 0])
    for i in range(self.routings):
        capsule_weights = K.softmax(routing_weights, 1)
        outputs = K.batch_dot(capsule_weights, u_hat_vectors, [2, 2])
        if K.ndim(outputs) == 4:
            outputs = K.sum(outputs, axis=1)
        if i < self.routings - 1:
            outputs = K.l2_normalize(outputs, -1)
            routing_weights = K.batch_dot(outputs, u_hat_vectors, [2, 3])
            if K.ndim(routing_weights) == 4:
                routing_weights = K.sum(routing_weights, axis=1)
    return self.activation(outputs)
Example 7: call

# Required import: from keras import backend as K
# `softmax` below is a helper defined elsewhere in the source
# (a softmax over a chosen axis).
def call(self, inputs):
    """Following the routing algorithm from Hinton's paper,
    but replacing b = b + <u,v> with b = <u,v>.

    This change can improve the feature representation of the Capsule.

    However, you can replace
        b = K.batch_dot(outputs, hat_inputs, [2, 3])
    with
        b += K.batch_dot(outputs, hat_inputs, [2, 3])
    to realize standard routing.
    """
    if self.share_weights:
        hat_inputs = K.conv1d(inputs, self.kernel)
    else:
        hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

    batch_size = K.shape(inputs)[0]
    input_num_capsule = K.shape(inputs)[1]
    hat_inputs = K.reshape(hat_inputs,
                           (batch_size, input_num_capsule,
                            self.num_capsule, self.dim_capsule))
    hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

    b = K.zeros_like(hat_inputs[:, :, :, 0])
    for i in range(self.routings):
        c = softmax(b, 1)
        o = self.activation(K.batch_dot(c, hat_inputs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(o, hat_inputs, [2, 3])
            if K.backend() == 'theano':
                o = K.sum(o, axis=1)
    return o
Example 8: call

# Required import: from keras import backend as K
def call(self, inputs):
    channel_axis = 1 if self.data_format == 'channels_first' else -1
    # Use the static shape so the _keras_shape tuple below holds Python ints.
    input_dim = K.int_shape(inputs)[channel_axis] // 4
    index2 = self.filters * 2
    index3 = self.filters * 3

    # Split the kernel into its four quaternion components r, i, j, k.
    if self.rank == 1:
        f_r = self.kernel[:, :, :self.filters]
        f_i = self.kernel[:, :, self.filters:index2]
        f_j = self.kernel[:, :, index2:index3]
        f_k = self.kernel[:, :, index3:]
    elif self.rank == 2:
        f_r = self.kernel[:, :, :, :self.filters]
        f_i = self.kernel[:, :, :, self.filters:index2]
        f_j = self.kernel[:, :, :, index2:index3]
        f_k = self.kernel[:, :, :, index3:]
    elif self.rank == 3:
        f_r = self.kernel[:, :, :, :, :self.filters]
        f_i = self.kernel[:, :, :, :, self.filters:index2]
        f_j = self.kernel[:, :, :, :, index2:index3]
        f_k = self.kernel[:, :, :, :, index3:]

    convArgs = {"strides": self.strides[0] if self.rank == 1 else self.strides,
                "padding": self.padding,
                "data_format": self.data_format,
                "dilation_rate": self.dilation_rate[0] if self.rank == 1 else self.dilation_rate}
    convFunc = {1: K.conv1d,
                2: K.conv2d,
                3: K.conv3d}[self.rank]

    # Perform quaternion convolution: assemble the Hamilton-product weight matrix.
    f_r._keras_shape = self.kernel_shape
    f_i._keras_shape = self.kernel_shape
    f_j._keras_shape = self.kernel_shape
    f_k._keras_shape = self.kernel_shape

    cat_kernels_4_r = K.concatenate([f_r, -f_i, -f_j, -f_k], axis=-2)
    cat_kernels_4_i = K.concatenate([f_i, f_r, -f_k, f_j], axis=-2)
    cat_kernels_4_j = K.concatenate([f_j, f_k, f_r, -f_i], axis=-2)
    cat_kernels_4_k = K.concatenate([f_k, -f_j, f_i, f_r], axis=-2)
    cat_kernels_4_quaternion = K.concatenate(
        [cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], axis=-1)
    cat_kernels_4_quaternion._keras_shape = self.kernel_size + (4 * input_dim, 4 * self.filters)

    output = convFunc(inputs, cat_kernels_4_quaternion, **convArgs)

    if self.use_bias:
        output = K.bias_add(output, self.bias, data_format=self.data_format)
    if self.activation is not None:
        output = self.activation(output)
    return output

Author: Orkis-Research | Project: Quaternion-Convolutional-Neural-Networks-for-End-to-End-Automatic-Speech-Recognition | Lines: 60 | Source file: conv.py
Example 9: call

# Required import: from keras import backend as K
def call(self, inputs, mask=None):
    # Highway convolution: compute the candidate hidden state and the
    # transform gate, both via K.conv1d, then mix with the input.
    '''
    K.conv1d arguments, for reference:
        x: tensor or variable.
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
            Whether to use Theano or TensorFlow/CNTK data format
            for inputs/kernels/outputs.
        dilation_rate: integer dilation rate.
    '''
    transform = K.conv1d(
        inputs,
        self.kernel,
        strides=self.strides[0],
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate[0])
    if self.use_bias:
        transform = K.bias_add(
            transform,
            self.bias,
            data_format=self.data_format)
    if self.activation is not None:
        transform = self.activation(transform)

    transform_gate = K.conv1d(
        inputs,
        self.kernel_gate,
        strides=self.strides[0],
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate[0])
    if self.use_bias:
        # Add the gate's own bias before the sigmoid.
        transform_gate = K.bias_add(
            transform_gate,
            self.bias_gate,
            data_format=self.data_format)
    transform_gate = K.sigmoid(transform_gate)
    carry_gate = 1.0 - transform_gate

    return transform * transform_gate + inputs * carry_gate
Example 10: call

# Required imports: import tensorflow as tf
#                   from keras import backend as K
def call(self, inputs):
    # Convolve the input with one kernel per (filter, expert) pair.
    if self.rank == 1:
        expert_outputs = K.conv1d(
            inputs,
            self.expert_kernel,
            strides=self.strides[0],
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate[0])
    elif self.rank == 2:
        expert_outputs = K.conv2d(
            inputs,
            self.expert_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
    elif self.rank == 3:
        expert_outputs = K.conv3d(
            inputs,
            self.expert_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

    expert_outputs = K.reshape(expert_outputs,
                               (-1,) + self.o_shape[1:-1] + (self.n_filters, self.n_experts_per_filter))
    if self.use_expert_bias:
        expert_outputs = K.bias_add(
            expert_outputs,
            self.expert_bias,
            data_format=self.data_format)
    if self.expert_activation is not None:
        expert_outputs = self.expert_activation(expert_outputs)

    # Gating network: one weight per (filter, expert) pair.
    gating_outputs = tf.tensordot(inputs, self.gating_kernel, axes=self.rank + 1)  # samples x n_filters x n_experts_per_filter
    if self.use_gating_bias:
        gating_outputs = K.bias_add(
            gating_outputs,
            self.gating_bias,
            data_format=self.data_format)
    if self.gating_activation is not None:
        gating_outputs = self.gating_activation(gating_outputs)
    gating_outputs = K.reshape(gating_outputs, self.new_gating_outputs_shape)

    # Weighted sum over the expert dimension.
    outputs = K.sum(expert_outputs * gating_outputs, axis=-1, keepdims=False)
    return outputs