This article collects typical usage examples of the Python method keras.backend.conv2d_transpose. If you have been wondering what backend.conv2d_transpose does and how to use it, the curated code examples below should help. You can also explore further usage examples from the keras.backend module, where this method lives.
Five code examples of backend.conv2d_transpose are shown below, sorted by popularity by default.
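Before the examples, here is a minimal self-contained sketch of the method's basic signature (not taken from the examples below; shapes and values are purely illustrative). With the TensorFlow backend the kernel is laid out as (kernel_h, kernel_w, out_channels, in_channels), and output_shape must be supplied explicitly:

import numpy as np
from keras import backend as K

# Upsample a 4x4 single-channel map to 8x8 with a 3x3 kernel and stride 2.
x = K.variable(np.random.rand(1, 4, 4, 1).astype('float32'))       # NHWC input
kernel = K.variable(np.random.rand(3, 3, 1, 1).astype('float32'))  # (kh, kw, out, in)
y = K.conv2d_transpose(x, kernel, output_shape=(1, 8, 8, 1),
                       strides=(2, 2), padding='same')
print(K.eval(y).shape)  # (1, 8, 8, 1)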
Example 1: call
# Required import: from keras import backend [aliased as K in the code below]
# Or: from keras.backend import conv2d_transpose [as alias]
def call(self, x):
    shape = self.compute_output_shape(x.shape.as_list())
    batch_size = K.shape(x)[0]
    output_shape = (batch_size, *shape)
    return K.conv2d_transpose(x, self._W, output_shape=output_shape,
                              strides=tuple(self._upscaling_factors), padding="same")
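One detail worth noting in example 1: K.shape(x)[0] yields the batch size as a tensor at run time, whereas the static shape x.shape[0] is typically None inside a Keras layer. Building output_shape from the dynamic value is what lets the layer work with any batch size.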
Example 2: gconv2d
# Required import: from keras import backend [aliased as K in the code below]
# Or: from keras.backend import conv2d_transpose [as alias]
def gconv2d(x, kernel, gconv_indices, gconv_shape_info, strides=(1, 1), padding='valid',
            data_format=None, dilation_rate=(1, 1), transpose=False, output_shape=None):
    """2D group equivariant convolution.

    # Arguments
        x: Tensor or variable.
        kernel: kernel tensor.
        gconv_indices: flat indices describing the filter transformation
            for the group convolution.
        gconv_shape_info: shape information for the group convolution.
        strides: strides tuple.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
            Whether to use Theano or TensorFlow data format
            for inputs/kernels/outputs.
        dilation_rate: tuple of 2 integers.
        transpose: if True, perform a transposed convolution instead.
        output_shape: output shape tuple, required when `transpose=True`.

    # Returns
        A tensor, result of 2D convolution.

    # Raises
        ValueError: if `data_format` is neither `"channels_last"` nor `"channels_first"`.
    """
    # Transform the filters into the group-equivariant filter bank.
    transformed_filter = transform_filter_2d_nhwc(w=kernel, flat_indices=gconv_indices,
                                                  shape_info=gconv_shape_info)
    if transpose:
        output_shape = (K.shape(x)[0], output_shape[1], output_shape[2], output_shape[3])
        # Swap the kernel's channel axes: conv2d_transpose expects
        # (kh, kw, out_channels, in_channels).
        transformed_filter = K.permute_dimensions(transformed_filter, [0, 1, 3, 2])
        return K.conv2d_transpose(x=x, kernel=transformed_filter, output_shape=output_shape,
                                  strides=strides, padding=padding, data_format=data_format)
    return K.conv2d(x=x, kernel=transformed_filter, strides=strides, padding=padding,
                    data_format=data_format, dilation_rate=dilation_rate)
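The K.permute_dimensions(..., [0, 1, 3, 2]) step in example 2 swaps the kernel's channel axes, because K.conv2d expects kernels shaped (kh, kw, in_channels, out_channels) while K.conv2d_transpose expects (kh, kw, out_channels, in_channels). The same step in isolation (hypothetical shapes, not from the example above):

import numpy as np
from keras import backend as K

w = K.variable(np.zeros((3, 3, 8, 16), dtype='float32'))  # forward layout: in=8, out=16
w_t = K.permute_dimensions(w, [0, 1, 3, 2])               # transposed-conv layout
print(K.int_shape(w_t))  # (3, 3, 16, 8)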
Example 3: call
# Required import: from keras import backend [aliased as K in the code below]
# Or: from keras.backend import conv2d_transpose [as alias]
# (this example also uses deconv_length, e.g. from keras.utils.conv_utils)
def call(self, inputs):
    input_shape = K.shape(inputs)
    batch_size = input_shape[0]
    if self.data_format == 'channels_first':
        h_axis, w_axis = 2, 3
    else:
        h_axis, w_axis = 1, 2
    height, width = input_shape[h_axis], input_shape[w_axis]
    kernel_h, kernel_w = self.kernel_size
    stride_h, stride_w = self.strides
    # Infer the dynamic output shape:
    if self._output_shape is None:
        out_height = deconv_length(height, stride_h, kernel_h, self.padding)
        out_width = deconv_length(width, stride_w, kernel_w, self.padding)
        if self.data_format == 'channels_first':
            output_shape = (batch_size, self.filters, out_height, out_width)
        else:
            output_shape = (batch_size, out_height, out_width, self.filters)
    else:
        output_shape = (batch_size,) + self._output_shape
    outputs = K.conv2d_transpose(
        inputs,
        self.kernel,
        output_shape,
        self.strides,
        padding=self.padding,
        data_format=self.data_format)
    if self.use_bias:
        outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
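The shape arithmetic behind deconv_length is worth spelling out: with 'same' padding the output length is simply input * stride, while 'valid' adds max(kernel - stride, 0). A quick check, assuming the import path used by Keras 2.1/2.2 (values are illustrative):

from keras.utils.conv_utils import deconv_length

print(deconv_length(7, 2, 3, 'same'))   # 14 = 7 * 2
print(deconv_length(7, 2, 3, 'valid'))  # 15 = 7 * 2 + max(3 - 2, 0)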
Example 4: expand_layer
# Required import: from keras import backend [aliased as K in the code below]
# Or: from keras.backend import conv2d_transpose [as alias]
# (this example also uses numpy as np, keras.utils.conv_utils, and keras.layers.Lambda)
def expand_layer(a=0.4, padding_mode='same'):
    # Burt-Adelson 5-tap generating kernel; a=0.4 approximates a Gaussian.
    kernel_1d = [0.25 - a/2, 0.25, a, 0.25, 0.25 - a/2]
    kernel_3d = np.zeros((5, 1, 3, 3), 'float32')
    kernel_3d[:, 0, 0, 0] = kernel_1d
    kernel_3d[:, 0, 1, 1] = kernel_1d
    kernel_3d[:, 0, 2, 2] = kernel_1d

    def fn(x):
        input_shape = K.shape(x)
        # conv_input_length inverts the stride-2 conv arithmetic to get the
        # upsampled spatial dimensions.
        dim1 = conv_utils.conv_input_length(input_shape[1], 5,
                                            padding=padding_mode, stride=2)
        dim2 = conv_utils.conv_input_length(input_shape[2], 5,
                                            padding=padding_mode, stride=2)
        output_shape_a = (input_shape[0], dim1, input_shape[2], input_shape[3])
        output_shape_b = (input_shape[0], dim1, dim2, input_shape[3])
        # Separable upsampling: transpose-convolve along height, then width.
        upconvolved = K.conv2d_transpose(x, kernel_3d, output_shape_a,
                                         strides=(2, 1), padding=padding_mode)
        upconvolved = K.conv2d_transpose(upconvolved,
                                         K.permute_dimensions(kernel_3d, (1, 0, 2, 3)),
                                         output_shape_b,
                                         strides=(1, 2), padding=padding_mode)
        return 4 * upconvolved
    return Lambda(fn)
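Example 4 implements the "expand" step of a Burt-Adelson Gaussian pyramid as two separable transposed convolutions. The 1-D generating kernel sums to 1, and stride-2 zero insertion means each pass delivers only about half of that mass per output pixel, a quarter across both passes, which is presumably what the final 4 * upconvolved compensates for:

a = 0.4
kernel_1d = [0.25 - a/2, 0.25, a, 0.25, 0.25 - a/2]
print(sum(kernel_1d))  # 1.0 -- so the 4x factor restores brightness after upsampling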
Example 5: call
# Required import: from keras import backend [aliased as K in the code below]
# Or: from keras.backend import conv2d_transpose [as alias]
# (this example also uses tensorflow as tf, deconv_length, and a
# project-specific update_routing helper)
def call(self, input_tensor, training=None):
    # Fold the input-capsule axis into the batch axis so a single shared
    # convolution is applied across all input capsules.
    input_transposed = tf.transpose(input_tensor, [3, 0, 1, 2, 4])
    input_shape = K.shape(input_transposed)
    input_tensor_reshaped = K.reshape(input_transposed, [
        input_shape[1] * input_shape[0], self.input_height, self.input_width,
        self.input_num_atoms])
    input_tensor_reshaped.set_shape((None, self.input_height, self.input_width,
                                     self.input_num_atoms))

    if self.upsamp_type == 'resize':
        # Nearest/bilinear resize followed by a regular convolution.
        upsamp = K.resize_images(input_tensor_reshaped, self.scaling, self.scaling,
                                 'channels_last')
        outputs = K.conv2d(upsamp, kernel=self.W, strides=(1, 1),
                           padding=self.padding, data_format='channels_last')
    elif self.upsamp_type == 'subpix':
        # Sub-pixel (pixel-shuffle) upsampling.
        conv = K.conv2d(input_tensor_reshaped, kernel=self.W, strides=(1, 1),
                        padding='same', data_format='channels_last')
        outputs = tf.depth_to_space(conv, self.scaling)
    else:
        # Transposed-convolution upsampling.
        batch_size = input_shape[1] * input_shape[0]
        # Infer the dynamic output shape:
        out_height = deconv_length(self.input_height, self.scaling,
                                   self.kernel_size, self.padding)
        out_width = deconv_length(self.input_width, self.scaling,
                                  self.kernel_size, self.padding)
        output_shape = (batch_size, out_height, out_width,
                        self.num_capsule * self.num_atoms)
        outputs = K.conv2d_transpose(input_tensor_reshaped, self.W, output_shape,
                                     (self.scaling, self.scaling),
                                     padding=self.padding, data_format='channels_last')

    # Unfold the capsule axis and run dynamic routing over the votes.
    votes_shape = K.shape(outputs)
    _, conv_height, conv_width, _ = outputs.get_shape()
    votes = K.reshape(outputs, [input_shape[1], input_shape[0], votes_shape[1],
                                votes_shape[2], self.num_capsule, self.num_atoms])
    votes.set_shape((None, self.input_num_capsule, conv_height.value, conv_width.value,
                     self.num_capsule, self.num_atoms))
    logit_shape = K.stack([input_shape[1], input_shape[0], votes_shape[1],
                           votes_shape[2], self.num_capsule])
    biases_replicated = K.tile(self.b, [votes_shape[1], votes_shape[2], 1, 1])
    activations = update_routing(
        votes=votes,
        biases=biases_replicated,
        logit_shape=logit_shape,
        num_dims=6,
        input_dim=self.input_num_capsule,
        output_dim=self.num_capsule,
        num_routing=self.routings)
    return activations
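Example 5 appears to come from a capsule-network deconvolution layer: it shares one transposed convolution across all input capsules by folding the capsule axis into the batch axis and unfolding it afterwards. The reshape pattern in isolation (hypothetical sizes, plain numpy):

import numpy as np

caps, batch, h, w, atoms = 4, 2, 8, 8, 16
x = np.zeros((caps, batch, h, w, atoms), dtype='float32')
folded = x.reshape(caps * batch, h, w, atoms)    # one shared 2-D conv sees every capsule
unfolded = folded.reshape(caps, batch, h, w, atoms)
print(folded.shape, unfolded.shape)  # (8, 8, 8, 16) (4, 2, 8, 8, 16)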