This page collects typical usage examples of the Python method keras.backend.conv2d. If you are unsure what backend.conv2d does, how to call it, or where it is used in practice, the hand-picked code samples below should help. You can also browse the other methods of its parent module, keras.backend.
The sections below show 14 code examples of backend.conv2d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
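Before diving into the examples, here is a minimal, self-contained sketch of a direct keras.backend.conv2d call; the shapes are illustrative assumptions only. The input is channels_last, i.e. (batch, rows, cols, channels), and the kernel is (kernel_h, kernel_w, in_channels, out_channels):

import numpy as np
from keras import backend as K

x = K.constant(np.random.rand(2, 32, 32, 3))   # (batch, rows, cols, channels)
w = K.constant(np.random.rand(3, 3, 3, 16))    # (kh, kw, in_channels, out_channels)
y = K.conv2d(x, w, strides=(1, 1), padding='same',
             data_format='channels_last')      # result shape: (2, 32, 32, 16)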
Example 1: preprocess_input
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def preprocess_input(self, inputs, training=None):
if self.window_size > 1:
inputs = K.temporal_padding(inputs, (self.window_size - 1, 0))
inputs = K.expand_dims(inputs, 2) # add a dummy dimension
output = K.conv2d(inputs, self.kernel, strides=self.strides,
padding='valid',
data_format='channels_last')
output = K.squeeze(output, 2) # remove the dummy dimension
if self.use_bias:
output = K.bias_add(output, self.bias, data_format='channels_last')
    if self.dropout is not None and 0. < self.dropout < 1.:
        z = output[:, :, :self.units]                # update gate
        f = output[:, :, self.units:2 * self.units]  # forget gate
        o = output[:, :, 2 * self.units:]            # output gate
        # zoneout on the forget gate (_dropout is a helper defined elsewhere in the project)
        f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f, training=training)
return K.concatenate([z, f, o], -1)
else:
return output
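The padding/expand/squeeze sequence above is a common trick for implementing a causal 1D convolution on top of conv2d: pad only the left of the time axis, add a dummy spatial axis, convolve, then squeeze the dummy axis back out. A standalone sketch of the same idea, with illustrative shapes that are assumptions rather than taken from the original layer:

import numpy as np
from keras import backend as K

window_size, in_dim, out_dim = 3, 8, 16
x = K.constant(np.random.rand(2, 10, in_dim))              # (batch, time, features)
kernel = K.constant(np.random.rand(window_size, 1, in_dim, out_dim))

x = K.temporal_padding(x, (window_size - 1, 0))            # pad the left only (causal)
x = K.expand_dims(x, 2)                                    # (batch, time, 1, features)
y = K.conv2d(x, kernel, strides=(1, 1), padding='valid',
             data_format='channels_last')
y = K.squeeze(y, 2)                                        # back to (batch, time, out_dim)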
Example 2: _preprocess_conv2d_input
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def _preprocess_conv2d_input(x, data_format):
"""Transpose and cast the input before the conv2d.
# Arguments
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A tensor.
"""
if K.dtype(x) == 'float64':
x = tf.cast(x, 'float32')
if data_format == 'channels_first':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = tf.transpose(x, (0, 2, 3, 1))
return x
Example 3: _postprocess_conv2d_output
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def _postprocess_conv2d_output(x, data_format):
"""Transpose and cast the output from conv2d if needed.
# Arguments
x: A tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A tensor.
"""
if data_format == 'channels_first':
x = tf.transpose(x, (0, 3, 1, 2))
if K.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
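A hypothetical usage of the two helpers from Examples 2 and 3 together, wrapping a TensorFlow convolution so that channels_first input is transposed to NHWC for TF and back afterwards (placeholder shapes are assumptions):

import tensorflow as tf
from keras import backend as K

x = K.placeholder((None, 3, 32, 32))                      # channels_first input
kernel = K.random_uniform_variable((3, 3, 3, 16), 0, 1)   # (kh, kw, in, out)

x_nhwc = _preprocess_conv2d_input(x, 'channels_first')    # NCHW -> NHWC
y_nhwc = tf.nn.conv2d(x_nhwc, kernel, strides=[1, 1, 1, 1], padding='SAME')
y = _postprocess_conv2d_output(y_nhwc, 'channels_first')  # NHWC -> NCHW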
Example 4: conv2d
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format='channels_first',
image_shape=None, filter_shape=None):
"""2D convolution.
# Arguments
x: Input tensor
kernel: kernel tensor.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: 'channels_first' or 'channels_last'.
Whether to use Theano or TensorFlow dimension
ordering in inputs/kernels/outputs.
image_shape: optional, the input tensor shape (unused here).
filter_shape: optional, the kernel shape (unused here).
# Returns
x convolved with the kernel.
# Raises
Exception: In case of invalid border mode or data format.
"""
return K.conv2d(x, kernel, strides, padding, data_format)
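Since this wrapper simply forwards to K.conv2d, usage mirrors the backend call; a short sketch with assumed shapes:

import numpy as np
from keras import backend as K

x = K.constant(np.random.rand(2, 32, 32, 3))
kernel = K.constant(np.random.rand(3, 3, 3, 16))
y = conv2d(x, kernel, strides=(1, 1), padding='same', data_format='channels_last')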
Example 5: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def call(self, inputs):
binary_kernel = binarize(self.kernel, H=self.H)
outputs = K.conv2d(
inputs,
binary_kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
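The binarize function is defined elsewhere in the source project (BinaryNet-style). A common implementation, given here as a sketch of the usual technique rather than the project's exact code, binarizes with the sign function and a straight-through estimator so gradients still reach the real-valued kernel:

from keras import backend as K

def binarize(W, H=1.):
    # forward pass: H * sign(W); backward pass: identity (straight-through estimator)
    Wb = H * K.sign(W)
    return W + K.stop_gradient(Wb - W)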
Example 6: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def call(self, inputs):
ternary_kernel = ternarize(self.kernel, H=self.H)
outputs = K.conv2d(
inputs,
ternary_kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
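Likewise, ternarize comes from the surrounding project. A common threshold-based variant, sketched under the assumption of a fixed threshold (the source project may derive it differently), maps small weights to 0 and the rest to +/-H, again with a straight-through estimator:

from keras import backend as K

def ternarize(W, H=1.):
    threshold = 0.5 * H  # assumed fixed threshold
    Wt = H * K.cast(W > threshold, K.floatx()) - H * K.cast(W < -threshold, K.floatx())
    return W + K.stop_gradient(Wt - W)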
Example 7: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def call(self, inputs):
if self.r_num == 1:
        # with r_num == 1 (all coupling coefficients equal) there is no routing,
        # so this reduces to an ordinary convolution
outputs = K.conv2d(K.reshape(inputs, (-1, self.h_i, self.w_i,
self.ch_i * self.n_i)),
K.reshape(self.w, self.kernel_size +
(self.ch_i * self.n_i, self.ch_j * self.n_j)),
data_format='channels_last',
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate)
        outputs = squeeze(K.reshape(outputs, (-1, self.h_j, self.w_j,
                                              self.ch_j, self.n_j)))
return outputs
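The squeeze applied to the reshaped output is the capsule squashing nonlinearity from the same project. A typical formulation, following Sabour et al. and given here as an assumption about the project's version:

from keras import backend as K

def squeeze(s):
    # squash: rescale each capsule vector to length in [0, 1) while keeping its direction
    sq = K.sum(K.square(s), axis=-1, keepdims=True)
    return (sq / (1. + sq)) * s / K.sqrt(sq + K.epsilon())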
Example 8: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def call(self, inputs):
if self.rank == 2:
outputs = K.conv2d(
inputs,
            self.kernel * self.mask,  # multiply the kernel by the fixed mask
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
Example 9: inst_weight
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def inst_weight(output_y, output_x, output_dr, output_dl, config=None):
    dy = output_y[:, 2:, 2:] - output_y[:, :-2, 2:] + \
         2 * (output_y[:, 2:, 1:-1] - output_y[:, :-2, 1:-1]) + \
         output_y[:, 2:, :-2] - output_y[:, :-2, :-2]
    dx = output_x[:, 2:, 2:] - output_x[:, 2:, :-2] + \
         2 * (output_x[:, 1:-1, 2:] - output_x[:, 1:-1, :-2]) + \
         output_x[:, :-2, 2:] - output_x[:, :-2, :-2]
    ddr = (output_dr[:, 2:, 2:] - output_dr[:, :-2, :-2] +
           output_dr[:, 1:-1, 2:] - output_dr[:, :-2, 1:-1] +
           output_dr[:, 2:, 1:-1] - output_dr[:, 1:-1, :-2]) * K.constant(2)
    ddl = (output_dl[:, 2:, :-2] - output_dl[:, :-2, 2:] +
           output_dl[:, 2:, 1:-1] - output_dl[:, 1:-1, 2:] +
           output_dl[:, 1:-1, :-2] - output_dl[:, :-2, 1:-1]) * K.constant(2)
    dpred = K.concatenate([dy, dx, ddr, ddl], axis=-1)
    dpred = K.spatial_2d_padding(dpred)
    weight_fg = K.cast(K.all(dpred > K.constant(config.GRADIENT_THRES), axis=3,
                             keepdims=True), K.floatx())
    weight = K.clip(K.sqrt(weight_fg * K.prod(dpred, axis=3, keepdims=True)),
                    config.WEIGHT_AREA / config.CLIP_AREA_HIGH,
                    config.WEIGHT_AREA / config.CLIP_AREA_LOW)
    weight += (1 - weight_fg) * config.WEIGHT_AREA / config.BG_AREA
    weight = K.conv2d(weight, K.constant(config.GAUSSIAN_KERNEL),
                      padding='same')
return K.stop_gradient(weight)
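The dy and dx terms are Sobel-style gradients computed by slicing. For intuition, dy is equivalent to a valid conv2d with an explicit Sobel kernel; the sketch below assumes a hypothetical single-channel map y_map of shape (batch, h, w, 1):

import numpy as np
from keras import backend as K

sobel_y = np.array([[-1., -2., -1.],
                    [ 0.,  0.,  0.],
                    [ 1.,  2.,  1.]], dtype='float32').reshape(3, 3, 1, 1)
dy = K.conv2d(y_map, K.constant(sobel_y), padding='valid')  # matches the sliced dy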
Example 10: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def call(self, inputs, **kwargs):
outputs = K.conv2d(
inputs,
self.kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
    # note: this instantiates a new BatchNormalization layer on every call
    outputs = BatchNormalization(momentum=self.momentum)(outputs)
if self.activation is not None:
return self.activation(outputs)
return outputs
Example 11: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def call(self, inputs):
    input_shape = K.int_shape(inputs)  # static shape, so the None check below is meaningful
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
ker_shape = self.kernel_size + (input_dim, self.filters)
nb_kernels = ker_shape[-2] * ker_shape[-1]
kernel_shape_4_norm = (np.prod(self.kernel_size), nb_kernels)
reshaped_kernel = K.reshape(self.kernel, kernel_shape_4_norm)
normalized_weight = K.l2_normalize(reshaped_kernel, axis=0, epsilon=self.epsilon)
normalized_weight = K.reshape(self.gamma, (1, ker_shape[-2] * ker_shape[-1])) * normalized_weight
shaped_kernel = K.reshape(normalized_weight, ker_shape)
shaped_kernel._keras_shape = ker_shape
convArgs = {"strides": self.strides[0] if self.rank == 1 else self.strides,
"padding": self.padding,
"data_format": self.data_format,
"dilation_rate": self.dilation_rate[0] if self.rank == 1 else self.dilation_rate}
convFunc = {1: K.conv1d,
2: K.conv2d,
3: K.conv3d}[self.rank]
output = convFunc(inputs, shaped_kernel, **convArgs)
if self.use_bias:
output = K.bias_add(
output,
self.bias,
data_format=self.data_format
)
if self.activation is not None:
output = self.activation(output)
return output
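The reshaping above implements weight normalization over the spatial extent of each (input, output) filter pair: after scaling by gamma, every flattened kernel column has L2 norm |gamma|. A quick numpy check of that property, with assumed shapes:

import numpy as np

kh, kw, cin, cout = 3, 3, 8, 16
kernel = np.random.randn(kh * kw, cin * cout)
gamma = np.random.randn(1, cin * cout)
w = gamma * (kernel / np.linalg.norm(kernel, axis=0, keepdims=True))
assert np.allclose(np.linalg.norm(w, axis=0), np.abs(gamma).ravel())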
Example 12: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def call(self, x):
def hw_flatten(x):
return K.reshape(x, shape=[K.shape(x)[0], K.shape(x)[1]*K.shape(x)[2], K.shape(x)[-1]])
f = K.conv2d(x,
kernel=self.kernel_f,
strides=(1, 1), padding='same') # [bs, h, w, c']
f = K.bias_add(f, self.bias_f)
g = K.conv2d(x,
kernel=self.kernel_g,
strides=(1, 1), padding='same') # [bs, h, w, c']
g = K.bias_add(g, self.bias_g)
h = K.conv2d(x,
kernel=self.kernel_h,
strides=(1, 1), padding='same') # [bs, h, w, c]
h = K.bias_add(h, self.bias_h)
    s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True)  # [bs, N, N]
beta = K.softmax(s, axis=-1) # attention map
o = K.batch_dot(beta, hw_flatten(h)) # [bs, N, C]
o = K.reshape(o, shape=K.shape(x)) # [bs, h, w, C]
x = self.gamma * o + x
return x
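The f, g and h projections are 1x1 convolutions, i.e. per-pixel linear maps whose kernels (self.kernel_f and friends) would be created in build(). A standalone illustration of a 1x1 conv2d with hypothetical shapes:

import numpy as np
from keras import backend as K

x = K.constant(np.random.rand(1, 4, 4, 8))
w = K.constant(np.random.rand(1, 1, 8, 2))          # 1x1 kernel: 8 -> 2 channels
f = K.conv2d(x, w, strides=(1, 1), padding='same')  # shape (1, 4, 4, 2)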
Example 13: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def call(self, inputs, training=None):
def _l2normalize(v, eps=1e-12):
return v / (K.sum(v ** 2) ** 0.5 + eps)
    def power_iteration(W, u):
        # According to the paper, a single power iteration is sufficient.
_u = u
_v = _l2normalize(K.dot(_u, K.transpose(W)))
_u = _l2normalize(K.dot(_v, W))
return _u, _v
    # Spectral normalization
    W_shape = self.kernel.shape.as_list()
    # Flatten the kernel tensor
    W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
    _u, _v = power_iteration(W_reshaped, self.u)
    # Calculate sigma (the approximate largest singular value)
    sigma = K.dot(_v, W_reshaped)
    sigma = K.dot(sigma, K.transpose(_u))
    # Normalize the kernel by sigma
    W_bar = W_reshaped / sigma
    # Reshape the weight tensor back
if training in {0, False}:
W_bar = K.reshape(W_bar, W_shape)
else:
with tf.control_dependencies([self.u.assign(_u)]):
W_bar = K.reshape(W_bar, W_shape)
outputs = K.conv2d(
inputs,
W_bar,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
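As a sanity check on the idea: the sigma from power iteration approaches the largest singular value of the reshaped kernel. A plain numpy sketch, running more iterations than the single step used above:

import numpy as np

W = np.random.rand(64, 16).astype('float32')   # like W_reshaped: (flattened_in, out)
u = np.random.rand(1, 16).astype('float32')
for _ in range(50):
    v = u @ W.T
    v /= np.linalg.norm(v)
    u = v @ W
    u /= np.linalg.norm(u)
sigma = (v @ W @ u.T).item()
assert np.isclose(sigma, np.linalg.svd(W, compute_uv=False)[0], rtol=1e-3)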
Example 14: gconv2d
# Required import: from keras import backend [as alias]
# Or: from keras.backend import conv2d [as alias]
def gconv2d(x, kernel, gconv_indices, gconv_shape_info, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1), transpose=False, output_shape=None):
"""2D group equivariant convolution.
# Arguments
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow data format
for inputs/kernels/outputs.
dilation_rate: tuple of 2 integers.
transpose: if True, perform a transposed (deconvolution) group convolution;
`output_shape` must then be given.
# Returns
A tensor, result of 2D convolution.
# Raises
ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
"""
    # Transform the filters
    transformed_filter = transform_filter_2d_nhwc(w=kernel, flat_indices=gconv_indices, shape_info=gconv_shape_info)
    if transpose:
        output_shape = (K.shape(x)[0], output_shape[1], output_shape[2], output_shape[3])
        # swap the in/out channel axes of the transformed filter for conv2d_transpose
        transformed_filter = K.permute_dimensions(transformed_filter, [0, 1, 3, 2])
return K.conv2d_transpose(x=x, kernel=transformed_filter, output_shape=output_shape, strides=strides,
padding=padding, data_format=data_format)
return K.conv2d(x=x, kernel=transformed_filter, strides=strides, padding=padding, data_format=data_format,
dilation_rate=dilation_rate)
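A hypothetical usage sketch, assuming the companion helper gconv2d_util from the GrouPy project that this function builds on (the import path and signature are assumptions; check your installed version):

import numpy as np
from keras import backend as K
from groupy.gconv.tensorflow_gconv.splitgconv2d import gconv2d_util

x = K.placeholder((None, 32, 32, 3))  # NHWC input
gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
    h_input='Z2', h_output='C4', in_channels=3, out_channels=16, ksize=3)
w = K.variable(np.random.randn(*w_shape))
y = gconv2d(x, w, gconv_indices, gconv_shape_info,
            strides=(1, 1), padding='same')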