This article collects typical usage examples of the Python method keras.backend.spatial_2d_padding. If you are unsure what backend.spatial_2d_padding does or how to use it, the curated code examples below should help; you can also explore the containing module, keras.backend, further.
Four code examples of backend.spatial_2d_padding are shown below, drawn from open-source projects.
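As background for the examples, spatial_2d_padding zero-pads the two spatial dimensions of a 4D tensor. A minimal sketch (not taken from any of the projects below), assuming Keras 2:

import numpy as np
from keras import backend as K

# 4D input: (batch, rows, cols, channels) under channels_last
x = K.constant(np.ones((1, 4, 4, 3)))
# Pad 1 zero row on top/bottom and 2 zero columns on left/right.
# Keras 2 expects padding=((top, bottom), (left, right));
# Keras 1 used a flat (rows, cols) tuple instead.
y = K.spatial_2d_padding(x, padding=((1, 1), (2, 2)),
                         data_format='channels_last')
print(K.int_shape(y))  # (1, 6, 8, 3)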
Example 1: crosschannelnormalization
# Required module: from keras import backend [as alias]
# Or: from keras.backend import spatial_2d_padding [as alias]
from keras import backend as K
from keras.layers import Lambda

def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    AlexNet
    """
    def f(X):
        # X is expected in channels_first order: (batch, channels, rows, cols)
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        # Move channels to the last axis, then zero-pad that axis by `half`
        # on each side (Keras 1 API: a flat (rows, cols) padding tuple,
        # assuming Theano dim ordering so the last two axes are padded).
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        # Sum the squared activations over a window of n neighboring channels.
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale
    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
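A hedged usage sketch (the surrounding layers and names are illustrative, not from the source project; layer signatures shown are Keras 2): the returned Lambda layer drops into a channels_first model like any other layer.

from keras import backend as K
from keras.layers import Input, Conv2D
from keras.models import Model

K.set_image_data_format('channels_first')
inp = Input(shape=(3, 227, 227))                 # AlexNet-sized RGB input
x = Conv2D(96, (11, 11), strides=4, activation='relu')(inp)
x = crosschannelnormalization(name='lrn_1')(x)   # local response normalization
model = Model(inp, x)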
Example 2: crosschannelnormalization
# Required module: from keras import backend [as alias]
# Or: from keras.backend import spatial_2d_padding [as alias]
from keras import backend as K
from keras.layers import Lambda

def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    AlexNet
    """
    def f(X):
        # X is expected in channels_first order: (batch, channels, rows, cols)
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        # Keras 2 API: pad `half` zeros on each side of the (permuted)
        # channel axis only. data_format='channels_first' makes the padding
        # apply to the last two axes of the permuted tensor (cols, channels),
        # so just the channel axis grows.
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)),
            padding=((0, 0), (half, half)), data_format='channels_first')
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale
    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
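To sanity-check the channel-padding trick above, a minimal sketch (shapes are illustrative, not from the source project):

import numpy as np
from keras import backend as K

x = K.constant(np.ones((1, 8, 7, 7)))   # (batch, channels, rows, cols)
sq = K.permute_dimensions(K.square(x), (0, 2, 3, 1))   # -> (1, 7, 7, 8)
# Grow only the trailing (channel) axis by 2 on each side
padded = K.spatial_2d_padding(sq, padding=((0, 0), (2, 2)),
                              data_format='channels_first')
print(K.int_shape(padded))  # (1, 7, 7, 12)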
Example 3: inst_weight
# Required module: from keras import backend [as alias]
# Or: from keras.backend import spatial_2d_padding [as alias]
from keras import backend as K

def inst_weight(output_y, output_x, output_dr, output_dl, config=None):
    # Sobel-like finite differences of the predicted coordinate maps
    # along the y, x, and two diagonal directions.
    dy = output_y[:, 2:, 2:] - output_y[:, :-2, 2:] + \
         2 * (output_y[:, 2:, 1:-1] - output_y[:, :-2, 1:-1]) + \
         output_y[:, 2:, :-2] - output_y[:, :-2, :-2]
    dx = output_x[:, 2:, 2:] - output_x[:, 2:, :-2] + \
         2 * (output_x[:, 1:-1, 2:] - output_x[:, 1:-1, :-2]) + \
         output_x[:, :-2, 2:] - output_x[:, :-2, :-2]
    ddr = (output_dr[:, 2:, 2:] - output_dr[:, :-2, :-2] +
           output_dr[:, 1:-1, 2:] - output_dr[:, :-2, 1:-1] +
           output_dr[:, 2:, 1:-1] - output_dr[:, 1:-1, :-2]) * K.constant(2)
    ddl = (output_dl[:, 2:, :-2] - output_dl[:, :-2, 2:] +
           output_dl[:, 2:, 1:-1] - output_dl[:, 1:-1, 2:] +
           output_dl[:, 1:-1, :-2] - output_dl[:, :-2, 1:-1]) * K.constant(2)
    dpred = K.concatenate([dy, dx, ddr, ddl], axis=-1)
    # The differences shrink both spatial dims by 2; the default padding
    # of ((1, 1), (1, 1)) restores the original spatial size.
    dpred = K.spatial_2d_padding(dpred)
    # Foreground mask: pixels where every directional gradient clears the
    # threshold; weight foreground by the clipped gradient product.
    weight_fg = K.cast(K.all(dpred > K.constant(config.GRADIENT_THRES), axis=3,
                             keepdims=True), K.floatx())
    weight = K.clip(K.sqrt(weight_fg * K.prod(dpred, axis=3, keepdims=True)),
                    config.WEIGHT_AREA / config.CLIP_AREA_HIGH,
                    config.WEIGHT_AREA / config.CLIP_AREA_LOW)
    weight += (1 - weight_fg) * config.WEIGHT_AREA / config.BG_AREA
    # Smooth the weight map with a Gaussian kernel
    weight = K.conv2d(weight, K.constant(config.GAUSSIAN_KERNEL),
                      padding='same')
    return K.stop_gradient(weight)
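The function expects a config object carrying the constants referenced above. A hypothetical stand-in for experimentation (all values are placeholders, not those of the source project):

import numpy as np

class Config:
    GRADIENT_THRES = 0.1       # placeholder threshold
    WEIGHT_AREA = 100.0        # placeholder area constants
    CLIP_AREA_HIGH = 400.0
    CLIP_AREA_LOW = 25.0
    BG_AREA = 1000.0
    # Smoothing kernel in K.conv2d layout (kh, kw, in_channels, out_channels);
    # a uniform box filter stands in for a real Gaussian here.
    GAUSSIAN_KERNEL = np.ones((5, 5, 1, 1), dtype='float32') / 25.0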
Example 4: _deconv
# Required module: from keras import backend [as alias]
# Or: from keras.backend import spatial_2d_padding [as alias]
import numpy as np
from keras import backend as K

def _deconv(self, X, lname, d_switch, feat_map=None):
    o_width, o_height = self[lname].output_shape[-2:]
    # Get filter size
    f_width = self[lname].W_shape[2]
    f_height = self[lname].W_shape[3]
    # Compute padding needed; it must divide evenly, otherwise the valid
    # convolution below cannot produce the expected output size
    i_width, i_height = X.shape[-2:]
    pad_width, rem_w = divmod(o_width - i_width + f_width - 1, 2)
    pad_height, rem_h = divmod(o_height - i_height + f_height - 1, 2)
    assert rem_w == 0, "Pad width size issue at layer %s" % lname
    assert rem_h == 0, "Pad height size issue at layer %s" % lname
    # Set to zero based on switch values
    X[d_switch[lname]] = 0
    # Get activation function
    activation = self[lname].activation
    X = activation(X)
    if feat_map is not None:
        print("Setting other feat map to zero")
        for i in range(X.shape[1]):
            if i != feat_map:
                X[:, i, :, :] = 0
        print("Setting non max activations to zero")
        for i in range(X.shape[0]):
            iw, ih = np.unravel_index(
                X[i, feat_map, :, :].argmax(), X[i, feat_map, :, :].shape)
            m = np.max(X[i, feat_map, :, :])
            X[i, feat_map, :, :] = 0
            X[i, feat_map, iw, ih] = m
    # Get filters. No bias for now
    W = self[lname].W
    # Transpose and flip the filters so the valid convolution acts as the
    # transpose of the forward convolution
    W = W.transpose([1, 0, 2, 3])
    W = W[:, :, ::-1, ::-1]
    # Theano-specific graph; CUDNN for conv2d?
    conv_out = K.T.nnet.conv2d(
        input=self.x, filters=W, border_mode='valid')
    # Add zero padding to get the correct output size
    # (Keras 1 API: dim_ordering="th" pads the last two axes)
    pad = K.function([self.x], K.spatial_2d_padding(
        self.x, padding=(pad_width, pad_height), dim_ordering="th"))
    X_pad = pad([X])
    # Get deconv output
    deconv_func = K.function([self.x], conv_out)
    X_deconv = deconv_func([X_pad])
    assert X_deconv.shape[-2:] == (o_width, o_height), \
        "Deconv output at %s has wrong size" % lname
    return X_deconv
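For reference, a hedged sketch of how the Keras 1 padding call in this example maps onto the Keras 2 API (pad_width and pad_height as computed above):

# Keras 1: flat tuple, Theano dim ordering
padded = K.spatial_2d_padding(x, padding=(pad_width, pad_height),
                              dim_ordering="th")
# Keras 2 equivalent: one (before, after) pair per spatial axis
padded = K.spatial_2d_padding(x,
                              padding=((pad_width, pad_width),
                                       (pad_height, pad_height)),
                              data_format='channels_first')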