This article collects typical usage examples of the keras.layers.average function in Python: what layers.average does, how to call it, and what it looks like in real code. If that is what you are looking for, the curated examples below should help; you can also explore further usage examples for the containing module, keras.layers.
Nine code examples of the layers.average function are shown below, ordered by popularity.
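Before the examples, here is a minimal, self-contained sketch of the typical way layers.average is wired into a functional-API model (the layer sizes and loss below are arbitrary, chosen only for illustration):

from keras.layers import Input, Dense, average
from keras.models import Model

# two branches with identical output shapes; average() returns their element-wise mean
a = Input(shape=(16,))
b = Input(shape=(16,))
merged = average([Dense(8, activation='relu')(a),
                  Dense(8, activation='relu')(b)])
model = Model([a, b], Dense(1, activation='sigmoid')(merged))
model.compile(optimizer='adam', loss='binary_crossentropy')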
Example 1: test_dense_elementwise_params
# Required import: from keras import layers [as alias]
# Or: from keras.layers import average [as alias]
def test_dense_elementwise_params(self):
    options = dict(modes=[add, multiply, concatenate, average, maximum])

    def build_model(mode):
        x1 = Input(shape=(3,))
        x2 = Input(shape=(3,))
        y1 = Dense(4)(x1)
        y2 = Dense(4)(x2)
        z = mode([y1, y2])
        model = Model([x1, x2], z)
        return mode, model

    product = itertools.product(*options.values())
    args = [build_model(p[0]) for p in product]
    print("Testing a total of %s cases. This could take a while" % len(args))
    for param, model in args:
        self._run_test(model, param)
Example 2: test_merge_average
# Required import: from keras import layers [as alias]
# Or: from keras.layers import average [as alias]
def test_merge_average():
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    o = layers.average([i1, i2])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2], o)

    avg_layer = layers.Average()
    o2 = avg_layer([i1, i2])
    assert avg_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, 0.5 * (x1 + x2), atol=1e-4)
Example 3: test_imdb_fasttext_first_2
# Required import: from keras import layers [as alias]
# Or: from keras.layers import average [as alias]
def test_imdb_fasttext_first_2(self):
    max_features = 10
    max_len = 6
    embedding_dims = 4
    pool_length = 2

    model = Sequential()
    model.add(Embedding(max_features, embedding_dims, input_length=max_len))
    # we add an AveragePooling1D, which will average the embeddings
    # of all words in the document
    model.add(AveragePooling1D(pool_size=pool_length))

    self._test_model(model, one_dim_seq_flags=[True])
Example 4: fconcatenate
# Required import: from keras import layers [as alias]
# Or: from keras.layers import average [as alias]
def fconcatenate(path_orig, path_down):
    if path_orig._keras_shape == path_down._keras_shape:
        path_down_cropped = path_down
    else:
        crop_x_1 = int(np.ceil((path_down._keras_shape[2] - path_orig._keras_shape[2]) / 2))
        crop_x_0 = path_down._keras_shape[2] - path_orig._keras_shape[2] - crop_x_1
        crop_y_1 = int(np.ceil((path_down._keras_shape[3] - path_orig._keras_shape[3]) / 2))
        crop_y_0 = path_down._keras_shape[3] - path_orig._keras_shape[3] - crop_y_1
        crop_z_1 = int(np.ceil((path_down._keras_shape[4] - path_orig._keras_shape[4]) / 2))
        crop_z_0 = path_down._keras_shape[4] - path_orig._keras_shape[4] - crop_z_1
        path_down_cropped = Cropping3D(cropping=((crop_x_0, crop_x_1), (crop_y_0, crop_y_1), (crop_z_0, crop_z_1)))(path_down)
    connected = average([path_orig, path_down_cropped])
    return connected
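Example 4 crops one path before merging because keras.layers.average requires all inputs to share the same shape. A small stand-alone sketch of that shape-matching step (the feature-map shapes below are made up, and channels_last ordering is assumed, unlike the channels_first layout used in Example 4):

from keras.layers import Input, Cropping3D, average

a = Input(shape=(24, 24, 24, 8))   # smaller 3D feature map
b = Input(shape=(28, 28, 28, 8))   # larger feature map from the other path
# crop b down to a's spatial size so the two shapes match
b_cropped = Cropping3D(cropping=((2, 2), (2, 2), (2, 2)))(b)
fused = average([a, b_cropped])    # shape: (None, 24, 24, 24, 8)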
Example 5: two_stream_fuse
# Required import: from keras import layers [as alias]
# Or: from keras.layers import average [as alias]
def two_stream_fuse(self):
    # spatial stream (frozen)
    cnn_spatial_multi = self.cnn_spatial_multi()
    # temporal stream (frozen)
    cnn_temporal_multi = self.cnn_temporal_multi()

    # fused by taking the average
    outputs = average([cnn_spatial_multi.output, cnn_temporal_multi.output])
    model = Model([cnn_spatial_multi.input, cnn_temporal_multi.input], outputs)
    return model
# CNN model for the temporal stream with multiple inputs
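cnn_spatial_multi() and cnn_temporal_multi() are not shown in this snippet. The following is only a rough sketch of the same late-fusion idea, using two hypothetical stand-in classifiers whose softmax outputs are averaged:

from keras.layers import Input, Dense, average
from keras.models import Model

def make_stream(input_dim, name):
    # stand-in for a frozen spatial/temporal stream ending in class probabilities
    inp = Input(shape=(input_dim,), name=name + '_in')
    out = Dense(10, activation='softmax', name=name + '_softmax')(inp)
    return Model(inp, out, name=name)

spatial = make_stream(2048, 'spatial')
temporal = make_stream(4096, 'temporal')

# late fusion: average the per-class probabilities of the two streams
fused_output = average([spatial.output, temporal.output])
fused_model = Model([spatial.input, temporal.input], fused_output)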
Example 6: cnn_spatial
# Required import: from keras import layers [as alias]
# Or: from keras.layers import average [as alias]
def cnn_spatial(self):
    base_model = InceptionV3(weights='imagenet', include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a logistic layer
    predictions = Dense(self.nb_classes, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=predictions)
    return model
# CNN model for the temporal stream
Example 7: eltwise
# Required import: from keras import layers [as alias]
# Or: from keras.layers import average [as alias]
def eltwise(layer, layer_in, layerId):
    out = {}
    if layer['params']['layer_type'] == 'Multiply':
        # This input reverse is to handle visualization
        out[layerId] = multiply(layer_in[::-1])
    elif layer['params']['layer_type'] == 'Sum':
        out[layerId] = add(layer_in[::-1])
    elif layer['params']['layer_type'] == 'Average':
        out[layerId] = average(layer_in[::-1])
    elif layer['params']['layer_type'] == 'Dot':
        out[layerId] = dot(layer_in[::-1], -1)
    else:
        out[layerId] = maximum(layer_in[::-1])
    return out
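A hypothetical invocation of eltwise as defined above (the tensors, shapes, and the 'l3' layer id are assumptions for illustration, and eltwise itself is assumed to be in scope together with the merge-layer imports from the snippet's header):

from keras.layers import Input

t1 = Input(shape=(4, 5))
t2 = Input(shape=(4, 5))

layer = {'params': {'layer_type': 'Average'}}   # spec dict in the format eltwise reads
out = eltwise(layer, [t1, t2], 'l3')            # assumes eltwise from Example 7 is defined
print(out['l3']._keras_shape)                   # (None, 4, 5)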
Example 8: fCreateModel_SPP_MultiPath
# Required import: from keras import layers [as alias]
# Or: from keras.layers import average [as alias]
def fCreateModel_SPP_MultiPath(patchSize, patchSize2, dr_rate=0.0, iPReLU=0, l2_reg=1e-6):
    # Total params: 2,057,510
    # There are two pathways whose receptive fields are in a multiple relation.
    # Their outputs are averaged as the final prediction.
    # The third down-sampling convolutional layer in each pathway is replaced by the SPP module.
    Strides = fgetStrides()
    kernelnumber = fgetKernelNumber()
    sharedConv1 = fCreateVNet_Block
    sharedDown1 = fCreateVNet_DownConv_Block
    sharedConv2 = fCreateVNet_Block
    sharedDown2 = fCreateVNet_DownConv_Block
    sharedConv3 = fCreateVNet_Block
    sharedSPP = fSPP

    inp1 = Input(shape=(1, patchSize[0], patchSize[1], patchSize[2]))
    inp1_Conv_1 = sharedConv1(inp1, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp1_DownConv_1 = sharedDown1(inp1_Conv_1, inp1_Conv_1._keras_shape[1], Strides[0],
                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    inp1_Conv_2 = sharedConv2(inp1_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp1_DownConv_2 = sharedDown2(inp1_Conv_2, inp1_Conv_2._keras_shape[1], Strides[1],
                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    inp1_Conv_3 = sharedConv3(inp1_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp1_SPP = sharedSPP(inp1_Conv_3, level=3)

    inp2 = Input(shape=(1, patchSize2[0], patchSize2[1], patchSize2[2]))
    inp2_Conv_1 = sharedConv1(inp2, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp2_DownConv_1 = sharedDown1(inp2_Conv_1, inp2_Conv_1._keras_shape[1], Strides[0],
                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    inp2_Conv_2 = sharedConv2(inp2_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp2_DownConv_2 = sharedDown2(inp2_Conv_2, inp2_Conv_2._keras_shape[1], Strides[1],
                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    inp2_Conv_3 = sharedConv3(inp2_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp2_SPP = sharedSPP(inp2_Conv_3, level=3)

    SPP_aver = average([inp1_SPP, inp2_SPP])
    dropout_out = Dropout(dr_rate)(SPP_aver)
    dense_out = Dense(units=2,
                      kernel_initializer='normal',
                      kernel_regularizer=l2(l2_reg))(dropout_out)
    output_fc = Activation('softmax')(dense_out)
    model_shared = Model(inputs=[inp1, inp2], outputs=output_fc)
    return model_shared
Example 9: fCreateModel_FCN_MultiFM
# Required import: from keras import layers [as alias]
# Or: from keras.layers import average [as alias]
def fCreateModel_FCN_MultiFM(patchSize, dr_rate=0.0, iPReLU=0, l1_reg=0, l2_reg=1e-6):
    # Total params: 1,420,549
    # The dense layer is replaced by a convolutional layer with filters=2 for the two classes.
    # The FM from the third down-scaled convolutional layer is upsampled by deconvolution and
    # added to the FM from the second down-scaled convolutional layer.
    # The combined FM goes through a convolutional layer with filters=2 for the two classes.
    # The two predictions are averaged as the final result.
    Strides = fgetStrides()
    kernelnumber = fgetKernelNumber()

    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))
    after_Conv_1 = fCreateVNet_Block(inp, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_1 = fCreateVNet_DownConv_Block(after_Conv_1, after_Conv_1._keras_shape[1], Strides[0],
                                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_Conv_2 = fCreateVNet_Block(after_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_2 = fCreateVNet_DownConv_Block(after_Conv_2, after_Conv_2._keras_shape[1], Strides[1],
                                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_Conv_3 = fCreateVNet_Block(after_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_3 = fCreateVNet_DownConv_Block(after_Conv_3, after_Conv_3._keras_shape[1], Strides[2],
                                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    # fully convolutional prediction over the FM from the deepest level
    dropout_out1 = Dropout(dr_rate)(after_DownConv_3)
    fclayer1 = Conv3D(2,
                      kernel_size=(1, 1, 1),
                      kernel_initializer='he_normal',
                      weights=None,
                      padding='valid',
                      strides=(1, 1, 1),
                      kernel_regularizer=l1_l2(l1_reg, l2_reg),
                      )(dropout_out1)
    fclayer1 = GlobalAveragePooling3D()(fclayer1)

    # upsample the FM from the deepest level and add it to the FM from level 2
    UpedFM_Level3 = Conv3DTranspose(filters=97, kernel_size=(3, 3, 1), strides=(2, 2, 1), padding='same')(after_DownConv_3)
    conbined_FM_Level23 = add([UpedFM_Level3, after_DownConv_2])
    fclayer2 = Conv3D(2,
                      kernel_size=(1, 1, 1),
                      kernel_initializer='he_normal',
                      weights=None,
                      padding='valid',
                      strides=(1, 1, 1),
                      kernel_regularizer=l1_l2(l1_reg, l2_reg),
                      )(conbined_FM_Level23)
    fclayer2 = GlobalAveragePooling3D()(fclayer2)

    # combine the two predictions using average
    fcl_aver = average([fclayer1, fclayer2])
    predict = Activation('softmax')(fcl_aver)
    cnn_fcl_msfm = Model(inputs=inp, outputs=predict)
    return cnn_fcl_msfm