This article collects typical usage examples of the Python method keras.layers.MaxPooling3D. If you are asking yourself: what exactly does layers.MaxPooling3D do? How is it used? What do real-world calls look like? Then the curated examples below may help. You can also explore further usage examples from the containing module, keras.layers.
The following shows 15 code examples of layers.MaxPooling3D, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
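Before diving in, here is a minimal, self-contained sketch of what MaxPooling3D does: it slides a window over a 5D tensor (batch, dim1, dim2, dim3, channels) and keeps the maximum in each window. The shapes below are arbitrary assumptions chosen for illustration, not taken from any of the examples.

# Standalone sketch; shapes are arbitrary assumptions, not from the examples below.
import numpy as np
from keras.layers import Input, MaxPooling3D
from keras.models import Model

# 5D input: (batch, depth/time, height, width, channels)
inp = Input(shape=(8, 32, 32, 16))
out = MaxPooling3D(pool_size=(2, 2, 2))(inp)  # halves each of the three pooled dims
model = Model(inputs=inp, outputs=out)

x = np.random.rand(1, 8, 32, 32, 16).astype('float32')
print(model.predict(x).shape)  # (1, 4, 16, 16, 16): channels are untouched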
Example 1: get_liveness_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def get_liveness_model():
    model = Sequential()
    model.add(Conv3D(32, kernel_size=(3, 3, 3),
                     activation='relu',
                     input_shape=(24, 100, 100, 1)))
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    return model
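As a quick sanity check, the returned model can be compiled and run on a random batch. The snippet below is a hypothetical usage sketch; the optimizer and loss are assumptions, not taken from the source repository.

# Hypothetical usage sketch; optimizer and loss are assumptions.
import numpy as np

model = get_liveness_model()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
dummy = np.random.rand(2, 24, 100, 100, 1).astype('float32')  # (batch, frames, H, W, channels)
print(model.predict(dummy).shape)  # (2, 2): a 2-way softmax per clip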
Example 2: timeception_layers
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def timeception_layers(tensor, n_layers=4, n_groups=8, is_dilated=True):
    input_shape = K.int_shape(tensor)
    assert len(input_shape) == 5

    expansion_factor = 1.25
    _, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

    # how many layers of timeception
    for i in range(n_layers):
        layer_num = i + 1
        # get details about grouping
        n_channels_per_branch, n_channels_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)
        # temporal conv per group
        tensor = __grouped_convolutions(tensor, n_groups, n_channels_per_branch, is_dilated, layer_num)
        # downsample over time
        tensor = MaxPooling3D(pool_size=(2, 1, 1), name='maxpool_tc%d' % (layer_num))(tensor)
        n_channels_in = n_channels_out

    return tensor
Example 3: __define_timeception_layers
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def __define_timeception_layers(self, n_channels_in, n_layers, n_groups, expansion_factor, is_dilated):
    """
    Define layers inside the timeception layers.
    """
    # how many layers of timeception
    for i in range(n_layers):
        layer_num = i + 1
        # get details about grouping
        n_channels_per_branch, n_channels_out = self.__get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)
        # temporal conv per group
        self.__define_grouped_convolutions(n_channels_in, n_groups, n_channels_per_branch, is_dilated, layer_num)
        # downsample over time
        layer_name = 'maxpool_tc%d' % (layer_num)
        layer = MaxPooling3D(pool_size=(2, 1, 1), name=layer_name)
        setattr(self, layer_name, layer)
        n_channels_in = n_channels_out
Example 4: get_model_compiled
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def get_model_compiled(shapeinput, num_class, w_decay=0, lr=1e-3):
    clf = Sequential()
    clf.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=shapeinput))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(Conv3D(64, (5, 5, 16)))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(MaxPooling3D(pool_size=(2, 2, 1)))
    clf.add(Flatten())
    clf.add(Dense(300, kernel_regularizer=regularizers.l2(w_decay)))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(Dense(num_class, activation='softmax'))
    clf.compile(loss=categorical_crossentropy, optimizer=Adam(lr=lr), metrics=['accuracy'])
    return clf
Example 5: dsrff3D
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def dsrff3D(image_size, num_labels):
    num_channels = 1
    inputs = Input(shape=(image_size, image_size, image_size, num_channels))

    # modified VGG19 architecture, written against the legacy Keras 1 API
    # (Convolution3D(filters, k, k, k) and border_mode= instead of padding=)
    bn_axis = 3  # unused here
    m = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    m = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(m)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(m)
    m = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(m)
    m = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(m)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(m)
    m = Flatten(name='flatten')(m)
    m = Dense(512, activation='relu', name='fc1')(m)
    m = Dense(512, activation='relu', name='fc2')(m)
    m = Dense(num_labels, activation='softmax')(m)

    mod = KM.Model(inputs=inputs, outputs=m)
    return mod
Example 6: __temporal_convolutional_block
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def __temporal_convolutional_block(tensor, n_channels_per_branch, kernel_sizes, dilation_rates, layer_num, group_num):
    """
    Define 5 branches of convolutions that operate on the channels of each group.
    """
    # branch 1: dimension reduction only, no temporal conv
    t_1 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b1_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_1 = BatchNormalization(name='bn_b1_g%d_tc%d' % (group_num, layer_num))(t_1)

    # branch 2: dimension reduction followed by depth-wise temporal conv (kernel size 3)
    t_2 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b2_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_2 = DepthwiseConv1DLayer(kernel_sizes[0], dilation_rates[0], padding='same', name='convdw_b2_g%d_tc%d' % (group_num, layer_num))(t_2)
    t_2 = BatchNormalization(name='bn_b2_g%d_tc%d' % (group_num, layer_num))(t_2)

    # branch 3: dimension reduction followed by depth-wise temporal conv (kernel size 5)
    t_3 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b3_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_3 = DepthwiseConv1DLayer(kernel_sizes[1], dilation_rates[1], padding='same', name='convdw_b3_g%d_tc%d' % (group_num, layer_num))(t_3)
    t_3 = BatchNormalization(name='bn_b3_g%d_tc%d' % (group_num, layer_num))(t_3)

    # branch 4: dimension reduction followed by depth-wise temporal conv (kernel size 7)
    t_4 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b4_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_4 = DepthwiseConv1DLayer(kernel_sizes[2], dilation_rates[2], padding='same', name='convdw_b4_g%d_tc%d' % (group_num, layer_num))(t_4)
    t_4 = BatchNormalization(name='bn_b4_g%d_tc%d' % (group_num, layer_num))(t_4)

    # branch 5: dimension reduction followed by temporal max pooling
    t_5 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b5_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_5 = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same', name='maxpool_b5_g%d_tc%d' % (group_num, layer_num))(t_5)
    t_5 = BatchNormalization(name='bn_b5_g%d_tc%d' % (group_num, layer_num))(t_5)

    # concatenate the channels of all branches
    tensor = Concatenate(axis=4, name='concat_g%d_tc%d' % (group_num, layer_num))([t_1, t_2, t_3, t_4, t_5])
    return tensor
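Branch 5 above uses MaxPooling3D as a purely temporal operator: pool_size=(2, 1, 1) with strides=(1, 1, 1) and 'same' padding takes the max over pairs of adjacent timesteps while preserving both the spatial dimensions and the number of timesteps; with the default strides (equal to pool_size), as in Examples 2 and 14, the same layer instead halves the timesteps. A minimal standalone sketch, with shapes assumed for illustration:

# Standalone sketch of temporal-only pooling; shapes are arbitrary assumptions.
from keras.layers import Input, MaxPooling3D
from keras.models import Model

inp = Input(shape=(16, 7, 7, 1024))  # (timesteps, height, width, channels)
same_t = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same')(inp)
half_t = MaxPooling3D(pool_size=(2, 1, 1))(inp)  # default strides = pool_size
print(Model(inp, same_t).output_shape)  # (None, 16, 7, 7, 1024): timesteps preserved
print(Model(inp, half_t).output_shape)  # (None, 8, 7, 7, 1024): timesteps halved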
Example 7: contracting_layer
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def contracting_layer(input, neurons):
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    conc1 = concatenate([input, conv2], axis=4)
    pool = MaxPooling3D(pool_size=(2, 2, 2))(conc1)
    return pool, conv2

# Create the middle layer between the contracting and expanding layers
Example 8: contracting_layer
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def contracting_layer(input, neurons):
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    pool = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    return pool, conv2

# Create the middle layer between the contracting and expanding layers
Example 9: get_model_compiled
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def get_model_compiled(args, inputshape, num_class):
    model = Sequential()
    if args.arch == "CNN1D":
        model.add(Conv1D(20, 24, activation='relu', input_shape=inputshape))
        model.add(MaxPooling1D(pool_size=5))
        model.add(Flatten())
        model.add(Dense(100))
    elif "CNN2D" in args.arch:
        model.add(Conv2D(50, kernel_size=(5, 5), input_shape=inputshape))
        model.add(Activation('relu'))
        model.add(Conv2D(100, (5, 5)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(100))
    elif args.arch == "CNN3D":
        model.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=inputshape))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Conv3D(64, (5, 5, 16)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=(2, 2, 1)))
        model.add(Flatten())
        model.add(Dense(300))
    if args.arch != "CNN2D":
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss=categorical_crossentropy, optimizer=Adam(args.lr1), metrics=['accuracy'])
    return model
Example 10: nn_architecture_seg_3d
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def nn_architecture_seg_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001,
                           depth=3, n_base_filters=16, metrics=dice_coefficient, batch_normalization=True):
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # contracting path
    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters * (2 ** layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters * (2 ** layer_depth) * 2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # expanding path; axis=1 concatenation (and ._keras_shape[1] below) assume channels_first data
    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = UpSampling3D(size=pool_size)(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation('sigmoid')(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model
Example 11: inception3D
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def inception3D(image_size, num_labels):
    num_channels = 1
    inputs = Input(shape=(image_size, image_size, image_size, num_channels))

    # legacy Keras 1 API: subsample= and border_mode= instead of strides= and padding=
    m = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu', border_mode='valid')(inputs)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=None, border_mode='same')(m)

    # inception module 0
    branch1x1 = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(m)
    branch3x3_reduce = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(m)
    branch3x3 = Convolution3D(64, 3, 3, 3, subsample=(1, 1, 1), activation='relu', border_mode='same')(branch3x3_reduce)
    branch5x5_reduce = Convolution3D(16, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(m)
    branch5x5 = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu', border_mode='same')(branch5x5_reduce)
    branch_pool = MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), border_mode='same')(m)
    branch_pool_proj = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(branch_pool)
    # m = merge([branch1x1, branch3x3, branch5x5, branch_pool_proj], mode='concat', concat_axis=-1)
    from keras.layers import concatenate
    m = concatenate([branch1x1, branch3x3, branch5x5, branch_pool_proj], axis=-1)

    m = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), border_mode='valid')(m)
    m = Flatten()(m)
    m = Dropout(0.7)(m)

    # explicitly separate the Dense and Activation layers, for projecting into the structural feature space
    m = Dense(num_labels, activation='linear')(m)
    m = Activation('softmax')(m)

    mod = KM.Model(input=inputs, output=m)
    return mod
Example 12: model_simple_upsampling__reshape
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def model_simple_upsampling__reshape(img_shape, class_n=None):
    from keras.layers import Input, Dense, Convolution3D, MaxPooling3D, UpSampling3D, Reshape, Flatten
    from keras.models import Sequential, Model
    from keras.layers.core import Activation
    from aitom.classify.deep.unsupervised.autoencoder.seg_util import conv_block

    NUM_CHANNELS = 1
    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)

    # Use relu activation in the hidden layers to guarantee that non-negative outputs are
    # passed to the max pooling layers. As long as the output layer has a linear activation,
    # the network can still accommodate negative image intensities; they are simply shifted
    # back via the bias term.
    input_img = Input(shape=input_shape[1:])
    x = input_img
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = Convolution3D(class_n, 1, 1, 1, border_mode='same')(x)
    x = Reshape((N.prod(img_shape), class_n))(x)  # N is assumed to be numpy (import numpy as N)
    x = Activation('softmax')(x)

    model = Model(input=input_img, output=x)

    print('model layers:')
    for l in model.layers:
        print(l.output_shape, l.name)

    return model
Example 13: c3d_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def c3d_model():
    input_shape = (112, 112, 8, 3)
    weight_decay = 0.005
    nb_classes = 101

    inputs = Input(input_shape)
    x = Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(inputs)
    x = MaxPooling3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)
    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)

    x = Flatten()(x)
    x = Dense(2048, activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Dense(nb_classes, kernel_regularizer=l2(weight_decay))(x)
    x = Activation('softmax')(x)

    model = Model(inputs, x)
    return model
Example 14: timeception_temporal_convolutions
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def timeception_temporal_convolutions(tensor, n_layers, n_groups, expansion_factor, is_dilated=True):
    input_shape = K.int_shape(tensor)
    assert len(input_shape) == 5

    _, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

    # collapse the spatial regions into one dimension
    tensor = ReshapeLayer((n_timesteps, side_dim * side_dim, 1, n_channels_in))(tensor)

    for i in range(n_layers):
        n_channels_per_branch, n_channels_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)
        # add global pooling as local regions
        tensor = __global_spatial_pooling(tensor)
        # temporal conv (inception-style, shuffled)
        if is_dilated:
            tensor = __timeception_shuffled_depthwise_dilated(tensor, n_groups, n_channels_per_branch)
        else:
            tensor = __timeception_shuffled_depthwise(tensor, n_groups, n_channels_per_branch)
        # downsample over time
        tensor = MaxPooling3D(pool_size=(2, 1, 1))(tensor)
        n_channels_in = n_channels_out

    return tensor
Example 15: timeception_temporal_convolutions_parallelized
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling3D [as alias]
def timeception_temporal_convolutions_parallelized(tensor, n_layers, n_groups, expansion_factor, is_dilated=True):
    input_shape = K.int_shape(tensor)
    assert len(input_shape) == 5

    raise Exception('Sorry, not implemented now')

    # NOTE: everything below the raise is unreachable; it is kept here as in the source.
    _, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

    # collapse the spatial regions into one dimension
    tensor = ReshapeLayer((n_timesteps, side_dim * side_dim, 1, n_channels_in))(tensor)

    for i in range(n_layers):
        # add global pooling as regions
        tensor = __global_spatial_pooling(tensor)
        # temporal conv (inception-style, shuffled)
        n_channels_per_branch, n_channels_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)
        if is_dilated:
            tensor = __timeception_shuffled_depthwise_dilated_parallelized(tensor, n_groups, n_channels_per_branch)
        else:
            tensor = __timeception_shuffled_depthwise_parallelized(tensor, n_groups, n_channels_per_branch)
        # downsample over time
        tensor = MaxPooling3D(pool_size=(2, 1, 1))(tensor)
        n_channels_in = n_channels_out

    return tensor