This article collects typical usage examples of the Python method keras.layers.GlobalMaxPooling2D. If you are unsure what layers.GlobalMaxPooling2D does, how to call it, or want to see it used in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, keras.layers.
Twelve code examples of layers.GlobalMaxPooling2D are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
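Before the collected examples, a minimal sketch of what the layer does may help: GlobalMaxPooling2D reduces a 4D feature map of shape (batch, height, width, channels) to a 2D tensor of shape (batch, channels) by taking the maximum over the two spatial axes. The toy model and shapes below are illustrative assumptions, not taken from any of the examples on this page.

import numpy as np
from keras import layers, models

# Illustrative model: the pooling layer collapses the spatial axes,
# keeping only the per-channel maximum activation.
inputs = layers.Input(shape=(32, 32, 3))                   # channels_last input
x = layers.Conv2D(16, (3, 3), activation='relu')(inputs)   # -> (None, 30, 30, 16)
x = layers.GlobalMaxPooling2D()(x)                         # -> (None, 16)
outputs = layers.Dense(2, activation='softmax')(x)
model = models.Model(inputs, outputs)

# The same reduction for a single feature map in plain NumPy:
feature_map = np.random.rand(30, 30, 16)
pooled = feature_map.max(axis=(0, 1))                      # shape (16,)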
Example 1: channel_attention
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def channel_attention(input_feature, ratio=8):
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    shared_layer_one = Dense(channel // ratio,
                             activation='relu',
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    shared_layer_two = Dense(channel,
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')

    avg_pool = GlobalAveragePooling2D()(input_feature)
    avg_pool = Reshape((1, 1, channel))(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel)
    avg_pool = shared_layer_one(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel // ratio)
    avg_pool = shared_layer_two(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel)

    max_pool = GlobalMaxPooling2D()(input_feature)
    max_pool = Reshape((1, 1, channel))(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel)
    max_pool = shared_layer_one(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel // ratio)
    max_pool = shared_layer_two(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel)

    cbam_feature = Add()([avg_pool, max_pool])
    cbam_feature = Activation('sigmoid')(cbam_feature)

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])
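A hedged usage sketch: in the CBAM-style setup this function comes from, the attention block is typically applied to an intermediate convolutional feature map and its reweighted output is fed onward. The tensor names below (feature_map, refined) are hypothetical and chosen only for illustration.

# Hypothetical usage inside a model definition (channels_last assumed):
# feature_map has shape (None, H, W, C); channel_attention returns a tensor
# of the same shape, with each channel rescaled by a learned weight in (0, 1).
feature_map = Conv2D(64, (3, 3), padding='same', activation='relu')(x)
refined = channel_attention(feature_map, ratio=8)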
Example 2: get_model
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def get_model(base_model,
              layer,
              lr=1e-3,
              input_shape=(224, 224, 1),
              classes=2,
              activation="softmax",
              dropout=None,
              pooling="avg",
              weights=None,
              pretrained="imagenet"):
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained,
                      channels="gray")
    if pooling == "avg":
        x = GlobalAveragePooling2D()(base.output)
    elif pooling == "max":
        x = GlobalMaxPooling2D()(base.output)
    elif pooling is None:
        x = Flatten()(base.output)
    if dropout is not None:
        x = Dropout(dropout)(x)
    x = Dense(classes, activation=activation)(x)
    model = Model(inputs=base.input, outputs=x)
    if weights is not None:
        model.load_weights(weights)
    for l in model.layers[:layer]:
        l.trainable = False
    model.compile(loss="binary_crossentropy", metrics=["accuracy"],
                  optimizer=optimizers.Adam(lr))
    return model

##########
## DATA ##
##########

# == PREPROCESSING == #
Example 3: get_model
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def get_model(base_model,
              layer,
              lr=1e-3,
              input_shape=(224, 224, 1),
              classes=2,
              activation="softmax",
              dropout=None,
              pooling="avg",
              weights=None,
              pretrained=None):
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained,
                      channels="gray")
    if pooling == "avg":
        x = GlobalAveragePooling2D()(base.output)
    elif pooling == "max":
        x = GlobalMaxPooling2D()(base.output)
    elif pooling is None:
        x = Flatten()(base.output)
    if dropout is not None:
        x = Dropout(dropout)(x)
    x = Dense(classes, activation=activation)(x)
    model = Model(inputs=base.input, outputs=x)
    if weights is not None:
        model.load_weights(weights)
    for l in model.layers[:layer]:
        l.trainable = False
    model.compile(loss="binary_crossentropy", metrics=["accuracy"],
                  optimizer=optimizers.Adam(lr))
    return model

##########
## DATA ##
##########

# == PREPROCESSING == #
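The get_model wrappers in Examples 2 and 3 rely on a project-specific base_model signature (the channels="gray" argument is not part of stock Keras). As a hedged sketch of the same pattern with a standard keras.applications backbone, one might write something like the following; the choice of ResNet50, the input shape, and the class count are assumptions for illustration only.

# Hedged sketch: selectable global pooling on top of a standard backbone.
from keras.applications import ResNet50
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Flatten, Dense
from keras.models import Model

def build_classifier(pooling="max", classes=2):
    base = ResNet50(include_top=False, weights=None, input_shape=(224, 224, 3))
    if pooling == "avg":
        x = GlobalAveragePooling2D()(base.output)
    elif pooling == "max":
        x = GlobalMaxPooling2D()(base.output)
    else:
        x = Flatten()(base.output)
    out = Dense(classes, activation="softmax")(x)
    return Model(inputs=base.input, outputs=out)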
Example 4: test_global_max_pooling
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def test_global_max_pooling(self):
    model = Sequential()
    model.add(GlobalMaxPooling2D(input_shape=(16, 16, 3)))
    self._test_keras_model(model, has_variables=False)
Example 5: keypoint_confidence
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def keypoint_confidence(x, name=None):
    """Implements the keypoint (body joint) confidence, given a set of
    probability maps as input. No parameters required.
    """
    def _keypoint_confidence(x):
        x = 4 * AveragePooling2D((2, 2), strides=(1, 1))(x)
        x = K.expand_dims(GlobalMaxPooling2D()(x), axis=-1)
        return x

    f = Lambda(_keypoint_confidence, name=name)

    return TimeDistributed(f, name=name)(x) if K.ndim(x) == 5 else f(x)
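A hedged reading of Example 5: multiplying the 2x2 average pooling (stride 1) by 4 turns it into a sliding 2x2 sum, so the confidence for each probability map is the largest probability mass contained in any 2x2 window. A small NumPy sketch of that computation is shown below; the toy map and its values are invented for illustration.

import numpy as np

def keypoint_confidence_np(prob_map):
    """Max probability mass over all 2x2 windows of a single H x W map."""
    h, w = prob_map.shape
    windows = [prob_map[i:i + 2, j:j + 2].sum()
               for i in range(h - 1) for j in range(w - 1)]
    return max(windows)

toy_map = np.zeros((8, 8))
toy_map[3:5, 3:5] = 0.2                    # a small probability blob
print(keypoint_confidence_np(toy_map))     # -> 0.8, the mass of the best 2x2 window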
Example 6: global_max_min_pooling
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def global_max_min_pooling(x, name=None):
    if 'global_max_min_pool_cnt' not in globals():
        global global_max_min_pool_cnt
        global_max_min_pool_cnt = 0

    if name is None:
        name = 'GlobalMaxMinPooling2D_%d' % global_max_min_pool_cnt
        global_max_min_pool_cnt += 1

    def _global_max_plus_min(x):
        x1 = GlobalMaxPooling2D()(x)
        x2 = GlobalMaxPooling2D()(-x)
        return x1 - x2

    return Lambda(_global_max_plus_min, name=name)(x)
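A hedged note on Example 6: since the global maximum of -x equals the negated global minimum of x, the returned value x1 - x2 is max(x) + min(x) per channel, which matches the inner function's name _global_max_plus_min despite the outer name mentioning "max_min". A small NumPy check of the identity, with an invented random tensor:

import numpy as np

x = np.random.randn(30, 30, 8)      # one feature map, 8 channels
x1 = x.max(axis=(0, 1))             # global max per channel
x2 = (-x).max(axis=(0, 1))          # equals -min(x) per channel
assert np.allclose(x1 - x2, x.max(axis=(0, 1)) + x.min(axis=(0, 1)))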
Example 7: test_keras_import
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def test_keras_import(self):
    # Global Pooling 1D
    model = Sequential()
    model.add(GlobalMaxPooling1D(input_shape=(16, 1)))
    model.build()
    self.keras_param_test(model, 0, 5)
    # Global Pooling 2D
    model = Sequential()
    model.add(GlobalMaxPooling2D(input_shape=(16, 16, 1)))
    model.build()
    self.keras_param_test(model, 0, 8)
    # Pooling 1D
    model = Sequential()
    model.add(MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(16, 1)))
    model.build()
    self.keras_param_test(model, 0, 5)
    # Pooling 2D
    model = Sequential()
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', input_shape=(16, 16, 1)))
    model.build()
    self.keras_param_test(model, 0, 8)
    # Pooling 3D
    model = Sequential()
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                           input_shape=(16, 16, 16, 1)))
    model.build()
    self.keras_param_test(model, 0, 11)

# ********** Locally-connected Layers **********
Example 8: enrich_model
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def enrich_model(base_model, pooling, dropout, reg, n_classes, params, verbose):
    # Init params if not done before
    params = {} if params is None else params

    # Loading appropriate params
    params["pooling"] = select_param("pooling", pooling, params)
    params["n_classes"] = select_param("n_classes", n_classes, params)

    x = base_model.layers[-1].output
    if params["pooling"] == 'None':
        x = Flatten()(x)
    elif params["pooling"] == 'avg':
        x = GlobalAveragePooling2D()(x)
    elif params["pooling"] == 'max':
        x = GlobalMaxPooling2D()(x)

    if dropout is not None and dropout != 0.0:
        x = Dropout(dropout)(x)
        if verbose:
            print("Adding dropout to model with rate: {}".format(dropout))

    regularizer = None
    if reg is not None:
        reg_l2 = reg["l2"]
        reg_l1 = reg["l1"]
        if (reg_l1 != 0.0) and (reg_l2 != 0.0):
            regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2)
        if (reg_l1 == 0.0) and (reg_l2 != 0.0):
            regularizer = regularizers.l2(reg_l2)
        if (reg_l1 != 0.0) and (reg_l2 == 0.0):
            regularizer = regularizers.l1(reg_l1)
        if verbose:
            print("Using regularizer for model: {}".format(reg))

    predictions = Dense(params["n_classes"], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x)
    model = Model(input=base_model.input, output=predictions)

    return model, params

###################################################################################################################
## GPU
###################################################################################################################
Example 9: cnn
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def cnn(embedding_matrix, dimx=50, dimy=50, nb_filter=120,
        embedding_dim=50, filter_length=(50, 4), vocab_size=8000, depth=1):
    print('Model Uses Basic CNN......')

    inpx = Input(shape=(dimx,), dtype='int32', name='inpx')
    inpy = Input(shape=(dimy,), dtype='int32', name='inpy')

    x = word2vec_embedding_layer(embedding_matrix, train=False)(inpx)
    y = word2vec_embedding_layer(embedding_matrix, train=False)(inpy)

    x = Permute((2, 1))(x)
    y = Permute((2, 1))(y)

    conv1 = Reshape((embedding_dim, dimx, 1))(x)
    conv2 = Reshape((embedding_dim, dimy, 1))(y)

    channel_1, channel_2 = [], []
    for dep in range(depth):
        #conv1 = ZeroPadding2D((filter_width - 1, 0))(conv1)
        #conv2 = ZeroPadding2D((filter_width - 1, 0))(conv2)
        ques = Conv2D(nb_filter=nb_filter, kernel_size=filter_length, activation='relu',
                      data_format='channels_last', border_mode="valid")(conv1)
        ans = Conv2D(nb_filter, kernel_size=filter_length, activation='relu',
                     data_format="channels_last", border_mode="valid")(conv2)
        #conv1 = GlobalMaxPooling2D()(ques)
        #conv2 = GlobalMaxPooling2D()(ans)
        #conv1 = MaxPooling2D()(ques)
        #conv2 = MaxPooling2D()(ans)
        channel_1.append(GlobalMaxPooling2D()(ques))
        channel_2.append(GlobalMaxPooling2D()(ans))
        #channel_1.append(GlobalAveragePooling2D()(ques))
        #channel_2.append(GlobalAveragePooling2D()(ans))

    h1 = channel_1.pop(-1)
    if channel_1:
        h1 = merge([h1] + channel_1, mode="concat")

    h2 = channel_2.pop(-1)
    if channel_2:
        h2 = merge([h2] + channel_2, mode="concat")

    h = Merge(mode="concat", name='h')([h1, h2])
    #h = Dropout(0.2)(h)
    #h = Dense(50, kernel_regularizer=regularizers.l2(reg2), activation='relu')(h)
    #wrap = Dropout(0.5)(h)
    #wrap = Dense(64, activation='tanh')(h)

    score = Dense(2, activation='softmax', name='score')(h)
    model = Model([inpx, inpy], [score])
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Example 10: _add_auxilary_head
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def _add_auxilary_head(x, classes, weight_decay, pooling, include_top):
    '''Adds an auxiliary head for training the model

    From section A.7 "Training of ImageNet models" of the paper, all NASNet models are
    trained using an auxiliary classifier around 2/3 of the depth of the network, with
    a loss weight of 0.4

    # Arguments
        x: input tensor
        classes: number of output classes
        weight_decay: l2 regularization weight

    # Returns
        a keras Tensor
    '''
    weights = load_auxilary_branch()

    img_height = 1 if K.image_data_format() == 'channels_last' else 2
    img_width = 2 if K.image_data_format() == 'channels_last' else 3
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('auxilary_branch'):
        auxilary_x = Activation('relu')(x)
        auxilary_x = AveragePooling2D((5, 5), strides=(3, 3), padding='valid', name='aux_pool')(auxilary_x)
        auxilary_x = Conv2D(128, (1, 1), padding='same', use_bias=False, name='aux_conv_projection',
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
                            weights=[weights['conv1']])(auxilary_x)
        auxilary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                        name='aux_bn_projection',
                                        weights=weights['bn1'])(auxilary_x)
        auxilary_x = Activation('relu')(auxilary_x)

        auxilary_x = Conv2D(768, (auxilary_x._keras_shape[img_height], auxilary_x._keras_shape[img_width]),
                            padding='valid', use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay), name='aux_conv_reduction',
                            weights=[weights['conv2']])(auxilary_x)
        auxilary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                        name='aux_bn_reduction',
                                        weights=weights['bn2'])(auxilary_x)
        auxilary_x = Activation('relu')(auxilary_x)

        if include_top:
            auxilary_x = GlobalAveragePooling2D()(auxilary_x)
            auxilary_x = Dense(classes, activation='softmax', kernel_regularizer=l2(weight_decay),
                               name='aux_predictions', weights=weights['fc'])(auxilary_x)
        else:
            if pooling == 'avg':
                auxilary_x = GlobalAveragePooling2D()(auxilary_x)
            elif pooling == 'max':
                auxilary_x = GlobalMaxPooling2D()(auxilary_x)

    return auxilary_x
Example 11: _add_auxiliary_head
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def _add_auxiliary_head(x, classes, weight_decay, pooling, include_top):
    '''Adds an auxiliary head for training the model

    From section A.7 "Training of ImageNet models" of the paper, all NASNet models are
    trained using an auxiliary classifier around 2/3 of the depth of the network, with
    a loss weight of 0.4

    # Arguments
        x: input tensor
        classes: number of output classes
        weight_decay: l2 regularization weight

    # Returns
        a keras Tensor
    '''
    img_height = 1 if K.image_data_format() == 'channels_last' else 2
    img_width = 2 if K.image_data_format() == 'channels_last' else 3
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('auxiliary_branch'):
        auxiliary_x = Activation('relu')(x)
        auxiliary_x = AveragePooling2D((5, 5), strides=(3, 3), padding='valid', name='aux_pool')(auxiliary_x)
        auxiliary_x = Conv2D(128, (1, 1), padding='same', use_bias=False, name='aux_conv_projection',
                             kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                         name='aux_bn_projection')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)

        auxiliary_x = Conv2D(768, (auxiliary_x._keras_shape[img_height], auxiliary_x._keras_shape[img_width]),
                             padding='valid', use_bias=False, kernel_initializer='he_normal',
                             kernel_regularizer=l2(weight_decay), name='aux_conv_reduction')(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                         name='aux_bn_reduction')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)

        if include_top:
            auxiliary_x = Flatten()(auxiliary_x)
            auxiliary_x = Dense(classes, activation='softmax', kernel_regularizer=l2(weight_decay),
                                name='aux_predictions')(auxiliary_x)
        else:
            if pooling == 'avg':
                auxiliary_x = GlobalAveragePooling2D()(auxiliary_x)
            elif pooling == 'max':
                auxiliary_x = GlobalMaxPooling2D()(auxiliary_x)

    return auxiliary_x
Example 12: classifier_block
# Modules to import: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling2D [as alias]
def classifier_block(input_tensor, include_top=True, top='classification',
                     classes=1, activation='sigmoid',
                     input_shape=None, final_pooling=None, name='', verbose=1):
    """ Performs the final Activation for the classification of a given problem.

    # Arguments
        include_top: Whether to include the fully-connected
            layer at the top of the network. Also maps to require_flatten
            option in `keras.applications.imagenet_utils._obtain_input_shape()`.
    """
    x = input_tensor
    if include_top and top == 'classification':
        if verbose:
            print(" classification of x: " + str(x))
        x = Dense(units=classes, activation=activation,
                  kernel_initializer="he_normal", name=name + 'fc' + str(classes))(x)
    elif include_top and top == 'segmentation':
        if verbose > 0:
            print(" segmentation of x: " + str(x))
        x = Conv2D(classes, (1, 1), activation='linear', padding='same')(x)

        if K.image_data_format() == 'channels_first':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape

        x = Reshape((row * col, classes))(x)
        x = Activation(activation)(x)
        x = Reshape((row, col, classes))(x)
    elif include_top and top == 'quaternion':
        x = Dense(units=classes, activation='linear',
                  kernel_initializer="he_normal", name=name + 'fc' + str(classes))(x)
        # normalize the output so we have a unit quaternion
        x = Lambda(lambda x: K.l2_normalize(x, axis=1))(x)
    elif final_pooling == 'avg':
        if verbose:
            print(" GlobalAveragePooling2D")
        x = GlobalAveragePooling2D()(x)
    elif final_pooling == 'max':
        if verbose:
            print(" GlobalMaxPooling2D")
        x = GlobalMaxPooling2D()(x)
    else:
        raise ValueError('hypertree_model.py::classifier_block() unsupported top: ' + str(top))
    return x
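A hedged note on the segmentation branch in Example 12: older Keras versions applied the softmax activation reliably only to 2D or 3D tensors, so the per-pixel class activation is obtained by flattening the spatial grid to (rows * cols, classes), applying the activation, and reshaping back. A minimal standalone sketch of that pattern follows; the 8x8 grid, 16 input channels, and 3 classes are invented for illustration.

# Hedged sketch of the reshape-activate-reshape pattern used for segmentation heads.
from keras import layers, models

rows, cols, classes = 8, 8, 3                        # assumed grid size and class count
inp = layers.Input(shape=(rows, cols, 16))
x = layers.Conv2D(classes, (1, 1), activation='linear', padding='same')(inp)
x = layers.Reshape((rows * cols, classes))(x)        # flatten spatial grid -> 3D tensor
x = layers.Activation('softmax')(x)                  # per-pixel softmax over classes
x = layers.Reshape((rows, cols, classes))(x)         # restore spatial layout
seg_head = models.Model(inp, x)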