This page collects typical usage examples of the Python method keras.layers.PReLU. If you have been wondering what exactly layers.PReLU does, how to call it, or what working code looks like, the curated examples below may help. You can also explore the other members of its containing module, keras.layers.
Fifteen code examples of layers.PReLU are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
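Before the examples, here is a minimal sketch of what keras.layers.PReLU does (illustrative only, not taken from the examples below): PReLU computes f(x) = x for x >= 0 and f(x) = alpha * x for x < 0, where alpha is learned during training, one value per input element unless shared_axes is given.

from keras.layers import Input, Dense, PReLU
from keras.models import Model

inputs = Input(shape=(16,))
x = Dense(32)(inputs)
x = PReLU()(x)  # 32 learnable slopes, one per unit
outputs = Dense(1)(x)
model = Model(inputs=inputs, outputs=outputs)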
Example 1: CapsuleNet
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16,
               n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(170,))
    x = Embedding(21099, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
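A note on this example: Capsule (and AttentionWithContext in later examples) are custom layers defined in the source projects, not members of keras.layers, and CuDNNGRU only runs on a CUDA-enabled GPU. Since no shared_axes argument is given, the PReLU here learns an independent slope for every (timestep, feature) position of the bidirectional GRU output.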
Example 2: CapsuleNet_v2
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def CapsuleNet_v2(n_capsule=10, n_routings=5, capsule_dim=16,
                  n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(200,))
    x = Embedding(20000, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Example 3: model_definition
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def model_definition():
    """ Keras RNetwork for MTCNN """
    input_ = Input(shape=(24, 24, 3))
    var_x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input_)
    var_x = PReLU(shared_axes=[1, 2], name='prelu1')(var_x)
    var_x = MaxPool2D(pool_size=3, strides=2, padding='same')(var_x)
    var_x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
    var_x = PReLU(shared_axes=[1, 2], name='prelu2')(var_x)
    var_x = MaxPool2D(pool_size=3, strides=2)(var_x)
    var_x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(var_x)
    var_x = PReLU(shared_axes=[1, 2], name='prelu3')(var_x)
    var_x = Permute((3, 2, 1))(var_x)
    var_x = Flatten()(var_x)
    var_x = Dense(128, name='conv4')(var_x)
    var_x = PReLU(name='prelu4')(var_x)
    classifier = Dense(2, activation='softmax', name='conv5-1')(var_x)
    bbox_regress = Dense(4, name='conv5-2')(var_x)
    return [input_], [classifier, bbox_regress]
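A small sketch of what shared_axes does here (shapes assumed from the RNet definition above, purely illustrative): sharing across axes 1 and 2 leaves one learnable slope per channel instead of one per spatial position.

from keras.layers import Input, Conv2D, PReLU
from keras.models import Model

inp = Input(shape=(24, 24, 3))
x = Conv2D(28, (3, 3), padding='valid')(inp)  # output shape (22, 22, 28)
x = PReLU(shared_axes=[1, 2])(x)              # alpha shape (1, 1, 28): one slope per channel
print(Model(inp, x).layers[-1].get_weights()[0].shape)  # (1, 1, 28)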
Example 4: get_srresnet_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def get_srresnet_model(input_channel_num=3, feature_dim=64, resunit_num=16):
    def _residual_block(inputs):
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
        x = BatchNormalization()(x)
        m = Add()([x, inputs])
        return m

    inputs = Input(shape=(None, None, input_channel_num))
    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
    x = PReLU(shared_axes=[1, 2])(x)
    x0 = x

    for i in range(resunit_num):
        x = _residual_block(x)

    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    x = BatchNormalization()(x)
    x = Add()([x, x0])
    x = Conv2D(input_channel_num, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    model = Model(inputs=inputs, outputs=x)
    return model

# UNet: code from https://github.com/pietz/unet-keras
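Unlike the original SRResNet generator, this variant has no sub-pixel upsampling stage, so the output keeps the spatial resolution of the input; as in the MTCNN example, PReLU(shared_axes=[1, 2]) keeps one learnable slope per feature map.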
Example 5: emit_PRelu
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def emit_PRelu(self, IR_node, in_scope=False):
    if in_scope:
        raise NotImplementedError
    else:
        code = "{:<15} = layers.PReLU(name='{}')({})".format(
            IR_node.variable_name,
            IR_node.name,
            self.parent_variable_name(IR_node)
        )
        return code
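For reference, with hypothetical node names the emitted line would look like the following (the {:<15} format left-justifies the variable name in a 15-character field):

prelu1          = layers.PReLU(name='prelu1')(conv1)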
Example 6: ResCNN
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def ResCNN(self, x):
    """
    Two stacked convolutions, ResNet style.
    :param x: tensor, the input tensor
    :return: tensor, result of the two convolutions
    """
    # pre-activation
    # x = PReLU()(x)
    x = Conv1D(self.filters_num,
               kernel_size=1,
               padding='SAME',
               kernel_regularizer=l2(self.l2),
               bias_regularizer=l2(self.l2),
               activation=self.activation_conv,
               )(x)
    x = BatchNormalization()(x)
    # x = PReLU()(x)
    x = Conv1D(self.filters_num,
               kernel_size=1,
               padding='SAME',
               kernel_regularizer=l2(self.l2),
               bias_regularizer=l2(self.l2),
               activation=self.activation_conv,
               )(x)
    x = BatchNormalization()(x)
    # x = Dropout(self.dropout)(x)
    x = PReLU()(x)
    return x
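Despite the pre-activation comments, the active code applies PReLU once at the end of the block, after the second BatchNormalization; the commented-out calls mark where a pre-activation variant would place it. The block contains no Add, so any residual shortcut is presumably applied by the caller.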
Example 7: __init__
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def __init__(self):
    super(PReLUNet, self).__init__()
    self.prelu = nn.PReLU(3)
Example 8: test_prelu
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def test_prelu(self):
    keras_model = Sequential()
    keras_model.add(PReLU(input_shape=(3, 32, 32), shared_axes=(2, 3),
                          name='prelu'))
    keras_model.compile(loss=keras.losses.categorical_crossentropy,
                        optimizer=keras.optimizers.SGD())
    pytorch_model = PReLUNet()
    self.transfer(keras_model, pytorch_model)
    self.assertEqualPrediction(keras_model, pytorch_model, self.test_data)
Example 9: test_prelu
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def test_prelu():
    layer_test(layers.PReLU, kwargs={},
               input_shape=(2, 3, 4))
Example 10: test_prelu_share
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def test_prelu_share():
    layer_test(layers.PReLU, kwargs={'shared_axes': 1},
               input_shape=(2, 3, 4))
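The layer_test helper used in Examples 9 and 10 is Keras's own test utility (in Keras 2.x it lived in keras.utils.test_utils); it builds a small model around the layer, runs a forward pass, and round-trips the layer through its config to check serialization.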
Example 11: activate
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def activate(self, layer):
    """ Apply the configured activation function to a layer.
    :param layer: the input layer
    :return: the layer after activation
    """
    if self.activ == 'lrelu':
        return layers.LeakyReLU(0.2)(layer)
    elif self.activ == 'prelu':
        return layers.PReLU()(layer)
    else:
        return Activation(self.activ)(layer)
Example 12: activate
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def activate(self, layer):
    """ Apply the configured activation function to a layer.
    :param layer: the input layer
    :return: the layer after activation
    """
    if self.activ == 'lrelu':
        return layers.LeakyReLU()(layer)
    elif self.activ == 'prelu':
        return layers.PReLU()(layer)
    else:
        return Activation(self.activ)(layer)
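The only functional difference from Example 11 is the LeakyReLU slope: here it is left at the Keras default (alpha=0.3) rather than set to 0.2.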
Example 13: RnnVersion1
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def RnnVersion1(n_recurrent=50, n_filters=30, dropout_rate=0.2, l2_penalty=0.0001,
                n_capsule=10, n_routings=5, capsule_dim=16):
    K.clear_session()

    def conv_block(x, n, kernel_size):
        x = Conv1D(n, kernel_size, activation='relu')(x)
        x = Conv1D(n_filters, kernel_size, activation='relu')(x)
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])

    def att_max_avg_pooling(x):
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])

    inputs = Input(shape=(170,))
    emb = Embedding(21099, 300, trainable=True)(inputs)

    # model 0
    x0 = BatchNormalization()(emb)
    x0 = SpatialDropout1D(dropout_rate)(x0)
    x0 = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x0)
    x0 = Conv1D(n_filters, kernel_size=3)(x0)
    x0 = PReLU()(x0)
    # x0 = Dropout(dropout_rate)(x0)
    x0 = att_max_avg_pooling(x0)

    # model 1
    x1 = SpatialDropout1D(dropout_rate)(emb)
    x1 = Bidirectional(
        CuDNNGRU(2 * n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x1)
    x1 = Conv1D(2 * n_filters, kernel_size=2)(x1)
    x1 = PReLU()(x1)
    # x1 = Dropout(dropout_rate)(x1)
    x1 = att_max_avg_pooling(x1)

    x = concatenate([x0, x1], name='concatenate')
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)  # , kernel_regularizer=l2(l2_penalty), activity_regularizer=l2(l2_penalty)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])
    return model
Example 14: RnnVersion1
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def RnnVersion1(n_recurrent=50, n_filters=30, dropout_rate=0.2, l2_penalty=0.0001,
                n_capsule=10, n_routings=5, capsule_dim=16):
    K.clear_session()

    def conv_block(x, n, kernel_size):
        x = Conv1D(n, kernel_size, activation='relu')(x)
        x = Conv1D(n_filters, kernel_size, activation='relu')(x)
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])

    def att_max_avg_pooling(x):
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])

    inputs = Input(shape=(100,))
    emb = Embedding(9399, 300, trainable=True)(inputs)

    # model 0
    x0 = BatchNormalization()(emb)
    x0 = SpatialDropout1D(dropout_rate)(x0)
    x0 = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x0)
    x0 = Conv1D(n_filters, kernel_size=3)(x0)
    x0 = PReLU()(x0)
    # x0 = Dropout(dropout_rate)(x0)
    x0 = att_max_avg_pooling(x0)

    # model 1
    x1 = SpatialDropout1D(dropout_rate)(emb)
    x1 = Bidirectional(
        CuDNNGRU(2 * n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x1)
    x1 = Conv1D(2 * n_filters, kernel_size=2)(x1)
    x1 = PReLU()(x1)
    # x1 = Dropout(dropout_rate)(x1)
    x1 = att_max_avg_pooling(x1)

    x = concatenate([x0, x1], name='concatenate')
    fc = Dense(128, activation='relu')(x)
    outputs = Dense(6, activation='softmax')(fc)  # , kernel_regularizer=l2(l2_penalty), activity_regularizer=l2(l2_penalty)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])
    return model
Example 15: header_code
# Required import: from keras import layers [as alias]
# Or: from keras.layers import PReLU [as alias]
def header_code(self):
    return """import keras
from keras.models import Model
from keras import layers
import keras.backend as K
import numpy as np
from keras.layers.core import Lambda
import tensorflow as tf


weights_dict = dict()


def load_weights_from_file(weight_file):
    try:
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except:
        weights_dict = np.load(weight_file, allow_pickle=True, encoding='bytes').item()
    return weights_dict


def set_layer_weights(model, weights_dict):
    for layer in model.layers:
        if layer.name in weights_dict:
            cur_dict = weights_dict[layer.name]
            current_layer_parameters = list()
            if layer.__class__.__name__ == "BatchNormalization":
                if 'scale' in cur_dict:
                    current_layer_parameters.append(cur_dict['scale'])
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
                current_layer_parameters.extend([cur_dict['mean'], cur_dict['var']])
            elif layer.__class__.__name__ == "Scale":
                if 'scale' in cur_dict:
                    current_layer_parameters.append(cur_dict['scale'])
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
            elif layer.__class__.__name__ == "SeparableConv2D":
                current_layer_parameters = [cur_dict['depthwise_filter'], cur_dict['pointwise_filter']]
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
            elif layer.__class__.__name__ == "Embedding":
                current_layer_parameters.append(cur_dict['weights'])
            elif layer.__class__.__name__ == "PReLU":
                gamma = np.ones(list(layer.input_shape[1:])) * cur_dict['gamma']
                current_layer_parameters.append(gamma)
            else:
                # rot
                if 'weights' in cur_dict:
                    current_layer_parameters = [cur_dict['weights']]
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
            model.get_layer(layer.name).set_weights(current_layer_parameters)
    return model


def KitModel(weight_file = None):
    global weights_dict
    weights_dict = load_weights_from_file(weight_file) if not weight_file == None else None
"""