This article collects typical usage examples of the keras.layers.Activation method in Python. If you have been wondering what exactly layers.Activation does and how to use it, the curated code examples below may help. You can also explore the other members of the keras.layers module.
The following shows 15 code examples of layers.Activation, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Python code examples.
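Before the examples, here is a minimal sketch of the two common ways to apply an activation in Keras: inline through a layer's activation argument, or as a standalone Activation layer (the layer sizes and shapes below are illustrative, not taken from any of the examples):

from keras.models import Sequential
from keras.layers import Dense, Activation

model = Sequential()
model.add(Dense(64, input_shape=(20,)))      # linear projection, no activation yet
model.add(Activation('relu'))                # standalone activation layer
model.add(Dense(10, activation='softmax'))   # activation fused into the layer
model.summary()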
Example 1: _get_logits_name
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def _get_logits_name(self):
    """
    Looks for the name of the layer producing the logits.
    :return: name of layer producing the logits
    """
    softmax_name = self._get_softmax_name()
    softmax_layer = self.model.get_layer(softmax_name)
    if not isinstance(softmax_layer, Activation):
        # In this case, the activation is part of another layer
        return softmax_name
    if hasattr(softmax_layer, 'inbound_nodes'):
        warnings.warn(
            "Please update your version to keras >= 2.1.3; "
            "support for earlier keras versions will be dropped on "
            "2018-07-22")
        node = softmax_layer.inbound_nodes[0]
    else:
        node = softmax_layer._inbound_nodes[0]
    logits_name = node.inbound_layers[0].name
    return logits_name
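For context, this method (from CleverHans' KerasModelWrapper) recovers the logits layer cleanly only when the softmax is its own Activation layer. A hedged sketch of such a model (layer names are illustrative):

from keras.models import Sequential
from keras.layers import Dense, Activation

model = Sequential()
model.add(Dense(10, input_shape=(784,), name='logits'))  # layer producing the logits
model.add(Activation('softmax', name='softmax'))         # separate softmax layer
# _get_logits_name would return 'logits', the layer feeding the softmax.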
Example 2: CausalCNN
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def CausalCNN(n_filters, lr, decay, loss,
              seq_len, input_features,
              strides_len, kernel_size,
              dilation_rates):
    inputs = Input(shape=(seq_len, input_features), name='input_layer')
    x = inputs
    for dilation_rate in dilation_rates:
        x = Conv1D(filters=n_filters,
                   kernel_size=kernel_size,
                   padding='causal',
                   dilation_rate=dilation_rate,
                   activation='linear')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    # x = Dense(7, activation='relu', name='dense_layer')(x)
    outputs = Dense(3, activation='sigmoid', name='output_layer')(x)
    causalcnn = Model(inputs, outputs=[outputs])
    return causalcnn
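A possible way to instantiate and compile the model above (all hyperparameter values are illustrative; note that lr, decay and loss are accepted by the function but not used inside it, so they must be applied at compile time):

from keras.optimizers import Adam

model = CausalCNN(n_filters=32, lr=1e-3, decay=0.0, loss='binary_crossentropy',
                  seq_len=64, input_features=8,
                  strides_len=1, kernel_size=2,
                  dilation_rates=[1, 2, 4, 8])
model.compile(optimizer=Adam(lr=1e-3, decay=0.0), loss='binary_crossentropy')
model.summary()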
Example 3: build_generator
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(1, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    model.summary()
    noise = Input(shape=(self.latent_dim,))
    img = model(noise)
    return Model(noise, img)
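To sanity-check a generator like this, one can sample noise and run a forward pass. A hedged sketch, assuming gan is an instance of the surrounding GAN class with latent_dim set to 100:

import numpy as np

generator = gan.build_generator()
noise = np.random.normal(0, 1, (16, gan.latent_dim))
imgs = generator.predict(noise)  # e.g. shape (16, 28, 28, 1); tanh output lies in [-1, 1]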
Example 4: build_generator
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    gen_input = Input(shape=(self.latent_dim,))
    img = model(gen_input)
    model.summary()
    return Model(gen_input, img)
Example 5: build_generator
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))
    model.summary()
    noise = Input(shape=(self.latent_dim,))
    img = model(noise)
    return Model(noise, img)
Example 6: build_generator
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    model.summary()
    noise = Input(shape=(self.latent_dim,))
    img = model(noise)
    return Model(noise, img)
Example 7: get_model_41
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def get_model_41(params):
    embedding_weights = pickle.load(open("../data/datasets/train_data/embedding_weights_w2v-google_MSD-AG.pk", "rb"))
    # main sequential model
    model = Sequential()
    model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'],
                        input_length=params['sequence_length'],
                        weights=embedding_weights))
    # model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(LSTM(2048))
    # model.add(Dropout(params['dropout_prob'][1]))
    model.add(Dense(params["n_out"], kernel_initializer="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output shape: %s" % str(model.output_shape))
    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))
    return model
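A hedged example of the params dict this function expects, with keys inferred from the code above and purely illustrative values (the pickled embedding weights file must exist at the hard-coded path):

params = {
    'embedding_dim': 300,
    'sequence_length': 500,
    'n_out': 200,
    'final_activation': 'linear',
}
model = get_model_41(params)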
Example 8: g_block
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def g_block(inp, fil, u=True):
    if u:
        out = UpSampling2D(interpolation='bilinear')(inp)
    else:
        out = Activation('linear')(inp)
    skip = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)
    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)
    out = add([out, skip])
    out = LeakyReLU(0.2)(out)
    return out
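A hedged sketch of how residual blocks like this are typically chained in a generator's upsampling path (the 4x4 seed shape and filter counts are illustrative):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(4, 4, 256))
x = g_block(inp, 128)        # 4x4 -> 8x8
x = g_block(x, 64)           # 8x8 -> 16x16
x = g_block(x, 64, u=False)  # refine at 16x16 without upsampling
head = Model(inp, x)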
Example 9: nonlinearity
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def nonlinearity(h_nonlin_name):
    def compile_fn(di, dh):
        def fn(di):
            nonlin_name = dh['nonlin_name']
            if nonlin_name == 'relu':
                Out = Activation('relu')(di['in'])
            elif nonlin_name == 'tanh':
                Out = Activation('tanh')(di['in'])
            elif nonlin_name == 'elu':
                Out = Activation('elu')(di['in'])
            else:
                raise ValueError("unsupported nonlinearity: %s" % nonlin_name)
            return {"out": Out}
        return fn
    return hke.siso_keras_module('Nonlinearity', compile_fn,
                                 {'nonlin_name': h_nonlin_name})
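In deep_architect, a module like this is usually driven by a discrete hyperparameter that the searcher assigns. A sketch under the assumption that the standard hyperparameter helpers are available:

import deep_architect.hyperparameters as hp

# The search space chooses among the three supported activation names.
nonlin = nonlinearity(hp.Discrete(['relu', 'tanh', 'elu']))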
Example 10: evaluate
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def evaluate(self, inputs, outputs):
    keras.backend.clear_session()
    X = Input(self.X_train[0].shape)
    co.forward({inputs['in']: X})
    logits = outputs['out'].val
    probs = Activation('softmax')(logits)
    model = Model(inputs=[inputs['in'].val], outputs=[probs])
    model.compile(optimizer=Adam(lr=self.learning_rate),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(self.X_train,
                        self.y_train,
                        batch_size=self.batch_size,
                        epochs=self.num_training_epochs,
                        validation_data=(self.X_val, self.y_val))
    results = {'validation_accuracy': history.history['val_accuracy'][-1]}
    return results
Example 11: modelA
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def modelA():
    model = Sequential()
    model.add(Conv2D(64, (5, 5), padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (5, 5)))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
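Models like modelA return raw logits. A common follow-up, sketched here under the assumption that the input shape is supplied later (e.g. when calling fit), is to append a softmax Activation and compile:

model = modelA()
model.add(Activation('softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])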
Example 12: modelB
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def modelB():
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(FLAGS.IMAGE_ROWS,
                                        FLAGS.IMAGE_COLS,
                                        FLAGS.NUM_CHANNELS)))
    model.add(Conv2D(64, (8, 8), strides=(2, 2), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(128, (6, 6), strides=(2, 2), padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv2D(128, (5, 5), strides=(1, 1)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example 13: modelC
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def modelC():
    model = Sequential()
    model.add(Conv2D(128, (3, 3), padding='valid',
                     input_shape=(FLAGS.IMAGE_ROWS,
                                  FLAGS.IMAGE_COLS,
                                  FLAGS.NUM_CHANNELS)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example 14: modelF
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def modelF():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='valid',
                     input_shape=(FLAGS.IMAGE_ROWS,
                                  FLAGS.IMAGE_COLS,
                                  FLAGS.NUM_CHANNELS)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
Example 15: test_keras_transformer_single_dim
# Required import: from keras import layers [as alias]
# Or: from keras.layers import Activation [as alias]
def test_keras_transformer_single_dim(self):
    """
    Test that KerasTransformer correctly handles single-dimensional input data.
    """
    # Construct a model for simple binary classification (with a single hidden layer)
    model = Sequential()
    input_shape = [10]
    model.add(Dense(units=10, input_shape=input_shape,
                    bias_initializer=self._getKerasModelWeightInitializer(),
                    kernel_initializer=self._getKerasModelWeightInitializer()))
    model.add(Activation('relu'))
    model.add(Dense(units=1, bias_initializer=self._getKerasModelWeightInitializer(),
                    kernel_initializer=self._getKerasModelWeightInitializer()))
    model.add(Activation('sigmoid'))
    # Compare KerasTransformer output to raw Keras model output
    self._test_keras_transformer_helper(model, model_filename="keras_transformer_single_dim")