This article collects typical usage examples of the tensorflow.keras.layers.Activation layer in Python. If you have been wondering what layers.Activation is for, how to use it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.keras.layers.
Below are 15 code examples of layers.Activation, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
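Before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the two equivalent ways layers.Activation typically appears: as a standalone layer inserted after another layer, or folded into that layer via its activation= argument.

from tensorflow.keras import layers, models

# Minimal sketch: standalone Activation layer vs. the activation= shorthand
model = models.Sequential([
    layers.Dense(64, input_shape=(10,)),
    layers.Activation('relu'),               # explicit Activation layer
    layers.Dense(1, activation='sigmoid'),   # same effect, folded into the Dense layer
])
model.summary()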
Example 1: create_keras_multiclass_classifier
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def create_keras_multiclass_classifier(X, y):
    batch_size = 128
    epochs = 12
    num_classes = len(np.unique(y))
    model = _common_model_generator(X.shape[1], num_classes)
    model.add(Dense(units=num_classes, activation=Activation("softmax")))
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=["accuracy"],
    )
    y_train = keras.utils.to_categorical(y, num_classes)
    model.fit(
        X,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(X, y_train),
    )
    return model
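A hedged usage sketch for Example 1: the call below assumes the surrounding test module's imports (numpy as np, keras, Dense, Activation) and its _common_model_generator helper, and uses synthetic data chosen purely for illustration.

import numpy as np

# Synthetic data: 200 samples, 20 features, 3 classes (illustrative values only)
X = np.random.rand(200, 20).astype("float32")
y = np.random.randint(0, 3, size=200)

clf = create_keras_multiclass_classifier(X, y)
probs = clf.predict(X)   # per-class probabilities, shape (200, 3)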
Example 2: conv2d_bn
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
    x = Conv2D(filters,
               kernel_size,
               strides=strides,
               padding=padding,
               use_bias=use_bias,
               name=name)(x)
    if not use_bias:
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
        bn_name = _generate_layer_name('BatchNorm', prefix=name)
        x = BatchNormalization(axis=bn_axis, momentum=0.995, epsilon=0.001,
                               scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = _generate_layer_name('Activation', prefix=name)
        x = Activation(activation, name=ac_name)(x)
    return x
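A small usage sketch for conv2d_bn. It assumes the module-level imports the function relies on (Conv2D, BatchNormalization, Activation, K) and supplies a hypothetical stand-in for the project's _generate_layer_name helper so the snippet runs on its own; the input shape and layer names are illustrative.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

# Hypothetical stand-in for the project's _generate_layer_name helper
def _generate_layer_name(name, prefix=None):
    return name if prefix is None else '{}_{}'.format(prefix, name)

inputs = Input(shape=(149, 149, 3))
x = conv2d_bn(inputs, 32, 3, strides=2, name='Conv2d_1a')   # Conv2D + BatchNorm + ReLU
x = conv2d_bn(x, 64, 3, name='Conv2d_2a')
stem = Model(inputs, x)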
Example 3: test_dqn
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def test_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=False)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Example 4: test_double_dqn
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def test_double_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=True)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Example 5: test_cem
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def test_cem():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = EpisodeParameterMemory(limit=1000, window_length=1)
    dqn = CEMAgent(model=model, nb_actions=nb_actions, memory=memory)
    dqn.compile()

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=1)
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Example 6: test_sarsa
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def test_sarsa():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions, activation='linear'))

    policy = EpsGreedyQPolicy(eps=.1)
    sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=50, policy=policy)
    sarsa.compile(Adam(lr=1e-3))

    sarsa.fit(env, nb_steps=20000, visualize=False, verbose=0)
    policy.eps = 0.
    h = sarsa.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Example 7: construct_q_network
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def construct_q_network(self):
    # Replaces the convolution layers with Dense layers and changes the size of the input and output spaces.
    # Uses the network architecture found in the DeepMind paper.
    input_layer = Input(shape=(self.observation_size * self.training_param.NUM_FRAMES,))
    layer1 = Dense(self.observation_size * self.training_param.NUM_FRAMES)(input_layer)
    layer1 = Activation('relu')(layer1)
    layer2 = Dense(self.observation_size)(layer1)
    layer2 = Activation('relu')(layer2)
    layer3 = Dense(self.observation_size)(layer2)
    layer3 = Activation('relu')(layer3)
    layer4 = Dense(2 * self.action_size)(layer3)
    layer4 = Activation('relu')(layer4)
    output = Dense(self.action_size)(layer4)

    self.model = Model(inputs=[input_layer], outputs=[output])
    self.model.compile(loss='mse', optimizer=Adam(lr=self.lr_))

    self.target_model = Model(inputs=[input_layer], outputs=[output])
    self.target_model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
    self.target_model.set_weights(self.model.get_weights())
Example 8: _build_q_NN
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def _build_q_NN(self):
    input_states = Input(shape=(self.observation_size,))
    input_action = Input(shape=(self.action_size,))
    input_layer = Concatenate()([input_states, input_action])

    lay1 = Dense(self.observation_size)(input_layer)
    lay1 = Activation('relu')(lay1)
    lay2 = Dense(self.observation_size)(lay1)
    lay2 = Activation('relu')(lay2)
    lay3 = Dense(2 * self.action_size)(lay2)
    lay3 = Activation('relu')(lay3)

    advantage = Dense(1, activation='linear')(lay3)

    model = Model(inputs=[input_states, input_action], outputs=[advantage])
    model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
    return model
Example 9: create_model
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights="imagenet")

    for layer in model.layers:
        layer.trainable = trainable

    block = model.get_layer("block_16_project_BN").output

    x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block)
    x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(5, padding="same", kernel_size=1, activation="sigmoid")(x)

    model = Model(inputs=model.input, outputs=x)

    # divide by 2 since d/dweight learning_rate * weight^2 = 2 * learning_rate * weight
    # see https://arxiv.org/pdf/1711.05101.pdf
    regularizer = l2(WEIGHT_DECAY / 2)
    for weight in model.trainable_weights:
        with tf.keras.backend.name_scope("weight_regularizer"):
            model.add_loss(regularizer(weight))  # in tf2.0: lambda: regularizer(weight)

    return model
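A usage sketch for Example 9, assuming it runs in the same module as create_model. IMAGE_SIZE, ALPHA and WEIGHT_DECAY are module-level constants in the original project; the values below are placeholders chosen only so the call runs, not the project's actual settings, and the loss is likewise illustrative.

# Placeholder values for the project's module-level constants (assumed, not original)
IMAGE_SIZE = 224
ALPHA = 1.0
WEIGHT_DECAY = 5e-4

model = create_model(trainable=False)
model.compile(optimizer="adam", loss="mean_squared_error")
model.summary()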
Example 10: conv_layer
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def conv_layer(inputs,
               filters=32,
               kernel_size=3,
               strides=1,
               use_maxpool=True,
               postfix=None,
               activation=None):
    """Helper function to build Conv2D-BN-ReLU layer
    with optional MaxPooling2D.
    """
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               kernel_initializer='he_normal',
               name="conv_" + postfix,
               padding='same')(inputs)
    x = BatchNormalization(name="bn_" + postfix)(x)
    x = Activation('relu', name='relu_' + postfix)(x)
    if use_maxpool:
        x = MaxPooling2D(name='pool' + postfix)(x)
    return x
Example 11: tconv_layer
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def tconv_layer(inputs,
                filters=32,
                kernel_size=3,
                strides=2,
                postfix=None):
    """Helper function to build Conv2DTranspose-BN-ReLU
    layer
    """
    x = Conv2DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        strides=strides,
                        padding='same',
                        kernel_initializer='he_normal',
                        name='tconv_' + postfix)(inputs)
    x = BatchNormalization(name="bn_" + postfix)(x)
    x = Activation('relu', name='relu_' + postfix)(x)
    return x
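A sketch of how Examples 10 and 11 are typically chained into a small encoder-decoder path. It assumes the module-level imports both helpers rely on (Conv2D, Conv2DTranspose, BatchNormalization, Activation, MaxPooling2D); the input shape and filter counts are illustrative, not taken from the original project.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(64, 64, 1))
x = conv_layer(inputs, filters=32, postfix='1')    # Conv2D-BN-ReLU + MaxPooling2D, 64x64 -> 32x32
x = conv_layer(x, filters=64, postfix='2')         # 32x32 -> 16x16
x = tconv_layer(x, filters=32, postfix='3')        # Conv2DTranspose-BN-ReLU, 16x16 -> 32x32
x = tconv_layer(x, filters=16, postfix='4')        # 32x32 -> 64x64
model = Model(inputs, x)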
Example 12: build_model
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def build_model(self,
                input_dim,
                hidden_units,
                output_dim):
    """Build a simple MINE model

    Arguments:
        See class arguments.
    """
    inputs1 = Input(shape=(input_dim,), name="x")
    inputs2 = Input(shape=(input_dim,), name="y")
    x1 = Dense(hidden_units)(inputs1)
    x2 = Dense(hidden_units)(inputs2)
    x = Add()([x1, x2])
    x = Activation('relu', name="ReLU")(x)
    outputs = Dense(output_dim, name="MI")(x)
    inputs = [inputs1, inputs2]
    self._model = Model(inputs,
                        outputs,
                        name='MINE')
    self._model.summary()
Example 13: encoder_layer
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def encoder_layer(inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic encoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU
    """
    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    return x
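A usage sketch for encoder_layer. InstanceNormalization is not part of tensorflow.keras (the original project imports it from an add-on package), so this snippet passes instance_norm=False to stay self-contained; Conv2D, Activation and LeakyReLU are assumed imported at module level, and the shapes are illustrative.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(256, 256, 3))
x = encoder_layer(inputs, 32, activation='leaky_relu', instance_norm=False)   # LeakyReLU branch, 256 -> 128
x = encoder_layer(x, 64, instance_norm=False)                                 # default ReLU branch, 128 -> 64
encoder = Model(inputs, x)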
Example 14: attention_3d_block
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def attention_3d_block(hidden_states):
    """
    Many-to-one attention mechanism for Keras.
    @param hidden_states: 3D tensor with shape (batch_size, time_steps, input_dim).
    @return: 2D tensor with shape (batch_size, 128)
    @author: felixhao28.
    """
    hidden_size = int(hidden_states.shape[2])
    # Inside dense layer: hidden_states dot W => score_first_part
    # (batch_size, time_steps, hidden_size) dot (hidden_size, hidden_size) => (batch_size, time_steps, hidden_size)
    # W is the trainable weight matrix of attention (Luong's multiplicative style score)
    score_first_part = Dense(hidden_size, use_bias=False, name='attention_score_vec')(hidden_states)
    # score_first_part dot last_hidden_state => attention_weights
    # (batch_size, time_steps, hidden_size) dot (batch_size, hidden_size) => (batch_size, time_steps)
    h_t = Lambda(lambda x: x[:, -1, :], output_shape=(hidden_size,), name='last_hidden_state')(hidden_states)
    score = dot([score_first_part, h_t], [2, 1], name='attention_score')
    attention_weights = Activation('softmax', name='attention_weight')(score)
    # (batch_size, time_steps, hidden_size) dot (batch_size, time_steps) => (batch_size, hidden_size)
    context_vector = dot([hidden_states, attention_weights], [1, 1], name='context_vector')
    pre_activation = concatenate([context_vector, h_t], name='attention_output')
    attention_vector = Dense(128, use_bias=False, activation='tanh', name='attention_vector')(pre_activation)
    return attention_vector
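A sketch of how attention_3d_block is usually wired behind a recurrent layer: the LSTM must return the full sequence so the block receives a (batch_size, time_steps, hidden_size) tensor. The sequence length, feature count and final head below are illustrative, and the block's own imports (Dense, Lambda, dot, Activation, concatenate) are assumed to be in scope.

from tensorflow.keras.layers import Input, LSTM, Dense
from tensorflow.keras.models import Model

inputs = Input(shape=(20, 8))                          # (time_steps, input_dim)
hidden_states = LSTM(64, return_sequences=True)(inputs)
attention_vector = attention_3d_block(hidden_states)   # (batch_size, 128)
outputs = Dense(1, activation='sigmoid')(attention_vector)
model = Model(inputs, outputs)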
Example 15: load
# Required import: from tensorflow.keras import layers [as an alias]
# Or: from tensorflow.keras.layers import Activation [as an alias]
def load(input_shape, output_shape, cfg):
    nb_lstm_states = int(cfg['nb_lstm_states'])

    inputs = KL.Input(shape=input_shape)

    x = KL.CuDNNLSTM(units=nb_lstm_states, unit_forget_bias=True)(inputs)
    x = KL.Dense(512)(x)
    x = KL.Activation('relu')(x)
    x = KL.Dropout(0.2)(x)
    x = KL.Dense(256)(x)
    x = KL.Activation('relu')(x)
    x = KL.Dropout(0.3)(x)

    mu = KL.Dense(1)(x)
    std = KL.Dense(1)(x)

    activation_fn = get_activation_function_by_name(cfg['activation_function'])
    std = KL.Activation(activation_fn, name="exponential_activation")(std)

    output = KL.Concatenate(axis=-1)([std, mu])

    model = KM.Model(inputs=[inputs], outputs=[output])
    return model
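A sketch of the cfg dict Example 15 expects. nb_lstm_states and activation_function are the only keys the function reads; the 'exponential' value is an assumption about the project's get_activation_function_by_name helper (not shown here), and KL.CuDNNLSTM additionally requires a GPU-enabled TensorFlow build.

# Illustrative config; 'exponential' is an assumed value for the project's
# get_activation_function_by_name helper, not a documented one
cfg = {'nb_lstm_states': 128, 'activation_function': 'exponential'}

model = load(input_shape=(50, 6), output_shape=1, cfg=cfg)   # output_shape is not used inside load()
model.summary()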