

Python layers.Activation Method Code Examples

This article collects typical usage examples of the Python method tensorflow.keras.layers.Activation. If you are unsure what layers.Activation does, how to call it, or how to use it in practice, the curated examples below may help. You can also explore the other members of tensorflow.keras.layers for related usage.


The sections below present 15 code examples of layers.Activation, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
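Before diving into the project examples, here is a minimal, self-contained sketch written for this article (it is not taken from any of the projects below), showing the two common ways to apply an activation in tf.keras: as a standalone layers.Activation layer, and inline through the activation argument of another layer.

import tensorflow as tf
from tensorflow.keras import layers

inputs = tf.keras.Input(shape=(32,))
x = layers.Dense(64)(inputs)              # linear projection, no activation yet
x = layers.Activation('relu')(x)          # explicit, individually nameable layer
outputs = layers.Dense(10, activation='softmax')(x)  # inline string shorthand
model = tf.keras.Model(inputs, outputs)

The standalone form is handy when the activation needs its own layer name (for example, to retrieve its output later), or when a BatchNormalization layer must sit between a layer and its nonlinearity, a pattern that recurs in several examples below.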

Example 1: create_keras_multiclass_classifier

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def create_keras_multiclass_classifier(X, y):
    batch_size = 128
    epochs = 12
    num_classes = len(np.unique(y))
    model = _common_model_generator(X.shape[1], num_classes)
    model.add(Dense(units=num_classes, activation=Activation("softmax")))
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=["accuracy"],
    )
    y_train = keras.utils.to_categorical(y, num_classes)
    model.fit(
        X,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(X, y_train),
    )
    return model 
Developer: interpretml | Project: interpret-text | Lines: 23 | Source: common_utils.py
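Note that this helper passes the training set itself as validation_data, so the reported validation accuracy is optimistic; it is a test utility, not a template for real model evaluation.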

Example 2: conv2d_bn

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
    x = Conv2D(filters,
               kernel_size,
               strides=strides,
               padding=padding,
               use_bias=use_bias,
               name=name)(x)
    if not use_bias:
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
        bn_name = _generate_layer_name('BatchNorm', prefix=name)
        x = BatchNormalization(axis=bn_axis, momentum=0.995, epsilon=0.001,
                               scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = _generate_layer_name('Activation', prefix=name)
        x = Activation(activation, name=ac_name)(x)
    return x 
Developer: aangfanboy | Project: TripletLossFace | Lines: 25 | Source: inception_resnet_v1.py
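A hypothetical usage sketch of conv2d_bn (assuming the Conv2D, BatchNormalization, Activation, and Input imports from the same source file; the input size and layer names are illustrative): stacking calls to build a network stem.

inputs = Input(shape=(160, 160, 3))
x = conv2d_bn(inputs, 32, 3, strides=2, name='Conv2d_1a_3x3')   # Conv + BN + ReLU
x = conv2d_bn(x, 32, 3, name='Conv2d_2a_3x3')
x = conv2d_bn(x, 64, 3, name='Conv2d_2b_3x3')
x = conv2d_bn(x, 80, 1, activation=None, name='Conv2d_3b_1x1')  # Conv + BN only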

Example 3: test_dqn

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def test_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=False)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.) 
Developer: wau | Project: keras-rl2 | Lines: 26 | Source: test_discrete.py

Example 4: test_double_dqn

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def test_double_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=True)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.) 
Developer: wau | Project: keras-rl2 | Lines: 26 | Source: test_discrete.py

Example 5: test_cem

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def test_cem():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = EpisodeParameterMemory(limit=1000, window_length=1)
    dqn = CEMAgent(model=model, nb_actions=nb_actions, memory=memory)
    dqn.compile()

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=1)
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.) 
Developer: wau | Project: keras-rl2 | Lines: 23 | Source: test_discrete.py

Example 6: test_sarsa

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def test_sarsa():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions, activation='linear'))

    policy = EpsGreedyQPolicy(eps=.1)
    sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=50, policy=policy)
    sarsa.compile(Adam(lr=1e-3))

    sarsa.fit(env, nb_steps=20000, visualize=False, verbose=0)
    policy.eps = 0.
    h = sarsa.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.) 
Developer: wau | Project: keras-rl2 | Lines: 23 | Source: test_discrete.py

Example 7: construct_q_network

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def construct_q_network(self):
    # Convolution layers replaced by Dense layers; input and output sizes
    # adjusted accordingly. Loosely follows the DeepMind DQN architecture.
    input_layer = Input(shape=(self.observation_size * self.training_param.NUM_FRAMES,))
    layer1 = Dense(self.observation_size * self.training_param.NUM_FRAMES)(input_layer)
    layer1 = Activation('relu')(layer1)
    layer2 = Dense(self.observation_size)(layer1)
    layer2 = Activation('relu')(layer2)
    layer3 = Dense(self.observation_size)(layer2)
    layer3 = Activation('relu')(layer3)
    layer4 = Dense(2 * self.action_size)(layer3)
    layer4 = Activation('relu')(layer4)
    output = Dense(self.action_size)(layer4)

    self.model = Model(inputs=[input_layer], outputs=[output])
    self.model.compile(loss='mse', optimizer=Adam(lr=self.lr_))

    # Note: this second Model wraps the same layer instances, so model and
    # target_model share weights and the set_weights call below is a no-op.
    self.target_model = Model(inputs=[input_layer], outputs=[output])
    self.target_model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
    self.target_model.set_weights(self.model.get_weights())
Developer: rte-france | Project: Grid2Op | Lines: 24 | Source: ml_agent.py

Example 8: _build_q_NN

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def _build_q_NN(self):
    input_states = Input(shape=(self.observation_size,))
    input_action = Input(shape=(self.action_size,))
    input_layer = Concatenate()([input_states, input_action])

    lay1 = Dense(self.observation_size)(input_layer)
    lay1 = Activation('relu')(lay1)

    lay2 = Dense(self.observation_size)(lay1)
    lay2 = Activation('relu')(lay2)

    lay3 = Dense(2 * self.action_size)(lay2)
    lay3 = Activation('relu')(lay3)

    advantage = Dense(1, activation='linear')(lay3)

    model = Model(inputs=[input_states, input_action], outputs=[advantage])
    model.compile(loss='mse', optimizer=Adam(lr=self.lr_))

    return model
Developer: rte-france | Project: Grid2Op | Lines: 22 | Source: ml_agent.py

Example 9: create_model

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights="imagenet")

    for layer in model.layers:
        layer.trainable = trainable

    block = model.get_layer("block_16_project_BN").output

    x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block)
    x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(5, padding="same", kernel_size=1, activation="sigmoid")(x)

    model = Model(inputs=model.input, outputs=x)

    # Divide by 2 because d/dw (decay * w^2) = 2 * decay * w, so l2(WEIGHT_DECAY / 2)
    # yields an effective decay of WEIGHT_DECAY.
    # See https://arxiv.org/pdf/1711.05101.pdf (decoupled weight decay)
    regularizer = l2(WEIGHT_DECAY / 2)
    for weight in model.trainable_weights:
        with tf.keras.backend.name_scope("weight_regularizer"):
            model.add_loss(regularizer(weight)) # in tf2.0: lambda: regularizer(weight)

    return model 
Developer: lars76 | Project: object-localization | Lines: 27 | Source: train.py

Example 10: conv_layer

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def conv_layer(inputs,
               filters=32,
               kernel_size=3,
               strides=1,
               use_maxpool=True,
               postfix=None,
               activation=None):
    """Helper function to build Conv2D-BN-ReLU layer
        with optional MaxPooling2D.
    """

    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               kernel_initializer='he_normal',
               name="conv_"+postfix,
               padding='same')(inputs)
    x = BatchNormalization(name="bn_"+postfix)(x)
    x = Activation('relu', name='relu_'+postfix)(x)
    if use_maxpool:
        x = MaxPooling2D(name='pool'+postfix)(x)
    return x 
Developer: PacktPublishing | Project: Advanced-Deep-Learning-with-Keras | Lines: 24 | Source: model.py

Example 11: tconv_layer

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def tconv_layer(inputs,
                filters=32,
                kernel_size=3,
                strides=2,
                postfix=None):
    """Helper function to build Conv2DTranspose-BN-ReLU 
        layer
    """
    x = Conv2DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        strides=strides,
                        padding='same',
                        kernel_initializer='he_normal',
                        name='tconv_'+postfix)(inputs)
    x = BatchNormalization(name="bn_"+postfix)(x)
    x = Activation('relu', name='relu_'+postfix)(x)
    return x 
Developer: PacktPublishing | Project: Advanced-Deep-Learning-with-Keras | Lines: 19 | Source: model.py
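conv_layer and tconv_layer pair naturally into a small encoder-decoder. A hypothetical sketch (assuming the same imports as the source file; the shapes and filter counts are illustrative):

inputs = Input(shape=(64, 64, 3))
e1 = conv_layer(inputs, filters=32, postfix='1')   # 64x64 -> 32x32 via MaxPooling2D
e2 = conv_layer(e1, filters=64, postfix='2')       # 32x32 -> 16x16
d1 = tconv_layer(e2, filters=64, postfix='3')      # 16x16 -> 32x32 via stride 2
d2 = tconv_layer(d1, filters=32, postfix='4')      # 32x32 -> 64x64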

Example 12: build_model

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def build_model(self,
                input_dim,
                hidden_units,
                output_dim):
    """Build a simple MINE model

    Arguments:
        See class arguments.
    """
    inputs1 = Input(shape=(input_dim,), name="x")  # trailing comma makes shape a tuple
    inputs2 = Input(shape=(input_dim,), name="y")
    x1 = Dense(hidden_units)(inputs1)
    x2 = Dense(hidden_units)(inputs2)
    x = Add()([x1, x2])
    x = Activation('relu', name="ReLU")(x)
    outputs = Dense(output_dim, name="MI")(x)
    inputs = [inputs1, inputs2]
    self._model = Model(inputs,
                        outputs,
                        name='MINE')
    self._model.summary()
Developer: PacktPublishing | Project: Advanced-Deep-Learning-with-Keras | Lines: 23 | Source: mine-13.8.1.py

Example 13: encoder_layer

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def encoder_layer(inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic encoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU

    """

    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    return x 
Developer: PacktPublishing | Project: Advanced-Deep-Learning-with-Keras | Lines: 27 | Source: cyclegan-7.1.1.py

Example 14: attention_3d_block

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def attention_3d_block(hidden_states):
    """
    Many-to-one attention mechanism for Keras.
    @param hidden_states: 3D tensor with shape (batch_size, time_steps, input_dim).
    @return: 2D tensor with shape (batch_size, 128)
    @author: felixhao28.
    """
    hidden_size = int(hidden_states.shape[2])
    # Inside dense layer
    #              hidden_states            dot               W            =>           score_first_part
    # (batch_size, time_steps, hidden_size) dot (hidden_size, hidden_size) => (batch_size, time_steps, hidden_size)
    # W is the trainable weight matrix of attention Luong's multiplicative style score
    score_first_part = Dense(hidden_size, use_bias=False, name='attention_score_vec')(hidden_states)
    #            score_first_part           dot        last_hidden_state     => attention_weights
    # (batch_size, time_steps, hidden_size) dot   (batch_size, hidden_size)  => (batch_size, time_steps)
    h_t = Lambda(lambda x: x[:, -1, :], output_shape=(hidden_size,), name='last_hidden_state')(hidden_states)
    score = dot([score_first_part, h_t], [2, 1], name='attention_score')
    attention_weights = Activation('softmax', name='attention_weight')(score)
    # (batch_size, time_steps, hidden_size) dot (batch_size, time_steps) => (batch_size, hidden_size)
    context_vector = dot([hidden_states, attention_weights], [1, 1], name='context_vector')
    pre_activation = concatenate([context_vector, h_t], name='attention_output')
    attention_vector = Dense(128, use_bias=False, activation='tanh', name='attention_vector')(pre_activation)
    return attention_vector 
Developer: philipperemy | Project: keras-attention-mechanism | Lines: 25 | Source: attention.py
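A hypothetical end-to-end use of attention_3d_block on top of a recurrent encoder (the model, shapes, and task are illustrative, not taken from the source repository):

from tensorflow.keras.layers import Input, LSTM, Dense
from tensorflow.keras.models import Model

inputs = Input(shape=(20, 8))                       # (time_steps, input_dim)
rnn_out = LSTM(32, return_sequences=True)(inputs)   # keep the full state sequence
attention = attention_3d_block(rnn_out)             # -> (batch_size, 128)
outputs = Dense(1, activation='sigmoid')(attention)
model = Model(inputs, outputs)
model.compile(loss='binary_crossentropy', optimizer='adam')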

Example 15: load

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Activation [as alias]
def load(input_shape, output_shape, cfg):
    nb_lstm_states = int(cfg['nb_lstm_states'])

    inputs = KL.Input(shape=input_shape)
    x = KL.CuDNNLSTM(units=nb_lstm_states, unit_forget_bias=True)(inputs)

    x = KL.Dense(512)(x)
    x = KL.Activation('relu')(x)
    x = KL.Dropout(0.2)(x)

    x = KL.Dense(256)(x)
    x = KL.Activation('relu')(x)
    x = KL.Dropout(0.3)(x)

    mu = KL.Dense(1)(x)
    std = KL.Dense(1)(x)
    activation_fn = get_activation_function_by_name(cfg['activation_function'])
    std = KL.Activation(activation_fn, name="exponential_activation")(std)

    output = KL.Concatenate(axis=-1)([std, mu])
    model = KM.Model(inputs=[inputs], outputs=[output])

    return model 
Developer: johnmartinsson | Project: blood-glucose-prediction | Lines: 26 | Source: lstm_experiment_keras.py


Note: The tensorflow.keras.layers.Activation examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright in the source code remains with the original authors. Please consult each project's license before using or redistributing the code, and do not republish without permission.