

Python layers.ELU Code Examples

This article collects typical usage examples of the keras.layers.ELU layer class in Python. If you have been wondering what layers.ELU does, how to use it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples for the containing module, keras.layers.


The sections below present 14 code examples of layers.ELU, sorted by popularity by default.
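
Before diving into the examples, here is a minimal sketch of the common pattern: ELU is one of Keras's "advanced activation" layers, so it is added to a model as a standalone layer after a weight layer rather than passed as an activation= string. The layer sizes below are illustrative, not taken from any of the projects cited here.

from keras.models import Sequential
from keras.layers import Dense, ELU

# Minimal sketch: ELU as a standalone activation layer.
# alpha scales the negative-side saturation; 1.0 is the Keras default.
model = Sequential()
model.add(Dense(64, input_shape=(10,)))
model.add(ELU(alpha=1.0))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')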

Example 1: get_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def get_model(time_len=1):
  ch, row, col = 3, 160, 320  # camera format

  model = Sequential()
  model.add(Lambda(lambda x: x/127.5 - 1.,
            input_shape=(ch, row, col),
            output_shape=(ch, row, col)))
  model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"))
  model.add(ELU())
  model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
  model.add(ELU())
  model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
  model.add(Flatten())
  model.add(Dropout(.2))
  model.add(ELU())
  model.add(Dense(512))
  model.add(Dropout(.5))
  model.add(ELU())
  model.add(Dense(1))

  model.compile(optimizer="adam", loss="mse")

  return model 
Author: commaai, Project: research, Lines: 25, Source: train_steering_model.py
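
Note that this example uses the Keras 1 API (Convolution2D with subsample and border_mode). Under Keras 2 the convolution stack would map roughly to the sketch below; this is an assumed port, not code from the commaai project, and it keeps the channels-first input shape of the original.

from keras.models import Sequential
from keras.layers import Conv2D, ELU

# Keras 2: Conv2D(filters, kernel_size, strides=..., padding=...)
# replaces Convolution2D(filters, rows, cols, subsample=..., border_mode=...).
model = Sequential()
model.add(Conv2D(16, (8, 8), strides=(4, 4), padding="same",
                 input_shape=(3, 160, 320), data_format="channels_first"))
model.add(ELU())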

Example 2: generate_dense_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def generate_dense_model(input_shape, layers, nb_actions):
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    model.add(Dropout(0.1))  # drop out the input to make the model less sensitive to any one feature

    for layer in layers:
        model.add(Dense(layer))
        model.add(BatchNormalization())
        model.add(ELU(alpha=1.0))

    model.add(Dense(nb_actions))
    model.add(Activation('linear'))
    print(model.summary())

    return model 
Author: endgameinc, Project: gym-malware, Lines: 17, Source: train_agent_kerasrl.py
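
A hedged usage sketch for the helper above; the feature dimension, layer widths, and action count are hypothetical, not values from the gym-malware project.

# Hypothetical call: 128 input features, two hidden layers, 4 discrete actions.
model = generate_dense_model(input_shape=(128,), layers=[64, 32], nb_actions=4)
model.compile(optimizer='adam', loss='mse')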

Example 3: Encoder

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def Encoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _encoder(x):
            if bidirectional:
                branch_1 = GRU(int(hidden_size/2), activation='linear',
                               return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = GRU(int(hidden_size/2), activation='linear',
                               return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _encoder(x):
            if bidirectional:
                branch_1 = LSTM(int(hidden_size/2), activation='linear',
                                return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = LSTM(int(hidden_size/2), activation='linear',
                                return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _encoder 
Author: saurabhmathur96, Project: Neural-Chatbot, Lines: 36, Source: sequence_blocks.py
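
The Encoder factory returns a closure that is applied to a tensor in the functional API. A minimal usage sketch, with assumed sequence length, embedding size, and hidden size:

from keras.layers import Input
from keras.models import Model

# Hypothetical shapes: batches of length-20 sequences of 50-dim vectors.
inputs = Input(shape=(20, 50))
encode = Encoder(hidden_size=128, bidirectional=True, use_gru=True)
encoded = encode(inputs)  # (batch, 20, 128) since return_sequences=True
model = Model(inputs, encoded)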

Example 4: AttentionDecoder

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def AttentionDecoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                go_backwards=False), attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                go_backwards=True), attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                return activation(x)
            else:
                x = AttentionWrapper(GRU(hidden_size, activation='linear',
                                         return_sequences=return_sequences), attention, single_attention_param=True)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                 go_backwards=False), attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                 go_backwards=True), attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = AttentionWrapper(LSTM(hidden_size, activation='linear', return_sequences=return_sequences),
                                     attention, single_attention_param=True)(x)
                x = activation(x)
                return x

    return _decoder 
Author: saurabhmathur96, Project: Neural-Chatbot, Lines: 36, Source: sequence_blocks.py

Example 5: Decoder

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def Decoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x):
            if bidirectional:
                x = Bidirectional(
                    GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences))(x)
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x):
            if bidirectional:
                x = Bidirectional(
                    LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences))(x)
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _decoder 
Author: saurabhmathur96, Project: Neural-Chatbot, Lines: 30, Source: sequence_blocks.py

Example 6: __init__

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def __init__(self):
        super(ELUNet, self).__init__()
        self.elu = nn.ELU() 
Author: gzuidhof, Project: nn-transfer, Lines: 5, Source: test_layers.py

Example 7: test_elu

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def test_elu(self):
        keras_model = Sequential()
        keras_model.add(ELU(input_shape=(3, 32, 32), name='elu'))
        keras_model.compile(loss=keras.losses.categorical_crossentropy,
                            optimizer=keras.optimizers.SGD())

        pytorch_model = ELUNet()

        self.transfer(keras_model, pytorch_model)
        self.assertEqualPrediction(keras_model, pytorch_model, self.test_data)

    # Tests activation function with learned parameters 
Author: gzuidhof, Project: nn-transfer, Lines: 14, Source: test_layers.py

Example 8: convresblock

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def convresblock(x, nfeats=8, ksize=3, nskipped=2, elu=True):
    """The proposed residual block from [4].

    Running with elu=True uses an ELU nonlinearity; running with elu=False
    uses BatchNorm + ReLU. ELUs are fast because they avoid the overhead of
    BatchNorm, but they may overfit because they lack the stochastic element
    of BatchNorm's batch-formation process, which acts as a good regularizer.

    # Arguments
        x: 4D tensor, the tensor to feed through the block
        nfeats: Integer, number of feature maps for conv layers.
        ksize: Integer, width and height of conv kernels in first convolution.
        nskipped: Integer, number of conv layers for the residual function.
        elu: Boolean, whether to use ELU or BN+ReLU.

    # Input shape
        4D tensor with shape:
        `(batch, channels, rows, cols)`

    # Output shape
        4D tensor with shape:
        `(batch, filters, rows, cols)`
    """
    y0 = Conv2D(nfeats, ksize, padding='same')(x)
    y = y0
    for i in range(nskipped):
        if elu:
            y = ELU()(y)
        else:
            y = BatchNormalization(axis=1)(y)
            y = Activation('relu')(y)
        y = Conv2D(nfeats, 1, padding='same')(y)
    return layers.add([y0, y]) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 36, Source: mnist_swwae.py
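
A usage sketch for the block, with a channels-first input as the docstring specifies; the spatial size is an assumption.

from keras.layers import Input
from keras.models import Model

# Hypothetical: 8 channels over a 28x28 grid (channels-first).
inp = Input(shape=(8, 28, 28))
out = convresblock(inp, nfeats=8, ksize=3, nskipped=2, elu=True)
model = Model(inp, out)  # output: (batch, 8, 28, 28)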

Example 9: test_elu

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def test_elu():
    for alpha in [0., .5, -1.]:
        layer_test(layers.ELU, kwargs={'alpha': alpha},
                   input_shape=(2, 3, 4)) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 6, Source: advanced_activations_test.py
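
For reference, ELU computes f(x) = x for x > 0 and f(x) = alpha * (exp(x) - 1) otherwise, so the alpha values in the test sweep the negative-side saturation. A quick NumPy sketch of the function itself:

import numpy as np

def elu(x, alpha=1.0):
    # Identity for positive inputs; scaled exponential for the rest.
    return np.where(x > 0, x, alpha * np.expm1(x))

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(elu(x, alpha=1.0))  # negatives saturate toward -1.0
print(elu(x, alpha=0.0))  # alpha=0 clips negatives to 0, like ReLU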

Example 10: create_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def create_model(input_shape, hidden_layers=[1024, 512, 256], input_dropout=0.1, hidden_dropout=0.5):
    '''Define a simple multilayer perceptron.

    Args:
        input_shape (tuple): input shape to the model; for this model, it should be (dim,)
        hidden_layers (list): number of hidden units in each hidden layer
        input_dropout (float): fraction of input features to drop out during training
        hidden_dropout (float): fraction of hidden units to drop out during training

    Returns:
        keras.models.Sequential : a model to train
    '''
    model = Sequential()

    # dropout the input to prevent overfitting to any one feature
    # (a similar concept to feature randomization in random forests,
    #  but with less severe feature sampling)
    model.add(Dropout(input_dropout, input_shape=input_shape))

    # set up hidden layers
    for n_hidden_units in hidden_layers:
        # the layer...activation will come later
        model.add(Dense(n_hidden_units))
        # dropout to prevent overfitting
        model.add(Dropout(hidden_dropout))
        # batch normalization helps training
        model.add(BatchNormalization())
        # ...the activation!
        model.add(ELU())

    # the output layer
    model.add(Dense(1, activation='sigmoid'))

    # we'll optimize with plain old sgd
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd', metrics=['accuracy'])

    return model 
Author: endgameinc, Project: youarespecial, Lines: 39, Source: simple_multilayer.py
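
A hedged usage sketch; the input dimension below is hypothetical, not a value from the youarespecial project.

# Hypothetical: 256 input features with the default hidden-layer sizes.
model = create_model(input_shape=(256,))
model.summary()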

Example 11: ResidualBlock1D_helper

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def ResidualBlock1D_helper(layers, kernel_size, filters, final_stride=1):
    def f(_input):
        basic = _input
        for ln in range(layers):
            # basic = BatchNormalization()(basic)  # disabled: triggers a known Keras bug with TimeDistributed: https://github.com/fchollet/keras/issues/5221
            basic = ELU()(basic)
            basic = Conv1D(filters, kernel_size, kernel_initializer='he_normal',
                           kernel_regularizer=l2(1.e-4), padding='same')(basic)

        # note that this strides without averaging
        return AveragePooling1D(pool_size=1, strides=final_stride)(Add()([_input, basic]))

    return f 
Author: endgameinc, Project: youarespecial, Lines: 15, Source: malwaresnet.py
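
Because AveragePooling1D(pool_size=1, strides=final_stride) subsamples without averaging, final_stride > 1 simply downsamples the summed output. A usage sketch with assumed shapes; note the residual add requires filters to match the input's channel count.

from keras.layers import Input
from keras.models import Model

# Hypothetical: length-100 sequences with 32 channels.
inp = Input(shape=(100, 32))
block = ResidualBlock1D_helper(layers=2, kernel_size=3, filters=32, final_stride=2)
out = block(inp)  # (batch, 50, 32) after the strided pooling
model = Model(inp, out)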

Example 12: build_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def build_model(output_size):
    channel_axis = 3
    freq_axis = 1
    padding = 37

    input_shape = (img_height, img_width, channels)
    print('Building model...')

    model = Sequential()
    #model.add(ZeroPadding2D(padding=(0, padding), data_format='channels_last', input_shape=input_shape))
    #model.add(BatchNormalization(axis=freq_axis, name='bn_0_freq'))

    #model.add(Conv2D(64, (3, 3), padding='same', name='conv1'))
    #model.add(BatchNormalization(axis=channel_axis, name='bn1'))
    #model.add(ELU())
    #model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1'))
    #model.add(Dropout(0.1, name='dropout1'))

    #model.add(Conv2D(128, (3, 3), padding='same', name='conv2'))
    #model.add(BatchNormalization(axis=channel_axis, name='bn2'))
    #model.add(ELU())
    #model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2'))
    #model.add(Dropout(0.1, name='dropout2'))

    #model.add(Conv2D(128, (3, 3), padding='same', name='conv3'))
    #model.add(BatchNormalization(axis=channel_axis, name='bn3'))
    #model.add(ELU())
    #model.add(MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3'))
    #model.add(Dropout(0.1, name='dropout3'))

    #model.add(Conv2D(128, (3, 3), padding='same', name='conv4'))
    #model.add(BatchNormalization(axis=channel_axis, name='bn4'))
    #model.add(ELU())
    #model.add(MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4'))
    #model.add(Dropout(0.1, name='dropout4'))

    #model.add(Reshape(target_shape=(15, 128)))

    #model.add(GRU(32, return_sequences=True, name='gru1'))
    #model.add(GRU(32, return_sequences=False, name='gru2'))

    #model.add(Dropout(0.3, name='dropout_final'))

    model.add(Reshape(target_shape=(img_height * img_width,), input_shape=input_shape))
    model.add(Dense(output_size, activation='softmax', name='output', input_shape=input_shape))

    return model 
Author: kristijanbartol, Project: Deep-Music-Tagger, Lines: 49, Source: train.py

Example 13: build_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def build_model(output_size):
    channel_axis = 3
    freq_axis = 1
    padding = 37

    input_shape = (img_height, img_width, channels)
    print('Building model...')

    model = Sequential()
    model.add(ZeroPadding2D(padding=(0, padding), data_format='channels_last', input_shape=input_shape))
    model.add(BatchNormalization(axis=freq_axis, name='bn_0_freq'))

    model.add(Conv2D(64, (3, 3), padding='same', name='conv1'))
    model.add(BatchNormalization(axis=channel_axis, name='bn1'))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1'))
    model.add(Dropout(0.1, name='dropout1'))

    model.add(Conv2D(128, (3, 3), padding='same', name='conv2'))
    model.add(BatchNormalization(axis=channel_axis, name='bn2'))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2'))
    model.add(Dropout(0.1, name='dropout2'))

    model.add(Conv2D(128, (3, 3), padding='same', name='conv3'))
    model.add(BatchNormalization(axis=channel_axis, name='bn3'))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3'))
    model.add(Dropout(0.1, name='dropout3'))

    model.add(Conv2D(128, (3, 3), padding='same', name='conv4'))
    model.add(BatchNormalization(axis=channel_axis, name='bn4'))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4'))
    model.add(Dropout(0.1, name='dropout4'))

    model.add(Reshape(target_shape=(15, 128)))

    model.add(GRU(32, return_sequences=True, name='gru1'))
    model.add(GRU(32, return_sequences=False, name='gru2'))

    model.add(Dropout(0.3, name='dropout_final'))

    model.add(Dense(output_size, activation='softmax', name='output'))

    return model 
Author: kristijanbartol, Project: Deep-Music-Tagger, Lines: 48, Source: train.py

Example 14: buildModel

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import ELU [as alias]
def buildModel(cameraFormat=(3, 480, 640)):
  """
  Build and return a CNN; details in the comments.
  The intent is a scaled down version of the model from "End to End Learning
  for Self-Driving Cars": https://arxiv.org/abs/1604.07316.

  Args:
    cameraFormat: (3-tuple) Ints to specify the input dimensions (color
        channels, rows, columns).
  Returns:
    A compiled Keras model.
  """
  print "Building model..."
  ch, row, col = cameraFormat

  model = Sequential()

  # Use a lambda layer to normalize the input data
  model.add(Lambda(
      lambda x: x/127.5 - 1.,
      input_shape=(ch, row, col),
      output_shape=(ch, row, col))
  )

  # Several convolutional layers, each followed by ELU activation
  # 8x8 convolution (kernel) with 4x4 stride over 16 output filters
  model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"))
  model.add(ELU())
  # 5x5 convolution (kernel) with 2x2 stride over 32 output filters
  model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
  model.add(ELU())
  # 5x5 convolution (kernel) with 2x2 stride over 64 output filters
  model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
  # Flatten the input to the next layer
  model.add(Flatten())
  # Apply dropout to reduce overfitting
  model.add(Dropout(.2))
  model.add(ELU())
  # Fully connected layer
  model.add(Dense(512))
  # More dropout
  model.add(Dropout(.5))
  model.add(ELU())
  # Fully connected layer with one output dimension (representing the speed).
  model.add(Dense(1))

  # Adam optimizer is a standard, efficient SGD optimization method
  # Loss function is mean squared error, standard for regression problems
  model.compile(optimizer="adam", loss="mse")

  return model 
Author: BoltzmannBrain, Project: self-driving, Lines: 53, Source: cnn_prediction.py


Note: The keras.layers.ELU examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the source code. Consult each project's license before distributing or reusing the code, and do not repost without permission.