

Python keras.layers code examples

This article collects typical usage examples of the keras.layers module in Python. If you are wondering how keras.layers is used in practice, or are looking for concrete examples of code that builds models with keras.layers, the hand-picked snippets below may help. You can also explore further usage examples from the parent keras package.


The following presents 15 code examples that use keras.layers, sorted by popularity by default. You can upvote the examples you find helpful; your feedback helps the site surface better Python code examples.
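Before diving into the project-specific snippets below, here is a minimal, self-contained sketch of the keras.layers module in use (assuming Keras 2.x with a TensorFlow backend); the layer names, sizes, and random data are illustrative only:

import numpy as np
from keras import layers
from keras.models import Model

# Build a small fully connected model with the functional API
inputs = layers.Input(shape=(16,), name='input_layer')
hidden = layers.Dense(32, activation='relu', name='hidden_layer')(inputs)
outputs = layers.Dense(1, activation='sigmoid', name='output_layer')(hidden)
model = Model(inputs, outputs)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train for one epoch on random data just to show the call signatures
x = np.random.random((8, 16))
y = np.random.randint(0, 2, size=(8, 1))
model.fit(x, y, epochs=1, batch_size=4, verbose=0)
model.summary()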

Example 1: build_graph

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def build_graph(self):
        #keras.backend.clear_session() # clear session/graph    
        self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)

        self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
            lr=self.lr, decay=self.decay,
            num_input_features=self.num_input_features, num_output_features=self.num_output_features,
            num_decoder_features=self.num_decoder_features, layers=self.layers,
            loss=self.loss, regulariser=self.regulariser)

        def _mve_loss(y_true, y_pred):
            pred_u = crop(2,0,3)(y_pred)
            pred_sig = crop(2,3,6)(y_pred)
            print(pred_sig)
            #exp_sig = tf.exp(pred_sig) # avoid pred_sig is too small such as zero    
            #precision = 1./exp_sig
            precision = 1./pred_sig
            #log_loss= 0.5*tf.log(exp_sig)+0.5*precision*((pred_u-y_true)**2)
            log_loss= 0.5*tf.log(pred_sig)+0.5*precision*((pred_u-y_true)**2)            
          
            log_loss=tf.reduce_mean(log_loss)
            return log_loss

        print(self.model.summary())
        self.model.compile(optimizer = self.optimizer, loss=_mve_loss) 
Author: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 27, Source: competition_model_class.py
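The crop helper used in _mve_loss above is defined elsewhere in the project and is not shown in this snippet. As an assumption (not the author's exact code), such a helper is commonly implemented by wrapping tensor slicing in a keras.layers.Lambda layer:

from keras.layers import Lambda

def crop(dimension, start, end):
    # Return a Lambda layer that slices the input tensor from `start` to `end`
    # along the given dimension (0 = batch axis, 1, 2, ...).
    def slicer(x):
        if dimension == 0:
            return x[start:end]
        if dimension == 1:
            return x[:, start:end]
        if dimension == 2:
            return x[:, :, start:end]
        return x[:, :, :, start:end]
    return Lambda(slicer)

With a definition like this, crop(2, 0, 3)(y_pred) selects the first three channels of the last axis (the predicted means) and crop(2, 3, 6)(y_pred) selects the remaining three (the predicted variances).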

Example 2: weather_ae

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def weather_ae(layers, lr, decay, loss, 
               input_len, input_features):
    
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    
    for i, hidden_nums in enumerate(layers):
        if i==0:
            hn = Dense(hidden_nums, activation='relu')(inputs)
        else:
            hn = Dense(hidden_nums, activation='relu')(hn)

    outputs = Dense(3, activation='sigmoid', name='output_layer')(hn)

    weather_model = Model(inputs, outputs=[outputs])

    return weather_model 
Author: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 18, Source: weather_model.py
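A hypothetical call to weather_ae on random data; the shapes and hyperparameters are illustrative only, and Input, Dense, and Model are assumed to be imported from keras.layers and keras.models as the function requires (note that lr, decay, and loss are accepted but not used inside this particular function):

import numpy as np
from keras.layers import Input, Dense
from keras.models import Model

model = weather_ae(layers=[64, 32], lr=0.001, decay=0.0, loss='mse',
                   input_len=37, input_features=29)
model.compile(optimizer='adam', loss='mse')

x = np.random.random((4, 37, 29))  # (batch, input_len, input_features)
y = np.random.random((4, 37, 3))   # the final Dense layer yields 3 features per time step
model.fit(x, y, epochs=1, verbose=0)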

Example 3: test_ShapGradientExplainer

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def test_ShapGradientExplainer(self):

    #     model = VGG16(weights='imagenet', include_top=True)
    #     X, y = shap.datasets.imagenet50()
    #     to_explain = X[[39, 41]]
    #
    #     url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
    #     fname = shap.datasets.cache(url)
    #     with open(fname) as f:
    #         class_names = json.load(f)
    #
    #     def map2layer(x, layer):
    #         feed_dict = dict(zip([model.layers[0].input], [preprocess_input(x.copy())]))
    #         return K.get_session().run(model.layers[layer].input, feed_dict)
    #
    #     e = GradientExplainer((model.layers[7].input, model.layers[-1].output),
    #                           map2layer(preprocess_input(X.copy()), 7))
    #     shap_values, indexes = e.explain_instance(map2layer(to_explain, 7), ranked_outputs=2)
    #
          print("Skipped Shap GradientExplainer") 
Author: IBM, Project: AIX360, Lines: 22, Source: test_shap.py

Example 4: _build_model

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.

        :param max_history_len: The maximum number of historical
                                turns used to decide on next action
        """
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # Neural Net and training params
        batch_shape = (None, max_history_len, num_features)
        # Build Model
        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(input_dim=n_hidden, units=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Author: Rowl1ng, Project: rasa_wechat, Lines: 26, Source: keras_policy.py

Example 5: Highway

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def Highway(x, num_layers=1, activation='relu', name_prefix=''):
    '''
    Layer wrapper function for Highway network
    # Arguments:
        x: tensor, shape = (B, input_size)
    # Optional Arguments:
        num_layers: int, default is 1, the number of Highway network layers
        activation: keras activation, default is 'relu'
        name_prefix: str, default is '', layer name prefix
    # Returns:
        out: tensor, shape = (B, input_size)
    '''
    input_size = K.int_shape(x)[1]
    for i in range(num_layers):
        gate_ratio_name = '{}Highway/Gate_ratio_{}'.format(name_prefix, i)
        fc_name = '{}Highway/FC_{}'.format(name_prefix, i)
        gate_name = '{}Highway/Gate_{}'.format(name_prefix, i)

        gate_ratio = Dense(input_size, activation='sigmoid', name=gate_ratio_name)(x)
        fc = Dense(input_size, activation=activation, name=fc_name)(x)
        x = Lambda(lambda args: args[0] * args[2] + args[1] * (1 - args[2]), name=gate_name)([fc, x, gate_ratio])
    return x 
Author: tyo-yo, Project: SeqGAN, Lines: 24, Source: models.py
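A hypothetical usage sketch of the Highway wrapper above, assuming K is the Keras backend and Input, Dense, Lambda, and Model are in scope as the function expects; the input size of 128 is illustrative:

from keras import backend as K
from keras.layers import Input, Dense, Lambda
from keras.models import Model

inputs = Input(shape=(128,))  # shape (B, input_size)
h = Highway(inputs, num_layers=2, activation='relu', name_prefix='demo_')
outputs = Dense(1, activation='sigmoid')(h)
model = Model(inputs, outputs)
model.summary()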

Example 6: parse_args

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def parse_args():
    parser = argparse.ArgumentParser(description="Run MLP.")
    parser.add_argument('--path', nargs='?', default='Data/',
                        help='Input data path.')
    parser.add_argument('--dataset', nargs='?', default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Number of epochs.')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='Batch size.')
    parser.add_argument('--layers', nargs='?', default='[64,32,16,8]',
                        help="Size of each layer. Note that the first layer is the concatenation of user and item embeddings. So layers[0]/2 is the embedding size.")
    parser.add_argument('--reg_layers', nargs='?', default='[0,0,0,0]',
                        help="Regularization for each layer")
    parser.add_argument('--num_neg', type=int, default=4,
                        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate.')
    parser.add_argument('--learner', nargs='?', default='adam',
                        help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out', type=int, default=1,
                        help='Whether to save the trained model.')
    return parser.parse_args() 
Author: hexiangnan, Project: neural_collaborative_filtering, Lines: 27, Source: MLP.py
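The --layers and --reg_layers options arrive as strings such as '[64,32,16,8]'. Below is a sketch of turning them back into Python lists before building the model; ast.literal_eval is used here as a safe way to evaluate the literal, and the exact parsing in the original project may differ:

import ast

args = parse_args()
layers = ast.literal_eval(args.layers)          # e.g. [64, 32, 16, 8]
reg_layers = ast.literal_eval(args.reg_layers)  # e.g. [0, 0, 0, 0]
num_layer = len(layers)
embedding_size = layers[0] // 2  # first layer is the concatenated user/item embeddings
print(layers, reg_layers, embedding_size)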

Example 7: load_pretrain_model

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def load_pretrain_model(model, gmf_model, mlp_model, num_layers):
    # MF embeddings
    gmf_user_embeddings = gmf_model.get_layer('user_embedding').get_weights()
    gmf_item_embeddings = gmf_model.get_layer('item_embedding').get_weights()
    model.get_layer('mf_embedding_user').set_weights(gmf_user_embeddings)
    model.get_layer('mf_embedding_item').set_weights(gmf_item_embeddings)
    
    # MLP embeddings
    mlp_user_embeddings = mlp_model.get_layer('user_embedding').get_weights()
    mlp_item_embeddings = mlp_model.get_layer('item_embedding').get_weights()
    model.get_layer('mlp_embedding_user').set_weights(mlp_user_embeddings)
    model.get_layer('mlp_embedding_item').set_weights(mlp_item_embeddings)
    
    # MLP layers
    for i in xrange(1, num_layers):
        mlp_layer_weights = mlp_model.get_layer('layer%d' %i).get_weights()
        model.get_layer('layer%d' %i).set_weights(mlp_layer_weights)
        
    # Prediction weights
    gmf_prediction = gmf_model.get_layer('prediction').get_weights()
    mlp_prediction = mlp_model.get_layer('prediction').get_weights()
    new_weights = np.concatenate((gmf_prediction[0], mlp_prediction[0]), axis=0)
    new_b = gmf_prediction[1] + mlp_prediction[1]
    model.get_layer('prediction').set_weights([0.5*new_weights, 0.5*new_b])    
    return model 
Author: hexiangnan, Project: neural_collaborative_filtering, Lines: 27, Source: NeuMF.py

Example 8: get_shallow_convnet

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def get_shallow_convnet(window_size=4096, channels=1, output_size=84):
    model = keras.models.Sequential()
    model.add(keras.layers.Conv1D(
        64, 512, strides=16, input_shape=(window_size, channels),
        activation='relu',
        kernel_initializer='glorot_normal'))
    model.add(keras.layers.MaxPooling1D(pool_size=4, strides=2))

    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(2048, activation='relu',
                                 kernel_initializer='glorot_normal'))
    model.add(keras.layers.Dense(output_size, activation='sigmoid',
                                 bias_initializer=keras.initializers.Constant(value=-5)))
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model 
Author: ChihebTrabelsi, Project: deep_complex_networks, Lines: 19, Source: __init__.py
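A quick smoke test for get_shallow_convnet on synthetic data; the shapes follow the function's default arguments, and the random data is purely illustrative:

import numpy as np

model = get_shallow_convnet(window_size=4096, channels=1, output_size=84)

x = np.random.random((2, 4096, 1)).astype('float32')         # (batch, window_size, channels)
y = np.random.randint(0, 2, size=(2, 84)).astype('float32')  # multi-label targets
model.fit(x, y, epochs=1, batch_size=2, verbose=0)
print(model.predict(x).shape)  # (2, 84)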

Example 9: _get_softmax_name

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def _get_softmax_name(self):
        """
        Looks for the name of the softmax layer.
        :return: Softmax layer name
        """
        for i, layer in enumerate(self.model.layers):
            cfg = layer.get_config()
            if 'activation' in cfg and cfg['activation'] == 'softmax':
                return layer.name

        raise Exception("No softmax layers found") 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 13, Source: utils_keras.py

Example 10: get_layer_names

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def get_layer_names(self):
        """
        :return: Names of all the layers kept by Keras
        """
        layer_names = [x.name for x in self.model.layers]
        return layer_names 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 8, Source: utils_keras.py

Example 11: fprop

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def fprop(self, x):
        """
        Exposes all the layers of the model returned by get_layer_names.
        :param x: A symbolic representation of the network input
        :return: A dictionary mapping layer names to the symbolic
                 representation of their output.
        """
        from keras.models import Model as KerasModel

        if self.keras_model is None:
            # Get the input layer
            new_input = self.model.get_input_at(0)

            # Make a new model that returns each of the layers as output
            out_layers = [x_layer.output for x_layer in self.model.layers]
            self.keras_model = KerasModel(new_input, out_layers)

        # and get the outputs for that model on the input x
        outputs = self.keras_model(x)

        # Keras only returns a list for outputs of length >= 1, if the model
        # is only one layer, wrap a list
        if len(self.model.layers) == 1:
            outputs = [outputs]

        # compute the dict to return
        fprop_dict = dict(zip(self.get_layer_names(), outputs))

        return fprop_dict 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 31, Source: utils_keras.py

Example 12: weather_conv1D

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def weather_conv1D(layers, lr, decay, loss, 
               input_len, input_features, 
               strides_len, kernel_size):
    
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    for i, hidden_nums in enumerate(layers):
        if i==0:
            #inputs = BatchNormalization(name='BN_input')(inputs)
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len, 
                        data_format='channels_last', 
                        padding='same', activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        elif i<len(layers)-1:
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len,
                        data_format='channels_last', 
                        padding='same',activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn) 
            hn = Activation('relu')(hn)
        else:
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len,
                        data_format='channels_last', 
                        padding='same',activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn) 

    outputs = Dense(80, activation='relu', name='dense_layer')(hn)
    outputs = Dense(3, activation='tanh', name='output_layer')(outputs)

    weather_model = Model(inputs, outputs=[outputs])

    return weather_model 
Author: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 33, Source: weather_model.py

Example 13: weather_fnn

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def weather_fnn(layers, lr,
            decay, loss, seq_len, 
            input_features, output_features):
    
    ori_inputs = Input(shape=(seq_len, input_features), name='input_layer')
    #print(seq_len*input_features)
    conv_ = Conv1D(11, kernel_size=13, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(ori_inputs)
    conv_ = BatchNormalization(name='BN_conv')(conv_)
    conv_ = Activation('relu')(conv_)
    conv_ = Conv1D(5, kernel_size=7, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(conv_)
    conv_ = BatchNormalization(name='BN_conv2')(conv_)
    conv_ = Activation('relu')(conv_)

    inputs = Reshape((-1,))(conv_)

    for i, hidden_nums in enumerate(layers):
        if i==0:
            hn = Dense(hidden_nums, activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        else:
            hn = Dense(hidden_nums, activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
            #hn = Dropout(0.1)(hn)
    #print(seq_len, output_features)
    #print(hn)
    outputs = Dense(seq_len*output_features, activation='sigmoid', name='output_layer')(hn) # 37*3
    outputs = Reshape((seq_len, output_features))(outputs)

    weather_fnn = Model(ori_inputs, outputs=[outputs])

    return weather_fnn 
Author: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 39, Source: weather_model.py

Example 14: build_graph

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def build_graph(self):
        #keras.backend.clear_session() # clear session/graph    
        self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)

        self.model = Seq2Seq_MVE(id_embd=self.id_embd, time_embd=self.time_embd,
            lr=self.lr, decay=self.decay, 
            num_input_features=self.num_input_features, num_output_features=self.num_output_features,
            num_decoder_features=self.num_decoder_features, layers=self.layers,
            loss=self.loss, regulariser=self.regulariser, dropout_rate = self.dropout_rate)

        def loss_fn(y_true, y_pred):
            pred_u = crop(2,0,3)(y_pred) # mean of Gaussian distribution
            pred_sig = crop(2,3,6)(y_pred) # variance of Gaussian distribution
            if self.loss == 'mve':
                precision = 1./pred_sig
                log_loss= 0.5*tf.log(pred_sig)+0.5*precision*((pred_u-y_true)**2)                                 
                log_loss=tf.reduce_mean(log_loss)
                return log_loss
            elif self.loss == 'mse':
                mse_loss = tf.reduce_mean((pred_u-y_true)**2)
                return mse_loss
            elif self.loss == 'mae':
                mae_loss = tf.reduce_mean(tf.abs(y_true-pred_u))
                return mae_loss
            else:
                sys.exit("Loss type wrong! It can only be mae, mse or mve")
                
        print(self.model.summary())
        self.model.compile(optimizer = self.optimizer, loss=loss_fn) 
Author: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 31, Source: seq2seq_class.py

Example 15: __init__

# Required module: import keras [as alias]
# Or: from keras import layers [as alias]
def __init__(self, regulariser,lr, decay, loss, 
        layers, batch_size, seq_len, input_features, output_features):

        self.regulariser=regulariser
        self.layers=layers
        self.lr=lr
        self.decay=decay
        self.loss=loss
        self.seq_len=seq_len
        self.input_features=input_features
        self.output_features = output_features 
Author: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 13, Source: competition_model_class.py


Note: The keras.layers examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.