

Python engine.Model Method Code Examples

This article collects typical code examples of the keras.engine.Model method in Python. If you are wondering how exactly engine.Model is used, or are looking for concrete engine.Model examples, the hand-picked snippets below may help. You can also explore further usage examples from the keras.engine module to which this method belongs.


A total of 15 code examples of the engine.Model method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
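
Before the examples, here is a minimal sketch of the basic keras.engine.Model construction pattern itself; the layer stack and shapes below are illustrative assumptions rather than code taken from any of the projects listed:

# Minimal sketch: build a Model from an input tensor and an output tensor.
from keras.engine import Model
from keras.layers import Input, Dense

inputs = Input(shape=(32,))                       # symbolic input tensor
hidden = Dense(64, activation='relu')(inputs)     # any stack of layers
outputs = Dense(1, activation='sigmoid')(hidden)  # output tensor
model = Model(inputs=inputs, outputs=outputs)     # functional-API model
model.compile(optimizer='adam', loss='binary_crossentropy')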

Example 1: build_discriminator

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def build_discriminator(config: BEGANConfig, autoencoder: Container):
    """
    
    The Keras Model class can have several inputs/outputs, but a separate loss
    function must be defined for each output, and a loss function cannot reference
    the other inputs/outputs. To compute a single loss over both streams, the real
    and generated inputs/outputs are concatenated into one tensor.
    """
    # IN Shape: [ImageHeight, ImageWidth, (real data(3 channels) + generated data(3 channels))]
    in_out_shape = (config.image_height, config.image_width, 3 * 2)
    all_input = Input(in_out_shape)

    # Split Input Data
    data_input = Lambda(lambda x: x[:, :, :, 0:3], output_shape=(config.image_height, config.image_width, 3))(all_input)
    generator_input = Lambda(lambda x: x[:, :, :, 3:6], output_shape=(config.image_height, config.image_width, 3))(all_input)

    # use same autoencoder(weights are shared)
    data_output = autoencoder(data_input)  # (bs, row, col, ch)
    generator_output = autoencoder(generator_input)

    # concatenate output to be same shape of input
    all_output = Concatenate(axis=-1)([data_output, generator_output])

    discriminator = DiscriminatorModel(all_input, all_output, name="discriminator")
    return discriminator 
Developer: mokemokechicken | Project: keras_BEGAN | Lines: 26 | Source: models.py
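
Because the discriminator exposes a single concatenated output, the matching loss function has to split that tensor again before comparing reconstructions. The sketch below shows one plausible BEGAN-style loss for this layout; it is an illustration only, with the balance term k_t and the function names assumed rather than taken from the keras_BEGAN project:

# Hypothetical loss over the concatenated (real | generated) tensors; sketch only.
from keras import backend as K

def make_began_d_loss(k_t):
    """k_t: externally updated balance factor (a float or K.variable)."""
    def d_loss(y_true, y_pred):
        real_in,  gen_in  = y_true[..., 0:3], y_true[..., 3:6]   # original images
        real_out, gen_out = y_pred[..., 0:3], y_pred[..., 3:6]   # autoencoder outputs
        loss_real = K.mean(K.abs(real_in - real_out))
        loss_gen = K.mean(K.abs(gen_in - gen_out))
        return loss_real - k_t * loss_gen
    return d_loss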

Example 2: __init__

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def __init__(self, inputs, outputs, greedy=True, beam_width=100, top_paths=1, charset=None):
        """
        Initialization of a CTC Model.
        :param inputs: input layer of the neural network
        :param outputs: last layer of the neural network before CTC (e.g. a TimeDistributed Dense)
        :param greedy, beam_width, top_paths: parameters of the CTC decoding (see TensorFlow's ctc_decode for details)
        :param charset: labels related to the input of the CTC approach
        """
        self.model_train = None
        self.model_pred = None
        self.model_eval = None
        if not isinstance(inputs, list):
            self.inputs = [inputs]
        else:
            self.inputs = inputs
        if not isinstance(outputs, list):
            self.outputs = [outputs]
        else:
            self.outputs = outputs

        self.greedy = greedy
        self.beam_width = beam_width
        self.top_paths = top_paths
        self.charset = charset 
Developer: ysoullard | Project: CTCModel | Lines: 26 | Source: CTCModel.py
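
A hedged sketch of how this constructor might be wired up; the recurrent stack, feature/label counts and hyper-parameters below are illustrative assumptions rather than code from the CTCModel project, and the subsequent training setup follows the project's own API (not shown):

# Sketch: feed a small recurrent network into CTCModel (illustrative values only).
from keras.layers import Input, LSTM, TimeDistributed, Dense

nb_features, nb_labels = 26, 29                 # assumed feature / label counts
inp = Input(shape=(None, nb_features))          # variable-length input sequences
rnn = LSTM(128, return_sequences=True)(inp)
out = TimeDistributed(Dense(nb_labels + 1, activation='softmax'))(rnn)  # +1 for the blank
ctc = CTCModel(inputs=[inp], outputs=[out], greedy=False, beam_width=50)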

Example 3: get_probas_on_batch

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def get_probas_on_batch(self, inputs, verbose=False):
        """
        Get the probabilities of each label at each time step of an observation sequence (a T x D matrix).
        This is the output of the softmax function after the recurrent layers (the input of the CTC computations)

        Computation is done for a batch. This function does not exist in a Keras Model.

        :return: A set of probabilities for each sequence and each time frame, one probability per label + the blank
            (this is the output of the TimeDistributed Dense layer, the blank label is the last probability)
        """

        x = inputs[0]
        x_len = inputs[2]
        batch_size = x.shape[0]

        #  Find the output of the softmax function
        probs = self.model_init.predict(x, batch_size=batch_size, verbose=verbose)

        # Select the outputs that do not refer to padding
        probs_epoch = [np.asarray(probs[data_idx, :x_len[data_idx][0], :]) for data_idx in range(batch_size)]

        return probs_epoch 
Developer: ysoullard | Project: CTCModel | Lines: 24 | Source: CTCModel.py

Example 4: get_probas

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def get_probas(self, inputs, batch_size, verbose=False):
        """
        Get the probabilities of each label at each time step of an observation sequence (a T x D matrix).
        This is the output of the softmax function after the recurrent layers (the input of the CTC computations)

        Computation is done for a batch. This function does not exist in a Keras Model.

        :return: A set of probabilities for each sequence and each time frame, one probability per label + the blank
            (this is the output of the TimeDistributed Dense layer, the blank label is the last probability)
        """

        x = inputs[0]
        x_len = inputs[2]

        #  Find the output of the softmax function
        probs = self.model_init.predict(x, batch_size=batch_size, verbose=verbose)

        # Select the outputs that do not refer to padding
        probs_epoch = [np.asarray(probs[data_idx, :x_len[data_idx][0], :]) for data_idx in range(batch_size)]

        return probs_epoch 
Developer: ysoullard | Project: CTCModel | Lines: 23 | Source: CTCModel.py
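
Both get_probas variants read the padded observations from inputs[0] and their true lengths from inputs[2]. A hedged call sketch follows, assuming ctc is a fully built CTCModel instance (its internal model_init already set up); the ordering of the label entries (y, y_len) in the list and the shape of x_len are assumptions:

# Sketch: x has shape (N, T_max, D); x_len has shape (N, 1) holding true lengths.
# y and y_len stand for the label part of the inputs list (ordering assumed).
probs_per_sequence = ctc.get_probas([x, y, x_len, y_len], batch_size=32)
first_seq = probs_per_sequence[0]   # shape (x_len[0][0], nb_labels + 1), blank last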

Example 5: segmentor

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def segmentor(self, start_filters=64, filter_inc_rate=2, out_ch=1, depth=2):
        """
        Recursively creates a segmentor model, a.k.a. the generator in GAN literature
        """
        inp = Input(shape=self.shape)
        first_block = convl1_lrelu(inp, start_filters, 4, 2)
        middle_blocks = level_block(first_block, int(start_filters * 2), depth=depth,
                                    filter_inc_rate=filter_inc_rate, p=0.1)
        if self.softmax:
            last_block = upsampl_softmax(middle_blocks, out_ch+1, 3, 1, 2, self.max_project)  # out_ch+1: the softmax/crossentropy output needs an extra (background) class
        else:
            last_block = upsampl_conv(middle_blocks, out_ch, 3, 1, 2)
        if self.crop:
            out = multiply([inp, last_block])  # crop input with predicted mask
            return Model([inp], [out], name='segmentor_net')
        return Model([inp], [last_block], name='segmentor_net')
        #return Model([inp], [last_block], name='segmentor_net') 
Developer: iNLyze | Project: DeepLearning-SeGAN-Segmentation | Lines: 19 | Source: SeGAN.py

Example 6: critic

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def critic(self):
        """
        Creates a critic a.k.a. discriminator model
        """
        # Note: Future improvement is to provide definable depth of critic
        inp_cropped = Input(self.shape, name='inp_cropped_image')  # Data cropped with generated OR g.t. mask


        shared_1 = shared_convl1_lrelu(self.shape, 64, 4, 2, name='shared_1_conv_lrelu')
        shared_2 = shared_convl1_bn_lrelu((16, 16, 64), 128, 4, 2, name='shared_2_conv_bn_lrelu')
        shared_3 = shared_convl1_bn_lrelu((8, 8, 128), 256, 4, 2, name='shared_3_conv_bn_lrelu')
        shared_4 = shared_convl1_bn_lrelu((4, 4, 256), 512, 4, 2, name='shared_4_conv_bn_lrelu')

        x1_S = shared_1(inp_cropped)
        #x1_S = shared_1(multiply([inp, mask]))
        x2_S = shared_2(x1_S)
        x3_S = shared_3(x2_S)
        x4_S = shared_4(x3_S)
        features = Concatenate(name='features_S')(
            [Flatten()(inp_cropped), Flatten()(x1_S), Flatten()(x2_S), Flatten()(x3_S), Flatten()(x4_S)]
            #[Flatten()(inp), Flatten()(x1_S), Flatten()(x2_S), Flatten()(x3_S), Flatten()(x4_S)]
        )
        return Model(inp_cropped, features, name='critic_net')
        #return Model([inp, mask], features, name='critic_net') 
Developer: iNLyze | Project: DeepLearning-SeGAN-Segmentation | Lines: 26 | Source: SeGAN.py
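
For context, here is a hedged sketch of how the segmentor and critic from the two examples above are typically chained in SeGAN-style training. The instance name segan, the 32x32 single-channel shape (chosen to match the hard-coded (16, 16, 64) shared block) and the ground-truth mask input are assumptions, not code from the project:

# Sketch: chain segmentor and critic (illustrative shapes and names).
from keras.layers import Input, multiply

image = Input(shape=(32, 32, 1))
gt_mask = Input(shape=(32, 32, 1))

seg_net = segan.segmentor()                 # with crop=True the output is already masked
critic_net = segan.critic()

pred_crop = seg_net(image)                  # image cropped by the predicted mask
gt_crop = multiply([image, gt_mask])        # image cropped by the ground-truth mask
feat_pred = critic_net(pred_crop)
feat_gt = critic_net(gt_crop)               # feature differences feed the adversarial loss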

Example 7: define

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def define(self, n_output: int=2, dropout: float=1., base_model=None):
        """
        Define model architecture for eyes_fcscratch
        :param n_output: number of network outputs
        :param dropout: dropout value
        :param base_model: Base model whose architecture and weights are used for convolutional blocks.
        """

        hidden_dim = 1536
        image_input = Input(shape=base_model.input_size[input_type.EYES], name='input')

        # Load base model without FC layers
        base = base_model.load_model(input_tensor=image_input, include_top=False)

        weight_init = glorot_uniform(seed=3)

        # Define architecture on top of base model
        last_layer = base.get_layer('pool5').output
        x = Flatten(name='flatten')(last_layer)
        x = Dense(hidden_dim, activation='relu', kernel_initializer=weight_init, name='fc6')(x)
        if dropout < 1.:
            x = Dropout(dropout, seed=0, name='dp6')(x)
        out = Dense(n_output, kernel_initializer=weight_init, name='fc8')(x)

        # The first four layers are not trained
        for layer in base.layers[:4]:
            layer.trainable = False

        self.model = Model([image_input], out)

        # Debug output: layer count and layer names
        print(len(self.model.layers))
        print([n.name for n in self.model.layers])

        # Print model summary
        self.model.summary() 
Developer: crisie | Project: RecurrentGaze | Lines: 37 | Source: eyes_fcscratch.py

Example 8: define

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def define(self, n_output: int=2, dropout: float=1., base_model=None):
        """
        Define model architecture for face_finetune
        :param n_output: number of network outputs
        :param dropout: dropout value
        :param base_model: Base model whose architecture and weights are used for all network except last FC layer.
        """

        image_input = Input(shape=base_model.input_size[input_type.FACE], name='input')
        weight_init = glorot_uniform(seed=3)

        # Load model with FC layers
        base = base_model.load_model(input_tensor=image_input, include_top=True)

        last_layer = base.get_layer('fc6/relu').output
        fc7 = base.get_layer('fc7')
        fc7r = base.get_layer('fc7/relu')
        x = last_layer

        if dropout < 1.:
            x = Dropout(dropout, seed=0, name='dp6')(x)
        x = fc7(x)
        x = fc7r(x)
        if dropout < 1.:
            x = Dropout(dropout, seed=1, name='dp7')(x)
        out = Dense(n_output, kernel_initializer=weight_init, name='fc8')(x)

        # Freeze first conv layers
        for layer in base.layers[:4]:
            layer.trainable = False

        self.model = Model(image_input, out)

        # Print model summary
        self.model.summary() 
Developer: crisie | Project: RecurrentGaze | Lines: 37 | Source: face_finetune.py
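
Both define() variants above follow the same partial fine-tuning pattern: load a pretrained base on a shared input tensor, add or reuse top layers, freeze the earliest layers, and wrap everything in a new Model. A minimal generic sketch of that pattern follows, using a stock keras.applications backbone purely for illustration rather than the project's VGGFace-style base model:

# Generic fine-tuning sketch (illustrative backbone; not the RecurrentGaze base model).
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Flatten, Dense, Dropout
from keras.engine import Model

image_input = Input(shape=(224, 224, 3), name='input')
base = VGG16(input_tensor=image_input, include_top=False, weights='imagenet')

x = Flatten()(base.output)
x = Dense(1536, activation='relu')(x)
x = Dropout(0.3)(x)
out = Dense(2)(x)                      # e.g. a 2-D regression head

for layer in base.layers[:4]:          # keep the earliest layers frozen
    layer.trainable = False

model = Model(image_input, out)
model.summary()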

Example 9: get_model

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained="imagenet"): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

Developer: i-pan | Project: kaggle-rsna18 | Lines: 39 | Source: TrainClassifierEnsemble.py
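
A hedged call sketch for this helper. Here custom_backbone stands in for one of the repository's gray-scale backbone factories (the channels="gray" keyword is project-specific, not a stock keras.applications argument), and all hyper-parameter values are illustrative:

# Sketch: build and partially freeze a binary classifier (illustrative values).
model = get_model(base_model=custom_backbone,   # hypothetical gray-scale backbone factory
                  layer=-6,                     # freeze everything before the last 6 layers
                  lr=1e-4,
                  input_shape=(224, 224, 1),
                  classes=2,
                  dropout=0.5,
                  pooling="avg",
                  weights=None)
model.summary()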

Example 10: get_model

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained=None): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

Developer: i-pan | Project: kaggle-rsna18 | Lines: 39 | Source: PredictOneClassifier.py

Example 11: build_generator

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def build_generator(config: BEGANConfig):
    decoder = build_decoder(config, name="generator_decoder")
    generator = Model(decoder.inputs, decoder.outputs, name="generator")
    return generator 
Developer: mokemokechicken | Project: keras_BEGAN | Lines: 6 | Source: models.py

Example 12: nn_architecture_seg_3d

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def nn_architecture_seg_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001,
                        depth=3, n_base_filters=16, metrics=dice_coefficient, batch_normalization=True):
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters * (2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters * (2**layer_depth) * 2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    for layer_depth in range(depth - 2, -1, -1):
        # Apply the upsampling to the current feature map before concatenating
        up_convolution = UpSampling3D(size=pool_size)(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation('sigmoid')(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model 
Developer: neuropoly | Project: spinalcordtoolbox | Lines: 38 | Source: cnn_models_3d.py
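
The concatenate(axis=1) calls and the _keras_shape[1] lookups imply channels-first data, so a channels-first patch shape is assumed in the hedged usage sketch below; the patch size and hyper-parameters are illustrative:

# Sketch: channels-first 3D patch segmentation model (illustrative shape).
model = nn_architecture_seg_3d(input_shape=(1, 48, 48, 48),
                               depth=3,
                               n_base_filters=16,
                               initial_learning_rate=1e-5)
model.summary()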

Example 13: ab_ag_seq_model

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def ab_ag_seq_model(max_ag_len, max_cdr_len):
    input_ag = Input(shape=(max_ag_len, NUM_FEATURES))
    ag_seq = Masking()(input_ag)

    enc_ag = Bidirectional(LSTM(128, dropout=0.1, recurrent_dropout=0.1),
                           merge_mode='concat')(ag_seq)

    input_ab = Input(shape=(max_cdr_len, NUM_FEATURES))
    label_mask = Input(shape=(max_cdr_len,))

    seq = Masking()(input_ab)

    loc_fts = MaskedConvolution1D(64, 5, padding='same', activation='elu')(seq)

    glb_fts = Bidirectional(LSTM(256, dropout=0.15, recurrent_dropout=0.2,
                                 return_sequences=True),
                            merge_mode='concat')(loc_fts)

    enc_ag_rep = RepeatVector(max_cdr_len)(enc_ag)
    ab_ag_repr = concatenate([glb_fts, enc_ag_rep])
    ab_ag_repr = MaskingByLambda(mask_by_input(label_mask))(ab_ag_repr)
    ab_ag_repr = Dropout(0.3)(ab_ag_repr)

    aa_probs = TimeDistributed(Dense(1, activation='sigmoid'))(ab_ag_repr)
    model = Model(inputs=[input_ag, input_ab, label_mask], outputs=aa_probs)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_accuracy', false_pos, false_neg],
                  sample_weight_mode="temporal")
    return model 
Developer: eliberis | Project: parapred | Lines: 32 | Source: model.py
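
A hedged usage sketch for this model; the sequence lengths, array shapes and training settings below are assumptions chosen to match the three inputs and the temporal sample weighting, not values from the parapred project:

# Sketch: ag_feats (N, 150, NUM_FEATURES), cdr_feats (N, 32, NUM_FEATURES),
# cdr_mask (N, 32), labels (N, 32, 1), residue_weights (N, 32).
model = ab_ag_seq_model(max_ag_len=150, max_cdr_len=32)
model.fit([ag_feats, cdr_feats, cdr_mask], labels,
          sample_weight=residue_weights, batch_size=32, epochs=16)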

Example 14: ab_seq_model

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def ab_seq_model(max_cdr_len):
    input_ab, label_mask, _, probs = base_ab_seq_model(max_cdr_len)
    model = Model(inputs=[input_ab, label_mask], outputs=probs)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_accuracy', false_pos, false_neg],
                  sample_weight_mode="temporal")
    return model 
Developer: eliberis | Project: parapred | Lines: 10 | Source: model.py

Example 15: conv_output_ab_seq_model

# Required module import: from keras import engine [as alias]
# Or: from keras.engine import Model [as alias]
def conv_output_ab_seq_model(max_cdr_len):
    input_ab, label_mask, loc_fts, probs = base_ab_seq_model(max_cdr_len)
    model = Model(inputs=[input_ab, label_mask], outputs=[probs, loc_fts])
    return model 
Developer: eliberis | Project: parapred | Lines: 6 | Source: model.py


Note: the keras.engine.Model method examples in this article were compiled by 純淨天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code. Do not reproduce without permission.