

Python Model.summary Method Code Examples

This article collects typical usage examples of the Python method keras.engine.training.Model.summary. If you are looking for concrete examples of how Model.summary is called in practice, the selected code samples below may help. You can also explore further usage examples of the enclosing class, keras.engine.training.Model.


Three code examples of the Model.summary method are shown below, ordered by popularity.
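
Before the examples, here is a minimal, self-contained sketch of a typical Model.summary() call. The architecture (a 3-feature input, one hidden layer, one output) is an illustrative assumption and is not taken from any of the examples below:

# Minimal sketch (assumed architecture): build, compile, and summarize a small model.
from keras.engine.training import Model
from keras.layers import Input, Dense

inputs = Input(shape=(3,), name='input')             # 3-dimensional feature vector
hidden = Dense(8, activation='relu', name='hidden')(inputs)
outputs = Dense(1, name='output')(hidden)            # single regression output

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='mse')

# Prints each layer's name, output shape, and parameter count,
# plus totals for trainable and non-trainable parameters.
model.summary()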

Example 1: test_trainable_weights_count_consistency

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import summary [as alias]
# Also required for this example: import numpy as np; import pytest;
# from keras.layers import Input, Dense
def test_trainable_weights_count_consistency():
    """Tests the trainable weights consistency check of Model.

    This verifies that a warning is shown if model.trainable is modified
    and the model is summarized/run without a new call to .compile()

    Reproduce issue #8121
    """
    a = Input(shape=(3,), name='input_a')
    model1 = Model(inputs=a, outputs=Dense(1)(a))

    model1.trainable = False
    b = Input(shape=(3,), name='input_b')
    y = model1(b)
    model2 = Model(inputs=b, outputs=Dense(1)(y))

    model2.compile(optimizer='adam', loss='mse')

    model1.trainable = True

    # Should warn on .summary()
    with pytest.warns(UserWarning) as w:
        model2.summary()
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And on .fit()
    with pytest.warns(UserWarning) as w:
        model2.fit(x=np.zeros((5, 3)), y=np.zeros((5, 1)))
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And shouldn't warn if we recompile
    model2.compile(optimizer='adam', loss='mse')
    with pytest.warns(None) as w:
        model2.summary()
    assert len(w) == 0, "Warning raised even when .compile() is called after modifying .trainable"
Developer: pkainz | Project: keras | Lines of code: 39 | Source file: test_training.py
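
The assertions in Example 1 also suggest the fix: set .trainable before compiling, or call .compile() again after changing it, so the compiled weight lists stay consistent. The following is a minimal sketch of that pattern (same layer shapes as the test above, otherwise an assumption), not part of the original test file:

# Sketch: freeze the inner model *before* compiling the outer one,
# so .summary() and .fit() run without the discrepancy warning.
from keras.engine.training import Model
from keras.layers import Input, Dense

a = Input(shape=(3,), name='input_a')
base = Model(inputs=a, outputs=Dense(1)(a))
base.trainable = False                          # freeze first ...

b = Input(shape=(3,), name='input_b')
outer = Model(inputs=b, outputs=Dense(1)(base(b)))
outer.compile(optimizer='adam', loss='mse')     # ... then compile

outer.summary()  # no UserWarning: the compiled state matches .trainable

# If base.trainable is changed later, recompile outer before .summary() or .fit().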

Example 2: test_model_with_input_feed_tensor

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import summary [as alias]
# Also required for this example: import numpy as np;
# from keras.layers import Input, Dense, Dropout
def test_model_with_input_feed_tensor():
    """We test building a model with a TF variable as input.
    We should be able to call fit, evaluate, predict,
    by only passing them data for the placeholder inputs
    in the model.
    """
    import tensorflow as tf

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])
    model.summary()

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=['mean_squared_error'],
                  loss_weights=loss_weights,
                  sample_weight_mode=None)

    # test train_on_batch
    out = model.train_on_batch(input_b_np,
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.test_on_batch({'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.predict_on_batch({'input_b': input_b_np})

    # test fit
    out = model.fit({'input_b': input_b_np},
                    [output_a_np, output_b_np], epochs=1, batch_size=10)
    out = model.fit(input_b_np,
                    [output_a_np, output_b_np], epochs=1, batch_size=10)

    # test evaluate
    out = model.evaluate({'input_b': input_b_np},
                         [output_a_np, output_b_np], batch_size=10)
    out = model.evaluate(input_b_np,
                         [output_a_np, output_b_np], batch_size=10)

    # test predict
    out = model.predict({'input_b': input_b_np}, batch_size=10)
    out = model.predict(input_b_np, batch_size=10)
    assert len(out) == 2

    # Now test a model with a single input
    # i.e. we don't pass any data to fit the model.
    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    a_2 = Dense(4, name='dense_1')(a)
    a_2 = Dropout(0.5, name='dropout')(a_2)
    model = Model(a, a_2)
    model.summary()

    optimizer = 'rmsprop'
    loss = 'mse'
    model.compile(optimizer, loss, metrics=['mean_squared_error'])

    # test train_on_batch
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.test_on_batch(None,
                              output_a_np)
    out = model.predict_on_batch(None)
    out = model.train_on_batch([],
                               output_a_np)
    out = model.train_on_batch({},
                               output_a_np)

    # test fit
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)

    # test evaluate
    out = model.evaluate(None,
                         output_a_np, batch_size=10)
    out = model.evaluate(None,
                         output_a_np, batch_size=10)

    # test predict
    out = model.predict(None, steps=3)
    out = model.predict(None, steps=3)
    assert out.shape == (10 * 3, 4)

    # Same, without learning phase
#......... some code omitted here .........
Developer: pkainz | Project: keras | Lines of code: 103 | Source file: test_training.py

Example 3: build_CNN_model

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import summary [as alias]
# Also required (among others): from keras.layers import Dense, Dropout, Flatten;
# from keras.optimizers import Adam; from keras.metrics import binary_accuracy;
# from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

#......... some code omitted here .........
    #                       kernel_regularizer=conv_reg3,
    #                       dilation_rate=1,
    #                       name='ConvLayer3')(layer)
    #
    # layer = SpatialDropout1D(0.50)(layer)
    #
    # layer = MaxPooling1D(pool_size=pool_len3)(layer)



    # #layer = GlobalMaxPool1D()(layer)
    #
    # layer = Convolution1D(filters=num_filters4,
    #                       kernel_size=filter_length4,
    #                       padding=region,
    #                       activation=conv_activation4,
    #                       kernel_initializer=conv_init4,
    #                       kernel_regularizer=conv_reg4,
    #                       dilation_rate=1,
    #                       name='ConvLayer4')(layer)
    #
    # #layer = leaky_relu(layer)
    #
    # layer = SpatialDropout1D(0.50)(layer)
    #
    # layer = MaxPooling1D(pool_size=pool_len4)(layer)
    # #layer = GlobalMaxPool1D()(layer)
    #
    # # layer = BatchNormalization()(layer)

    layer = Flatten()(layer)

    layer = Dense(dense_dims0, activation=dense_activation0, kernel_regularizer=dense_reg0,
                  kernel_initializer='glorot_normal', bias_initializer='zeros',
                  name='dense0')(layer)

    layer = Dropout(0.50)(layer)

    layer = Dense(dense_dims1, activation=dense_activation1, kernel_regularizer=dense_reg1,
                  kernel_initializer='glorot_normal', bias_initializer='zeros',
                  name='dense1')(layer)

    layer = Dropout(0.50)(layer)

    # layer = Dense(dense_dims2, activation=dense_activation2, kernel_regularizer=dense_reg2,
    #               kernel_initializer=dense_init2,
    #               name='dense2')(layer)
    #
    #
    # layer = Dropout(0.50)(layer)
    #
    # layer = Dense(dense_dims3, activation=dense_activation3, kernel_regularizer=dense_reg3,
    #               kernel_initializer=dense_init3,
    #               name='dense3_outA')(layer)
    # #layer = leaky_relu(layer)
    #
    if is_IntermediateModel:
        return Model(inputs=[review_input], outputs=[layer], name="CNN_model")

    #
    # layer = Dropout(0.5)(layer)

    layer = Dense(dense_dims_final, activation=dense_activation_final, kernel_initializer=dense_init_final,
                  kernel_regularizer=dense_reg0,
                  name='output_Full')(layer)

    CNN_model = Model(inputs=[review_input], outputs=[layer], name="CNN_model")

    CNN_model.compile(optimizer=Adam(lr=0.001, decay=0.0), loss=loss_func, metrics=[binary_accuracy])

    if load_weight_path is not None:
        CNN_model.load_weights(load_weight_path)

    hist = ""
    if do_training:
        weightPath = os.path.join(modelParameters.WEIGHT_PATH, filename)
        configPath = os.path.join(modelParameters.WEIGHT_PATH, filename_config)

        with open(configPath + ".json", 'wb') as f:
            f.write(CNN_model.to_json())

        checkpoint = ModelCheckpoint(weightPath + '_W.{epoch:02d}-{val_loss:.4f}.hdf5',
                                     verbose=1, save_best_only=True, save_weights_only=False, monitor='val_loss')

        earlyStop = EarlyStopping(patience=3, verbose=1, monitor='val_loss')

        LRadjuster = ReduceLROnPlateau(monitor='val_loss', factor=0.30, patience=0, verbose=1, cooldown=1,
                                       min_lr=0.00001, epsilon=1e-2)

        call_backs = [checkpoint, earlyStop, LRadjuster]

        CNN_model.summary()

        hist = CNN_model.fit(*model_inputs['training'],
                             batch_size=batch_size,
                             epochs=nb_epoch, verbose=1,
                             validation_data=model_inputs['dev'],
                             callbacks=call_backs)

    return {"model": CNN_model, "hist": hist}
Developer: jcavalieri8619 | Project: siamese_sentiment | Lines of code: 104 | Source file: CNN_model.py


Note: The keras.engine.training.Model.summary examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.