

Python Model.compile Method Code Examples

This article collects typical usage examples of the Python method keras.engine.training.Model.compile. If you are looking for concrete examples of how to call Model.compile, the curated snippets below should help; they also illustrate the broader usage of the containing class, keras.engine.training.Model.


Fifteen code examples of the Model.compile method are shown below, sorted by popularity by default.
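Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the layer sizes, optimizer, losses, and loss weights are arbitrary illustrations) showing a typical Model.compile call on a small two-output functional model:

import numpy as np
from keras.layers import Input, Dense
from keras.engine.training import Model

# Build a tiny functional model with two outputs.
inp = Input(shape=(3,), name='input')
out_a = Dense(4, name='out_a')(inp)
out_b = Dense(2, name='out_b')(inp)
model = Model(inp, [out_a, out_b])

# compile() wires up the optimizer, one loss per output,
# optional per-output loss weights, and metrics.
model.compile(optimizer='rmsprop',
              loss={'out_a': 'mse', 'out_b': 'mae'},
              loss_weights=[1.0, 0.5],
              metrics=['mae'])

# Once compiled, the model can be trained and evaluated.
model.train_on_batch(np.random.random((8, 3)),
                     [np.random.random((8, 4)), np.random.random((8, 2))])

The fifteen examples that follow vary the same pattern: different optimizers, loss configurations, target_tensors, and training entry points.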

Example 1: test_warnings

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def test_warnings():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])

    with pytest.warns(Warning) as w:
        out = model.fit_generator(gen_data(4), steps_per_epoch=10, use_multiprocessing=True, workers=2)
    warning_raised = any(['Sequence' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when using generator with processes.'

    with pytest.warns(None) as w:
        out = model.fit_generator(RandomSequence(3), steps_per_epoch=4, use_multiprocessing=True, workers=2)
    assert all(['Sequence' not in str(w_.message) for w_ in w]), 'A warning was raised for Sequence.'
Developer: pkainz, Project: keras, Lines of code: 31, Source file: test_training.py

Example 2: test_model_multiple_calls

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def test_model_multiple_calls():
    x1 = Input(shape=(20,))

    y1 = sequential([
        Dense(10),
        Dense(1),
    ])(x1)
    m1 = Model(x1, y1)

    x2 = Input(shape=(25,))
    y2 = sequential([
        Dense(20),
        m1
    ])(x2)
    m2 = Model(x2, y2)
    m2.compile('adam', 'mse')

    x3 = Input(shape=(20,))
    y3 = sequential([
        Dense(25),
        m2
    ])(x3)
    m3 = Model(x3, y3)
    m3.compile('adam', 'mse')
    m3.train_on_batch(np.zeros((32, 20)), np.zeros((32, 1)))
Developer: berleon, Project: deepdecoder, Lines of code: 27, Source file: test_render_gan.py

Example 3: test_model_custom_target_tensors

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def test_model_custom_target_tensors():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    y = K.placeholder([10, 4], name='y')
    y1 = K.placeholder([10, 3], name='y1')
    y2 = K.placeholder([7, 5], name='y2')
    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    # test list of target tensors
    with pytest.raises(ValueError):
        model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                      sample_weight_mode=None, target_tensors=[y, y1, y2])
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None, target_tensors=[y, y1])
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               {y: np.random.random((10, 4)),
                                y1: np.random.random((10, 3))})
    # test dictionary of target_tensors
    with pytest.raises(ValueError):
        model.compile(optimizer, loss,
                      metrics=[],
                      loss_weights=loss_weights,
                      sample_weight_mode=None,
                      target_tensors={'does_not_exist': y2})
    # test dictionary of target_tensors
    model.compile(optimizer, loss,
                  metrics=[],
                  loss_weights=loss_weights,
                  sample_weight_mode=None,
                  target_tensors={'dense_1': y, 'dropout': y1})
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               {y: np.random.random((10, 4)),
                                y1: np.random.random((10, 3))})

    if K.backend() == 'tensorflow':
        import tensorflow as tf
        # test with custom TF placeholder as target
        pl_target_a = tf.placeholder('float32', shape=(None, 4))
        model.compile(optimizer='rmsprop', loss='mse',
                      target_tensors={'dense_1': pl_target_a})
        model.train_on_batch([input_a_np, input_b_np],
                             [output_a_np, output_b_np])
Developer: pkainz, Project: keras, Lines of code: 61, Source file: test_training.py

Example 4: test_sparse_input_validation_split

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def test_sparse_input_validation_split():
    test_input = sparse.random(6, 3, density=0.25).tocsr()
    in1 = Input(shape=(3,), sparse=True)
    out1 = Dense(4)(in1)
    test_output = np.random.random((6, 4))
    model = Model(in1, out1)
    model.compile('rmsprop', 'mse')
    model.fit(test_input, test_output, epochs=1, batch_size=2, validation_split=0.2)
Developer: pkainz, Project: keras, Lines of code: 10, Source file: test_training.py

Example 5: m

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def m():
    x = Input(shape=(input_size + output_size, nb_chars))
    m_realness = sequential([
        LSTM(14),
        Dense(1, activation='sigmoid'),
    ])(x)
    m = Model([x], [m_realness])
    m.compile(Adam(), 'mse')
    return m
Developer: berleon, Project: seqgan, Lines of code: 11, Source file: test_seqgan.py

Example 6: decoder_dummy

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def decoder_dummy(label_sizes, nb_filter=16, data_shape=(1, 64, 64), nb_bits=12,
                  optimizer='adam'):

    input = Input(shape=data_shape)
    x = input
    outputs, losses = decoder_end_block(x, label_sizes, nb_bits,
                                        activation=lambda: ELU())

    model = Model(input, list(outputs.values()))
    model.compile(optimizer, loss=list(losses.values()),
                  loss_weights={k: decoder_loss_weights(k) for k in losses.keys()})
    return model
Developer: berleon, Project: deepdecoder, Lines of code: 14, Source file: networks.py

Example 7: test_sparse_placeholder_fit

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def test_sparse_placeholder_fit():
    test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
    test_outputs = [sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]
    in1 = Input(shape=(3,))
    in2 = Input(shape=(3,), sparse=True)
    out1 = Dropout(0.5, name='dropout')(in1)
    out2 = Dense(4, name='dense_1')(in2)
    model = Model([in1, in2], [out1, out2])
    model.predict(test_inputs, batch_size=2)
    model.compile('rmsprop', 'mse')
    model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.5)
    model.evaluate(test_inputs, test_outputs, batch_size=2)
Developer: Dapid, Project: keras, Lines of code: 14, Source file: test_training.py

Example 8: decoder_baseline

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def decoder_baseline(label_sizes, nb_bits=12, data_shape=(1, 64, 64),
                     depth=1, nb_filter=16, optimizer='adam'):
    n = nb_filter
    input = Input(shape=data_shape)
    x = sequential([
        conv2d_block(n, depth=depth, pooling='max'),    # 32x32
        conv2d_block(2*n, depth=depth, pooling='max'),  # 16x16
        conv2d_block(4*n, depth=depth, pooling='max'),  # 8x8
        conv2d_block(8*n, depth=depth, pooling='max'),  # 4x4
    ])(input)
    outputs, losses = decoder_end_block(x, label_sizes, nb_bits,
                                        activation=lambda: ELU())
    model = Model(input, list(outputs.values()))
    model.compile(optimizer, loss=list(losses.values()),)
    return model
Developer: berleon, Project: deepdecoder, Lines of code: 17, Source file: networks.py

Example 9: test_render_gan_builder_generator_extended

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def test_render_gan_builder_generator_extended():
    labels_shape = (27,)
    z_dim_offset = 50
    builder = RenderGAN(lambda x: tag3d_network_dense(x, nb_units=4),
                        generator_units=4, discriminator_units=4,
                        z_dim_offset=z_dim_offset,
                        labels_shape=(27,))
    bs = 19
    z, z_offset, labels = data(builder, bs)
    real = np.zeros((bs,) + builder.data_shape)

    labels_input = Input(shape=labels_shape)
    z = Input(shape=(z_dim_offset,))
    fake = builder.generator_given_z_and_labels([z, labels_input])
    m = Model([z, labels_input], [fake])
    m.compile('adam', 'mse')
    m.train_on_batch([z_offset, labels], real)
Developer: berleon, Project: deepdecoder, Lines of code: 19, Source file: test_render_gan.py

Example 10: test_model_with_partial_loss

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def test_model_with_partial_loss():
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    a_3 = dp(a_2)
    model = Model(a, [a_2, a_3])

    optimizer = 'rmsprop'
    loss = {'dropout': 'mse'}
    model.compile(optimizer, loss, metrics=['mae'])

    input_a_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))

    # test train_on_batch
    out = model.train_on_batch(input_a_np, output_a_np)
    out = model.test_on_batch(input_a_np, output_a_np)
    # fit
    out = model.fit(input_a_np, [output_a_np])
    # evaluate
    out = model.evaluate(input_a_np, [output_a_np])

    # Same without dropout.
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    a_3 = Dense(4, name='dense_2')(a_2)
    model = Model(a, [a_2, a_3])

    optimizer = 'rmsprop'
    loss = {'dense_2': 'mse'}
    model.compile(optimizer, loss, metrics={'dense_1': 'mae'})

    # test train_on_batch
    out = model.train_on_batch(input_a_np, output_a_np)
    out = model.test_on_batch(input_a_np, output_a_np)
    # fit
    out = model.fit(input_a_np, [output_a_np])
    # evaluate
    out = model.evaluate(input_a_np, [output_a_np])
Developer: pkainz, Project: keras, Lines of code: 41, Source file: test_training.py

Example 11: simple_gan

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def simple_gan():
    z = Input(batch_shape=simple_gan_z_shape, name='z')
    generator = sequential([
        Dense(4*simple_gan_nb_z, activation='relu', name='g1'),
        Dense(4*simple_gan_nb_z, activation='relu', name='g2'),
        Dense(simple_gan_nb_out, name='g_loss'),
    ])(z)

    d_input = Input(batch_shape=simple_gan_real_shape, name='data')

    discriminator = sequential([
        Dense(400, input_dim=2, name='d1'),
        LeakyReLU(0.3),
        Dense(400, name='d2'),
        LeakyReLU(0.3),
        Dense(1, activation='sigmoid', name='d_loss')
    ])(d_input)
    g = Model(z, generator)
    g.compile(Adam(lr=0.0002, beta_1=0.5), {'g_loss': 'binary_crossentropy'})
    d = Model(d_input, discriminator)
    d.compile(Adam(lr=0.0002, beta_1=0.5), {'d_loss': 'binary_crossentropy'})
    return GAN(g, d)
Developer: BioroboticsLab, Project: diktya, Lines of code: 24, Source file: test_gan.py

Example 12: decoder_resnet

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def decoder_resnet(label_sizes, nb_filter=16, data_shape=(1, 64, 64), nb_bits=12,
                   resnet_depth=(3, 4, 6, 3),
                   optimizer='adam'):
    def _bn_relu_conv(nb_filter, nb_row=3, nb_col=3, subsample=1):
        return sequential([
            BatchNormalization(mode=0, axis=1),
            ELU(),
            Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col,
                          subsample=(subsample, subsample), init="he_normal", border_mode="same")
        ])

    def f(nb_filter, subsample=1):
        return sequential([
            _bn_relu_conv(nb_filter, subsample=subsample),
            _bn_relu_conv(nb_filter),
        ])

    input = Input(shape=data_shape)
    filters_by_depth = [nb_filter * 2**i for i in range(len(resnet_depth))]
    print("filters_by_depth", filters_by_depth)
    x = _bn_relu_conv(nb_filter, 3, 3, subsample=2)(input)
    for i, (n, d) in enumerate(zip(filters_by_depth, resnet_depth)):
        for di in range(d):
            if di == 0 and i != 0:
                shortcut = _bn_relu_conv(n, 1, 1, subsample=2)
                subsample = 2
            else:
                shortcut = lambda x: x
                subsample = 1
            x = merge([shortcut(x), f(n, subsample)(x)], mode='sum')

    outputs, losses = decoder_end_block(x, label_sizes, nb_bits,
                                        activation=lambda: ELU())

    model = Model(input, list(outputs.values()))
    model.compile(optimizer, loss=list(losses.values()),
                  loss_weights={k: decoder_loss_weights(k) for k in losses.keys()})
    return model
Developer: berleon, Project: deepdecoder, Lines of code: 40, Source file: networks.py

Example 13: train_f_enc

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
    def train_f_enc(self, steps_list, epoch=50):
        print("training f_enc")
        f_add0 = Sequential(name='f_add0')
        f_add0.add(self.f_enc)
        f_add0.add(Dense(FIELD_DEPTH))
        f_add0.add(Activation('softmax', name='softmax_add0'))

        f_add1 = Sequential(name='f_add1')
        f_add1.add(self.f_enc)
        f_add1.add(Dense(FIELD_DEPTH))
        f_add1.add(Activation('softmax', name='softmax_add1'))

        env_model = Model(self.f_enc.inputs, [f_add0.output, f_add1.output], name="env_model")
        env_model.compile(optimizer='adam', loss=['categorical_crossentropy']*2)

        for ep in range(epoch):
            losses = []
            for idx, steps_dict in enumerate(steps_list):
                prev = None
                for step in steps_dict['steps']:
                    x = self.convert_input(step.input)[:2]
                    env_values = step.input.env.reshape((4, -1))
                    in1 = np.clip(env_values[0].argmax() - 1, 0, 9)
                    in2 = np.clip(env_values[1].argmax() - 1, 0, 9)
                    carry = np.clip(env_values[2].argmax() - 1, 0, 9)
                    y_num = in1 + in2 + carry
                    now = (in1, in2, carry)
                    if prev == now:
                        continue
                    prev = now
                    y0 = to_one_hot_array((y_num %  10)+1, FIELD_DEPTH)
                    y1 = to_one_hot_array((y_num // 10)+1, FIELD_DEPTH)
                    y = [yy.reshape((self.batch_size, -1)) for yy in [y0, y1]]
                    loss = env_model.train_on_batch(x, y)
                    losses.append(loss)
            print("ep %3d: loss=%s" % (ep, np.average(losses)))
            if np.average(losses) < 1e-06:
                break
Developer: episodeyang, Project: deep_learning_notes, Lines of code: 40, Source file: model.py

Example 14: test_trainable_weights_count_consistency

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
def test_trainable_weights_count_consistency():
    """Tests the trainable weights consistency check of Model.

    This verifies that a warning is shown if model.trainable is modified
    and the model is summarized/run without a new call to .compile()

    Reproduce issue #8121
    """
    a = Input(shape=(3,), name='input_a')
    model1 = Model(inputs=a, outputs=Dense(1)(a))

    model1.trainable = False
    b = Input(shape=(3,), name='input_b')
    y = model1(b)
    model2 = Model(inputs=b, outputs=Dense(1)(y))

    model2.compile(optimizer='adam', loss='mse')

    model1.trainable = True

    # Should warn on .summary()
    with pytest.warns(UserWarning) as w:
        model2.summary()
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And on .fit()
    with pytest.warns(UserWarning) as w:
        model2.fit(x=np.zeros((5, 3)), y=np.zeros((5, 1)))
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And shouldn't warn if we recompile
    model2.compile(optimizer='adam', loss='mse')
    with pytest.warns(None) as w:
        model2.summary()
    assert len(w) == 0, "Warning raised even when .compile() is called after modifying .trainable"
Developer: pkainz, Project: keras, Lines of code: 39, Source file: test_training.py

Example 15: AIPlayer

# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import compile [as alias]
class AIPlayer(Player):
    
    def __init__(self, buffer_size, sim_count, train=True, model="", tau = 1, compile=False):
        self.buffer = ReplayBuffer(buffer_size)
        self.temp_state = deque()
        self.train = train
        self.loss = 0
        self.acc = 0
        self.batch_count = 0
        self.sim_count = sim_count
        if model != "":
            self.load(model, compile)
        else:
            self.create_network()
        self.tau = tau

    @staticmethod
    def create_if_nonexistant(config):
        models = glob.glob(config.data.model_location + "*.h5")
        if len(models) == 0:
            ai = AIPlayer(config.buffer_size, config.game.simulation_num_per_move)
            ai.save(config.data.model_location+"model_0.h5")
            del ai

    def set_training(self, train):
        self.train = train
    
    @staticmethod
    def clear():
        K.clear_session()
    
    def load(self, file, compile=False):
        try:
            del self.network
        except Exception:
            pass
        self.network = load_model(file, custom_objects={"objective_function_for_policy":AIPlayer.objective_function_for_policy,
                                                        "objective_function_for_value":AIPlayer.objective_function_for_value}, compile=compile)
        
    def save(self, file):
        self.network.save(file)
    
    def create_network(self):
        x_in = Input((3, 8, 8))
        x = Conv2D(filters=128, kernel_size=(3,3), padding="same", data_format="channels_first")(x_in)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        for _ in range(10):
            x = self._build_residual_block(x)

        res_out = x
        
        x = Conv2D(filters=2, kernel_size=1, data_format="channels_first")(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        policy_out = Dense(8*8+1, activation="softmax", name="policy_out")(x)

        x = Conv2D(filters=1, kernel_size=1, data_format="channels_first")(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        x = Dense(64, activation="relu")(x)
        value_out =  Dense(1, activation="tanh", name="value_out")(x)
        
        self.network = Model(x_in, [policy_out, value_out], name="reversi_model")
        self.compile()
      
    def _build_residual_block(self, x):
        in_x = x
        x = Conv2D(filters=128, kernel_size=(3,3), padding="same", data_format="channels_first")(x)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Conv2D(filters=128, kernel_size=(3,3), padding="same", data_format="channels_first")(x)
        x = BatchNormalization(axis=1)(x)
        x = Add()([in_x, x])
        x = Activation("relu")(x)
        return x
        
    def compile(self):
        losses = [AIPlayer.objective_function_for_policy, AIPlayer.objective_function_for_value]
        self.network.compile(optimizer=optimizers.SGD(lr=1e-3, momentum=0.9), loss=losses)
      
    def update_lr(self, lr):
        K.set_value(self.network.optimizer.lr, lr)
        
    @staticmethod
    def objective_function_for_policy(y_true, y_pred):
        # can use categorical_crossentropy??
        return K.sum(-y_true * K.log(y_pred + K.epsilon()), axis=-1)

    @staticmethod
    def objective_function_for_value(y_true, y_pred):
        return mean_squared_error(y_true, y_pred)
        
    def update_buffer(self, winner):
        if self.train:
            while len(self.temp_state) > 0:
                t = self.temp_state.pop()
                self.buffer.add((t[0], t[1], winner))
#......... remaining code omitted here .........
Developer: 1715509415, Project: alpha_zero_othello, Lines of code: 103, Source file: aiplayer.py


Note: The keras.engine.training.Model.compile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code belongs to those authors; please consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.