This article collects typical usage examples of the Python method keras.engine.training.Model.fit. If you are wondering how exactly Model.fit is used in Python, or are looking for concrete examples of calling Model.fit, the curated code samples below should help. You can also browse further usage examples of the containing class, keras.engine.training.Model.
The following shows 15 code examples of the Model.fit method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
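Before the examples, here is a minimal, self-contained sketch of a typical Model.fit call; the layer sizes, optimizer, and data shapes are illustrative only and do not come from any example below.

import numpy as np
from keras.layers import Input, Dense
from keras.engine.training import Model

# A tiny single-input, single-output functional model.
inputs = Input(shape=(3,))
outputs = Dense(2)(inputs)
model = Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')

# fit() accepts NumPy arrays (or lists/dicts of arrays for multi-input/output models).
x = np.random.random((16, 3))
y = np.random.random((16, 2))
model.fit(x, y, epochs=1, batch_size=4, validation_split=0.25)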
Example 1: test_sparse_input_validation_split
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_sparse_input_validation_split():
    test_input = sparse.random(6, 3, density=0.25).tocsr()
    in1 = Input(shape=(3,), sparse=True)
    out1 = Dense(4)(in1)
    test_output = np.random.random((6, 4))
    model = Model(in1, out1)
    model.compile('rmsprop', 'mse')
    model.fit(test_input, test_output, epochs=1, batch_size=2, validation_split=0.2)
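The test snippets on this page omit their module-level imports; a plausible set for the first few examples (an assumption based on the names used, not copied from the original test file) would be:

import numpy as np
import pytest
from scipy import sparse
from keras.layers import Input, Dense, Dropout
from keras.engine.training import Model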
Example 2: test_sparse_placeholder_fit
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_sparse_placeholder_fit():
    test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
    test_outputs = [sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]
    in1 = Input(shape=(3,))
    in2 = Input(shape=(3,), sparse=True)
    out1 = Dropout(0.5, name='dropout')(in1)
    out2 = Dense(4, name='dense_1')(in2)
    model = Model([in1, in2], [out1, out2])
    model.predict(test_inputs, batch_size=2)
    model.compile('rmsprop', 'mse')
    model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.5)
    model.evaluate(test_inputs, test_outputs, batch_size=2)
Example 3: test_model_with_partial_loss
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_model_with_partial_loss():
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    a_3 = dp(a_2)
    model = Model(a, [a_2, a_3])
    optimizer = 'rmsprop'
    loss = {'dropout': 'mse'}
    model.compile(optimizer, loss, metrics=['mae'])
    input_a_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    # test train_on_batch
    out = model.train_on_batch(input_a_np, output_a_np)
    out = model.test_on_batch(input_a_np, output_a_np)
    # fit
    out = model.fit(input_a_np, [output_a_np])
    # evaluate
    out = model.evaluate(input_a_np, [output_a_np])

    # Same without dropout.
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    a_3 = Dense(4, name='dense_2')(a_2)
    model = Model(a, [a_2, a_3])
    optimizer = 'rmsprop'
    loss = {'dense_2': 'mse'}
    model.compile(optimizer, loss, metrics={'dense_1': 'mae'})
    # test train_on_batch
    out = model.train_on_batch(input_a_np, output_a_np)
    out = model.test_on_batch(input_a_np, output_a_np)
    # fit
    out = model.fit(input_a_np, [output_a_np])
    # evaluate
    out = model.evaluate(input_a_np, [output_a_np])
Example 4: test_trainable_weights_count_consistency
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_trainable_weights_count_consistency():
    """Tests the trainable weights consistency check of Model.
    This verifies that a warning is shown if model.trainable is modified
    and the model is summarized/run without a new call to .compile()
    Reproduce issue #8121
    """
    a = Input(shape=(3,), name='input_a')
    model1 = Model(inputs=a, outputs=Dense(1)(a))
    model1.trainable = False
    b = Input(shape=(3,), name='input_b')
    y = model1(b)
    model2 = Model(inputs=b, outputs=Dense(1)(y))
    model2.compile(optimizer='adam', loss='mse')
    model1.trainable = True

    # Should warn on .summary()
    with pytest.warns(UserWarning) as w:
        model2.summary()
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And on .fit()
    with pytest.warns(UserWarning) as w:
        model2.fit(x=np.zeros((5, 3)), y=np.zeros((5, 1)))
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And shouldn't warn if we recompile
    model2.compile(optimizer='adam', loss='mse')
    with pytest.warns(None) as w:
        model2.summary()
    assert len(w) == 0, "Warning raised even when .compile() is called after modifying .trainable"
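The practical takeaway of this test, shown as a minimal stand-alone sketch (a hypothetical fine-tuning setup, not part of the test itself): re-compile after toggling .trainable so the change takes effect without the discrepancy warning.

import numpy as np
from keras.layers import Input, Dense
from keras.engine.training import Model

inp = Input(shape=(3,))
base = Model(inp, Dense(1)(inp))                   # stand-in for a pretrained sub-model

outer_in = Input(shape=(3,))
outer = Model(outer_in, Dense(1)(base(outer_in)))

base.trainable = False                             # freeze the sub-model...
outer.compile(optimizer='adam', loss='mse')        # ...then compile AFTER the change
outer.fit(np.zeros((5, 3)), np.zeros((5, 1)), epochs=1, verbose=0)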
Example 5: test_model_methods
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_model_methods():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# test fit
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np], nb_epoch=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np], nb_epoch=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
nb_epoch=1, batch_size=4)
# test validation_split
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4, validation_split=0.5)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4, validation_split=0.5)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
nb_epoch=1, batch_size=4, validation_split=0.5)
# test validation data
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4,
validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4, validation_split=0.5,
validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
nb_epoch=1, batch_size=4, validation_split=0.5,
validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, {'dense_1': output_a_np, 'dropout': output_b_np}))
# test_on_batch
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# predict_on_batch
out = model.predict_on_batch([input_a_np, input_b_np])
out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})
# predict, evaluate
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
out = model.predict([input_a_np, input_b_np], batch_size=4)
# with sample_weight
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
sample_weight = [None, np.random.random((10,))]
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=sample_weight)
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
#......... part of the code omitted here .........
Example 6: test_model_methods
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_model_methods():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
input_a_df = pd.DataFrame(input_a_np)
input_b_df = pd.DataFrame(input_b_np)
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
output_a_df = pd.DataFrame(output_a_np)
output_b_df = pd.DataFrame(output_b_np)
# training/testing doesn't work before compiling.
with pytest.raises(RuntimeError):
model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np])
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None)
# test train_on_batch
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
out = model.train_on_batch([input_a_df, input_b_df],
[output_a_df, output_b_df])
# test fit
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np], epochs=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np], epochs=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
epochs=1, batch_size=4)
out = model.fit([input_a_df, input_b_df],
[output_a_df, output_b_df], epochs=1, batch_size=4)
# test validation_split
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
epochs=1, batch_size=4, validation_split=0.5)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
epochs=1, batch_size=4, validation_split=0.5)
# test validation data
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
epochs=1, batch_size=4,
validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
epochs=1, batch_size=4, validation_split=0.5,
validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
epochs=1, batch_size=4, validation_split=0.5,
validation_data=(
{'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np}))
# test_on_batch
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
out = model.test_on_batch([input_a_df, input_b_df],
[output_a_df, output_b_df])
# predict_on_batch
out = model.predict_on_batch([input_a_np, input_b_np])
out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})
out = model.predict_on_batch([input_a_df, input_b_df])
# predict, evaluate
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
out = model.evaluate([input_a_df, input_b_df], [output_a_df, output_b_df], batch_size=4)
#......... part of the code omitted here .........
Example 7: test_model_with_external_loss
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_model_with_external_loss():
# None loss, only regularization loss.
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1',
kernel_regularizer='l1',
bias_regularizer='l2')(a)
dp = Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# No dropout, external loss.
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1')(a)
a_3 = Dense(4, name='dense_2')(a)
model = Model(a, [a_2, a_3])
model.add_loss(K.mean(a_3 + a_2))
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# Test fit with no external data at all.
if K.backend() == 'tensorflow':
import tensorflow as tf
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
a_2 = Dense(4, name='dense_1')(a)
a_2 = Dropout(0.5, name='dropout')(a_2)
model = Model(a, a_2)
model.add_loss(K.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with pytest.raises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test fit with validation data
with pytest.raises(ValueError):
out = model.fit(None, None,
epochs=1,
steps_per_epoch=None,
validation_steps=2)
out = model.fit(None, None,
epochs=1,
steps_per_epoch=2,
validation_steps=2)
# test evaluate
with pytest.raises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with pytest.raises(ValueError):
out = model.predict(None, batch_size=10)
out = model.predict(None, steps=3)
assert out.shape == (10 * 3, 4)
# Test multi-output model without external data.
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
a_1 = Dense(4, name='dense_1')(a)
a_2 = Dropout(0.5, name='dropout')(a_1)
model = Model(a, [a_1, a_2])
model.add_loss(K.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
#......... part of the code omitted here .........
Example 8: test_model_with_input_feed_tensor
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_model_with_input_feed_tensor():
"""We test building a model with a TF variable as input.
We should be able to call fit, evaluate, predict,
by only passing them data for the placeholder inputs
in the model.
"""
import tensorflow as tf
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(optimizer, loss, metrics=['mean_squared_error'],
loss_weights=loss_weights,
sample_weight_mode=None)
# test train_on_batch
out = model.train_on_batch(input_b_np,
[output_a_np, output_b_np])
out = model.train_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.predict_on_batch({'input_b': input_b_np})
# test fit
out = model.fit({'input_b': input_b_np},
[output_a_np, output_b_np], epochs=1, batch_size=10)
out = model.fit(input_b_np,
[output_a_np, output_b_np], epochs=1, batch_size=10)
# test evaluate
out = model.evaluate({'input_b': input_b_np},
[output_a_np, output_b_np], batch_size=10)
out = model.evaluate(input_b_np,
[output_a_np, output_b_np], batch_size=10)
# test predict
out = model.predict({'input_b': input_b_np}, batch_size=10)
out = model.predict(input_b_np, batch_size=10)
assert len(out) == 2
# Now test a model with a single input
# i.e. we don't pass any data to fit the model.
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
a_2 = Dense(4, name='dense_1')(a)
a_2 = Dropout(0.5, name='dropout')(a_2)
model = Model(a, a_2)
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss, metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None,
output_a_np)
out = model.train_on_batch(None,
output_a_np)
out = model.test_on_batch(None,
output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([],
output_a_np)
out = model.train_on_batch({},
output_a_np)
# test fit
out = model.fit(None,
output_a_np, epochs=1, batch_size=10)
out = model.fit(None,
output_a_np, epochs=1, batch_size=10)
# test evaluate
out = model.evaluate(None,
output_a_np, batch_size=10)
out = model.evaluate(None,
output_a_np, batch_size=10)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
assert out.shape == (10 * 3, 4)
# Same, without learning phase
#......... part of the code omitted here .........
Example 9: AIPlayer
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
#......... part of the code omitted here .........
def update_lr(self, lr):
K.set_value(self.network.optimizer.lr, lr)
@staticmethod
def objective_function_for_policy(y_true, y_pred):
# can use categorical_crossentropy??
return K.sum(-y_true * K.log(y_pred + K.epsilon()), axis=-1)
@staticmethod
def objective_function_for_value(y_true, y_pred):
return mean_squared_error(y_true, y_pred)
def update_buffer(self, winner):
if self.train:
while len(self.temp_state) > 0:
t = self.temp_state.pop()
self.buffer.add((t[0], t[1], winner))
def train_batches(self, batch_size, batches=-1, verbose=2):
if batches == -1:
s_buffer = np.array([_[0] for _ in self.buffer.buffer])
p_buffer = np.array([_[1] for _ in self.buffer.buffer])
v_buffer = np.array([_[2] for _ in self.buffer.buffer])
else:
sample_size = batch_size*batches
sample = []
while sample_size > 0:
sample += self.buffer.sample(sample_size)
sample_size -= self.buffer.size()
s_buffer = np.array([_[0] for _ in sample])
p_buffer = np.array([_[1] for _ in sample])
v_buffer = np.array([_[2] for _ in sample])
history = self.network.fit(s_buffer, [p_buffer, v_buffer], batch_size=batch_size, epochs=1, verbose=verbose)
return history
def preprocess_input(self, board, side):
state = np.zeros((3, 8, 8), dtype=np.int)
for i in range(8):
for j in range(8):
if board[i,j] == 1:
state[0,i,j] = 1
elif board[i,j] == -1:
state[1,i,j] = 1
if side == 1:
state[2,i,j] = 1
return state
def evaluate(self, game, side):
current_input = self.preprocess_input(game.board, side)
pred = self.network.predict(current_input[np.newaxis,:])
return pred[1][0]
def pick_move(self, game, side):
possible_moves = game.possible_moves(side)
if len(possible_moves) == 0:
possible_moves.append((-1,-1))
monte_prob = self.monte_carlo(game, side)
if self.train:
self.temp_state.append((self.preprocess_input(game.board, side), np.divide(monte_prob, np.sum(monte_prob))))
monte_prob = np.float_power(monte_prob, 1/self.tau)
monte_prob = np.divide(monte_prob, np.sum(monte_prob))
r = random()
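The core of train_batches above is a single Model.fit call on stacked (state, policy, value) samples drawn from a replay buffer. Because AIPlayer's constructor and network definition are omitted above, the following stand-alone sketch substitutes a toy two-headed network (every layer size and name below is an illustrative assumption, not part of AIPlayer):

import numpy as np
from keras.layers import Input, Flatten, Dense
from keras.engine.training import Model

# Toy stand-in for AIPlayer's self.network: one board-shaped input, two output heads.
board_in = Input(shape=(3, 8, 8))
h = Dense(16, activation='relu')(Flatten()(board_in))
policy_head = Dense(64, activation='softmax', name='policy')(h)
value_head = Dense(1, activation='tanh', name='value')(h)
net = Model(board_in, [policy_head, value_head])
net.compile('adam', loss=['categorical_crossentropy', 'mse'])

# Stacked replay-buffer samples, shaped the way train_batches builds its s/p/v buffers.
s_buffer = np.random.random((32, 3, 8, 8))
p_buffer = np.random.random((32, 64))
v_buffer = np.random.random((32, 1))
history = net.fit(s_buffer, [p_buffer, v_buffer], batch_size=8, epochs=1, verbose=0)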
Example 10: test_pandas_dataframe
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_pandas_dataframe():
input_a = Input(shape=(3,), name='input_a')
input_b = Input(shape=(3,), name='input_b')
x = Dense(4, name='dense_1')(input_a)
y = Dense(3, name='dense_2')(input_b)
model_1 = Model(inputs=input_a, outputs=x)
model_2 = Model(inputs=[input_a, input_b], outputs=[x, y])
optimizer = 'rmsprop'
loss = 'mse'
model_1.compile(optimizer=optimizer, loss=loss)
model_2.compile(optimizer=optimizer, loss=loss)
input_a_df = pd.DataFrame(np.random.random((10, 3)))
input_b_df = pd.DataFrame(np.random.random((10, 3)))
output_a_df = pd.DataFrame(np.random.random((10, 4)))
output_b_df = pd.DataFrame(np.random.random((10, 3)))
model_1.fit(input_a_df,
output_a_df)
model_2.fit([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.fit([input_a_df],
[output_a_df])
model_1.fit({'input_a': input_a_df},
output_a_df)
model_2.fit({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.predict(input_a_df)
model_2.predict([input_a_df, input_b_df])
model_1.predict([input_a_df])
model_1.predict({'input_a': input_a_df})
model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})
model_1.predict_on_batch(input_a_df)
model_2.predict_on_batch([input_a_df, input_b_df])
model_1.predict_on_batch([input_a_df])
model_1.predict_on_batch({'input_a': input_a_df})
model_2.predict_on_batch({'input_a': input_a_df, 'input_b': input_b_df})
model_1.evaluate(input_a_df,
output_a_df)
model_2.evaluate([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.evaluate([input_a_df],
[output_a_df])
model_1.evaluate({'input_a': input_a_df},
output_a_df)
model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.train_on_batch(input_a_df,
output_a_df)
model_2.train_on_batch([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.train_on_batch([input_a_df],
[output_a_df])
model_1.train_on_batch({'input_a': input_a_df},
output_a_df)
model_2.train_on_batch({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.test_on_batch(input_a_df,
output_a_df)
model_2.test_on_batch([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.test_on_batch([input_a_df],
[output_a_df])
model_1.test_on_batch({'input_a': input_a_df},
output_a_df)
model_2.test_on_batch({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
Example 11: FinancialTimeSeriesAnalysisModel
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
class FinancialTimeSeriesAnalysisModel(object):
model = None
def __init__(self, nb_time_step, dim_data, batch_size=1, model_path=None):
self.model_path = model_path
self.model_path = model_path
self.batch_size = batch_size
self.size_of_input_data_dim = dim_data
self.size_of_input_timesteps = nb_time_step
self.build()
self.weight_loaded = False
if model_path is not None:
self.load_weights()
def build(self):
dim_data = self.size_of_input_data_dim
nb_time_step = self.size_of_input_timesteps
financial_time_series_input = Input(shape=(nb_time_step, dim_data), name='x1')
lstm_layer_1 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer1')
lstm_layer_21 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss1')
lstm_layer_22 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss2')
lstm_layer_23 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss3')
lstm_layer_24 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss4')
lstm_layer_25 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss5')
h1 = lstm_layer_1(financial_time_series_input)
h21 = lstm_layer_21(h1)
h22 = lstm_layer_22(h1)
h23 = lstm_layer_23(h1)
h24 = lstm_layer_24(h1)
h25 = lstm_layer_25(h1)
time_series_predictions1 = TimeDistributed(Dense(1), name="p1")(h21) # custom 1
time_series_predictions2 = TimeDistributed(Dense(1), name="p2")(h22) # custom 2
time_series_predictions3 = TimeDistributed(Dense(1), name="p3")(h23) # mse
time_series_predictions4 = TimeDistributed(Dense(1, activation='sigmoid'), name="p4")(h24) # logloss
time_series_predictions5 = TimeDistributed(Dense(nb_labels, activation='softmax'), name="p5")(h25) # cross
self.model = Model(input=financial_time_series_input,
output=[time_series_predictions1, time_series_predictions2,
time_series_predictions3, time_series_predictions4,
time_series_predictions5],
name="multi-task deep rnn for financial time series forecasting")
plot(self.model, to_file='model.png')
def reset(self):
for l in self.model.layers:
if type(l) is LSTM:
l.reset_status()
def compile_model(self, lr=0.0001, arg_weight=1.):
optimizer = Adam(lr=lr)
loss = [custom_objective1, custom_objective2, 'mse', 'binary_crossentropy', 'categorical_crossentropy']
self.model.compile(optimizer=optimizer, loss=loss)
def fit_model(self, X, y, y_label, epoch=300):
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0)
self.model.fit(X, [y]*3 + [y > 0] + [y_label], batch_size=self.batch_size, nb_epoch=epoch, validation_split=0.2,
shuffle=True, callbacks=[early_stopping])
def save(self):
self.model.save_weights(self.model_path, overwrite=True)
def load_weights(self):
if os.path.exists(self.model_path):
self.model.load_weights(self.model_path)
self.weight_loaded = True
def print_weights(self, weights=None, detail=False):
weights = weights or self.model.get_weights()
for w in weights:
print("w%s: sum(w)=%s, ave(w)=%s" % (w.shape, np.sum(w), np.average(w)))
if detail:
for w in weights:
print("%s: %s" % (w.shape, w))
def model_eval(self, X, y):
y_hat = self.model.predict(X, batch_size=1)[0]
count_true = 0
count_all = y.shape[1]
for i in range(y.shape[1]):
count_true = count_true + 1 if y[0,i,0]*y_hat[0,i,0]>0 else count_true
print(y[0,i,0],y_hat[0,i,0])
print(count_all,count_true)
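FinancialTimeSeriesAnalysisModel.build relies on module-level names that are not shown above (nb_hidden_units, dropout, l2_norm_alpha, nb_labels, custom_objective1/2, plot), so the snippet below only sketches the intended call order under assumed values and shapes; it is not runnable on its own.

import numpy as np

nb_time_step, dim_data, nb_labels = 20, 8, 3            # assumed values
m = FinancialTimeSeriesAnalysisModel(nb_time_step, dim_data, batch_size=4)
m.compile_model(lr=1e-4)

X = np.random.random((32, nb_time_step, dim_data))
y = np.random.random((32, nb_time_step, 1)) - 0.5        # signed regression target
# one-hot labels for the softmax head p5
y_label = np.eye(nb_labels)[np.random.randint(nb_labels, size=(32, nb_time_step))]
m.fit_model(X, y, y_label, epoch=2)                      # internally calls Model.fit with 5 targets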
Example 12: test_model_methods
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_model_methods():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# test fit
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np], nb_epoch=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np], nb_epoch=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
nb_epoch=1, batch_size=4)
# test validation_split
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4, validation_split=0.5)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4, validation_split=0.5)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
nb_epoch=1, batch_size=4, validation_split=0.5)
# test validation data
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4,
validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4, validation_split=0.5,
validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
nb_epoch=1, batch_size=4, validation_split=0.5,
validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, {'dense_1': output_a_np, 'dropout': output_b_np}))
# test_on_batch
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# predict_on_batch
out = model.predict_on_batch([input_a_np, input_b_np])
out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})
# predict, evaluate
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
out = model.predict([input_a_np, input_b_np], batch_size=4)
# with sample_weight
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
sample_weight = [None, np.random.random((10,))]
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=sample_weight)
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
#......... part of the code omitted here .........
Example 13: build_CNN_model
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
#......... part of the code omitted here .........
# kernel_regularizer=conv_reg3,
# dilation_rate=1,
# name='ConvLayer3')(layer)
#
# layer = SpatialDropout1D(0.50)(layer)
#
# layer = MaxPooling1D(pool_size=pool_len3)(layer)
# #layer = GlobalMaxPool1D()(layer)
#
# layer = Convolution1D(filters=num_filters4,
# kernel_size=filter_length4,
# padding=region,
# activation=conv_activation4,
# kernel_initializer=conv_init4,
# kernel_regularizer=conv_reg4,
# dilation_rate=1,
# name='ConvLayer4')(layer)
#
# #layer = leaky_relu(layer)
#
# layer = SpatialDropout1D(0.50)(layer)
#
# layer = MaxPooling1D(pool_size=pool_len4)(layer)
# #layer = GlobalMaxPool1D()(layer)
#
# # layer = BatchNormalization()(layer)
layer = Flatten()(layer)
layer = Dense(dense_dims0, activation=dense_activation0, kernel_regularizer=dense_reg0,
kernel_initializer='glorot_normal', bias_initializer='zeros',
name='dense0')(layer)
layer = Dropout(0.50)(layer)
layer = Dense(dense_dims1, activation=dense_activation1, kernel_regularizer=dense_reg1,
kernel_initializer='glorot_normal', bias_initializer='zeros',
name='dense1')(layer)
layer = Dropout(0.50)(layer)
# layer = Dense(dense_dims2, activation=dense_activation2, kernel_regularizer=dense_reg2,
# kernel_initializer=dense_init2,
# name='dense2')(layer)
#
#
# layer = Dropout(0.50)(layer)
#
# layer = Dense(dense_dims3, activation=dense_activation3, kernel_regularizer=dense_reg3,
# kernel_initializer=dense_init3,
# name='dense3_outA')(layer)
# #layer = leaky_relu(layer)
#
if is_IntermediateModel:
return Model(inputs=[review_input], outputs=[layer], name="CNN_model")
#
# layer = Dropout(0.5)(layer)
layer = Dense(dense_dims_final, activation=dense_activation_final, kernel_initializer=dense_init_final,
kernel_regularizer=dense_reg0,
name='output_Full')(layer)
CNN_model = Model(inputs=[review_input], outputs=[layer], name="CNN_model")
CNN_model.compile(optimizer=Adam(lr=0.001, decay=0.0), loss=loss_func, metrics=[binary_accuracy])
if load_weight_path is not None:
CNN_model.load_weights(load_weight_path)
hist = ""
if do_training:
weightPath = os.path.join(modelParameters.WEIGHT_PATH, filename)
configPath = os.path.join(modelParameters.WEIGHT_PATH, filename_config)
with open(configPath + ".json", 'wb') as f:
f.write(CNN_model.to_json())
checkpoint = ModelCheckpoint(weightPath + '_W.{epoch:02d}-{val_loss:.4f}.hdf5',
verbose=1, save_best_only=True, save_weights_only=False, monitor='val_loss')
earlyStop = EarlyStopping(patience=3, verbose=1, monitor='val_loss')
LRadjuster = ReduceLROnPlateau(monitor='val_loss', factor=0.30, patience=0, verbose=1, cooldown=1,
min_lr=0.00001, epsilon=1e-2)
call_backs = [checkpoint, earlyStop, LRadjuster]
CNN_model.summary()
hist = CNN_model.fit(*model_inputs['training'],
batch_size=batch_size,
epochs=nb_epoch, verbose=1,
validation_data=model_inputs['dev'],
callbacks=call_backs)
return {"model": CNN_model, "hist": hist}
Example 14: test_model_methods
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
def test_model_methods():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
# training/testing doesn't work before compiling.
with pytest.raises(RuntimeError):
model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np])
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None)
# test train_on_batch
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# test fit
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np], epochs=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np], epochs=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
epochs=1, batch_size=4)
# test validation_split
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
epochs=1, batch_size=4, validation_split=0.5)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
epochs=1, batch_size=4, validation_split=0.5)
# test validation data
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
epochs=1, batch_size=4,
validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
epochs=1, batch_size=4, validation_split=0.5,
validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
epochs=1, batch_size=4, validation_split=0.5,
validation_data=(
{'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np}))
# test_on_batch
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# predict_on_batch
out = model.predict_on_batch([input_a_np, input_b_np])
out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})
# predict, evaluate
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
out = model.predict([input_a_np, input_b_np], batch_size=4)
# with sample_weight
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
sample_weight = [None, np.random.random((10,))]
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
#......... part of the code omitted here .........
Example 15: FinancialNewsAnalysisModel
# Module to import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import fit [as alias]
class FinancialNewsAnalysisModel(object):
model = None
def __init__(self, nb_time_step, dim_data, batch_size=1, model_path=None):
self.model_path = model_path
self.model_path = model_path
self.batch_size = batch_size
self.size_of_input_data_dim = dim_data
self.size_of_input_timesteps = nb_time_step
self.build()
self.weight_loaded = False
if model_path is not None:
self.load_weights()
def build(self):
dim_data = self.size_of_input_data_dim
nb_time_step = self.size_of_input_timesteps
news_input = Input(shape=(nb_time_step, dim_data))
lstm = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh')
bi_lstm = Bidirectional(lstm, input_shape=(nb_time_step, dim_data), merge_mode='concat')
all_news_rep = bi_lstm(news_input)
news_predictions = Dense(1, activation='linear')(all_news_rep)
self.model = Model(news_input, news_predictions, name="deep rnn for financial news analysis")
def reset(self):
for l in self.model.layers:
if type(l) is LSTM:
l.reset_status()
def compile_model(self, lr=0.0001, loss_weights=0.1):
optimizer = Adam(lr=lr)
loss = 'mse'
# loss = custom_objective
self.model.compile(optimizer=optimizer, loss=loss)
#metrics=['mse'])
plot(self.model, to_file='model.png')
def fit_model(self, X, y, X_val=None, y_val=None, epoch=500):
early_stopping = EarlyStopping(monitor='val_loss', patience=100, verbose=0)
if X_val is None:
self.model.fit(X, y, batch_size=self.batch_size, nb_epoch=epoch, validation_split=0.2,
shuffle=True, callbacks=[early_stopping])
else:
self.model.fit(X, y, batch_size=self.batch_size, nb_epoch=epoch, validation_data=(X_val, y_val),
shuffle=True, callbacks=[early_stopping])
def save(self):
self.model.save_weights(self.model_path, overwrite=True)
def load_weights(self):
if os.path.exists(self.model_path):
self.model.load_weights(self.model_path)
self.weight_loaded = True
def print_weights(self, weights=None, detail=False):
weights = weights or self.model.get_weights()
for w in weights:
print("w%s: sum(w)=%s, ave(w)=%s" % (w.shape, np.sum(w), np.average(w)))
if detail:
for w in weights:
print("%s: %s" % (w.shape, w))
def model_eval(self, X, y):
y_hat = self.model.predict(X, batch_size=1)
count_true = 0
count_all = y.shape[0]
for i in range(y.shape[0]):
count_true = count_true + 1 if y[i,0]*y_hat[i,0]>0 else count_true
print(y[i,0], y_hat[i,0])
print(count_all, count_true)
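As with Example 11, this class depends on module-level globals (nb_hidden_units, dropout, l2_norm_alpha) that are not shown above, so the following only sketches the intended call sequence under assumed shapes; it is not runnable by itself.

import numpy as np

nb_time_step, dim_data = 10, 100                         # assumed values
news_model = FinancialNewsAnalysisModel(nb_time_step, dim_data, batch_size=8)
news_model.compile_model(lr=1e-4)

X = np.random.random((256, nb_time_step, dim_data))      # news feature sequences
y = np.random.random((256, 1)) - 0.5                     # signed next-step return target
news_model.fit_model(X, y, epoch=50)                     # EarlyStopping is applied internally
news_model.model_eval(X, y)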