本文整理汇总了Python中keras.models.Sequential.predict方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.predict方法的具体用法?Python Sequential.predict怎么用?Python Sequential.predict使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.models.Sequential
的用法示例。
在下文中一共展示了Sequential.predict方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_sequential_model_saving
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
def test_sequential_model_saving():
    """Round-trip a compiled Sequential model through save_model/load_model
    and check that predictions and subsequent training updates stay in sync
    between the original and the reloaded model."""
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    # BUG FIX: mkstemp returns an *open* OS-level file descriptor along with
    # the path; the original discarded it (`_, fname = ...`) and leaked it.
    fd, fname = tempfile.mkstemp('.h5')
    os.close(fd)
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
示例2: test_sequential_model_saving_2
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
def test_sequential_model_saving_2():
    """Same save/load round-trip check as test_sequential_model_saving, but
    with a "funkier" config (input_dim instead of input_shape, the legacy
    objectives.MSE loss)."""
    # test with funkier config
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    # BUG FIX: 'tmp_<random int>.h5' can collide when tests run concurrently
    # (only 10000 possible names); mkstemp guarantees a unique path.  Close
    # the descriptor it opens so it is not leaked.
    import tempfile
    fd, fname = tempfile.mkstemp('.h5')
    os.close(fd)
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
示例3: test_merge_overlap
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
def test_merge_overlap():
    """Smoke-test a Sequential model whose Merge layer consumes the *same*
    branch twice (mode='sum'), covering fit/evaluate/predict variants and a
    save_weights/load_weights round trip."""
    branch = Sequential()
    branch.add(Dense(nb_hidden, input_shape=(input_dim,)))
    branch.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([branch, branch], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Exercise the legacy fit() signature: accuracy flag x verbosity, first
    # with explicit validation data, then with a validation split.
    for acc_flag, verbosity in ((True, 1), (False, 2)):
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                  show_accuracy=acc_flag, verbose=verbosity,
                  validation_data=(X_test, y_test))
    for acc_flag, verbosity in ((True, 2), (False, 1)):
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                  show_accuracy=acc_flag, verbose=verbosity,
                  validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, shuffle=False)
    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert loss < 0.7

    # All three prediction entry points must run without error.
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    # Saving then reloading the weights must leave the loss unchanged.
    weights_file = 'test_merge_overlap_temp.h5'
    model.save_weights(weights_file, overwrite=True)
    model.load_weights(weights_file)
    os.remove(weights_file)
    reloaded_loss = model.evaluate(X_train, y_train, verbose=0)
    assert loss == reloaded_loss
示例4: test_nested_sequential
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
def test_nested_sequential(in_tmpdir):
    """Train a Sequential model nested two containers deep, round-trip its
    weights through a file into a freshly built clone, and exercise the
    serialization APIs (config / json / yaml)."""
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    def build_model():
        # inner dense stack, wrapped in a pass-through Sequential, topped
        # with a softmax — same construction both before and after reload
        core = Sequential()
        core.add(Dense(num_hidden, input_shape=(input_dim,)))
        core.add(Activation('relu'))
        core.add(Dense(num_class))
        wrapper = Sequential()
        wrapper.add(core)
        net = Sequential()
        net.add(wrapper)
        net.add(Activation('softmax'))
        net.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        return net

    model = build_model()
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=1, shuffle=False)
    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)
    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    # A fresh, identically built model loaded with the saved weights must
    # reproduce the exact evaluation loss.
    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = build_model()
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)
    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)
    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
示例5: test_sequential_model_saving
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
def test_sequential_model_saving():
    """Round-trip a small compiled model through save_model/load_model,
    verify predictions and post-load training updates stay in sync, and
    check load_weights also works on a full-model file."""
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    # BUG FIX: 'tmp_<random int>.h5' built from np.random.randint(10000) can
    # collide across concurrent test runs; mkstemp yields a unique path
    # (close the descriptor it opens so it is not leaked).
    import tempfile
    fd, fname = tempfile.mkstemp('.h5')
    os.close(fd)
    save_model(model, fname)

    new_model = load_model(fname)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test load_weights on model file
    model.load_weights(fname)
    os.remove(fname)
示例6: save_bottleneck_features
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
def save_bottleneck_features():
    """Build the convolutional base, load pre-trained weights into it layer
    by layer from the HDF5 file at ``weights_path``, then save the base's
    predictions ("bottleneck features") and labels for the train and
    validation splits as .npy files."""
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, input_shape=(1, img_width, img_height)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))
    model.add(Convolution2D(64, 2, 2))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Convolution2D(128, 2, 2))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))

    assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
    # BUG FIX: the original opened the HDF5 file without a mode or a close;
    # open read-only inside a context manager so the handle is released even
    # if the weight-count check below raises.
    with h5py.File(weights_path, 'r') as f:
        layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
        weight_value_tuples = []
        for k, name in enumerate(layer_names):
            if k >= len(model.layers):
                # the save file has more layers than this truncated base
                break
            g = f[name]
            weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
            if len(weight_names):
                weight_values = [g[weight_name] for weight_name in weight_names]
                layer = model.layers[k]
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                if len(weight_values) != len(symbolic_weights):
                    raise Exception('Layer #' + str(k) +
                                    ' (named "' + layer.name +
                                    '" in the current model) was found to '
                                    'correspond to layer ' + name +
                                    ' in the save file. '
                                    'However the new layer ' + layer.name +
                                    ' expects ' + str(len(symbolic_weights)) +
                                    ' weights, but the saved weights have ' +
                                    str(len(weight_values)) +
                                    ' elements.')
                weight_value_tuples += zip(symbolic_weights, weight_values)
        # must happen while the file is still open: the values are h5py
        # dataset references
        K.batch_set_value(weight_value_tuples)
    print('Model loaded.')

    X, y = load2d()
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
    # augment the training set with flipped copies
    X_flipped, y_flipped = flip_image(X_train, y_train)
    X_train = np.vstack((X_train, X_flipped))
    y_train = np.vstack((y_train, y_flipped))

    # BUG FIX: np.save(open(fname, 'w'), ...) passes a *text-mode* handle
    # for binary data (fails on Python 3) and leaks the handle; np.save
    # accepts the filename directly and manages the file itself.
    bottleneck_features_train = model.predict(X_train)
    np.save('bottleneck_features_train.npy', bottleneck_features_train)
    np.save('label_train.npy', y_train)

    bottleneck_features_validation = model.predict(X_val)
    np.save('bottleneck_features_validation.npy', bottleneck_features_validation)
    np.save('label_validation.npy', y_val)
示例7: test_recursive
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
def test_recursive():
    """Use a Graph container as a layer inside a Sequential model (the
    layer-like API) and smoke-test fit/evaluate/predict/get_config."""
    # Two-branch graph: input1 -> dense1 -> dense3 and input1 -> dense2,
    # summed into a single output.
    graph = containers.Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    stacked = Sequential()
    stacked.add(Dense(32, input_shape=(32,)))
    stacked.add(graph)
    stacked.add(Dense(4))
    stacked.compile('rmsprop', 'mse')

    stacked.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = stacked.evaluate(X_test_graph, y_test_graph)
    assert loss < 2.5

    loss = stacked.evaluate(X_test_graph, y_test_graph, show_accuracy=True)
    stacked.predict(X_test_graph)
    stacked.get_config(verbose=1)
示例8: _test_smoke
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
def _test_smoke(channel_order=None):
    """Smoke-test Convolution2DEnergy_TemporalCorrelation: build a
    one-layer Sequential model for the given image data format, run
    predict on random data, and return (model, predictions).

    channel_order defaults to the backend's image_data_format().
    """
    # BUG FIX: the original imported Convolution2DEnergy_TemporalBasis but
    # instantiated Convolution2DEnergy_TemporalCorrelation below, which is
    # a NameError; import the class that is actually used.
    from kfs.layers.convolutional import Convolution2DEnergy_TemporalCorrelation
    from keras.models import Sequential
    #from keras.layers import Flatten, Dense

    input_shape = (12, 3, 64, 64)
    if channel_order is None:
        channel_order = K.image_data_format()
    if channel_order == 'channels_last':
        input_shape = (12, 64, 64, 3)

    rng = np.random.RandomState(42)
    datums = rng.randn(6, 12, 3, 64, 64).astype('float32')
    if channel_order == 'channels_last':
        datums = datums.transpose(0, 1, 3, 4, 2)

    nn2 = Sequential()
    nn2.add(Convolution2DEnergy_TemporalCorrelation(8, 16, 4, (5, 5), 7,
                                                    padding='same',
                                                    temporal_kernel_size=5,
                                                    input_shape=input_shape))
    nn2.compile(loss='mse', optimizer='sgd')
    # Reuse the computed prediction instead of running predict() twice.
    pred2 = nn2.predict(datums)
    return nn2, pred2
示例9: test_autoencoder_advanced
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
def test_autoencoder_advanced():
    """Exercise the AutoEncoder container: train with reconstruction
    outputs, toggle to compressed-representation outputs (recompiling),
    then toggle back and train again."""
    encoder = containers.Sequential([core.Dense(5, input_shape=(10,))])
    decoder = containers.Sequential([core.Dense(10, input_shape=(5,))])
    X_train = np.random.random((100, 10))
    X_test = np.random.random((100, 10))

    model = Sequential()
    model.add(core.Dense(output_dim=10, input_dim=10))
    autoencoder = core.AutoEncoder(encoder=encoder, decoder=decoder,
                                   output_reconstruction=True)
    model.add(autoencoder)

    # training the autoencoder:
    model.compile(optimizer='sgd', loss='mse')
    assert autoencoder.output_reconstruction
    model.fit(X_train, X_train, nb_epoch=1, batch_size=32)

    # Predicting compressed representations of inputs: the autoencoder has
    # to be recompiled after toggling output_reconstruction.
    autoencoder.output_reconstruction = False
    assert not autoencoder.output_reconstruction
    model.compile(optimizer='sgd', loss='mse')
    representations = model.predict(X_test)
    assert representations.shape == (100, 5)

    # The model is still trainable, although it now expects compressed
    # representations as targets:
    model.fit(X_test, representations, nb_epoch=1, batch_size=32)

    # To keep training against the original inputs, switch back to
    # reconstruction outputs and recompile:
    autoencoder.output_reconstruction = True
    model.compile(optimizer='sgd', loss='mse')
    model.fit(X_train, X_train, nb_epoch=1)

    reconstructions = model.predict(X_test)
    assert reconstructions.shape == (100, 10)
示例10: __init__
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict [as 别名]
class QNetwork:
    """Q-value approximator for DQN: a small fully connected network mapping
    a state vector to one Q-value per action, trained with a Huber loss."""

    def __init__(self, learning_rate=0.01, state_size=2, action_size=3, hidden_size=10):
        # Remember the dimensions so replay() does not have to hard-code them.
        self.state_size = state_size
        self.action_size = action_size
        self.model = Sequential()
        self.model.add(Dense(hidden_size, activation='relu', input_dim=state_size))
        self.model.add(Dense(hidden_size, activation='relu'))
        self.model.add(Dense(action_size, activation='linear'))
        self.optimizer = Adam(lr=learning_rate)  # Adam is used to minimize the loss
        # self.model.compile(loss='mse', optimizer=self.optimizer)
        self.model.compile(loss=huberloss, optimizer=self.optimizer)

    def replay(self, memory, batch_size, gamma, targetQN):
        """Train the network on a minibatch sampled from replay memory.

        Targets are the current Q-values with the taken action's entry
        replaced by reward + gamma * Q_target(next_state, argmax_a Q(next_state, a)),
        i.e. action selection uses this (online) network while the value
        comes from targetQN, so Double DQN is supported.
        """
        # BUG FIX: the original hard-coded shapes (batch_size, 2) and
        # (batch_size, 3), silently ignoring the state_size/action_size
        # constructor arguments; use the stored sizes instead.
        inputs = np.zeros((batch_size, self.state_size))
        targets = np.zeros((batch_size, self.action_size))
        mini_batch = memory.sample(batch_size)

        for i, (state_b, action_b, reward_b, next_state_b) in enumerate(mini_batch):
            inputs[i:i + 1] = state_b
            target = reward_b

            # An all-zero next state marks a terminal transition; otherwise
            # bootstrap from the target network.
            if not (next_state_b == np.zeros(state_b.shape)).all(axis=1):
                retmainQs = self.model.predict(next_state_b)[0]
                next_action = np.argmax(retmainQs)  # greedy action under the online network
                target = reward_b + gamma * targetQN.model.predict(next_state_b)[0][next_action]

            targets[i] = self.model.predict(state_b)  # current Q-values as the base targets
            targets[i][action_b] = target  # supervised signal for the taken action

        # Fit once on the whole minibatch (moved outside the loop, per a
        # reader's advice in the original source).
        self.model.fit(inputs, targets, epochs=1, verbose=0)