本文整理汇总了Python中keras.models.Sequential.get_weights方法的典型用法代码示例。如果您正苦于以下问题：Python Sequential.get_weights方法的具体用法？Python Sequential.get_weights怎么用？Python Sequential.get_weights使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 keras.models.Sequential 的用法示例。
在下文中一共展示了Sequential.get_weights方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: train
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def train():
    """Build an LSTM sentiment classifier, train/evaluate it on the
    module-level (X_train, y_train) / (X_test, y_test) split, and pickle
    the trained weights to ``save_weight_lstm.pickle``.
    """
    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.2))
    # A GRU could be swapped in here for comparison.
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    # Other optimizers / optimizer configs are worth trying too.
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print('Train...')
    print(X_train.shape)
    print(y_train.shape)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
    with open("save_weight_lstm.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(), f)
示例2: test_saving_overwrite_option_gcs
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def test_saving_overwrite_option_gcs():
    """Saving to GCS with overwrite=False must respect the user's answer
    to the overwrite prompt: keep the old weights on 'no', replace on 'yes'.
    """
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    original = model.get_weights()
    replacement = [np.random.random(w.shape) for w in original]
    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        # Unique filename per test so tests can run in parallel.
        gcs_filepath = file_io_proxy.get_filepath(
            filename='test_saving_overwrite_option_gcs.h5')
        save_model(model, gcs_filepath)
        model.set_weights(replacement)
        with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
            # Declining the prompt must leave the saved file untouched.
            ask.return_value = False
            save_model(model, gcs_filepath, overwrite=False)
            ask.assert_called_once()
            reloaded = load_model(gcs_filepath)
            for saved, expected in zip(reloaded.get_weights(), original):
                assert_allclose(saved, expected)
            # Accepting the prompt must overwrite with the new weights.
            ask.return_value = True
            save_model(model, gcs_filepath, overwrite=False)
            assert ask.call_count == 2
            reloaded = load_model(gcs_filepath)
            for saved, expected in zip(reloaded.get_weights(), replacement):
                assert_allclose(saved, expected)
        file_io_proxy.delete_file(gcs_filepath)  # cleanup
示例3: test_saving_overwrite_option
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def test_saving_overwrite_option():
    """Saving to an existing local file with overwrite=False must respect
    the overwrite prompt: keep the old weights on 'no', replace on 'yes'.
    """
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    original = model.get_weights()
    replacement = [np.random.random(w.shape) for w in original]
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model.set_weights(replacement)
    with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
        # Declining the prompt must leave the saved file untouched.
        ask.return_value = False
        save_model(model, fname, overwrite=False)
        ask.assert_called_once()
        reloaded = load_model(fname)
        for saved, expected in zip(reloaded.get_weights(), original):
            assert_allclose(saved, expected)
        # Accepting the prompt must overwrite with the new weights.
        ask.return_value = True
        save_model(model, fname, overwrite=False)
        assert ask.call_count == 2
        reloaded = load_model(fname)
        for saved, expected in zip(reloaded.get_weights(), replacement):
            assert_allclose(saved, expected)
    os.remove(fname)
示例4: test_preprocess_weights_for_loading_for_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def test_preprocess_weights_for_loading_for_model(layer):
    """Round-tripping Keras-1-format weights through
    preprocess_weights_for_loading must reproduce the model's weights.
    """
    model = Sequential([layer])
    original = model.get_weights()
    converted = topology.preprocess_weights_for_loading(
        model, convert_weights(layer, original),
        original_keras_version='1')
    assert all(np.allclose(a, b, 1e-5)
               for a, b in zip(original, converted))
示例5: __init__
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
class brain:
    """Small feed-forward network (6 inputs -> 8 tanh -> 3 tanh) with a
    genetic crossover/mutation operator over its weight matrices."""

    def __init__(self, model):
        # Build a fresh randomly-initialized network unless an existing
        # compiled model is supplied.
        if model is None:
            net = Sequential()
            net.add(Dense(
                8, activation="tanh", input_dim=6,
                kernel_initializer=initializers.RandomUniform(
                    minval=-1, maxval=1, seed=None)))
            net.add(Dense(
                3, activation="tanh",
                kernel_initializer=initializers.RandomUniform(
                    minval=-1, maxval=1, seed=None)))
            net.compile(loss='mean_squared_error', optimizer='adam')
            self.model = net
        else:
            self.model = model

    def getOutputs(self, inputs):
        # NOTE(review): appends a constant bias input, mutating the
        # caller's list in place — confirm callers expect that.
        inputs.append(1)
        return self.model.predict(np.asarray([inputs]))

    def mutate(self, brain1, brain2):
        """Replace this network's kernels with a crossover of the two
        parents' kernels plus occasional random jitter; biases are kept."""
        # NOTE(review): expects brain1/brain2 to expose get_weights()
        # directly (i.e. Keras models, not `brain` wrappers) — compare
        # Brain.breed, which uses brain1.model.get_weights(); verify
        # against callers.
        child_weights = []
        own_weights = self.model.get_weights()
        # get_weights() alternates kernel (even index) / bias (odd index).
        for i in range(0, len(own_weights), 2):
            parent1 = brain1.get_weights()[i]
            parent2 = brain2.get_weights()[i]
            kernel = []
            for row in range(len(parent1)):
                new_row = []
                for col in range(len(parent1[0])):
                    r = random()
                    jitter = 0
                    if random() < 0.1:  # 10% chance of a small mutation
                        jitter = randint(-100, 100) / 100
                    if r < 0.4:
                        new_row.append(parent1[row][col] + jitter)
                    elif r > 0.6:
                        new_row.append(parent2[row][col] + jitter)
                    else:
                        new_row.append(
                            (parent1[row][col] + parent2[row][col]) / 2 + jitter)
                kernel.append(new_row)
            child_weights.append(kernel)
            child_weights.append(own_weights[i + 1])  # keep own bias vector
        self.model.set_weights(child_weights)
示例6: test_save_load_weights_gcs
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def test_save_load_weights_gcs():
    """Weights saved to GCS must load back identical after being clobbered."""
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    original = model.get_weights()
    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        # Unique filename per test so tests can run in parallel.
        gcs_filepath = file_io_proxy.get_filepath(
            filename='test_save_load_weights_gcs.h5')
        model.save_weights(gcs_filepath)
        # Clobber the in-memory weights, then confirm they really changed.
        model.set_weights([np.random.random(w.shape) for w in original])
        for current, saved in zip(model.get_weights(), original):
            assert not (current == saved).all()
        # Loading must restore the originals.
        model.load_weights(gcs_filepath)
        for current, saved in zip(model.get_weights(), original):
            assert_allclose(current, saved)
        file_io_proxy.delete_file(gcs_filepath)  # cleanup
示例7: init_neural_networks
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def init_neural_networks(self):
    """Build and compile the 3-layer (tanh/tanh/linear) regression network
    and store it on ``self.learner``.

    Uses ``self.inputSize`` as the input dimension. The hidden layers use
    the legacy Keras-1 ``init``/``output_dim`` keyword arguments.
    """
    # Fixed: the original used Python-2-only `print` statements, which are
    # a syntax error under Python 3 and inconsistent with the rest of the
    # file; the printed text is unchanged.
    print("init start")
    model = Sequential()
    model.add(Dense(input_dim=self.inputSize, output_dim=20,
                    init="he_normal", activation="tanh"))
    model.add(Dense(input_dim=20, output_dim=1,
                    init="he_normal", activation="tanh"))
    model.add(Dense(input_dim=1, output_dim=1,
                    init="he_normal", activation="linear"))
    model.compile(loss='mean_squared_error', optimizer='rmsprop')
    # NOTE(review): fetched but never used — kept for parity with the
    # original; confirm whether a caller relied on this side effect.
    weights = model.get_weights()
    self.learner = model
    print("init end")
示例8: compare_newapi
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def compare_newapi(self, klayer, blayer, input_data, weight_converter=None,
                   is_training=False, rtol=1e-6, atol=1e-6):
    """Assert that a BigDL new-API layer's forward pass matches the
    corresponding Keras layer's prediction on ``input_data``.

    ``weight_converter`` translates Keras weights into BigDL format and is
    only invoked when the Keras model actually has weights.
    """
    from keras.models import Sequential as KSequential
    from bigdl.nn.keras.topology import Sequential as BSequential

    bmodel = BSequential()
    bmodel.add(blayer)
    kmodel = KSequential()
    kmodel.add(klayer)
    koutput = kmodel.predict(input_data)

    from bigdl.nn.keras.layer import BatchNormalization
    if isinstance(blayer, BatchNormalization):
        # BatchNormalization keeps running statistics outside get_weights();
        # copy them over explicitly.
        blayer.set_running_mean(K.eval(klayer.running_mean))
        blayer.set_running_std(K.eval(klayer.running_std))
    if kmodel.get_weights():
        bmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
    bmodel.training(is_training)
    boutput = bmodel.forward(input_data)
    self.assert_allclose(boutput, koutput, rtol=rtol, atol=atol)
示例9: __init__
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
class Brain:
    """Feed-forward controller (6 inputs -> 12 tanh -> 3 tanh) with a
    genetic ``breed`` operator that recombines two parents' weights."""

    def __init__(self, model):
        # Build a fresh randomly-initialized network unless an existing
        # compiled model is supplied.
        if model is None:
            net = Sequential()
            net.add(Dense(
                12, input_dim=6, activation="tanh",
                kernel_initializer=initializers.RandomUniform(
                    minval=-1, maxval=1, seed=None)))
            net.add(Dense(
                3, activation="tanh",
                kernel_initializer=initializers.RandomUniform(
                    minval=-1, maxval=1, seed=None)))
            net.compile(optimizer='sgd', loss='mean_squared_error')
            self.model = net
        else:
            self.model = model

    def getOutputs(self, inputs):
        return self.model.predict(np.asarray([inputs]))

    def breed(self, brain1, brain2):
        """Replace this network's kernels with a per-gene crossover of the
        two parents' kernels (20% chance of an added mutation); own biases
        are kept unchanged."""
        child_weights = []
        own_weights = self.model.get_weights()
        # get_weights() alternates kernel (even index) / bias (odd index).
        for i in range(0, len(own_weights), 2):
            parent1 = brain1.model.get_weights()[i]
            parent2 = brain2.model.get_weights()[i]
            kernel = []
            for row in range(len(parent1)):
                gene_row = []
                for col in range(len(parent1[0])):
                    if random() > 0.8:
                        # 20% chance: inherit one parent's gene plus jitter.
                        gene = choice([parent1[row][col], parent2[row][col]])
                        gene_row.append(gene + randint(-200, 200) / 1000)
                    else:
                        gene_row.append(
                            choice([parent1[row][col], parent2[row][col]]))
                kernel.append(gene_row)
            child_weights.append(kernel)
            child_weights.append(own_weights[i + 1])  # keep own bias vector
        self.model.set_weights(child_weights)
示例10: test1
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def test1():
    """Sanity-check masked average pooling: with mask_zero=True, the pooled
    output of input [0..9] should equal the mean of embedding rows 1-9
    (row 0 is masked out)."""
    model = Sequential()
    model.add(Embedding(100, 50, input_length=10, mask_zero=True))
    model.add(Sum(50, ave=True))
    model.compile(optimizer='sgd', loss='mse')
    pooled = model.predict(np.array([range(10)]))
    table = model.get_weights()[0]
    expected = table[1:10, :].mean(0)
    if abs((pooled - expected).sum()) < 1e-8:
        print("Behave as expectation")
    else:
        print("Something wrong")
示例11: compare_layer
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def compare_layer(self, klayer, zlayer, input_data, weight_converter=None,
                  is_training=False, rtol=1e-6, atol=1e-6):
    """Compare forward results for a Keras layer against the Zoo Keras API
    layer on ``input_data``.

    ``weight_converter`` translates Keras weights into Zoo format and is
    only invoked when the Keras model actually has weights.
    """
    from keras.models import Sequential as KSequential
    from zoo.pipeline.api.keras.models import Sequential as ZSequential

    zmodel = ZSequential()
    zmodel.add(zlayer)
    kmodel = KSequential()
    kmodel.add(klayer)
    koutput = kmodel.predict(input_data)

    from zoo.pipeline.api.keras.layers import BatchNormalization
    if isinstance(zlayer, BatchNormalization):
        # BatchNormalization keeps running statistics outside get_weights();
        # copy them over explicitly.
        zlayer.set_running_mean(K.eval(klayer.running_mean))
        zlayer.set_running_std(K.eval(klayer.running_std))
    if kmodel.get_weights():
        zmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
    zmodel.training(is_training)
    zoutput = zmodel.forward(input_data)
    self.assert_allclose(zoutput, koutput, rtol=rtol, atol=atol)
示例12: train
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def train():
    """Train a ReLU/softmax MLP on the module-level (X_train, y_train) and
    pickle the trained weights to ``save_weight.pickle``."""
    model = Sequential()
    model.add(Dense(output_dim=100, input_dim=28 * 28))
    model.add(Activation("relu"))
    model.add(Dense(output_dim=10))
    model.add(Activation("softmax"))
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    model.fit(X_train, y_train)
    with open("save_weight.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(), f)
示例13: test_EarlyStopping_reuse
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def test_EarlyStopping_reuse():
    """A reused EarlyStopping callback must reset its internal state, so a
    second fit from the same starting weights again runs for at least
    `patience` epochs."""
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy',
                  metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    initial_weights = model.get_weights()
    history = model.fit(data, labels, callbacks=[stopper])
    assert len(history.epoch) >= patience
    # Restart from the same weights and reuse the same callback instance.
    model.set_weights(initial_weights)
    history = model.fit(data, labels, callbacks=[stopper])
    assert len(history.epoch) >= patience
示例14: train_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def train_model(feature_layers, classification_layers, image_list, nb_epoch,
                nb_classes, img_rows, img_cols, weights=None):
    """Stack ``feature_layers`` + ``classification_layers`` into one model,
    optionally seed it with ``weights``, and train it with real-time data
    augmentation, holding out the last 20% of ``image_list`` for testing.

    Returns ``(model, model.get_weights())``.
    """
    num_images = len(image_list)
    test_size = int(0.2 * num_images)  # last 20% is the hold-out set
    print("Train size: ", num_images - test_size)
    print("Test size: ", test_size)

    model = Sequential()
    for layer in feature_layers + classification_layers:
        model.add(layer)
    if weights is not None:
        model.set_weights(weights)

    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    print('Using real time data augmentation')

    for epoch in range(nb_epoch):
        print('-' * 40)
        print('Epoch', epoch)
        print('-' * 40)
        print('Training...')
        # Batch-train with real-time data augmentation.
        progbar = generic_utils.Progbar(num_images - test_size)
        for X_batch, Y_batch in flow(image_list[0:-test_size]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            loss = model.train_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('train loss', loss)])

        print('Testing...')
        progbar = generic_utils.Progbar(test_size)
        for X_batch, Y_batch in flow(image_list[-test_size:]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            score = model.test_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('test loss', score)])

    return model, model.get_weights()
示例15: _test_equivalence
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import get_weights [as 别名]
def _test_equivalence(channel_order=None):
    """Check that Convolution2DEnergy_TemporalBasis and
    Convolution2DEnergy_TemporalCorrelation produce identical outputs when
    given identical weights, in either data format.

    Parameters
    ----------
    channel_order : str or None
        'channels_first' / 'channels_last'; defaults to the backend's
        K.image_data_format().

    Returns
    -------
    (basis_model, basis_prediction, correlation_model, correlation_prediction)
    """
    from kfs.layers.convolutional import Convolution2DEnergy_TemporalBasis
    from keras.models import Sequential
    # from keras.layers import Flatten, Dense

    input_shape = (12, 3, 64, 64)
    if channel_order is None:
        channel_order = K.image_data_format()
    if channel_order == 'channels_last':
        input_shape = (12, 64, 64, 3)

    nn = Sequential()
    nn.add(Convolution2DEnergy_TemporalBasis(8, 16, 4, (5, 5), 7,
                                             padding='same',
                                             input_shape=input_shape,
                                             data_format=channel_order))

    # Deterministic input so the comparison is reproducible.
    rng = np.random.RandomState(42)
    datums = rng.randn(6, 12, 3, 64, 64).astype('float32')
    if channel_order == 'channels_last':
        datums = datums.transpose(0, 1, 3, 4, 2)
    nn.compile(loss='mse', optimizer='sgd')

    nn2 = Sequential()
    nn2.add(Convolution2DEnergy_TemporalCorrelation(8, 16, 4, (5, 5), 7,
                                                    padding='same',
                                                    input_shape=input_shape,
                                                    data_format=channel_order))
    nn2.compile(loss='mse', optimizer='sgd')
    nn2.set_weights(nn.get_weights())

    pred1 = nn.predict(datums)
    pred2 = nn2.predict(datums)
    assert ((pred1 - pred2) == 0.).all()
    # Fixed: the original return re-ran nn.predict / nn2.predict, doing two
    # redundant (and expensive) inference passes; inference is deterministic,
    # so reuse the already-computed predictions.
    return nn, pred1, nn2, pred2