This article collects typical usage examples of the Python method keras.engine.training.Model.load_weights. If you have been wondering how exactly Model.load_weights is used, the hand-picked code examples below may help. You can also read further about the enclosing class, keras.engine.training.Model.
The following presents 5 code examples of Model.load_weights, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
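Before the examples, a minimal sketch of the method itself (the file name demo_weights.h5 is arbitrary; keras.models.Model is the same class as keras.engine.training.Model): weights are written to an HDF5 file and can only be restored into a model with a matching architecture.
from keras.layers import Dense, Input
from keras.models import Model

inputs = Input(shape=(8,))
outputs = Dense(1, activation='sigmoid')(inputs)
model = Model(inputs, outputs)

model.save_weights('demo_weights.h5', overwrite=True)  # writes an HDF5 file (requires h5py)
model.load_weights('demo_weights.h5')                  # architecture must match the saved weights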
Example 1: FinancialTimeSeriesAnalysisModel
# Required module import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import load_weights [as alias]
class FinancialTimeSeriesAnalysisModel(object):
model = None
def __init__(self, nb_time_step, dim_data, batch_size=1, model_path=None):
        self.model_path = model_path
self.batch_size = batch_size
self.size_of_input_data_dim = dim_data
self.size_of_input_timesteps = nb_time_step
self.build()
self.weight_loaded = False
if model_path is not None:
self.load_weights()
def build(self):
dim_data = self.size_of_input_data_dim
nb_time_step = self.size_of_input_timesteps
financial_time_series_input = Input(shape=(nb_time_step, dim_data), name='x1')
lstm_layer_1 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer1')
lstm_layer_21 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss1')
lstm_layer_22 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss2')
lstm_layer_23 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss3')
lstm_layer_24 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss4')
lstm_layer_25 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss5')
h1 = lstm_layer_1(financial_time_series_input)
h21 = lstm_layer_21(h1)
h22 = lstm_layer_22(h1)
h23 = lstm_layer_23(h1)
h24 = lstm_layer_24(h1)
h25 = lstm_layer_25(h1)
time_series_predictions1 = TimeDistributed(Dense(1), name="p1")(h21) # custom 1
time_series_predictions2 = TimeDistributed(Dense(1), name="p2")(h22) # custom 2
time_series_predictions3 = TimeDistributed(Dense(1), name="p3")(h23) # mse
time_series_predictions4 = TimeDistributed(Dense(1, activation='sigmoid'), name="p4")(h24) # logloss
time_series_predictions5 = TimeDistributed(Dense(nb_labels, activation='softmax'), name="p5")(h25) # cross
self.model = Model(input=financial_time_series_input,
output=[time_series_predictions1, time_series_predictions2,
time_series_predictions3, time_series_predictions4,
time_series_predictions5],
name="multi-task deep rnn for financial time series forecasting")
plot(self.model, to_file='model.png')
def reset(self):
for l in self.model.layers:
if type(l) is LSTM:
                l.reset_states()
def compile_model(self, lr=0.0001, arg_weight=1.):
optimizer = Adam(lr=lr)
loss = [custom_objective1, custom_objective2, 'mse', 'binary_crossentropy', 'categorical_crossentropy']
self.model.compile(optimizer=optimizer, loss=loss)
def fit_model(self, X, y, y_label, epoch=300):
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0)
self.model.fit(X, [y]*3 + [y > 0] + [y_label], batch_size=self.batch_size, nb_epoch=epoch, validation_split=0.2,
shuffle=True, callbacks=[early_stopping])
def save(self):
self.model.save_weights(self.model_path, overwrite=True)
def load_weights(self):
if os.path.exists(self.model_path):
self.model.load_weights(self.model_path)
self.weight_loaded = True
def print_weights(self, weights=None, detail=False):
weights = weights or self.model.get_weights()
for w in weights:
print("w%s: sum(w)=%s, ave(w)=%s" % (w.shape, np.sum(w), np.average(w)))
if detail:
for w in weights:
print("%s: %s" % (w.shape, w))
def model_eval(self, X, y):
y_hat = self.model.predict(X, batch_size=1)[0]
count_true = 0
count_all = y.shape[1]
for i in range(y.shape[1]):
count_true = count_true + 1 if y[0,i,0]*y_hat[0,i,0]>0 else count_true
print(y[0,i,0],y_hat[0,i,0])
print(count_all,count_true)
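A hedged usage sketch for this class (the array shapes, file name and hyper-parameter values below are assumptions; the class itself also depends on module-level globals such as nb_hidden_units, dropout, l2_norm_alpha, nb_labels, custom_objective1 and custom_objective2 defined elsewhere in its source file):
import numpy as np

nb_time_step, dim_data, nb_labels = 50, 10, 3                     # assumed values
X = np.random.randn(4, nb_time_step, dim_data)
y = np.random.randn(4, nb_time_step, 1)
y_label = np.eye(nb_labels)[np.random.randint(0, nb_labels, (4, nb_time_step))]  # one-hot labels

fts = FinancialTimeSeriesAnalysisModel(nb_time_step, dim_data, model_path='fin_ts_weights.h5')
fts.compile_model(lr=0.0001)
fts.fit_model(X, y, y_label, epoch=10)
fts.save()   # a later run with the same model_path picks the weights up via load_weights()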
Example 2: build_CNN_model
# Required module import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import load_weights [as alias]
#......... some code omitted here .........
# kernel_regularizer=conv_reg3,
# dilation_rate=1,
# name='ConvLayer3')(layer)
#
# layer = SpatialDropout1D(0.50)(layer)
#
# layer = MaxPooling1D(pool_size=pool_len3)(layer)
# #layer = GlobalMaxPool1D()(layer)
#
# layer = Convolution1D(filters=num_filters4,
# kernel_size=filter_length4,
# padding=region,
# activation=conv_activation4,
# kernel_initializer=conv_init4,
# kernel_regularizer=conv_reg4,
# dilation_rate=1,
# name='ConvLayer4')(layer)
#
# #layer = leaky_relu(layer)
#
# layer = SpatialDropout1D(0.50)(layer)
#
# layer = MaxPooling1D(pool_size=pool_len4)(layer)
# #layer = GlobalMaxPool1D()(layer)
#
# # layer = BatchNormalization()(layer)
layer = Flatten()(layer)
layer = Dense(dense_dims0, activation=dense_activation0, kernel_regularizer=dense_reg0,
kernel_initializer='glorot_normal', bias_initializer='zeros',
name='dense0')(layer)
layer = Dropout(0.50)(layer)
layer = Dense(dense_dims1, activation=dense_activation1, kernel_regularizer=dense_reg1,
kernel_initializer='glorot_normal', bias_initializer='zeros',
name='dense1')(layer)
layer = Dropout(0.50)(layer)
# layer = Dense(dense_dims2, activation=dense_activation2, kernel_regularizer=dense_reg2,
# kernel_initializer=dense_init2,
# name='dense2')(layer)
#
#
# layer = Dropout(0.50)(layer)
#
# layer = Dense(dense_dims3, activation=dense_activation3, kernel_regularizer=dense_reg3,
# kernel_initializer=dense_init3,
# name='dense3_outA')(layer)
# #layer = leaky_relu(layer)
#
if is_IntermediateModel:
return Model(inputs=[review_input], outputs=[layer], name="CNN_model")
#
# layer = Dropout(0.5)(layer)
layer = Dense(dense_dims_final, activation=dense_activation_final, kernel_initializer=dense_init_final,
kernel_regularizer=dense_reg0,
name='output_Full')(layer)
CNN_model = Model(inputs=[review_input], outputs=[layer], name="CNN_model")
CNN_model.compile(optimizer=Adam(lr=0.001, decay=0.0), loss=loss_func, metrics=[binary_accuracy])
if load_weight_path is not None:
CNN_model.load_weights(load_weight_path)
hist = ""
if do_training:
weightPath = os.path.join(modelParameters.WEIGHT_PATH, filename)
configPath = os.path.join(modelParameters.WEIGHT_PATH, filename_config)
        with open(configPath + ".json", 'w') as f:  # to_json() returns a str, so open in text mode
f.write(CNN_model.to_json())
checkpoint = ModelCheckpoint(weightPath + '_W.{epoch:02d}-{val_loss:.4f}.hdf5',
verbose=1, save_best_only=True, save_weights_only=False, monitor='val_loss')
earlyStop = EarlyStopping(patience=3, verbose=1, monitor='val_loss')
LRadjuster = ReduceLROnPlateau(monitor='val_loss', factor=0.30, patience=0, verbose=1, cooldown=1,
min_lr=0.00001, epsilon=1e-2)
call_backs = [checkpoint, earlyStop, LRadjuster]
CNN_model.summary()
hist = CNN_model.fit(*model_inputs['training'],
batch_size=batch_size,
epochs=nb_epoch, verbose=1,
validation_data=model_inputs['dev'],
callbacks=call_backs)
return {"model": CNN_model, "hist": hist}
Example 3: __init__
# Required module import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import load_weights [as alias]
class CChessModel:
def __init__(self, config: Config):
self.config = config
self.model = None # type: Model
self.digest = None
self.n_labels = len(ActionLabelsRed)
self.graph = None
self.api = None
def build(self):
mc = self.config.model
in_x = x = Input((14, 10, 9)) # 14 x 10 x 9
# (batch, channels, height, width)
x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_first_filter_size, padding="same",
data_format="channels_first", use_bias=False, kernel_regularizer=l2(mc.l2_reg),
name="input_conv-"+str(mc.cnn_first_filter_size)+"-"+str(mc.cnn_filter_num))(x)
x = BatchNormalization(axis=1, name="input_batchnorm")(x)
x = Activation("relu", name="input_relu")(x)
for i in range(mc.res_layer_num):
x = self._build_residual_block(x, i + 1)
res_out = x
# for policy output
x = Conv2D(filters=2, kernel_size=1, data_format="channels_first", use_bias=False,
kernel_regularizer=l2(mc.l2_reg), name="policy_conv-1-2")(res_out)
x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
x = Activation("relu", name="policy_relu")(x)
x = Flatten(name="policy_flatten")(x)
policy_out = Dense(self.n_labels, kernel_regularizer=l2(mc.l2_reg), activation="softmax", name="policy_out")(x)
# for value output
x = Conv2D(filters=4, kernel_size=1, data_format="channels_first", use_bias=False,
kernel_regularizer=l2(mc.l2_reg), name="value_conv-1-4")(res_out)
x = BatchNormalization(axis=1, name="value_batchnorm")(x)
x = Activation("relu",name="value_relu")(x)
x = Flatten(name="value_flatten")(x)
x = Dense(mc.value_fc_size, kernel_regularizer=l2(mc.l2_reg), activation="relu", name="value_dense")(x)
value_out = Dense(1, kernel_regularizer=l2(mc.l2_reg), activation="tanh", name="value_out")(x)
self.model = Model(in_x, [policy_out, value_out], name="cchess_model")
self.graph = tf.get_default_graph()
def _build_residual_block(self, x, index):
mc = self.config.model
in_x = x
res_name = "res" + str(index)
x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size, padding="same",
data_format="channels_first", use_bias=False, kernel_regularizer=l2(mc.l2_reg),
name=res_name+"_conv1-"+str(mc.cnn_filter_size)+"-"+str(mc.cnn_filter_num))(x)
x = BatchNormalization(axis=1, name=res_name+"_batchnorm1")(x)
x = Activation("relu",name=res_name+"_relu1")(x)
x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size, padding="same",
data_format="channels_first", use_bias=False, kernel_regularizer=l2(mc.l2_reg),
name=res_name+"_conv2-"+str(mc.cnn_filter_size)+"-"+str(mc.cnn_filter_num))(x)
x = BatchNormalization(axis=1, name="res"+str(index)+"_batchnorm2")(x)
x = Add(name=res_name+"_add")([in_x, x])
x = Activation("relu", name=res_name+"_relu2")(x)
return x
@staticmethod
def fetch_digest(weight_path):
if os.path.exists(weight_path):
m = hashlib.sha256()
with open(weight_path, "rb") as f:
m.update(f.read())
return m.hexdigest()
def load(self, config_path, weight_path):
if os.path.exists(config_path) and os.path.exists(weight_path):
logger.debug(f"loading model from {config_path}")
with open(config_path, "rt") as f:
self.model = Model.from_config(json.load(f))
self.model.load_weights(weight_path)
self.digest = self.fetch_digest(weight_path)
self.graph = tf.get_default_graph()
logger.debug(f"loaded model digest = {self.digest}")
return True
else:
logger.debug(f"model files does not exist at {config_path} and {weight_path}")
return False
def save(self, config_path, weight_path):
logger.debug(f"save model to {config_path}")
with open(config_path, "wt") as f:
json.dump(self.model.get_config(), f)
self.model.save_weights(weight_path)
self.digest = self.fetch_digest(weight_path)
logger.debug(f"saved model digest {self.digest}")
def get_pipes(self, num=1, api=None, need_reload=True):
if self.api is None:
self.api = CChessModelAPI(self.config, self)
self.api.start()
return self.api.get_pipe(need_reload)
#......... some code omitted here .........
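A sketch of the reload-on-change pattern that the SHA-256 digest enables when several processes share one weight file (the paths and the Config instance are assumptions):
model = CChessModel(config)
if not model.load('model_config.json', 'model_weight.h5'):
    model.build()                                        # no saved files yet: start fresh
# ... later, check whether another worker has written newer weights ...
if CChessModel.fetch_digest('model_weight.h5') != model.digest:
    model.load('model_config.json', 'model_weight.h5')   # digest changed: reload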
Example 4: FinancialNewsAnalysisModel
# Required module import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import load_weights [as alias]
class FinancialNewsAnalysisModel(object):
model = None
def __init__(self, nb_time_step, dim_data, batch_size=1, model_path=None):
        self.model_path = model_path
self.batch_size = batch_size
self.size_of_input_data_dim = dim_data
self.size_of_input_timesteps = nb_time_step
self.build()
self.weight_loaded = False
if model_path is not None:
self.load_weights()
def build(self):
dim_data = self.size_of_input_data_dim
nb_time_step = self.size_of_input_timesteps
news_input = Input(shape=(nb_time_step, dim_data))
lstm = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh')
bi_lstm = Bidirectional(lstm, input_shape=(nb_time_step, dim_data), merge_mode='concat')
all_news_rep = bi_lstm(news_input)
news_predictions = Dense(1, activation='linear')(all_news_rep)
self.model = Model(news_input, news_predictions, name="deep rnn for financial news analysis")
def reset(self):
for l in self.model.layers:
if type(l) is LSTM:
                l.reset_states()
def compile_model(self, lr=0.0001, loss_weights=0.1):
optimizer = Adam(lr=lr)
loss = 'mse'
# loss = custom_objective
self.model.compile(optimizer=optimizer, loss=loss)
#metrics=['mse'])
plot(self.model, to_file='model.png')
def fit_model(self, X, y, X_val=None, y_val=None, epoch=500):
early_stopping = EarlyStopping(monitor='val_loss', patience=100, verbose=0)
if X_val is None:
self.model.fit(X, y, batch_size=self.batch_size, nb_epoch=epoch, validation_split=0.2,
shuffle=True, callbacks=[early_stopping])
else:
self.model.fit(X, y, batch_size=self.batch_size, nb_epoch=epoch, validation_data=(X_val, y_val),
shuffle=True, callbacks=[early_stopping])
def save(self):
self.model.save_weights(self.model_path, overwrite=True)
def load_weights(self):
if os.path.exists(self.model_path):
self.model.load_weights(self.model_path)
self.weight_loaded = True
def print_weights(self, weights=None, detail=False):
weights = weights or self.model.get_weights()
for w in weights:
print("w%s: sum(w)=%s, ave(w)=%s" % (w.shape, np.sum(w), np.average(w)))
if detail:
for w in weights:
print("%s: %s" % (w.shape, w))
def model_eval(self, X, y):
y_hat = self.model.predict(X, batch_size=1)
count_true = 0
count_all = y.shape[0]
for i in range(y.shape[0]):
count_true = count_true + 1 if y[i,0]*y_hat[i,0]>0 else count_true
            print(y[i, 0], y_hat[i, 0])
        print(count_all, count_true)
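model_eval above counts how often prediction and target agree in sign, i.e. directional accuracy; a vectorised equivalent (same (samples, 1) shapes as above, instance name assumed):
import numpy as np

y_hat = news_model.model.predict(X, batch_size=1)
directional_accuracy = float(np.mean(y[:, 0] * y_hat[:, 0] > 0))   # fraction with matching sign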
Example 5: AdditionNPIModel
# Required module import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import load_weights [as alias]
class AdditionNPIModel(NPIStep):
model = None
f_enc = None
def __init__(self, system: RuntimeSystem, model_path: str=None, program_set: AdditionProgramSet=None):
self.system = system
self.model_path = model_path
self.program_set = program_set
self.batch_size = 1
self.build()
self.weight_loaded = False
self.load_weights()
def build(self):
enc_size = self.size_of_env_observation()
argument_size = IntegerArguments.size_of_arguments
input_enc = InputLayer(batch_input_shape=(self.batch_size, enc_size), name='input_enc')
input_arg = InputLayer(batch_input_shape=(self.batch_size, argument_size), name='input_arg')
input_prg = Embedding(input_dim=PROGRAM_VEC_SIZE, output_dim=PROGRAM_KEY_VEC_SIZE, input_length=1,
batch_input_shape=(self.batch_size, 1))
f_enc = Sequential(name='f_enc')
f_enc.add(Merge([input_enc, input_arg], mode='concat'))
f_enc.add(Dense(256))
f_enc.add(Dense(32))
f_enc.add(Activation('relu', name='relu_enc'))
self.f_enc = f_enc
program_embedding = Sequential(name='program_embedding')
program_embedding.add(input_prg)
f_enc_convert = Sequential(name='f_enc_convert')
f_enc_convert.add(f_enc)
f_enc_convert.add(RepeatVector(1))
f_lstm = Sequential(name='f_lstm')
f_lstm.add(Merge([f_enc_convert, program_embedding], mode='concat'))
# f_lstm.add(Activation('relu', name='relu_lstm_0'))
f_lstm.add(LSTM(256, return_sequences=False, stateful=True))
f_lstm.add(Activation('relu', name='relu_lstm_1'))
f_lstm.add(RepeatVector(1))
f_lstm.add(LSTM(256, return_sequences=False, stateful=True))
f_lstm.add(Activation('relu', name='relu_lstm_2'))
# plot(f_lstm, to_file='f_lstm.png', show_shapes=True)
f_end = Sequential(name='f_end')
f_end.add(f_lstm)
f_end.add(Dense(10))
f_end.add(Dense(1))
f_end.add(Activation('hard_sigmoid', name='hard_sigmoid_end'))
# plot(f_end, to_file='f_end.png', show_shapes=True)
f_prog = Sequential(name='f_prog')
f_prog.add(f_lstm)
f_prog.add(Dense(PROGRAM_KEY_VEC_SIZE))
f_prog.add(Dense(PROGRAM_VEC_SIZE))
f_prog.add(Activation('softmax', name='softmax_prog'))
# plot(f_prog, to_file='f_prog.png', show_shapes=True)
f_args = []
for ai in range(1, IntegerArguments.max_arg_num+1):
f_arg = Sequential(name='f_arg%s' % ai)
f_arg.add(f_lstm)
f_arg.add(Dense(32))
f_arg.add(Dense(IntegerArguments.depth))
f_arg.add(Activation('softmax', name='softmax_arg%s' % ai))
f_args.append(f_arg)
# plot(f_arg, to_file='f_arg.png', show_shapes=True)
self.model = Model([input_enc.input, input_arg.input, input_prg.input],
[f_end.output, f_prog.output] + [fa.output for fa in f_args],
name="npi")
self.compile_model()
plot(self.model, to_file='model.png', show_shapes=True)
def reset(self):
super(AdditionNPIModel, self).reset()
for l in self.model.layers:
if type(l) is LSTM:
l.reset_states()
def compile_model(self, lr=0.0001, arg_weight=1.):
arg_num = IntegerArguments.max_arg_num
optimizer = Adam(lr=lr)
loss = ['binary_crossentropy', 'categorical_crossentropy'] + ['categorical_crossentropy'] * arg_num
self.model.compile(optimizer=optimizer, loss=loss, loss_weights=[0.25, 0.25] + [arg_weight] * arg_num)
def fit(self, steps_list, epoch=3000):
"""
:param int epoch:
:param typing.List[typing.Dict[q=dict, steps=typing.List[StepInOut]]] steps_list:
:return:
"""
def filter_question(condition_func):
sub_steps_list = []
for steps_dict in steps_list:
question = steps_dict['q']
if condition_func(question['in1'], question['in2']):
#......... some code omitted here .........
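Because the LSTMs in this model are stateful (fixed batch_input_shape, stateful=True), hidden state persists across predict/train calls, so a training loop has to call reset() between independent questions. A hedged sketch of that loop, with the per-step training elided:
npi = AdditionNPIModel(system, model_path='npi_weights.h5', program_set=program_set)
for steps_dict in steps_list:
    npi.reset()                      # clears the state of every stateful LSTM
    for step in steps_dict['steps']:
        pass                         # feed one (env, arg, program) -> (end, program, args) step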