This article collects typical usage examples of the Python method keras.engine.training.Model.get_weights. If you have been wondering what Model.get_weights does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage of its containing class, keras.engine.training.Model.
The following presents 4 code examples of Model.get_weights, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
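Before the examples, a minimal sketch of the method's contract may help: get_weights() returns a plain list of NumPy arrays, one per weight tensor, in layer order, and set_weights() accepts the same list. The tiny model below is illustrative only and is not drawn from the examples that follow.

import numpy as np
from keras.layers import Dense, Input
from keras.engine.training import Model

# A tiny two-layer model, purely for illustration.
x = Input(shape=(8,))
h = Dense(4, activation='tanh')(x)
y = Dense(1)(h)
model = Model(x, y)

# One NumPy array per weight tensor: the kernel and bias of each Dense layer.
weights = model.get_weights()
for w in weights:
    print(w.shape, np.sum(w))

# set_weights() takes the same list, so a round trip restores the parameters.
model.set_weights(weights)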
Example 1: FinancialTimeSeriesAnalysisModel
# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import get_weights [as alias]
class FinancialTimeSeriesAnalysisModel(object):
model = None
def __init__(self, nb_time_step, dim_data, batch_size=1, model_path=None):
        self.model_path = model_path
self.batch_size = batch_size
self.size_of_input_data_dim = dim_data
self.size_of_input_timesteps = nb_time_step
self.build()
self.weight_loaded = False
if model_path is not None:
self.load_weights()
def build(self):
dim_data = self.size_of_input_data_dim
nb_time_step = self.size_of_input_timesteps
financial_time_series_input = Input(shape=(nb_time_step, dim_data), name='x1')
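        # nb_hidden_units, dropout, l2_norm_alpha and nb_labels below are
        # module-level hyperparameters defined elsewhere in the original file
        # (not shown in this excerpt).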
lstm_layer_1 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer1')
lstm_layer_21 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss1')
lstm_layer_22 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss2')
lstm_layer_23 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss3')
lstm_layer_24 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss4')
lstm_layer_25 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
return_sequences=True, name='lstm_layer2_loss5')
h1 = lstm_layer_1(financial_time_series_input)
h21 = lstm_layer_21(h1)
h22 = lstm_layer_22(h1)
h23 = lstm_layer_23(h1)
h24 = lstm_layer_24(h1)
h25 = lstm_layer_25(h1)
time_series_predictions1 = TimeDistributed(Dense(1), name="p1")(h21) # custom 1
time_series_predictions2 = TimeDistributed(Dense(1), name="p2")(h22) # custom 2
time_series_predictions3 = TimeDistributed(Dense(1), name="p3")(h23) # mse
time_series_predictions4 = TimeDistributed(Dense(1, activation='sigmoid'), name="p4")(h24) # logloss
time_series_predictions5 = TimeDistributed(Dense(nb_labels, activation='softmax'), name="p5")(h25) # cross
self.model = Model(input=financial_time_series_input,
output=[time_series_predictions1, time_series_predictions2,
time_series_predictions3, time_series_predictions4,
time_series_predictions5],
name="multi-task deep rnn for financial time series forecasting")
plot(self.model, to_file='model.png')
def reset(self):
for l in self.model.layers:
if type(l) is LSTM:
                l.reset_states()
def compile_model(self, lr=0.0001, arg_weight=1.):
optimizer = Adam(lr=lr)
loss = [custom_objective1, custom_objective2, 'mse', 'binary_crossentropy', 'categorical_crossentropy']
self.model.compile(optimizer=optimizer, loss=loss)
def fit_model(self, X, y, y_label, epoch=300):
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0)
self.model.fit(X, [y]*3 + [y > 0] + [y_label], batch_size=self.batch_size, nb_epoch=epoch, validation_split=0.2,
shuffle=True, callbacks=[early_stopping])
def save(self):
self.model.save_weights(self.model_path, overwrite=True)
def load_weights(self):
if os.path.exists(self.model_path):
self.model.load_weights(self.model_path)
self.weight_loaded = True
def print_weights(self, weights=None, detail=False):
weights = weights or self.model.get_weights()
for w in weights:
print("w%s: sum(w)=%s, ave(w)=%s" % (w.shape, np.sum(w), np.average(w)))
if detail:
for w in weights:
print("%s: %s" % (w.shape, w))
def model_eval(self, X, y):
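        # Directional accuracy: count the time steps where prediction and
        # target agree in sign.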
y_hat = self.model.predict(X, batch_size=1)[0]
count_true = 0
count_all = y.shape[1]
for i in range(y.shape[1]):
            count_true = count_true + 1 if y[0, i, 0] * y_hat[0, i, 0] > 0 else count_true
            print(y[0, i, 0], y_hat[0, i, 0])
        print(count_all, count_true)
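A hedged usage sketch for this class follows; the shapes and file name are assumed, and it presumes the module-level names the excerpt references (nb_hidden_units, custom_objective1, custom_objective2, plot, and so on) are defined as in the original file.

# Hypothetical driver code; nb_time_step=50 and dim_data=3 are assumed values.
fts = FinancialTimeSeriesAnalysisModel(nb_time_step=50, dim_data=3,
                                       model_path='fts_weights.h5')
fts.compile_model(lr=0.0001)
# Assumed shapes: X (n, 50, 3), y (n, 50, 1), y_label (n, 50, nb_labels) one-hot.
# fts.fit_model(X, y, y_label)
fts.print_weights()  # walks the list returned by self.model.get_weights()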
Example 2: AdditionNPIModel
# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import get_weights [as alias]
# ......... part of the code is omitted here .........
if correct_count[question_key] == 0:
correct_new += 1
correct_count[question_key] += 1
print("GOOD!: ep=%2d idx=%3d :%s CorrectCount=%s" % (ep, idx, self.dict_to_str(question), correct_count[question_key]))
ok_rate.append(1)
if skip_correct or int(math.sqrt(correct_count[question_key])) ** 2 != correct_count[question_key]:
continue
else:
ok_rate.append(0)
if correct_count[question_key] > 0:
print("Degraded: ep=%2d idx=%3d :%s CorrectCount=%s -> 0" % (ep, idx, self.dict_to_str(question), correct_count[question_key]))
correct_count[question_key] = 0
wrong_new += 1
steps = steps_dict['steps']
xs = []
ys = []
ws = []
for step in steps:
xs.append(self.convert_input(step.input))
y, w = self.convert_output(step.output)
ys.append(y)
ws.append(w)
self.reset()
for i, (x, y, w) in enumerate(zip(xs, ys, ws)):
loss = self.model.train_on_batch(x, y, sample_weight=w)
if not np.isfinite(loss):
print("Loss is not finite!, Last Input=%s" % ([i, (x, y, w)]))
self.print_weights(last_weights, detail=True)
raise RuntimeError("Loss is not finite!")
losses.append(loss)
last_weights = self.model.get_weights()
if losses:
cur_loss = np.average(losses)
print("ep=%2d: ok_rate=%.2f%% (+%s -%s): ave loss %s (%s samples)" %
(ep, np.average(ok_rate)*100, correct_new, wrong_new, cur_loss, len(steps_list)))
# self.print_weights()
if correct_new + wrong_new == 0:
no_change_count += 1
else:
no_change_count = 0
if math.fabs(1 - cur_loss/last_loss) < 0.001 and no_change_count > 5:
print("math.fabs(1 - cur_loss/last_loss) < 0.001 and no_change_count > 5:")
return False
last_loss = cur_loss
print("=" * 80)
self.save()
if np.average(ok_rate) >= pass_rate:
return True
return False
def update_learning_rate(self, learning_rate, arg_weight=1.):
print("Re-Compile Model lr=%s aw=%s" % (learning_rate, arg_weight))
self.compile_model(learning_rate, arg_weight=arg_weight)
def train_f_enc(self, steps_list, epoch=50):
print("training f_enc")
f_add0 = Sequential(name='f_add0')
f_add0.add(self.f_enc)
f_add0.add(Dense(FIELD_DEPTH))
f_add0.add(Activation('softmax', name='softmax_add0'))
f_add1 = Sequential(name='f_add1')
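The get_weights() call in this example supports a debugging pattern worth isolating: snapshot the weights after every successful batch so that, when the loss turns non-finite, the last healthy parameters can be inspected. A standalone sketch of that pattern, with the model and the batch iterable assumed:

import numpy as np

last_weights = None
for x, y, w in batches:  # assumed iterable of (input, target, sample_weight)
    loss = model.train_on_batch(x, y, sample_weight=w)
    if not np.isfinite(loss):
        # Inspect the last snapshot taken before the blow-up.
        for arr in (last_weights or []):
            print("w%s: sum(w)=%s, ave(w)=%s" % (arr.shape, np.sum(arr), np.average(arr)))
        raise RuntimeError("Loss is not finite!")
    last_weights = model.get_weights()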
Example 3: FinancialNewsAnalysisModel
# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import get_weights [as alias]
class FinancialNewsAnalysisModel(object):
model = None
def __init__(self, nb_time_step, dim_data, batch_size=1, model_path=None):
        self.model_path = model_path
self.batch_size = batch_size
self.size_of_input_data_dim = dim_data
self.size_of_input_timesteps = nb_time_step
self.build()
self.weight_loaded = False
if model_path is not None:
self.load_weights()
def build(self):
dim_data = self.size_of_input_data_dim
nb_time_step = self.size_of_input_timesteps
news_input = Input(shape=(nb_time_step, dim_data))
lstm = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh')
bi_lstm = Bidirectional(lstm, input_shape=(nb_time_step, dim_data), merge_mode='concat')
all_news_rep = bi_lstm(news_input)
news_predictions = Dense(1, activation='linear')(all_news_rep)
self.model = Model(news_input, news_predictions, name="deep rnn for financial news analysis")
def reset(self):
for l in self.model.layers:
if type(l) is LSTM:
                l.reset_states()
def compile_model(self, lr=0.0001, loss_weights=0.1):
optimizer = Adam(lr=lr)
loss = 'mse'
# loss = custom_objective
self.model.compile(optimizer=optimizer, loss=loss)
#metrics=['mse'])
plot(self.model, to_file='model.png')
def fit_model(self, X, y, X_val=None, y_val=None, epoch=500):
early_stopping = EarlyStopping(monitor='val_loss', patience=100, verbose=0)
if X_val is None:
self.model.fit(X, y, batch_size=self.batch_size, nb_epoch=epoch, validation_split=0.2,
shuffle=True, callbacks=[early_stopping])
else:
self.model.fit(X, y, batch_size=self.batch_size, nb_epoch=epoch, validation_data=(X_val, y_val),
shuffle=True, callbacks=[early_stopping])
def save(self):
self.model.save_weights(self.model_path, overwrite=True)
def load_weights(self):
if os.path.exists(self.model_path):
self.model.load_weights(self.model_path)
self.weight_loaded = True
def print_weights(self, weights=None, detail=False):
weights = weights or self.model.get_weights()
for w in weights:
print("w%s: sum(w)=%s, ave(w)=%s" % (w.shape, np.sum(w), np.average(w)))
if detail:
for w in weights:
print("%s: %s" % (w.shape, w))
def model_eval(self, X, y):
y_hat = self.model.predict(X, batch_size=1)
count_true = 0
count_all = y.shape[0]
for i in range(y.shape[0]):
            count_true = count_true + 1 if y[i, 0] * y_hat[i, 0] > 0 else count_true
            print(y[i, 0], y_hat[i, 0])
        print(count_all, count_true)
Example 4: PolicyValueNet
# Required import: from keras.engine.training import Model [as alias]
# Or: from keras.engine.training.Model import get_weights [as alias]
class PolicyValueNet():
"""policy-value network """
def __init__(self, board_width, board_height, model_file=None):
self.board_width = board_width
self.board_height = board_height
self.l2_const = 1e-4 # coef of l2 penalty
self.create_policy_value_net()
self._loss_train_op()
if model_file:
net_params = pickle.load(open(model_file, 'rb'))
self.model.set_weights(net_params)
def create_policy_value_net(self):
"""create the policy value network """
in_x = network = Input((4, self.board_width, self.board_height))
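        # 4 input feature planes, channels-first; what each plane encodes is
        # determined by board.current_state() (see policy_value_fn below).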
# conv layers
network = Conv2D(filters=32, kernel_size=(3, 3), padding="same", data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
network = Conv2D(filters=64, kernel_size=(3, 3), padding="same", data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
network = Conv2D(filters=128, kernel_size=(3, 3), padding="same", data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
# action policy layers
policy_net = Conv2D(filters=4, kernel_size=(1, 1), data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
policy_net = Flatten()(policy_net)
self.policy_net = Dense(self.board_width*self.board_height, activation="softmax", kernel_regularizer=l2(self.l2_const))(policy_net)
# state value layers
value_net = Conv2D(filters=2, kernel_size=(1, 1), data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
value_net = Flatten()(value_net)
value_net = Dense(64, kernel_regularizer=l2(self.l2_const))(value_net)
self.value_net = Dense(1, activation="tanh", kernel_regularizer=l2(self.l2_const))(value_net)
self.model = Model(in_x, [self.policy_net, self.value_net])
def policy_value(state_input):
state_input_union = np.array(state_input)
results = self.model.predict_on_batch(state_input_union)
return results
self.policy_value = policy_value
def policy_value_fn(self, board):
"""
input: board
output: a list of (action, probability) tuples for each available action and the score of the board state
"""
legal_positions = board.availables
current_state = board.current_state()
act_probs, value = self.policy_value(current_state.reshape(-1, 4, self.board_width, self.board_height))
act_probs = zip(legal_positions, act_probs.flatten()[legal_positions])
return act_probs, value[0][0]
    def _loss_train_op(self):
        """
        Three loss terms:
        loss = (z - v)^2 - pi^T * log(p) + c||theta||^2
        """
# get the train op
opt = Adam()
losses = ['categorical_crossentropy', 'mean_squared_error']
self.model.compile(optimizer=opt, loss=losses)
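        # Policy entropy, tracked as a training diagnostic below; it is not
        # part of the compiled loss.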
def self_entropy(probs):
return -np.mean(np.sum(probs * np.log(probs + 1e-10), axis=1))
def train_step(state_input, mcts_probs, winner, learning_rate):
state_input_union = np.array(state_input)
mcts_probs_union = np.array(mcts_probs)
winner_union = np.array(winner)
loss = self.model.evaluate(state_input_union, [mcts_probs_union, winner_union], batch_size=len(state_input), verbose=0)
action_probs, _ = self.model.predict_on_batch(state_input_union)
entropy = self_entropy(action_probs)
K.set_value(self.model.optimizer.lr, learning_rate)
self.model.fit(state_input_union, [mcts_probs_union, winner_union], batch_size=len(state_input), verbose=0)
return loss[0], entropy
self.train_step = train_step
def get_policy_param(self):
net_params = self.model.get_weights()
return net_params
def save_model(self, model_file):
""" save model params to file """
net_params = self.get_policy_param()
pickle.dump(net_params, open(model_file, 'wb'), protocol=2)
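A hedged round-trip sketch of the persistence scheme above: the parameters travel as the plain list of NumPy arrays returned by get_weights(), pickled to disk and restored with set_weights(). The board size and file name are assumed values.

import numpy as np

# Hypothetical round trip; an 8x8 board is an assumed size.
net = PolicyValueNet(board_width=8, board_height=8)
net.save_model('policy_8_8.model')

# A fresh network with the same architecture restores the exact parameters.
net2 = PolicyValueNet(board_width=8, board_height=8, model_file='policy_8_8.model')
assert all(np.array_equal(a, b)
           for a, b in zip(net.model.get_weights(), net2.model.get_weights()))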