本文整理汇总了Python中util.Timer.lap方法的典型用法代码示例。如果您正苦于以下问题:Python Timer.lap方法的具体用法?Python Timer.lap怎么用?Python Timer.lap使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类util.Timer的用法示例。
在下文中一共展示了Timer.lap方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# Required import: from util import Timer [as alias]
# Or: from util.Timer import lap [as alias]
def main():
n_epoch = params.n_epoch
save_weight_filename = params.save_weight_file
do_validation_only = params.test_only
gen_n_text_samples = params.gen_n_samples
learning_rate = params.learning_rate
training_t = Timer()
validation_t = Timer()
best_pp = None
prev_loss = None
prev_acc = None
patience = MAX_PATIENCE
if params.mode == 'C2W2C':
def c2w2c_weighted_objective(fn):
  """Wrap a Keras objective `fn` for the C2W2C model.

  The returned function sums per-character scores into word-level
  losses before averaging, instead of Keras's default plain mean.
  """
  def weighted(y_true, y_pred, weights, mask=None):
    # Masking is not supported here; sample weights are mandatory.
    assert mask is None
    assert weights is not None
    raw_scores = fn(y_true, y_pred)
    # Collapse any trailing dimensions beyond the weight tensor's rank
    # so the score tensor lines up with the sample weights.
    extra_axes = list(range(K.ndim(weights), K.ndim(raw_scores)))
    raw_scores = K.mean(raw_scores, axis=extra_axes)
    weighted_scores = raw_scores * weights
    # word loss = sum of its char losses; batch loss = mean of word losses
    return K.mean(K.sum(weighted_scores, axis=-1))
  return weighted
# by default Keras calculates only mean which is not correct because
# word loss = sum(char losses), thus we need to monkey batch the
# weighted_objective function to return correct loss for C2W2C model
# ATTENTION: this might not work in later Keras versions, only tested with 1.0.5
ket.weighted_objective = c2w2c_weighted_objective
# ======== PREPARE MODELS AND DATA ========
t_model, v_model, training_data, validation_data, gen_text = prepare_env(params)
# Evaluate the current training weights on the validation set and return the
# perplexity. `best` is the best perplexity seen so far (or None) and is used
# only for the improvement delta in the printed report.
# NOTE(review): closes over main()'s locals (gen_n_text_samples, gen_text,
# t_model, v_model, validation_data, validation_t) and references
# `training_dataset`, which is not defined in the visible code (L40 binds
# `training_data`) — presumably a module-level global; verify.
def validate_model(best):
# Optionally sample generated text from the model before scoring.
if gen_n_text_samples:
print '\nGenerating %d text samples...' % gen_n_text_samples
n_seed = 100
# Pick a random n_seed-word window from the training corpus as the seed.
# NOTE(review): if n_words <= n_seed, randint(0, <=0) raises ValueError —
# confirm the corpus is always larger than the seed length.
start = max(0, np.random.randint(0, training_dataset.n_words - n_seed))
seed = training_dataset.get_words()[start: start + n_seed]
gen_text(seed=seed, how_many=gen_n_text_samples)
print '\nValidating model...'
validation_t.start()
# Transfer the freshly trained weights into the validation model and clear
# its recurrent state before evaluating.
v_model.set_weights(t_model.get_weights())
v_model.reset_states()
# validation_data[0] is a factory returning (sample count, batch generator).
n_v_samples, gen_v = validation_data[0]()
loss, _ = v_model.evaluate_generator(gen_v, n_v_samples)
# Perplexity = exp(mean cross-entropy loss).
pp = np.exp(loss)
val_elapsed, val_tot = validation_t.lap()
validation_info = '''Validation result:
- Model loss: %f
- Perplexity: %f %s
- OOV rate: %f
- Validation took: %s
- Total validation: %s
''' % (loss, pp, delta_str(pp, best), validation_data[1], val_elapsed, val_tot)
info(validation_info)
return pp
if do_validation_only:
validate_model(None)
sys.exit(0)
print '\nTraining model...'
for epoch in range(1, n_epoch + 1):
print '=== Epoch %d ===' % epoch
training_t.start()
n_t_samples, gen_t = training_data[0]()
t_model.reset_states()
callbacks = []
if save_weight_filename:
callbacks += [ModelCheckpoint(save_weight_filename, monitor='loss', mode='min', save_best_only=True)]
h = t_model.fit_generator(generator=gen_t,
samples_per_epoch=n_t_samples,
callbacks=callbacks,
nb_epoch=1,
verbose=1)
fit_elapsed, fit_tot = training_t.lap()
loss = h.history['loss'][0]
acc = h.history['acc'][0]
epoch_info = '''Epoch %d summary at %s:
- Model loss: %f %s
- Model accuracy: %f %s
- Perplexity: %f
#.........这里部分代码省略.........