This article collects typical usage examples of the Python plot.Plot.append method. If you are unsure how to call Plot.append, how it behaves, or what it looks like in real code, the curated example below may help. You can also look further into other usage examples of the enclosing class, plot.Plot.
The following shows 1 code example of the Plot.append method.
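The plot.Plot class is project-specific and its source is not reproduced on this page. As a rough orientation only, here is a minimal sketch of the interface the example below relies on (a constructor taking series names, plus append, update_plot and save_plot). Everything beyond those method names is an assumption for illustration, not the project's actual implementation.

import matplotlib
matplotlib.use('Agg')  # assumption: render off-screen and save to file
import matplotlib.pyplot as plt


class Plot(object):
    """Minimal assumed stand-in for plot.Plot as used by the example below."""

    def __init__(self, *series_names):
        # one growing list of y-values per named series
        self.series = dict((name, []) for name in series_names)

    def append(self, name, value):
        # record one data point for the named series
        self.series[name].append(value)

    def update_plot(self):
        # redraw every series against its own index
        plt.clf()
        for name, values in sorted(self.series.items()):
            plt.plot(values, label=name)
        plt.legend()

    def save_plot(self, filename='training.png'):
        # write the current figure to disk
        plt.savefig(filename)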
Example 1: train
# Required import: from plot import Plot  [as alias]
# Or: from plot.Plot import append  [as alias]
# The method body below also assumes module-level imports of datetime,
# logging and numpy, plus a project-specific Parameters class.
def train(self, patience, patience_increase, n_epochs, improvement_threshold):
    logging.info('Training the model...')
    plot = Plot('Validation', 'Test')
    # go through this many minibatches before checking the network on the
    # validation set; in this case we check every epoch
    validation_frequency = min(self.n_train_batches, patience / 2)
    best_params = None
    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = datetime.datetime.now()
    done_looping = False
    epoch = 0
    try:
        while (epoch < n_epochs) and (not done_looping):
            epoch = epoch + 1
            for minibatch_index in xrange(self.n_train_batches):
                minibatch_avg_cost = self.train_model(minibatch_index)
                # iteration number
                iter = (epoch - 1) * self.n_train_batches + minibatch_index
                if (iter + 1) % validation_frequency == 0:
                    # compute zero-one loss on the validation set
                    validation_losses = [self.validate_model(i)
                                         for i in xrange(self.n_valid_batches)]
                    this_validation_loss = numpy.mean(validation_losses)
                    logging.info(
                        'epoch %i, minibatch %i/%i, validation error %f %%' %
                        (
                            epoch,
                            minibatch_index + 1,
                            self.n_train_batches,
                            this_validation_loss * 100.
                        )
                    )
                    plot.append('Validation', this_validation_loss)
                    plot.update_plot()
                    # if we got the best validation score so far
                    if this_validation_loss < best_validation_loss:
                        # extend patience if the loss improvement is significant
                        if this_validation_loss < best_validation_loss * \
                                improvement_threshold:
                            patience = max(patience, iter * patience_increase)
                        best_validation_loss = this_validation_loss
                        # evaluate the new best model on the test set
                        test_losses = [self.test_model(i)
                                       for i in xrange(self.n_test_batches)]
                        test_score = numpy.mean(test_losses)
                        logging.info(
                            '    epoch %i, minibatch %i/%i, test error of best model %f %%' %
                            (
                                epoch,
                                minibatch_index + 1,
                                self.n_train_batches,
                                test_score * 100.
                            )
                        )
                        plot.append('Test', test_score)
                        plot.update_plot()
                        best_params = Parameters(
                            self.classifier.params,
                            type(self.classifier).__name__,
                            best_validation_loss,
                            test_score
                        )
                        best_params.save()
                    else:
                        plot.append('Test', numpy.NaN)
                        plot.update_plot()
                    plot.save_plot()
                if patience <= iter:
                    done_looping = True
                    break
    finally:
        end_time = datetime.datetime.now()
        logging.info(
            'Optimization complete with best validation score of %f %%, '
            'with test performance %f %%' %
            (best_validation_loss * 100., test_score * 100.))
        logging.info(
            'The code ran for %d epochs (%s), at %f epochs/sec' %
            (epoch, (end_time - start_time),
             1. * epoch / (end_time - start_time).total_seconds()))
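For readers unfamiliar with the early-stopping logic that drives the plot calls above: patience is the total number of minibatch iterations the loop is willing to run, and it is pushed out to iter * patience_increase whenever the validation loss beats the previous best by more than the relative improvement_threshold. The standalone trace below shows the rule in action; the loss values and iteration numbers are made up for illustration and are not taken from the example.

# Standalone trace of the patience rule used above; all numbers are hypothetical.
patience = 5000
patience_increase = 2
improvement_threshold = 0.995
best_validation_loss = float('inf')

checkpoints = [(1000, 0.30), (2000, 0.25), (3000, 0.249), (4000, 0.20)]
for iteration, loss in checkpoints:
    if loss < best_validation_loss:
        if loss < best_validation_loss * improvement_threshold:
            # a significant improvement buys more training time
            patience = max(patience, iteration * patience_increase)
        best_validation_loss = loss
    print('iter %d: loss %.3f -> patience %d' % (iteration, loss, patience))
# The third checkpoint improves by less than 0.5%, so patience is not extended;
# the fourth is a clear improvement and pushes patience out to 8000.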