本文整理匯總了Python中plot.Plot.append方法的典型用法代碼示例。如果您正苦於以下問題:Python Plot.append方法的具體用法?Python Plot.append怎麽用?Python Plot.append使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類plot.Plot
的用法示例。
在下文中一共展示了Plot.append方法的1個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: train
# 需要導入模塊: from plot import Plot [as 別名]
# 或者: from plot.Plot import append [as 別名]
def train(self, patience, patience_increase, n_epochs, improvement_threshold):
    """Train the classifier with minibatch SGD and early stopping.

    Parameters
    ----------
    patience : int
        Minimum number of minibatch iterations to look at before stopping.
    patience_increase : number
        Factor by which patience is extended when a significantly better
        validation loss is found.
    n_epochs : int
        Maximum number of passes over the training set.
    improvement_threshold : float
        A new validation loss counts as a significant improvement only if
        it is below ``best_validation_loss * improvement_threshold``.

    Side effects: logs progress, updates/saves a ``Plot``, and saves the
    best ``Parameters`` found so far.
    """
    logging.info('Training the model...')
    plot = Plot('Validation', 'Test')
    # Go through this many minibatches before checking the network on the
    # validation set; in this case we check every epoch.
    # NOTE: floor division keeps this an int (the original `/` was written
    # for Python 2 integer division).
    validation_frequency = min(self.n_train_batches, patience // 2)
    best_params = None
    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = datetime.datetime.now()
    done_looping = False
    epoch = 0
    try:
        while (epoch < n_epochs) and (not done_looping):
            epoch = epoch + 1
            for minibatch_index in range(self.n_train_batches):
                # Train on one minibatch; the returned cost is not used here.
                self.train_model(minibatch_index)
                # Global (0-based) iteration number across all epochs.
                # Renamed from `iter` to avoid shadowing the builtin.
                iteration = (epoch - 1) * self.n_train_batches + minibatch_index
                if (iteration + 1) % validation_frequency == 0:
                    # Compute zero-one loss on the validation set.
                    validation_losses = [self.validate_model(i)
                                         for i in range(self.n_valid_batches)]
                    this_validation_loss = numpy.mean(validation_losses)
                    logging.info(
                        'epoch %i, minibatch %i/%i, validation error %f %%' %
                        (
                            epoch,
                            minibatch_index + 1,
                            self.n_train_batches,
                            this_validation_loss * 100.
                        )
                    )
                    plot.append('Validation', this_validation_loss)
                    plot.update_plot()
                    # If we got the best validation score until now...
                    if this_validation_loss < best_validation_loss:
                        # ...improve patience if the loss improvement is
                        # good enough (i.e. significantly below the best).
                        if this_validation_loss < best_validation_loss * \
                                improvement_threshold:
                            patience = max(patience, iteration * patience_increase)
                        best_validation_loss = this_validation_loss
                        # Test it on the test set.
                        test_losses = [self.test_model(i)
                                       for i in range(self.n_test_batches)]
                        test_score = numpy.mean(test_losses)
                        logging.info(
                            ' epoch %i, minibatch %i/%i test error of best model %f %%' %
                            (
                                epoch,
                                minibatch_index + 1,
                                self.n_train_batches,
                                test_score * 100.
                            )
                        )
                        plot.append('Test', test_score)
                        plot.update_plot()
                        best_params = Parameters(
                            self.classifier.params,
                            type(self.classifier).__name__,
                            best_validation_loss,
                            test_score
                        )
                        best_params.save()
                    else:
                        # No new best: append NaN so the test curve stays
                        # aligned with the validation curve's x-axis.
                        # (numpy.nan, not the NaN alias removed in NumPy 2.0)
                        plot.append('Test', numpy.nan)
                        plot.update_plot()
                    plot.save_plot()
                if patience <= iteration:
                    done_looping = True
                    break
    finally:
        # Always report final statistics, even on interruption.
        end_time = datetime.datetime.now()
        logging.info(
            'Optimization complete with best validation score of %f %%, with test performance %f %%' %
            (best_validation_loss * 100., test_score * 100.))
        logging.info(
            'The code run for %d epochs (%s), with %f epochs/sec' %
            (epoch, (end_time - start_time), 1. * epoch / (end_time - start_time).total_seconds()))