本文整理汇总了Python中simplelearn.training.EpochLogger类的典型用法代码示例。如果您正苦于以下问题:Python EpochLogger类的具体用法?Python EpochLogger怎么用?Python EpochLogger使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了EpochLogger类的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
#.........这里部分代码省略.........
#
"""
def make_output_basename(args):
assert_equal(os.path.splitext(args.output_prefix)[1], "")
if os.path.isdir(args.output_prefix) and \
not args.output_prefix.endswith('/'):
args.output_prefix += '/'
output_dir, output_prefix = os.path.split(args.output_prefix)
if output_prefix != "":
output_prefix = output_prefix + "_"
output_prefix = os.path.join(output_dir, output_prefix)
return "{}lr-{}_mom-{}_nesterov-{}_bs-{}".format(
output_prefix,
args.learning_rate,
args.initial_momentum,
args.nesterov,
args.batch_size)
"""
# NOTE(review): fragment of a larger main(); indentation was lost when this
# example was scraped, so statement grouping is approximate.
assert_equal(os.path.splitext(args.output_prefix)[1], "")
# Normalize a bare directory prefix so os.path.split() yields an empty stem.
if os.path.isdir(args.output_prefix) and not args.output_prefix.endswith("/"):
args.output_prefix += "/"
output_dir, output_prefix = os.path.split(args.output_prefix)
if output_prefix != "":
output_prefix = output_prefix + "_"
output_prefix = os.path.join(output_dir, output_prefix)
# HDF5 log that collects per-epoch values from the monitors subscribed below.
epoch_logger = EpochLogger(output_prefix + "SGD_nesterov.h5")
# misclassification_node = Misclassification(output_node, label_node)
# mcr_logger = LogsToLists()
# training_stopper = StopsOnStagnation(max_epochs=10,
# min_proportional_decrease=0.0)
misclassification_node = Misclassification(output_node, label_lookup_node)
# Validation monitors: mean loss, plus misclassification rate with early
# stopping after 20 stagnant epochs.
validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
epoch_logger.subscribe_to("validation mean loss", validation_loss_monitor)
validation_misclassification_monitor = MeanOverEpoch(
misclassification_node, callbacks=[print_mcr, StopsOnStagnation(max_epochs=20, min_proportional_decrease=0.0)]
)
epoch_logger.subscribe_to("validation misclassification", validation_misclassification_monitor)
# batch callback (monitor)
# training_loss_logger = LogsToLists()
# Training monitors: mean loss (printed each epoch) and misclassification.
training_loss_monitor = MeanOverEpoch(loss_node, callbacks=[print_loss])
epoch_logger.subscribe_to("training mean loss", training_loss_monitor)
training_misclassification_monitor = MeanOverEpoch(misclassification_node, callbacks=[])
epoch_logger.subscribe_to("training misclassification %", training_misclassification_monitor)
# epoch callbacks
# validation_loss_logger = LogsToLists()
def make_output_filename(args, best=False):
    '''
    Returns the .pkl output path for this run.

    Appends "_best" before the extension when *best* is True.
    '''
    suffix = "_best" if best else ""
    return "{}{}.pkl".format(make_output_basename(args), suffix)
示例2: main
#......... some of the code here has been omitted by the scrape .........
# NOTE(review): the lines below are the tail of an elided constructor call
# (presumably a parameter-updater factory) — its opening was cut off.
args.learning_rate,
args.initial_momentum,
args.nesterov)
parameter_updaters.append(parameter_updater)
# Momentum is ramped linearly from initial to final value over the first
# `epochs_to_momentum_saturation` epochs.
momentum_updaters.append(LinearlyInterpolatesOverEpochs(
parameter_updater.momentum,
args.final_momentum,
args.epochs_to_momentum_saturation))
#
# Makes batch and epoch callbacks
#
def make_output_basename(args):
    '''
    Builds an output-file basename encoding the run's hyperparameters.

    Returns a string of the form
    ``<dir>/<prefix>_lr-<lr>_mom-<mom>_nesterov-<flag>_bs-<bs>``.
    May append a trailing '/' to ``args.output_prefix`` in place.

    NOTE(review): indentation was lost in the scraped source; this layout
    reconstructs the conventional structure.
    '''
    # The prefix must not already carry a file extension.
    assert_equal(os.path.splitext(args.output_prefix)[1], "")

    # A bare directory prefix needs a trailing '/' so os.path.split()
    # yields an empty basename for it.
    if os.path.isdir(args.output_prefix) and not args.output_prefix.endswith('/'):
        args.output_prefix += '/'

    directory, stem = os.path.split(args.output_prefix)
    if stem:
        stem += "_"

    full_prefix = os.path.join(directory, stem)
    return "{}lr-{}_mom-{}_nesterov-{}_bs-{}".format(full_prefix,
                                                     args.learning_rate,
                                                     args.initial_momentum,
                                                     args.nesterov,
                                                     args.batch_size)
# NOTE(review): fragment of a larger main(); the scrape dropped indentation.
# HDF5 epoch log; all monitors below publish their per-epoch values into it.
epoch_logger = EpochLogger(make_output_basename(args) + "_log.h5")
# misclassification_node = Misclassification(output_node, label_node)
# mcr_logger = LogsToLists()
# training_stopper = StopsOnStagnation(max_epochs=10,
# min_proportional_decrease=0.0)
misclassification_node = Misclassification(output_node, label_node)
# Validation-side monitors; stagnation of the misclassification rate for 10
# epochs triggers early stopping.
validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
epoch_logger.subscribe_to('validation mean loss', validation_loss_monitor)
validation_misclassification_monitor = MeanOverEpoch(
misclassification_node,
callbacks=[print_mcr,
StopsOnStagnation(max_epochs=10,
min_proportional_decrease=0.0)])
epoch_logger.subscribe_to('validation misclassification',
validation_misclassification_monitor)
# batch callback (monitor)
# training_loss_logger = LogsToLists()
# Training-side monitors.
training_loss_monitor = MeanOverEpoch(loss_node, callbacks=[print_loss])
epoch_logger.subscribe_to('training mean loss', training_loss_monitor)
training_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[])
epoch_logger.subscribe_to('training misclassification %',
training_misclassification_monitor)
# epoch callbacks
# validation_loss_logger = LogsToLists()
示例3: main
#......... some of the code here has been omitted by the scrape .........
# NOTE(review): tail of an elided call (likely a trainer constructor) that
# receives the updater lists; its opening was cut off.
parameter_updaters,
momentum_updaters)
#
# Makes batch and epoch callbacks
#
def make_output_filename(args, best=False):
    '''
    Constructs a .pkl filename that reflects the command-line params.

    Appends "_best" before the extension when *best* is True.
    NOTE(review): indentation was lost in the scrape; upstream keeps the
    directory-existence check inside the else branch, as reconstructed here.
    '''
    # The prefix must not already carry a file extension.
    assert_equal(os.path.splitext(args.output_prefix)[1], "")

    if os.path.isdir(args.output_prefix):
        # Prefix is itself a directory: no filename stem.
        output_dir, stem = args.output_prefix, ""
    else:
        output_dir, stem = os.path.split(args.output_prefix)
        assert_true(os.path.isdir(output_dir))

    if stem:
        stem = stem + "_"

    prefix_path = os.path.join(output_dir, stem)
    suffix = "_best" if best else ""
    return ("%slr-%g_mom-%g_nesterov-%s_bs-%d%s.pkl" %
            (prefix_path,
             args.learning_rate,
             args.initial_momentum,
             args.nesterov,
             args.batch_size,
             suffix))
# Set up the loggers
# NOTE(review): fragment of a larger main(); the scrape dropped indentation.
epoch_logger = EpochLogger(make_output_filename(args) + "_log.h5")
misclassification_node = Misclassification(output_node, label_lookup_node)
validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
epoch_logger.subscribe_to('validation mean loss', validation_loss_monitor)
# Early stopping only after 201 stagnant epochs (effectively very patient).
training_stopper = StopsOnStagnation(max_epochs=201,
min_proportional_decrease=0.0)
validation_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[print_misclassification_rate,
training_stopper])
epoch_logger.subscribe_to('validation misclassification',
validation_misclassification_monitor)
# batch callback (monitor)
#training_loss_logger = LogsToLists()
training_loss_monitor = MeanOverEpoch(loss_node,
callbacks=[print_loss])
epoch_logger.subscribe_to("training loss", training_loss_monitor)
training_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[])
epoch_logger.subscribe_to('training misclassification %',
training_misclassification_monitor)
# Per-epoch wall-clock timing, logged alongside the other channels.
epoch_timer = EpochTimer2()
epoch_logger.subscribe_to('epoch duration', epoch_timer)
# epoch_logger.subscribe_to('epoch time',
# epoch_timer)
#################
示例4: main
#.........这里部分代码省略.........
output_dir, output_prefix = args.output_prefix, ""
else:
output_dir, output_prefix = os.path.split(args.output_prefix)
assert_true(os.path.isdir(output_dir))
if output_prefix != "":
output_prefix = output_prefix + "_"
output_prefix = os.path.join(output_dir, output_prefix)
return ("%slr-%g_mom-%g_nesterov-%s_bs-%d%s.pkl" %
(output_prefix,
args.learning_rate,
args.initial_momentum,
args.nesterov,
args.batch_size,
"_best" if best else ""))
'''
# Set up the loggers
# NOTE(review): fragment of a larger main(); the scrape dropped indentation.
assert_equal(os.path.splitext(args.output_prefix)[1], "")
# Normalize a bare directory prefix so os.path.split() yields an empty stem.
if os.path.isdir(args.output_prefix) and \
not args.output_prefix.endswith('/'):
args.output_prefix += '/'
output_dir, output_prefix = os.path.split(args.output_prefix)
if output_prefix != "":
output_prefix = output_prefix + "_"
output_prefix = os.path.join(output_dir, output_prefix)
# HDF5 epoch log for the S2GD+ run.
epoch_logger = EpochLogger(output_prefix + "S2GD_plus.h5")
misclassification_node = Misclassification(output_node, label_lookup_node)
validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
epoch_logger.subscribe_to('validation mean loss', validation_loss_monitor)
# Stop training after 20 epochs without improvement.
training_stopper = StopsOnStagnation(max_epochs=20,
min_proportional_decrease=0.0)
validation_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[print_misclassification_rate,
training_stopper])
epoch_logger.subscribe_to('validation misclassification',
validation_misclassification_monitor)
# batch callback (monitor)
#training_loss_logger = LogsToLists()
training_loss_monitor = MeanOverEpoch(loss_node,
callbacks=[print_loss])
epoch_logger.subscribe_to("training loss", training_loss_monitor)
training_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[])
epoch_logger.subscribe_to('training misclassification %',
training_misclassification_monitor)
# Per-epoch wall-clock timing.
epoch_timer = EpochTimer2()
epoch_logger.subscribe_to('epoch duration', epoch_timer)
# epoch_logger.subscribe_to('epoch time',
# epoch_timer)
#################
示例5: main
#......... some of the code here has been omitted by the scrape .........
# Debug output: dump the gradient values and their shape.
# NOTE(review): presumably `grads` is a numpy array (it has .shape) computed
# in the elided code above — confirm against the full source.
print(grads)
print(grads.shape)
#
# Makes batch and epoch callbacks
#
def make_output_filename(args, best=False):
    '''
    Constructs a filename that reflects the command-line params.

    The result ends in ".pkl", with "_best" inserted when *best* is True.
    NOTE(review): indentation was lost in the scrape; upstream keeps the
    directory-existence check inside the else branch, as reconstructed here.
    '''
    assert_equal(os.path.splitext(args.output_prefix)[1], "")

    if os.path.isdir(args.output_prefix):
        out_dir = args.output_prefix
        out_stem = ""
    else:
        out_dir, out_stem = os.path.split(args.output_prefix)
        assert_true(os.path.isdir(out_dir))

    if out_stem != "":
        out_stem += "_"

    joined_prefix = os.path.join(out_dir, out_stem)
    fields = (joined_prefix,
              args.learning_rate,
              args.initial_momentum,
              not args.no_nesterov,  # flag is "no_nesterov"; report "nesterov"
              args.batch_size,
              "_best" if best else "")
    return "%slr-%g_mom-%g_nesterov-%s_bs-%d%s.pkl" % fields
# Set up the loggers
# NOTE(review): fragment of a larger main(); the scrape dropped indentation.
epoch_logger = EpochLogger(make_output_filename(args) + "_log.h5")
misclassification_node = Misclassification(output_node, label_lookup_node)
validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
epoch_logger.subscribe_to('validation mean loss', validation_loss_monitor)
# Early stopping after 100 stagnant epochs.
training_stopper = StopsOnStagnation(max_epochs=100,
min_proportional_decrease=0.0)
validation_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[print_misclassification_rate,
training_stopper])
epoch_logger.subscribe_to('validation misclassification',
validation_misclassification_monitor)
# batch callback (monitor)
#training_loss_logger = LogsToLists()
training_loss_monitor = MeanOverEpoch(loss_node,
callbacks=[print_loss])
epoch_logger.subscribe_to("training loss", training_loss_monitor)
training_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[])
epoch_logger.subscribe_to('training misclassification %',
training_misclassification_monitor)
# Timer created but (unlike other examples) never subscribed to the logger.
epoch_timer = EpochTimer()
# epoch_logger.subscribe_to('epoch time',
# epoch_timer)
#################
# Serializable wrapper around the symbolic graph, for pickling checkpoints.
model = SerializableModel([input_indices_symbolic], [output_node])
示例6: main
#......... some of the code here has been omitted by the scrape .........
# NOTE(review): tail of an elided constructor call (presumably a
# parameter-updater factory) — its opening was cut off.
args.learning_rate,
args.initial_momentum,
args.nesterov)
parameter_updaters.append(parameter_updater)
# Momentum ramps linearly to its final value over the saturation window.
momentum_updaters.append(LinearlyInterpolatesOverEpochs(
parameter_updater.momentum,
args.final_momentum,
args.epochs_to_momentum_saturation))
#
# Makes batch and epoch callbacks
#
def make_output_basename(args):
    '''
    Returns "<dir>/<prefix>_lr-..._bs-<batch>" built from *args*.

    Mutates ``args.output_prefix`` (adds a trailing '/') when it names an
    existing directory.  Layout reconstructed; the scrape dropped indents.
    '''
    assert_equal(os.path.splitext(args.output_prefix)[1], "")

    is_bare_dir = (os.path.isdir(args.output_prefix)
                   and not args.output_prefix.endswith('/'))
    if is_bare_dir:
        # Force os.path.split() to treat the whole prefix as a directory.
        args.output_prefix += '/'

    out_dir, out_stem = os.path.split(args.output_prefix)
    if out_stem != "":
        out_stem = out_stem + "_"

    base = os.path.join(out_dir, out_stem)
    tag = "lr-{}_mom-{}_nesterov-{}_bs-{}".format(args.learning_rate,
                                                  args.initial_momentum,
                                                  args.nesterov,
                                                  args.batch_size)
    return base + tag
# NOTE(review): fragment of a larger main(); the scrape dropped indentation.
# HDF5 epoch log fed by the monitors subscribed below.
epoch_logger = EpochLogger(make_output_basename(args) + "_log.h5")
# misclassification_node = Misclassification(output_node, label_node)
# mcr_logger = LogsToLists()
# training_stopper = StopsOnStagnation(max_epochs=10,
# min_proportional_decrease=0.0)
misclassification_node = Misclassification(output_node, label_node)
# Validation monitors; stagnation for 10 epochs triggers early stopping.
validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
epoch_logger.subscribe_to('validation mean loss', validation_loss_monitor)
validation_misclassification_monitor = MeanOverEpoch(
misclassification_node,
callbacks=[print_mcr,
StopsOnStagnation(max_epochs=10,
min_proportional_decrease=0.0)])
epoch_logger.subscribe_to('validation misclassification',
validation_misclassification_monitor)
# batch callback (monitor)
# training_loss_logger = LogsToLists()
# Training monitors.
training_loss_monitor = MeanOverEpoch(loss_node, callbacks=[print_loss])
epoch_logger.subscribe_to('training mean loss', training_loss_monitor)
training_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[])
epoch_logger.subscribe_to('training misclassification %',
training_misclassification_monitor)
# epoch callbacks
# validation_loss_logger = LogsToLists()
示例7: main
#......... some of the code here has been omitted by the scrape .........
# NOTE(review): tail of an elided call (likely a trainer constructor) that
# receives the updater lists; its opening was cut off.
parameter_updaters,
momentum_updaters)
#
# Makes batch and epoch callbacks
#
def make_output_filename(args, best=False):
    '''
    Builds the checkpoint (.pkl) filename from the command-line params.

    "_best" is inserted before the extension when *best* is True.
    NOTE(review): indentation was lost in the scrape; upstream keeps the
    directory-existence check inside the else branch, as reconstructed here.
    '''
    assert_equal(os.path.splitext(args.output_prefix)[1], "")

    if os.path.isdir(args.output_prefix):
        directory, stem = args.output_prefix, ""
    else:
        directory, stem = os.path.split(args.output_prefix)
        assert_true(os.path.isdir(directory))

    if stem:
        stem += "_"

    return ("%slr-%g_mom-%g_nesterov-%s_bs-%d%s.pkl" %
            (os.path.join(directory, stem),
             args.learning_rate,
             args.initial_momentum,
             not args.no_nesterov,  # CLI flag is negative; report positive
             args.batch_size,
             "_best" if best else ""))
# Set up the loggers
# NOTE(review): fragment of a larger main(); the scrape dropped indentation.
epoch_logger = EpochLogger(make_output_filename(args) + "_log.h5")
misclassification_node = Misclassification(output_node, label_node)
validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
epoch_logger.subscribe_to('validation mean loss', validation_loss_monitor)
# Early stopping after 100 stagnant epochs.
training_stopper = StopsOnStagnation(max_epochs=100,
min_proportional_decrease=0.0)
validation_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[print_misclassification_rate,
training_stopper])
epoch_logger.subscribe_to('validation misclassification',
validation_misclassification_monitor)
# batch callback (monitor)
#training_loss_logger = LogsToLists()
training_loss_monitor = MeanOverEpoch(loss_node,
callbacks=[print_loss])
epoch_logger.subscribe_to("training loss", training_loss_monitor)
training_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[])
epoch_logger.subscribe_to('training misclassification %',
training_misclassification_monitor)
# Timer created but never subscribed to the logger in this example.
epoch_timer = EpochTimer()
# epoch_logger.subscribe_to('epoch time',
# epoch_timer)
#################
# Serializable wrapper around the symbolic graph, for pickling checkpoints.
model = SerializableModel([image_uint8_node], [output_node])
示例8: main
#......... some of the code here has been omitted by the scrape .........
# NOTE(review): tail of an elided model-builder call; its opening was cut off.
sparse_init_counts,
args.dropout_include_rates,
rng,
theano_rng)
# Cross-entropy loss over the network output vs. the (looked-up) labels.
loss_node = CrossEntropy(output_node, label_lookup_node)
# NOTE(review): despite the name, this is the MEAN loss, not a sum.
loss_sum = loss_node.output_symbol.mean()
max_epochs = 10000
gradient = theano.gradient.grad(loss_sum, params_flat)
#
# Makes batch and epoch callbacks
#
def make_output_basename(args):
    '''
    Derives the hyperparameter-stamped output basename for this run.

    Format: ``<dir>/<prefix>_lr-<lr>_mom-<mom>_nesterov-<flag>_bs-<bs>``.
    Side effect: may append '/' to ``args.output_prefix`` in place.
    Layout reconstructed; the scrape dropped indents.
    '''
    # The prefix must be extension-free.
    assert_equal(os.path.splitext(args.output_prefix)[1], "")

    if os.path.isdir(args.output_prefix) and not args.output_prefix.endswith('/'):
        # Trailing '/' makes os.path.split() return an empty basename.
        args.output_prefix += '/'

    head, tail = os.path.split(args.output_prefix)
    tail = tail + "_" if tail != "" else tail

    return "{}lr-{}_mom-{}_nesterov-{}_bs-{}".format(
        os.path.join(head, tail),
        args.learning_rate,
        args.initial_momentum,
        args.nesterov,
        args.batch_size)
# NOTE(review): fragment of a larger main(); the scrape dropped indentation.
# HDF5 epoch log fed by the monitors subscribed below.
epoch_logger = EpochLogger(make_output_basename(args) + "_log.h5")
# misclassification_node = Misclassification(output_node, label_node)
# mcr_logger = LogsToLists()
# training_stopper = StopsOnStagnation(max_epochs=10,
# min_proportional_decrease=0.0)
misclassification_node = Misclassification(output_node, label_lookup_node)
# Validation monitors; stagnation for 100 epochs triggers early stopping.
validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
epoch_logger.subscribe_to('validation mean loss', validation_loss_monitor)
validation_misclassification_monitor = MeanOverEpoch(
misclassification_node,
callbacks=[print_mcr,
StopsOnStagnation(max_epochs=100,
min_proportional_decrease=0.0)])
epoch_logger.subscribe_to('validation misclassification',
validation_misclassification_monitor)
# batch callback (monitor)
# training_loss_logger = LogsToLists()
# Training monitors.
training_loss_monitor = MeanOverEpoch(loss_node, callbacks=[print_loss])
epoch_logger.subscribe_to('training mean loss', training_loss_monitor)
training_misclassification_monitor = MeanOverEpoch(misclassification_node,
callbacks=[])
epoch_logger.subscribe_to('training misclassification %',
training_misclassification_monitor)
# epoch callbacks
# validation_loss_logger = LogsToLists()