This article collects typical usage examples of the Python method pylearn2.training_algorithms.learning_rule.Momentum.add_channels_to_monitor. If you are wondering what Momentum.add_channels_to_monitor does, how to call it, or where to find examples of it in use, the curated code samples below should help. You can also read further about the class that defines the method, pylearn2.training_algorithms.learning_rule.Momentum.
Two code examples of Momentum.add_channels_to_monitor are shown below, sorted by popularity by default.
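Before the project excerpts, here is a minimal sketch of where the method fits. It is not taken from either example; the init_momentum value is arbitrary, and the monitor and monitoring_dataset objects are assumed to be supplied by the surrounding training setup, as they are in the examples below.

from pylearn2.training_algorithms.learning_rule import Momentum

# Build the learning rule with an initial momentum coefficient.
learning_rule = Momentum(init_momentum=0.5)

# Inside a training algorithm's setup(), once a Monitor and a monitoring
# dataset are available, the rule registers its own channels. For Momentum
# this is a single data-independent channel tracking the coefficient:
#
#     learning_rule.add_channels_to_monitor(monitor, monitoring_dataset)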
Example 1: SGD
# Required import: from pylearn2.training_algorithms.learning_rule import Momentum [as alias]
# Or: from pylearn2.training_algorithms.learning_rule.Momentum import add_channels_to_monitor [as alias]
#......... part of the code is omitted here .........
        cost_value = self.cost.expr(model, nested_args,
                                    **fixed_var_descr.fixed_vars)
if cost_value is not None and cost_value.name is None:
# Concatenate the name of all tensors in theano_args !?
cost_value.name = 'objective'
# Set up monitor to model the objective value, learning rate,
# momentum (if applicable), and extra channels defined by
# the cost
learning_rate = self.learning_rate
if self.monitoring_dataset is not None:
if (self.monitoring_batch_size is None and
self.monitoring_batches is None):
self.monitoring_batch_size = self.batch_size
self.monitoring_batches = self.batches_per_iter
self.monitor.setup(dataset=self.monitoring_dataset,
cost=self.cost,
batch_size=self.monitoring_batch_size,
num_batches=self.monitoring_batches,
extra_costs=self.monitoring_costs,
mode=self.monitor_iteration_mode)
            dataset_name = list(self.monitoring_dataset.keys())[0]
monitoring_dataset = self.monitoring_dataset[dataset_name]
            # TODO: have Monitor support non-data-dependent channels
self.monitor.add_channel(name='learning_rate',
ipt=None,
val=learning_rate,
data_specs=(NullSpace(), ''),
dataset=monitoring_dataset)
if self.learning_rule:
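                # The learning rule contributes its own monitor channels here;
                # Momentum adds a data-independent 'momentum' channel, mirroring
                # the 'learning_rate' channel added just above.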
self.learning_rule.add_channels_to_monitor(
self.monitor,
monitoring_dataset)
params = list(model.get_params())
assert len(params) > 0
for i, param in enumerate(params):
if param.name is None:
param.name = 'sgd_params[%d]' % i
self.params = params
        grads, updates = self.cost.get_gradients(model, nested_args,
                                                 **fixed_var_descr.fixed_vars)
if not isinstance(grads, OrderedDict):
            raise TypeError(str(type(self.cost)) + ".get_gradients returned " +
                            "something with " + str(type(grads)) + " as its " +
                            "first member. Expected OrderedDict.")
for param in grads:
assert param in params
for param in params:
assert param in grads
lr_scalers = model.get_lr_scalers()
for key in lr_scalers:
if key not in params:
raise ValueError("Tried to scale the learning rate on " +\
str(key)+" which is not an optimization parameter.")
assert len(updates.keys()) == 0
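In this example, and again in Example 2 below, the call happens at the same point: once the Monitor has been set up for the first monitoring dataset, the learning rule is given the chance to publish its own channels. For reference, here is a minimal sketch of what pylearn2's Momentum registers when the method is called; it is paraphrased from the library rather than taken from the projects above, so the exact source may differ slightly.

from pylearn2.space import NullSpace

def add_channels_to_monitor(self, monitor, monitoring_dataset):
    # Publish the current momentum coefficient as a data-independent
    # channel, analogous to the 'learning_rate' channel added by hand
    # in the examples.
    monitor.add_channel(name='momentum',
                        ipt=None,
                        val=self.momentum,
                        data_specs=(NullSpace(), ''),
                        dataset=monitoring_dataset)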
Example 2: SGD
# Required import: from pylearn2.training_algorithms.learning_rule import Momentum [as alias]
# Or: from pylearn2.training_algorithms.learning_rule.Momentum import add_channels_to_monitor [as alias]
#......... part of the code is omitted here .........
fixed_var_descr = self.cost.get_fixed_var_descr(model, nested_args)
self.on_load_batch = fixed_var_descr.on_load_batch
        cost_value = self.cost.expr(model, nested_args,
                                    **fixed_var_descr.fixed_vars)
if cost_value is not None and cost_value.name is None:
# Concatenate the name of all tensors in theano_args !?
cost_value.name = 'objective'
# Set up monitor to model the objective value, learning rate,
# momentum (if applicable), and extra channels defined by
# the cost
learning_rate = self.learning_rate
if self.monitoring_dataset is not None:
self.monitor.setup(
dataset=self.monitoring_dataset,
cost=self.cost,
batch_size=self.batch_size,
num_batches=self.monitoring_batches,
extra_costs=self.monitoring_costs,
mode=self.monitor_iteration_mode
)
            dataset_name = list(self.monitoring_dataset.keys())[0]
monitoring_dataset = self.monitoring_dataset[dataset_name]
            # TODO: have Monitor support non-data-dependent channels
self.monitor.add_channel(name='learning_rate',
ipt=None,
val=learning_rate,
data_specs=(NullSpace(), ''),
dataset=monitoring_dataset)
if self.learning_rule:
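                # As in Example 1, this is where the Momentum rule adds its
                # 'momentum' channel to the monitor.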
self.learning_rule.add_channels_to_monitor(
self.monitor,
monitoring_dataset)
params = list(model.get_params())
assert len(params) > 0
for i, param in enumerate(params):
if param.name is None:
param.name = 'sgd_params[%d]' % i
        grads, updates = self.cost.get_gradients(model, nested_args,
                                                 **fixed_var_descr.fixed_vars)
for param in grads:
assert param in params
for param in params:
assert param in grads
for param in grads:
if grads[param].name is None and cost_value is not None:
grads[param].name = ('grad(%(costname)s, %(paramname)s)' %
{'costname': cost_value.name,
'paramname': param.name})
lr_scalers = model.get_lr_scalers()
for key in lr_scalers:
if key not in params:
raise ValueError("Tried to scale the learning rate on " +\
str(key)+" which is not an optimization parameter.")
log.info('Parameter and initial learning rate summary:')
for param in params: