This article collects typical usage examples of the Python method pylearn2.compat.OrderedDict.keys. If you are unsure what OrderedDict.keys does or how to call it, the hand-picked code examples below may help. You can also browse further usage examples of the containing class, pylearn2.compat.OrderedDict.
The following shows 2 code examples of OrderedDict.keys, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
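Before the full examples, here is a minimal, self-contained sketch of the pattern both of them rely on: building a pylearn2.compat.OrderedDict and reading its keys() back in insertion order. It assumes pylearn2 is installed; the variable names and values are illustrative only.

from pylearn2.compat import OrderedDict

# Map each parameter name to a (dummy) gradient value; insertion order is preserved.
param_to_grad = OrderedDict()
param_to_grad['W'] = 0.1
param_to_grad['b'] = -0.2

# keys() iterates in insertion order, which is what keeps parameters and
# gradients aligned in the examples below.
ordered_params = list(param_to_grad.keys())
print(ordered_params)  # ['W', 'b']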
Example 1: __init__
# Required import: from pylearn2.compat import OrderedDict [as alias]
# Or: from pylearn2.compat.OrderedDict import keys [as alias]
# ......... part of the code is omitted here .........
self._goto_alpha = function(
    [alpha],
    updates=goto_updates,
    mode=self.theano_function_mode,
    name='BatchGradientDescent._goto_alpha')

norm = T.sqrt(sum([T.sqr(elem).sum() for elem in
                   self.param_to_grad_shared.values()]))
norm.name = 'BatchGradientDescent.norm'

normalize_grad_updates = OrderedDict()
for grad_shared in self.param_to_grad_shared.values():
    normalize_grad_updates[grad_shared] = grad_shared / norm

# useful for monitoring
self.ave_grad_size = sharedX(0.)
self.new_weight = sharedX(1.)
normalize_grad_updates[self.ave_grad_size] = \
    self.new_weight * norm + (1. - self.new_weight) * self.ave_grad_size

self._normalize_grad = \
    function([],
             norm,
             updates=normalize_grad_updates,
             mode=self.theano_function_mode,
             name='BatchGradientDescent._normalize_grad')
if self.conjugate:
    grad_shared = self.param_to_grad_shared.values()

    grad_to_old_grad = OrderedDict()
    for elem in grad_shared:
        grad_to_old_grad[elem] = \
            sharedX(elem.get_value(), 'old_' + elem.name)

    self._store_old_grad = \
        function([norm],
                 updates=OrderedDict([(grad_to_old_grad[g_], g_ * norm)
                                      for g_ in grad_to_old_grad]),
                 mode=self.theano_function_mode,
                 name='BatchGradientDescent._store_old_grad')

    grad_ordered = list(grad_to_old_grad.keys())
    old_grad_ordered = [grad_to_old_grad[g_] for g_ in grad_ordered]

    def dot_product(x, y):
        return sum([(x_elem * y_elem).sum()
                    for x_elem, y_elem in safe_zip(x, y)])

    beta_pr = (dot_product(grad_ordered, grad_ordered) -
               dot_product(grad_ordered, old_grad_ordered)) / \
        (1e-7 + dot_product(old_grad_ordered, old_grad_ordered))
    assert beta_pr.ndim == 0

    beta = T.maximum(beta_pr, 0.)

    # beta_pr is the Polak-Ribiere formula for beta.
    # According to wikipedia, the beta to use for NCG is "a matter of
    # heuristics or taste" but max(0, beta_pr) is "a popular choice...
    # which provides direction reset automatically." (ie, it is meant
    # to revert to steepest descent when you have traveled far enough
    # that the objective function is behaving non-quadratically enough
    # that the conjugate gradient formulas aren't working anymore)
    # http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method
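    # In symbols (a sketch of what the expressions above compute): writing
    # g for the current gradient and g_old for the stored previous gradient,
    #     beta_pr = g . (g - g_old) / (1e-7 + g_old . g_old)
    #     beta    = max(0, beta_pr)
    # where "." sums the elementwise products over every parameter tensor.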
    assert grad not in grad_to_old_grad

    make_conjugate_updates = \
        [(g_, g_ + beta * grad_to_old_grad[g_]) for g_ in grad_ordered]

    mode = self.theano_function_mode
    if mode is not None and hasattr(mode, 'record'):
        for v, u in make_conjugate_updates:
            mode.record.handle_line(
                'BatchGradientDescent._make_conjugate var '
                + var_descriptor(v) + '\n')
            mode.record.handle_line(
                'BatchGradientDescent._make_conjugate update '
                + var_descriptor(u) + '\n')

    self._make_conjugate = \
        function([], updates=make_conjugate_updates,
                 mode=self.theano_function_mode,
                 name='BatchGradientDescent._make_conjugate')

    if mode is not None and hasattr(mode, 'record'):
        for output in self._make_conjugate.maker.fgraph.outputs:
            mode.record.handle_line(
                'BatchGradientDescent._make_conjugate output '
                + var_descriptor(output) + '\n')
if tol is None:
    if objective.dtype == "float32":
        self.tol = 1e-6
    else:
        self.tol = 3e-7
else:
    self.tol = tol

self.ave_step_size = sharedX(0.)
self.ave_grad_mult = sharedX(0.)
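The conjugate-gradient bookkeeping above is easier to follow outside of Theano. The helper below is not pylearn2 code; it is a minimal NumPy sketch of what _store_old_grad and _make_conjugate jointly compute: the Polak-Ribiere beta with the max(0, .) reset, followed by the blended search direction. The vectors stand for gradients already flattened into a single array, and all names are illustrative.

import numpy as np

def conjugate_update(grad, old_grad, eps=1e-7):
    # Polak-Ribiere beta, mirroring
    #   (dot(g, g) - dot(g, g_old)) / (1e-7 + dot(g_old, g_old)) above.
    beta_pr = (np.dot(grad, grad) - np.dot(grad, old_grad)) / \
              (eps + np.dot(old_grad, old_grad))
    # A negative beta_pr resets the method to plain steepest descent.
    beta = max(0.0, beta_pr)
    # Mirrors the update g_ <- g_ + beta * grad_to_old_grad[g_] above.
    return grad + beta * old_grad, beta

# Illustrative gradient vectors only.
g_prev = np.array([0.30, -0.10, 0.20])
g_curr = np.array([0.10, 0.40, -0.20])
direction, beta = conjugate_update(g_curr, g_prev)
print(direction, beta)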
Example 2: Monitor
# Required import: from pylearn2.compat import OrderedDict [as alias]
# Or: from pylearn2.compat.OrderedDict import keys [as alias]
# ......... part of the code is omitted here .........
        data_specs=self._flat_data_specs,
        return_tuple=True,
        rng=sd)

# If self._flat_data_specs is empty, no channel needs data,
# so we do not need to call the iterator in order to average
# the monitored values across different batches; we only
# have to call them once.
if len(self._flat_data_specs[1]) == 0:
    X = ()
    self.run_prereqs(X, d)
    a(*X)
else:
    actual_ne = 0
    for X in myiterator:
        # X is a flat (not nested) tuple
        self.run_prereqs(X, d)
        a(*X)
        actual_ne += self._flat_data_specs[0].np_batch_size(X)
    # end for X
    if actual_ne != ne:
        raise RuntimeError("At compile time, your iterator said "
                           "it had %d examples total, but at "
                           "runtime it gave us %d." %
                           (ne, actual_ne))
# end for d

log.info("Monitoring step:")
log.info("\tEpochs seen: %d" % self._epochs_seen)
log.info("\tBatches seen: %d" % self._num_batches_seen)
log.info("\tExamples seen: %d" % self._examples_seen)

t = time.time() - self.t0
for channel_name in sorted(self.channels.keys(),
                           key=number_aware_alphabetical_key):
    channel = self.channels[channel_name]
    channel.time_record.append(t)
    channel.batch_record.append(self._num_batches_seen)
    channel.example_record.append(self._examples_seen)
    channel.epoch_record.append(self._epochs_seen)
    val = channel.val_shared.get_value()
    channel.val_record.append(val)
    # TODO: use logging infrastructure so that user can configure
    # formatting
    if abs(val) < 1e4:
        val_str = str(val)
    else:
        val_str = '%.3e' % val
    log.info("\t%s: %s" % (channel_name, val_str))
def run_prereqs(self, data, dataset):
    """
    Runs all "prerequisite functions" on a batch of data. Always
    called right before computing the monitoring channels on that
    batch.

    Parameters
    ----------
    data : tuple or Variable
        a member of the Space used as input to the monitoring
        functions
    dataset : Dataset
        the Dataset the data was drawn from
    """
    if dataset not in self.prereqs:
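The example is cut off at this point. Judging from the docstring, the remainder presumably just looks up the callables registered for this dataset and applies each one to the batch; a rough sketch of that logic (an assumption, not the verbatim pylearn2 source) would be:

    if dataset not in self.prereqs:
        return
    for prereq in self.prereqs[dataset]:
        prereq(*data)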