This article collects typical usage examples of Python's theano.compat.python2x.OrderedDict class. If you are wondering what the OrderedDict class can do, or how to use it, the curated examples below may help.
The following shows 15 code examples of the OrderedDict class, sorted by popularity.
Example 1: orderings
def orderings(self):
    """
    Return a dict d such that d[node] is a list of nodes that must be
    evaluated before node itself can be evaluated.

    This is used primarily by the destroy_handler feature to ensure that
    all clients of any destroyed inputs have already computed their
    outputs.

    :note: This only calls the orderings() function on all features. It
        does not take care of computing the dependencies by itself.
    """
    ords = OrderedDict()
    assert isinstance(self._features, list)
    for feature in self._features:
        if hasattr(feature, 'orderings'):
            orderings = feature.orderings(self)
            if not isinstance(orderings, OrderedDict):
                raise TypeError("Non-deterministic return value from " +
                                str(feature.orderings) +
                                ". Nondeterministic object is " +
                                str(orderings))
            for node, prereqs in orderings.items():
                if not isinstance(prereqs, (list, OrderedSet)):
                    raise TypeError(
                        "prereqs must be a type with a "
                        "deterministic iteration order, or toposort "
                        "will be non-deterministic.")
                ords.setdefault(node, []).extend(prereqs)
    # eliminate duplicate prereqs
    for (node, prereqs) in ords.items():
        ords[node] = list(OrderedSet(prereqs))
    return ords
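The ordered containers are required because graph toposorts iterate over both the returned dict and each prereqs list; with plain Python 2 dicts the iteration order could differ between runs. A minimal standalone sketch of the merge step, using collections.OrderedDict in place of the theano.compat wrapper:

from collections import OrderedDict

# Two features report prerequisite orderings; merging them through an
# OrderedDict keeps the combined iteration order deterministic.
feature_a = OrderedDict([('node1', ['dep1']), ('node2', ['dep2'])])
feature_b = OrderedDict([('node1', ['dep3'])])

merged = OrderedDict()
for orderings in (feature_a, feature_b):
    for node, prereqs in orderings.items():
        merged.setdefault(node, []).extend(prereqs)

print(list(merged.items()))  # [('node1', ['dep1', 'dep3']), ('node2', ['dep2'])]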
Example 2: get_gradients
def get_gradients(self, model, data, ** kwargs):
    cost = self.expr(model=model, data=data, **kwargs)
    params = list(model.get_params())
    grads = T.grad(cost, params, disconnected_inputs='ignore')
    gradients = OrderedDict(izip(params, grads))
    if self.gradient_clipping:
        norm_gs = 0.
        for grad in gradients.values():
            norm_gs += (grad ** 2).sum()
        not_finite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))
        norm_gs = T.sqrt(norm_gs)
        norm_gs = T.switch(T.ge(norm_gs, self.max_magnitude),
                           self.max_magnitude / norm_gs,
                           1.)
        for param, grad in gradients.items():
            gradients[param] = T.switch(not_finite,
                                        .1 * param,
                                        grad * norm_gs)
    updates = OrderedDict()
    return gradients, updates
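The clipping above rescales every gradient by one common factor whenever their global L2 norm exceeds max_magnitude. A minimal NumPy sketch of the same rule (clip_by_global_norm is a hypothetical helper, and the NaN/Inf fallback is simplified to returning None rather than shrinking toward the parameters):

import numpy as np

def clip_by_global_norm(grads, max_magnitude):
    # Global L2 norm across all gradient arrays.
    norm = np.sqrt(sum((g ** 2).sum() for g in grads))
    if not np.isfinite(norm):
        return None  # simplified stand-in for the Theano code's fallback
    scale = max_magnitude / norm if norm >= max_magnitude else 1.0
    return [g * scale for g in grads]

grads = [np.array([3.0, 4.0]), np.array([12.0])]  # global norm is 13
print(clip_by_global_norm(grads, max_magnitude=1.0))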
Example 3: get_gradients
def get_gradients(self, model, data, ** kwargs):
    #print 'get_gradients'
    chain_start = theano.shared(numpy.zeros(shape=(self.chain_num, model.n_vis)),
                                name=None, borrow=True)
    v_samples = chain_start
    for i in xrange(self.k):
        v_samples = model.gibbs_vhv(v_samples)[-1]
    chain_end = v_samples
    #print 'chain_end', chain_end.ndim
    chain_updates = {}
    chain_updates[chain_start] = chain_end
    pos_v = data
    #neg_v = self.get_neg_v(model)
    cost = -(- model.free_energy(pos_v).mean() + model.free_energy(chain_end).mean())
    params = list(model.get_params())
    grads = T.grad(cost, params, disconnected_inputs='ignore',
                   consider_constant=[chain_end])
    gradients = OrderedDict(izip(params, grads))
    updates = OrderedDict()
    updates.update(chain_updates)  # manually added
    return gradients, updates
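Returning chain_updates alongside the gradients is what makes this persistent CD: whoever compiles the training function applies the update, so chain_start carries the negative-phase samples over to the next call. A toy standalone sketch of that mechanism, with a simple increment standing in for the k Gibbs steps:

import numpy
import theano

state = theano.shared(numpy.zeros(3), name='chain_state')
new_state = state + 1.0  # stand-in for k Gibbs steps

# The updates dict writes new_state back into state on every call.
step = theano.function([], new_state, updates={state: new_state})
step()
step()
print(state.get_value())  # [2. 2. 2.] -- the chain persists between calls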
Example 4: get_layer_monitoring_channels
def get_layer_monitoring_channels(self, state_below=None,
                                  state=None, targets=None):
    W, = self.transformer.get_params()
    assert W.ndim == 4
    sq_W = T.sqr(W)
    row_norms = T.sqrt(sq_W.sum(axis=(1, 2, 3)))
    rval = OrderedDict([
        ('kernel_norms_min', row_norms.min()),
        ('kernel_norms_mean', row_norms.mean()),
        ('kernel_norms_max', row_norms.max()),
    ])
    orval = super(CorrMMElemwise, self).get_monitoring_channels_from_state(state,
                                                                           targets)
    rval.update(orval)
    cst = self.cost
    orval = self.nonlin.get_monitoring_channels_from_state(state,
                                                           targets,
                                                           cost_fn=cst)
    rval.update(orval)
    return rval
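Each kernel of the 4D weight tensor gets one L2 norm by summing squares over all but the filter axis; a minimal NumPy sketch of that reduction:

import numpy as np

# A toy 4D kernel bank: (n_filters, channels, rows, cols).
W = np.random.randn(8, 3, 5, 5)
kernel_norms = np.sqrt((W ** 2).sum(axis=(1, 2, 3)))  # one norm per filter
print(kernel_norms.min(), kernel_norms.mean(), kernel_norms.max())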
Example 5: get_monitoring_channels
def get_monitoring_channels(self, model, data, **kwargs):
    self.get_data_specs(model)[0].validate(data)
    rval = OrderedDict()
    composite_specs, mapping = self.get_composite_specs_and_mapping(model)
    nested_data = mapping.nest(data)
    for i, cost in enumerate(self.costs):
        cost_data = nested_data[i]
        try:
            channels = cost.get_monitoring_channels(model, cost_data, **kwargs)
            rval.update(channels)
        except TypeError:
            print("SumOfCosts.get_monitoring_channels encountered "
                  "TypeError while calling " + str(type(cost)) +
                  ".get_monitoring_channels")
            raise
        value = cost.expr(model, cost_data, **kwargs)
        if value is not None:
            name = ""
            if hasattr(value, "name") and value.name is not None:
                name = "_" + value.name
            rval["term_" + str(i) + name] = value
    return rval
Example 6: get_updates
def get_updates(self, grads):
    grads = OrderedDict(grads)
    updates = OrderedDict()

    i_t = self.i + 1.
    fix1 = 1. - (1. - self.b1) ** i_t
    fix2 = 1. - (1. - self.b2) ** i_t
    lr_t = self.learning_rate * (T.sqrt(fix2) / fix1)

    for param in grads.keys():
        m = theano.shared(param.get_value() * 0.)
        self.parameters.append(m)
        v = theano.shared(param.get_value() * 0.)
        self.parameters.append(v)
        b1t = 1. - (1. - self.b1) * self.lmbda ** (i_t - 1)
        m_t = b1t * grads[param] + (1. - b1t) * m
        v_t = self.b2 * T.sqr(grads[param]) + (1. - self.b2) * v
        g_t = m_t / (T.sqrt(v_t) + self.epsilon)
        p_t = param - (lr_t * g_t)
        updates[m] = m_t
        updates[v] = v_t
        updates[param] = p_t

    updates[self.i] = i_t
    return updates
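Note the parameterization: with b1 = 1 - beta1 and b2 = 1 - beta2 in the usual Adam notation, fix1 and fix2 are exactly the bias-correction terms, folded into the learning rate as lr_t. A standalone NumPy sketch of one Adam step in the standard notation (adam_step is a hypothetical helper):

import numpy as np

def adam_step(param, grad, m, v, i, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    # One Adam update (Kingma & Ba notation); the Theano code above uses
    # b1 = 1 - beta1 and b2 = 1 - beta2.
    i += 1
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    m_hat = m / (1 - beta1 ** i)  # bias correction: the fix1/fix2 terms above
    v_hat = v / (1 - beta2 ** i)
    param = param - lr * m_hat / (np.sqrt(v_hat) + eps)
    return param, m, v, i

p, m, v, i = np.array([1.0]), np.zeros(1), np.zeros(1), 0
p, m, v, i = adam_step(p, np.array([0.5]), m, v, i)
print(p)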
Example 7: __init__
def __init__(self, valid=None, invalid=None, valid_equivalent=None):
    '''
    Check if variables can be expressed without using variables in invalid.

    valid_equivalent provides a dictionary mapping some invalid
    variables to valid ones that can be used instead.
    '''
    if valid is None:
        valid = []
    if invalid is None:
        invalid = []
    if valid_equivalent is None:
        valid_equivalent = OrderedDict()

    # Nodes that are valid to have in the graph computing outputs
    self.valid = set(valid)

    # Nodes that are NOT valid to have in the graph computing outputs
    self.invalid = set(invalid)

    # Mapping from invalid variables to equivalent valid ones.
    self.valid_equivalent = valid_equivalent.copy()
    self.valid.update(valid_equivalent.values())
    self.invalid.update(valid_equivalent.keys())
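The None-then-assign pattern for valid, invalid, and valid_equivalent guards against Python's mutable default argument pitfall, which a short sketch makes concrete:

def bad(d={}):
    # The default dict is created once and shared across all calls.
    d['x'] = d.get('x', 0) + 1
    return d

def good(d=None):
    # A fresh dict per call, mirroring the None checks in __init__ above.
    if d is None:
        d = {}
    d['x'] = d.get('x', 0) + 1
    return d

print(bad(), bad())    # {'x': 1} {'x': 2} -- state leaks between calls
print(good(), good())  # {'x': 1} {'x': 1}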
Example 8: get_monitoring_channels
def get_monitoring_channels(self, model, data, ** kwargs):
    self.get_data_specs(model)[0].validate(data)
    rval = OrderedDict()
    composite_specs, mapping = self.get_composite_specs_and_mapping(model)
    nested_data = mapping.nest(data)
    for i, cost in enumerate(self.costs):
        cost_data = nested_data[i]
        try:
            channels = cost.get_monitoring_channels(model, cost_data,
                                                    **kwargs)
            rval.update(channels)
        except TypeError:
            logger.error('SumOfCosts.get_monitoring_channels encountered '
                         'TypeError while calling {0}'
                         '.get_monitoring_channels'.format(type(cost)))
            raise
        value = cost.expr(model, cost_data, ** kwargs)
        if value is not None:
            name = ''
            if hasattr(value, 'name') and value.name is not None:
                name = '_' + value.name
            rval['term_' + str(i) + name] = value
    return rval
Example 9: get_monitoring_channels
def get_monitoring_channels(self, model, X, Y=None, ** kwargs):
    if Y is None and self.supervised:
        raise ValueError("no targets provided while some of the "
                         "costs in the sum are supervised costs")
    rval = OrderedDict()
    for i, cost in enumerate(self.costs):
        try:
            rval.update(cost.get_monitoring_channels(model, X, Y, **kwargs))
        except TypeError:
            print('SumOfCosts.get_monitoring_channels encountered '
                  'TypeError while calling ' + str(type(cost)) +
                  '.get_monitoring_channels')
            raise
        Y_to_pass = Y
        if not cost.supervised:
            Y_to_pass = None
        value = cost(model, X, Y_to_pass, ** kwargs)
        if value is not None:
            name = ''
            if hasattr(value, 'name') and value.name is not None:
                name = '_' + value.name
            rval['term_' + str(i) + name] = value
    return rval
Example 10: get_params
def get_params(self):
    """
    This returns the dictionary of theano shared variables that will be
    trained by the :class:`Optimizer`. These parameters are used in the
    gradient.

    This includes all of the parameters in every model in the Prototype,
    without duplication.

    Returns
    -------
    dict(str: SharedVariable)
        Dictionary of {string_name: theano shared variable} to be trained
        with an :class:`Optimizer`. These are the parameters to be trained.
    """
    params = OrderedDict()
    model_index = 0
    for model in self.models:
        if isinstance(model, Model):
            model_params = model.get_params()
            # add the parameters only if they aren't already in the dict!
            for name, param in model_params.items():
                if param not in list(params.values()):
                    name = model._classname + '_%d_' % model_index + name
                    params[name] = param
            model_index += 1
    return params
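A minimal sketch of the same deduplication idea, with plain objects standing in for shared variables (the membership test falls back to identity-based equality, which is the point: the same variable shared by two models is stored once):

from collections import OrderedDict

w = object()
b = object()
model_a = OrderedDict([('W', w), ('b', b)])
model_b = OrderedDict([('W', w)])  # shares W with model_a

params = OrderedDict()
for i, model_params in enumerate([model_a, model_b]):
    for name, param in model_params.items():
        if param not in list(params.values()):
            params['model_%d_%s' % (i, name)] = param

print(list(params.keys()))  # ['model_0_W', 'model_0_b'] -- no duplicate W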
Example 11: get_gradients
def get_gradients(self, model, data, ** kwargs):
    indiv_results = []
    composite_specs, mapping = self.get_composite_specs_and_mapping(model)
    nested_data = mapping.nest(data)
    for cost, cost_data in safe_zip(self.costs, nested_data):
        result = cost.get_gradients(model, cost_data, ** kwargs)
        indiv_results.append(result)

    grads = OrderedDict()
    updates = OrderedDict()
    params = model.get_params()
    for coeff, packed in zip(self.coeffs, indiv_results):
        g, u = packed
        for param in g:
            if param not in params:
                raise ValueError("A shared variable (" +
                                 str(param) +
                                 ") that is not a parameter appeared "
                                 "in a cost gradient dictionary.")
        for param in g:
            assert param.ndim == g[param].ndim
            v = coeff * g[param]
            if param not in grads:
                grads[param] = v
            else:
                grads[param] = grads[param] + v
            assert grads[param].ndim == param.ndim
        assert not any([state in updates for state in u])
        assert not any([state in params for state in u])
        updates.update(u)
    return grads, updates
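Stripped of the spec plumbing and assertions, the accumulation reduces to a coefficient-weighted sum of gradients per parameter; a minimal NumPy sketch:

import numpy as np

coeffs = [1.0, 0.1]
per_cost_grads = [
    {'W': np.array([1.0, 2.0])},                           # gradients of cost 0
    {'W': np.array([10.0, 10.0]), 'b': np.array([3.0])},   # gradients of cost 1
]

grads = {}
for coeff, g in zip(coeffs, per_cost_grads):
    for param, grad in g.items():
        grads[param] = grads.get(param, 0) + coeff * grad

print(grads)  # {'W': array([2., 3.]), 'b': array([0.3])}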
Example 12: OrderedSet
class OrderedSet(object):
    """
    An implementation of OrderedSet based on the keys of
    an OrderedDict.
    """
    def __init__(self, iterable=None):
        self.data = OrderedDict()
        if iterable is not None:
            self.update(iterable)

    def update(self, container):
        check_deterministic(container)
        for elem in container:
            self.add(elem)

    def add(self, key):
        self.data[key] = None

    def __len__(self):
        return len(self.data)

    def __contains__(self, key):
        return key in self.data

    def discard(self, key):
        if key in self.data:
            del self.data[key]

    def remove(self, key):
        if key in self.data:
            del self.data[key]
        else:
            raise KeyError(key)

    def __iter__(self):
        return self.data.__iter__()

    def __reversed__(self):
        return self.data.__reversed__()

    def pop(self, last=True):
        raise NotImplementedError()

    def __eq__(self, other):
        # Note that we implement only the comparison to another
        # `OrderedSet`, and not to a regular `set`, because otherwise we
        # could have a non-symmetric equality relation like:
        #     my_ordered_set == my_set and my_set != my_ordered_set
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        elif isinstance(other, set):
            # Raise an exception to avoid confusion.
            raise TypeError(
                'Cannot compare an `OrderedSet` to a `set` because '
                'this comparison cannot be made symmetric: please '
                'manually cast your `OrderedSet` into `set` before '
                'performing this comparison.')
        else:
            return NotImplemented
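A short usage sketch of the class above, with check_deterministic stubbed out (any list or tuple iterates deterministically, so the stub just asserts the type):

from collections import OrderedDict

def check_deterministic(container):
    # Stub for the real helper used by OrderedSet.update.
    assert isinstance(container, (list, tuple))

s = OrderedSet([3, 1, 2, 3, 1])
print(list(s))          # [3, 1, 2] -- insertion order kept, duplicates dropped
s.discard(1)
print(2 in s, 1 in s)   # True False
print(s == OrderedSet([3, 2]))  # True -- order-sensitive equality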
Example 13: main
def main():
    var = theano.shared(T.zeros(shape=(88, 100), dtype=theano.config.floatX).eval(),
                        name='W')
    updates = [(var, add_uniform(input=var, noise_level=.02))]

    stats = get_stats(var)
    l1 = stats.pop('l1')
    l2 = stats.pop('l2')
    min = stats.pop('min')
    max = stats.pop('max')
    var = stats.pop('var')  # note: shadows the shared variable `var` above
    std = stats.pop('std')
    mean = stats.pop('mean')

    mean_monitor = Monitor('mean', mean, train=True, valid=True,
                           out_service=FileService('outs/mean.txt'))
    var_monitor = Monitor('var', var, out_service=FileService('outs/var.txt'))

    w_channel = MonitorsChannel('W', monitors=mean_monitor)
    stat_channel = MonitorsChannel('stats', monitors=[var_monitor])
    monitors = [w_channel, stat_channel]

    train_collapsed_raw = collapse_channels(monitors, train=True)
    train_collapsed = OrderedDict([(item[0], item[1]) for item in train_collapsed_raw])
    train_services = OrderedDict([(item[0], item[2]) for item in train_collapsed_raw])

    valid_collapsed_raw = collapse_channels(monitors, valid=True)
    valid_collapsed = OrderedDict([(item[0], item[1]) for item in valid_collapsed_raw])
    valid_services = OrderedDict([(item[0], item[2]) for item in valid_collapsed_raw])

    log.debug('compiling...')
    f = theano.function(inputs=[], outputs=train_collapsed.values(), updates=updates)
    f2 = theano.function(inputs=[], outputs=valid_collapsed.values(), updates=updates)
    log.debug('done')

    t1 = time.time()

    for epoch in range(10):
        t = time.time()
        log.debug(epoch)
        vals = f()
        m = OrderedDict(zip(train_collapsed.keys(), vals))
        for name, service in train_services.items():
            if name in m:
                service.write(m[name], TRAIN)
        log.debug('----- ' + make_time_units_string(time.time() - t))

    for epoch in range(10):
        t = time.time()
        log.debug(epoch)
        vals = f2()
        m = OrderedDict(zip(valid_collapsed.keys(), vals))
        for name, service in valid_services.items():
            if name in m:
                service.write(m[name], VALID)
        log.debug('----- ' + make_time_units_string(time.time() - t))

    log.debug("TOTAL TIME " + make_time_units_string(time.time() - t1))
Example 14: get_layer_monitoring_channels
def get_layer_monitoring_channels(self, state_below=None,
                                  state=None, targets=NotImplementedError):
    if self.no_affine:
        return OrderedDict()

    W_class = self.W_class
    W_cluster = self.W_cluster
    assert W_class.ndim == 3
    assert W_cluster.ndim == 2

    sq_W = T.sqr(W_cluster)
    sq_W_class = T.sqr(W_class)
    row_norms = T.sqrt(sq_W.sum(axis=1))
    col_norms = T.sqrt(sq_W.sum(axis=0))
    row_norms_class = T.sqrt(sq_W_class.sum(axis=1))
    col_norms_class = T.sqrt(sq_W_class.sum(axis=0))

    rval = OrderedDict([
        ('row_norms_min', row_norms.min()),
        ('row_norms_mean', row_norms.mean()),
        ('row_norms_max', row_norms.max()),
        ('col_norms_min', col_norms.min()),
        ('col_norms_mean', col_norms.mean()),
        ('col_norms_max', col_norms.max()),
        ('class_row_norms_min', row_norms_class.min()),
        ('class_row_norms_mean', row_norms_class.mean()),
        ('class_row_norms_max', row_norms_class.max()),
        ('class_col_norms_min', col_norms_class.min()),
        ('class_col_norms_mean', col_norms_class.mean()),
        ('class_col_norms_max', col_norms_class.max()),
    ])

    if (state_below is not None) or (state is not None):
        if state is None:
            # for value in get_debug_values(state_below):
            #     print 'value is' + value
            state = self.fprop(state_below, targets)
            # print state
        probclass, probcluster = state
        mx = probclass.max(axis=1)
        rval.update(OrderedDict([('mean_max_class', mx.mean()),
                                 ('max_max_class', mx.max()),
                                 ('min_max_class', mx.min())]))
        if targets is not None:
            rval['nll'] = self.cost(Y=targets, Y_hat=(probclass, probcluster))
            rval['perplexity'] = 10 ** (rval['nll'] / np.log(10).astype('float32'))
            rval['entropy'] = rval['nll'] / np.log(2).astype('float32')
    return rval
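The last three channels just change the base of the natural-log NLL: 10 ** (nll / ln 10) equals e ** nll, so the perplexity is exp(nll), and nll / ln 2 is the same quantity in bits. A quick numeric check:

import numpy as np

nll = 4.6  # a mean negative log-likelihood, in nats
print(10 ** (nll / np.log(10)), np.exp(nll))  # identical: change of base
print(nll / np.log(2))                        # the NLL expressed in bits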
Example 15: get_funcs
def get_funcs(self, learning_rate, grads, inp, cost, errors, lr_scalers=None):
    """
    Provides the updates for learning with gradient descent + momentum.

    Parameters
    ----------
    learning_rate : float
        Learning rate coefficient.
    grads : dict
        A dictionary mapping from the model's parameters to their
        gradients.
    lr_scalers : dict
        A dictionary mapping from the model's parameters to a learning
        rate multiplier.
    """
    gshared = OrderedDict({p: sharedX(p.get_value() * 0.,
                                      name='%s_grad' % p.name)
                           for p, g in grads.iteritems()})
    gsup = [(gs, g) for gs, g in zip(gshared.values(), grads.values())]

    get_norms = lambda x: T.sqrt(sum(map(lambda y: (y ** 2).sum(), x)))
    gnorm = get_norms(grads.values())
    pnorm = get_norms(grads.keys())

    f_grad_shared = theano.function(inp,
                                    [cost, errors, gnorm, pnorm],
                                    updates=gsup)

    updates = OrderedDict()
    for param, grad in gshared.items():  # was gshared.keys(): keys alone cannot unpack
        vel = sharedX(param.get_value() * 0.)
        assert param.dtype == vel.dtype
        assert grad.dtype == param.dtype
        if param.name is not None:
            vel.name = 'vel_' + param.name

        scaled_lr = learning_rate * lr_scalers.get(param, 1.)
        updates[vel] = self.momentum * vel - scaled_lr * grad

        inc = updates[vel]
        if self.nesterov_momentum:
            inc = self.momentum * inc - scaled_lr * grad
        assert inc.dtype == vel.dtype
        updates[param] = param + inc

    f_update = theano.function([learning_rate],
                               [],
                               updates=updates,
                               on_unused_input='ignore')
    return f_grad_shared, f_update
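A standalone NumPy sketch of the same velocity rule, including the Nesterov variant (momentum_step is a hypothetical helper):

import numpy as np

def momentum_step(param, grad, vel, lr=0.01, momentum=0.9, nesterov=False):
    # Velocity accumulates a decaying sum of past gradients.
    vel = momentum * vel - lr * grad
    inc = vel
    if nesterov:
        # Nesterov look-ahead: re-apply momentum to the fresh velocity.
        inc = momentum * vel - lr * grad
    return param + inc, vel

p, v = np.array([1.0]), np.zeros(1)
for _ in range(3):
    p, v = momentum_step(p, 2.0 * p, v)  # gradient of f(p) = p**2 is 2p
print(p)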