This article collects typical usage examples of the OrderedDict.update method from the Python module theano.compat. If you are unsure what OrderedDict.update does, how to call it, or want working examples of it, the curated method examples below should help. You can also read more about the containing class, theano.compat.OrderedDict.
The listing below shows 11 code examples of OrderedDict.update, sorted by popularity by default.
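Before the examples, here is a minimal sketch of what OrderedDict.update itself does. The import assumes a Theano version whose theano.compat module still exposes OrderedDict (newer code can use collections.OrderedDict directly), and the keys are purely illustrative:

from theano.compat import OrderedDict

channels = OrderedDict()
channels['loss'] = 0.5
# update() overwrites existing keys in place and appends new keys at the
# end, preserving insertion order
channels.update(OrderedDict([('loss', 0.4), ('accuracy', 0.9)]))
print(list(channels.items()))  # [('loss', 0.4), ('accuracy', 0.9)]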
Example 1: get_monitoring_channels
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
def get_monitoring_channels(self, data):
    rval = OrderedDict()
    try:
        rval.update(self.mlp.get_monitoring_channels(data))
    except Exception:
        warnings.warn("something went wrong with compressor.mlp's monitoring channels")
    return rval
Example 2: get_monitoring_channels
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
def get_monitoring_channels(self, data):
    rval = OrderedDict()
    g_ch = self.generator.get_monitoring_channels(data)
    d_ch = self.discriminator.get_monitoring_channels((data, None))
    samples, _, conditional_data, _ = self.generator.sample_and_noise(100)
    d_samp_ch = self.discriminator.get_monitoring_channels(((samples, conditional_data), None))
    i_ch = OrderedDict()
    if self.inferer is not None:
        batch_size = self.inference_monitoring_batch_size
        sample, noise, conditional_data, _ = self.generator.sample_and_noise(batch_size)
        i_ch.update(self.inferer.get_monitoring_channels(((sample, conditional_data), noise)))
    if self.monitor_generator:
        for key in g_ch:
            rval["gen_" + key] = g_ch[key]
    if self.monitor_discriminator:
        # d_ch holds channels computed on real data, d_samp_ch on samples
        for key in d_ch:
            rval["dis_on_data_" + key] = d_ch[key]
        for key in d_samp_ch:
            rval["dis_on_samp_" + key] = d_samp_ch[key]
    if self.monitor_inference:
        for key in i_ch:
            rval["inf_" + key] = i_ch[key]
    return rval
Example 3: get_layer_monitoring_channels
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
def get_layer_monitoring_channels(self, state_below=None, state=None, targets=None):
    W, = self.transformer.get_params()
    assert W.ndim == 4
    sq_W = T.sqr(W)
    row_norms = T.sqrt(sq_W.sum(axis=(0, 1, 2)))
    P = state
    rval = OrderedDict()
    vars_and_prefixes = [(P, '')]
    for var, prefix in vars_and_prefixes:
        if not hasattr(var, 'ndim') or var.ndim != 4:
            print("expected 4D tensor, got")
            print(var)
            print(type(var))
            if isinstance(var, tuple):
                print("tuple length: %d" % len(var))
            assert False
        v_max = var.max(axis=3)
        v_min = var.min(axis=3)
        v_mean = var.mean(axis=3)
        v_range = v_max - v_min
        v_max = v_max.max(axis=(1, 2))
        v_min = v_min.min(axis=(1, 2))
        # max_x.mean_u is "the mean over *u*nits of the max over
        # e*x*amples". The x and u are included in the name because
        # otherwise it is hard to remember which axis is which when
        # reading the monitor. inner.outer is used rather than
        # outer_of_inner or something like that so that the mean_x.*
        # entries appear next to each other in the alphabetical list,
        # as these are commonly plotted together.
        for key, val in [('max_x.max_u', v_max.max()),
                         ('max_x.mean_u', v_max.mean()),
                         ('max_x.min_u', v_max.min()),
                         ('min_x.max_u', v_min.max()),
                         ('min_x.mean_u', v_min.mean()),
                         ('min_x.min_u', v_min.min()),
                         ('range_x.max_u', v_range.max()),
                         ('range_x.mean_u', v_range.mean()),
                         ('range_x.min_u', v_range.min()),
                         ('mean_x.max_u', v_mean.max()),
                         ('mean_x.mean_u', v_mean.mean()),
                         ('mean_x.min_u', v_mean.min())]:
            rval[prefix + key] = val
    rval.update(OrderedDict([('kernel_norms_min', row_norms.min()),
                             ('kernel_norms_mean', row_norms.mean()),
                             ('kernel_norms_max', row_norms.max())]))
    return rval
Example 4: get_layer_monitoring_channels
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
def get_layer_monitoring_channels(self, state_below=None,
                                  state=None, targets=None):
    W, = self.transformer.get_params()
    assert W.ndim == 5
    sq_W = T.sqr(W)
    row_norms = T.sqrt(sq_W.sum(axis=(1, 2, 3, 4)))
    rval = OrderedDict([
        ('kernel_norms_min', row_norms.min()),
        ('kernel_norms_mean', row_norms.mean()),
        ('kernel_norms_max', row_norms.max()),
    ])
    cost = self.cost
    orval = self.nonlin.get_monitoring_channels_from_state(state,
                                                           targets,
                                                           cost_fn=cost)
    rval.update(orval)
    return rval
Example 5: get_gradients
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
def get_gradients(self, model, data, **kwargs):
    space, sources = self.get_data_specs(model)
    space.validate(data)
    assert isinstance(model, AdversaryPair)
    g = model.generator
    d = model.discriminator
    S, d_obj, g_obj, i_obj = self.get_samples_and_objectives(model, data)
    g_params = g.get_params()
    d_params = d.get_params()
    for param in g_params:
        assert param not in d_params
    for param in d_params:
        assert param not in g_params
    d_grads = T.grad(d_obj, d_params)
    g_grads = T.grad(g_obj, g_params)
    if self.scale_grads:
        S_grad = T.grad(g_obj, S)
        scale = T.maximum(1., self.target_scale / T.sqrt(T.sqr(S_grad).sum()))
        g_grads = [g_grad * scale for g_grad in g_grads]
    rval = OrderedDict()
    if self.ever_train_discriminator:
        rval.update(OrderedDict(safe_zip(d_params, [self.now_train_discriminator * dg for dg in d_grads])))
    else:
        rval.update(OrderedDict(zip(d_params, itertools.repeat(theano.tensor.constant(0., dtype='float32')))))
    if self.ever_train_generator:
        rval.update(OrderedDict(safe_zip(g_params, [self.now_train_generator * gg for gg in g_grads])))
    else:
        rval.update(OrderedDict(zip(g_params, itertools.repeat(theano.tensor.constant(0., dtype='float32')))))
    if self.ever_train_inference and model.inferer is not None:
        i_params = model.inferer.get_params()
        i_grads = T.grad(i_obj, i_params)
        rval.update(OrderedDict(safe_zip(i_params, [self.now_train_inference * ig for ig in i_grads])))
    updates = OrderedDict()
    # Two d steps for every g step
    if self.alternate_g:
        updates[self.now_train_generator] = 1. - self.now_train_generator
    return rval, updates
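When scale_grads is set, the code above rescales the generator gradients by max(1, target_scale / ||dS||), boosting them whenever the gradient flowing back through the samples S is weak. A plain-numpy sketch of that rule (the input values are illustrative, not from the original code):

import numpy as np

def grad_scale(S_grad, target_scale):
    # mirrors T.maximum(1., self.target_scale / T.sqrt(T.sqr(S_grad).sum()))
    return max(1.0, target_scale / np.sqrt(np.square(S_grad).sum()))

print(grad_scale(np.array([0.01, 0.02]), target_scale=1.0))  # ~44.7, scaled up
print(grad_scale(np.array([3.0, 4.0]), target_scale=1.0))    # 1.0, left unchanged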
Example 6: get_monitoring_channels
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
def get_monitoring_channels(self, data):
    if data is None:
        m = 100
    else:
        m = data.shape[0]
    n = self.mlp.get_input_space().get_total_dimension()
    noise = self.get_noise((m, n))
    rval = OrderedDict()
    try:
        rval.update(self.mlp.get_monitoring_channels((noise, None)))
    except Exception:
        warnings.warn("something went wrong with generator.mlp's monitoring channels")
    if self.monitor_ll:
        rval['ll'] = T.cast(self.ll(data, self.ll_n_samples, self.ll_sigma),
                            theano.config.floatX).mean()
        rval['nll'] = -rval['ll']
    return rval
Example 7: get_lr_scalers
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
def get_lr_scalers(self):
    """
    .. todo::

        WRITEME
    """
    rval = OrderedDict()
    params = self.get_params()
    for layer in self.hidden_layers + [self.visible_layer]:
        contrib = layer.get_lr_scalers()
        # No two layers can contend to scale a parameter
        assert not any([key in rval for key in contrib])
        # Don't try to scale anything that's not a parameter
        assert all([key in params for key in contrib])
        rval.update(contrib)
    assert all([isinstance(val, float) for val in rval.values()])
    return rval
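For context, a training loop consumes the dictionary returned above by multiplying each parameter's global learning rate with its scaler, defaulting to 1.0 for parameters without an entry (pylearn2's SGD uses lr_scalers this way; the parameter names below are illustrative):

learning_rate = 0.1
lr_scalers = {'W_conv': 0.5, 'b_conv': 2.0}  # as returned by get_lr_scalers()
grads = {'W_conv': 0.3, 'b_conv': -0.1, 'W_softmax': 0.2}

for param, grad in grads.items():
    # per-parameter step: global rate times the layer's scaler, if any
    step = learning_rate * lr_scalers.get(param, 1.0) * grad
    print(param, step)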
Example 8: get_gradients
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
def get_gradients(self, model, data, **kwargs):
    space, sources = self.get_data_specs(model)
    space.validate(data)
    assert isinstance(model, CompressAdversaryPair)
    g = model.compressor
    d = model.discriminator
    # get raw gradients for d and g objectives...
    d_obj, g_obj = self.get_objectives(model, data)
    g_params = g.get_params()
    d_params = d.get_params()
    for param in g_params:
        assert param not in d_params
    for param in d_params:
        assert param not in g_params
    d_grads = T.grad(d_obj, d_params)
    g_grads = T.grad(g_obj, g_params)
    # if self.scale_grads:
    #     S_grad = T.grad(g_obj, S)
    #     scale = T.maximum(1., self.target_scale / T.sqrt(T.sqr(S_grad).sum()))
    #     g_grads = [g_grad * scale for g_grad in g_grads]
    # adjust raw gradients with control signals
    rval = OrderedDict()
    zeros = itertools.repeat(theano.tensor.constant(0., dtype='float32'))
    if self.ever_train_discriminator:
        rval.update(OrderedDict(safe_zip(d_params, [self.now_train_discriminator * dg for dg in d_grads])))
    else:
        rval.update(OrderedDict(zip(d_params, zeros)))
    if self.ever_train_compressor:
        rval.update(OrderedDict(safe_zip(g_params, [self.now_train_compressor * gg for gg in g_grads])))
    else:
        rval.update(OrderedDict(zip(g_params, zeros)))
    # update control signals using the updates return functionality
    updates = OrderedDict()
    # first, the clock
    self.future_train_clock = T.switch(
        T.ge(self.train_clock,
             self.discriminator_steps + self.joint_steps + self.compressor_steps),
        1., self.train_clock + 1.)
    updates[self.train_clock] = self.future_train_clock
    # then the control signals
    updates[self.now_train_discriminator] = T.switch(
        T.le(self.future_train_clock,
             self.discriminator_steps + self.joint_steps), 1., 0.)
    updates[self.now_train_compressor] = T.switch(
        T.gt(self.future_train_clock, self.discriminator_steps), 1., 0.)
    return rval, updates
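The T.switch expressions above implement a repeating schedule: the clock counts from 1 up to discriminator_steps + joint_steps + compressor_steps and then wraps; the discriminator trains while the clock is within the first discriminator_steps + joint_steps ticks, the compressor once it passes discriminator_steps, and both train during the joint phase. A plain-Python sketch of the same schedule (the step counts here are illustrative assumptions):

discriminator_steps, joint_steps, compressor_steps = 2, 1, 2
cycle = discriminator_steps + joint_steps + compressor_steps
clock = 0.0
for _ in range(2 * cycle):
    # advance and wrap the clock (mirrors future_train_clock above)
    clock = 1.0 if clock >= cycle else clock + 1.0
    train_d = clock <= discriminator_steps + joint_steps
    train_c = clock > discriminator_steps
    print(int(clock), 'D' if train_d else '-', 'C' if train_c else '-')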
Example 9: scan
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
def scan(fn,
         sequences=None,
         outputs_info=None,
         non_sequences=None,
         n_steps=None,
         truncate_gradient=-1,
         go_backwards=False,
         mode=None,
         name=None,
         profile=False,
         allow_gc=None,
         strict=False):
    """
    This function constructs and applies a Scan op to the provided
    arguments.

    :param fn:
        ``fn`` is a function that describes the operations involved in one
        step of ``scan``. ``fn`` should construct variables describing the
        output of one iteration step. It should expect as input theano
        variables representing all the slices of the input sequences
        and previous values of the outputs, as well as all other arguments
        given to scan as ``non_sequences``. The order in which scan passes
        these variables to ``fn`` is the following:

        * all time slices of the first sequence
        * all time slices of the second sequence
        * ...
        * all time slices of the last sequence
        * all past slices of the first output
        * all past slices of the second output
        * ...
        * all past slices of the last output
        * all other arguments (the list given as ``non_sequences`` to
          scan)

        The order of the sequences is the same as the one in the list
        ``sequences`` given to scan. The order of the outputs is the same
        as the order of ``outputs_info``. For any sequence or output, the
        order of the time slices is the same as the one in which they have
        been given as taps. For example, if one writes the following:

        .. code-block:: python

            scan(fn, sequences=[dict(input=Sequence1, taps=[-3, 2, -1]),
                                Sequence2,
                                dict(input=Sequence3, taps=3)],
                 outputs_info=[dict(initial=Output1, taps=[-3, -5]),
                               dict(initial=Output2, taps=None),
                               Output3],
                 non_sequences=[Argument1, Argument2])

        ``fn`` should expect the following arguments in this given order:

        #. ``Sequence1[t-3]``
        #. ``Sequence1[t+2]``
        #. ``Sequence1[t-1]``
        #. ``Sequence2[t]``
        #. ``Sequence3[t+3]``
        #. ``Output1[t-3]``
        #. ``Output1[t-5]``
        #. ``Output3[t-1]``
        #. ``Argument1``
        #. ``Argument2``

        The list of ``non_sequences`` can also contain shared variables
        used in the function, though ``scan`` is able to figure those
        out on its own, so they can be skipped. For the clarity of the
        code we nevertheless recommend providing them to scan. To some
        extent ``scan`` can also figure out other ``non sequences`` (not
        shared) even if not passed to scan (but used by ``fn``). A simple
        example of this would be:

        .. code-block:: python

            import theano.tensor as TT

            W = TT.matrix()
            W_2 = W ** 2

            def f(x):
                return TT.dot(x, W_2)

        The function is expected to return two things. One is a list of
        outputs ordered in the same order as ``outputs_info``, with the
        difference that there should be only one output variable per
        output initial state (even if no tap value is used). Secondly,
        ``fn`` should return an update dictionary (that tells how to
        update any shared variable after each iteration step). The
        dictionary can optionally be given as a list of tuples. There is
        no constraint on the order of these two lists: ``fn`` can return
        either ``(outputs_list, update_dictionary)``,
        ``(update_dictionary, outputs_list)``, or just one of the two (in
        case the other is empty).

        To use ``scan`` as a while loop, the user needs to change the
        function ``fn`` such that a stopping condition is also returned.
        To do so, the condition has to be wrapped in an ``until`` class
        and returned as a third element, for example:

        .. code-block:: python

            #.........part of the code omitted here.........
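The docstring above only describes the calling convention, so here is a minimal self-contained sketch of scan in use, computing a cumulative sum over a vector; it assumes a classic Theano install (the theano.scan / theano.function API of Theano 0.x/1.x):

import numpy
import theano
import theano.tensor as T

x = T.vector('x')

def step(x_t, acc_tm1):
    # x_t: current slice of the sequence; acc_tm1: previous output value
    return acc_tm1 + x_t

outputs, updates = theano.scan(fn=step,
                               sequences=x,
                               outputs_info=T.zeros_like(x[0]))

cumsum = theano.function([x], outputs, updates=updates)
print(cumsum(numpy.asarray([1.0, 2.0, 3.0], dtype=x.dtype)))  # -> [1. 3. 6.]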
Example 10: scan
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
#.........part of the code omitted here.........
n_elems = numpy.max([max_mit_sot, max_sit_sot])
_ordered_args = [[] for x in xrange(n_elems)]
offset = 0
for idx in xrange(n_mit_sot):
    n_inputs = len(mit_sot_tap_array[idx])
    if n_fixed_steps == 1:
        _ordered_args[mit_sot_rightOrder[idx]] = \
            mit_sot_inner_slices[offset:offset + n_inputs]
    else:
        _ordered_args[mit_sot_rightOrder[idx]] = \
            mit_sot_inner_inputs[offset:offset + n_inputs]
    offset += n_inputs
for idx in xrange(n_sit_sot):
    if n_fixed_steps == 1:
        _ordered_args[sit_sot_rightOrder[idx]] = [sit_sot_inner_slices[idx]]
    else:
        _ordered_args[sit_sot_rightOrder[idx]] = [sit_sot_inner_inputs[idx]]
ordered_args = []
for ls in _ordered_args:
    ordered_args += ls
if n_fixed_steps == 1:
    args = inner_slices + ordered_args + non_seqs
else:
    args = inner_seqs + ordered_args + non_seqs
# add only the non-shared variables and non-constants to the arguments of
# the dummy function [a function should not get shared variables or
# constants as input]
dummy_args = [arg for arg in args
              if (not isinstance(arg, SharedVariable) and
                  not isinstance(arg, tensor.Constant))]
# when we apply the lambda expression we get a mixture of update rules
# and outputs that need to be separated
lambda_result = fn(*args)
condition, outputs, updates = scan_utils.get_updates_and_outputs(lambda_result)
if condition is not None:
    as_while = True
else:
    as_while = False
##
# Step 3. Check if we actually need scan and remove it if we don't
##
if n_fixed_steps == 1:
    # We do not need to use the scan op anymore, so we can just return
    # the outputs and updates we have
    if condition is not None:
        _logger.warning("When the number of steps is fixed and equal "
                        "to 1, the provided stopping condition, " +
                        str(condition) + ", is ignored")
    for pos, inner_out in enumerate(outputs):
        # we need to see if we need to pad our sequences with an
        # unbroadcastable dimension; case example: we return an
        # output for which we want all intermediate results. If n_steps
        # is 1 and we return the output as given by the inner function,
        # it will represent only a slice and will have one dimension
        # less.
        if (isinstance(inner_out.type, tensor.TensorType) and
                return_steps.get(pos, 0) != 1):
            outputs[pos] = tensor.unbroadcast(
                tensor.shape_padleft(inner_out), 0)
    if len(outputs) == 1:
        outputs = outputs[0]
Example 11: isinstance
# Required import: from theano.compat import OrderedDict [as alias]
# Or: from theano.compat.OrderedDict import update [as alias]
    n_nit_sot += 1
# Step 5.5 all other arguments including extra inputs
other_scan_args = []
other_inner_args = []
other_scan_args += [arg for arg in non_seqs
                    if (not isinstance(arg, SharedVariable) and
                        not isinstance(arg, tensor.Constant))]
# Step 5.6 all shared variables with no update rules
other_inner_args += [safe_new(arg, '_copy') for arg in non_seqs
                     if (not isinstance(arg, SharedVariable) and
                         not isinstance(arg, tensor.Constant))]
givens.update(OrderedDict(zip(other_scan_args, other_inner_args)))
if strict:
    non_seqs_set = set(non_sequences if non_sequences is not None else [])
    other_shared_scan_args = [arg.variable for arg
                              in dummy_f.maker.expanded_inputs
                              if (isinstance(arg.variable, SharedVariable) and
                                  not arg.update and
                                  arg.variable in non_seqs_set)]
    other_shared_inner_args = [safe_new(arg.variable, '_copy') for arg
                               in dummy_f.maker.expanded_inputs
                               if (isinstance(arg.variable, SharedVariable) and
                                   not arg.update and
                                   arg.variable in non_seqs_set)]
else: