This article collects typical usage examples of the Python method theano.clone. If you are unsure what theano.clone does or how to use it, the curated code examples below may help; you can also explore other usage examples from the theano module it belongs to.

Below are 15 code examples of theano.clone, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
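Before diving in, here is a minimal orientation sketch (the variables x, y, and z are illustrative, not from the examples below): theano.clone takes a graph and a replace mapping, and returns a copy of the graph with the replacements applied.

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
y = (x ** 2).sum()
z = T.vector('z')
y_on_z = theano.clone(y, replace={x: z})  # same graph, but reading from z
f = theano.function([z], y_on_z)
print(f(np.array([1, 2, 3], dtype=theano.config.floatX)))  # prints 14.0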
Example 1: test_gt_grad

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
import numpy
import theano
from theano import config


def test_gt_grad():
    """A user test that failed.

    Something about it made Elemwise.grad return something that was
    too complicated for get_scalar_constant_value to recognize as being 0,
    so gradient.grad reported that it was not a valid gradient of an
    integer.
    """
    floatX = config.floatX
    T = theano.tensor

    input_ = T.vector(dtype=floatX)
    random_values = numpy.random.RandomState(1234).uniform(
        low=-1, high=1, size=(2, 2))
    W_values = numpy.asarray(random_values, dtype=floatX)
    W = theano.shared(value=W_values, name='weights')
    correct_score = T.dot(input_, W)
    wrong_input = T.vector(dtype=floatX)
    wrong_score = theano.clone(correct_score, {input_: wrong_input})
    # Hinge loss
    scores = T.ones_like(correct_score) - correct_score + wrong_score
    cost = (scores * (scores > 0)).sum()
    T.grad(cost, input_)
Example 2: infer_shape

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def infer_shape(self, node, shapes):
    out_shp = theano.scan_module.scan_utils.infer_shape(self.new_outputs,
                                                        self.new_inputs,
                                                        shapes)

    # Clone the output shapes so that the shapes are computed from the
    # outer inputs.
    # Note: we could do this more simply as
    #     ret = [theano.clone(shp, replace=repl) for shp in out_shp]
    # but calling clone multiple times could duplicate common subgraphs
    # shared between the shape calls. The Theano optimizer would clean
    # this up later, but it would mean extra work for the optimizer.
    repl = dict(zip(self.new_inputs, node.inputs))
    cloned = theano.clone(reduce(tuple.__add__, out_shp), replace=repl)
    ret = []
    used = 0
    for i in range(len(out_shp)):
        nb = len(out_shp[i])
        ret.append(cloned[used: used + nb])
        used += nb
    return ret
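The comment in this example is the key point: cloning the concatenated shape tuples in a single call keeps subgraphs that are common to several shapes shared, whereas one clone call per shape would duplicate them. A minimal standalone sketch of the same batching idiom (illustrative expressions, not the Op machinery above):

import theano
import theano.tensor as T

x = T.matrix('x')
x_new = T.matrix('x_new')
shp_a = (x.shape[0], x.shape[1])  # two shape tuples sharing the ancestor x
shp_b = (x.shape[1],)
cloned = theano.clone(shp_a + shp_b, replace={x: x_new})  # one clone call
shp_a_new, shp_b_new = cloned[:2], cloned[2:]             # slice back apart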
Example 3: reconstruct_graph

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def reconstruct_graph(inputs, outputs, tag=None):
    """
    A different interface to clone that allows you to pass in inputs.
    Compared to clone, this method always replaces the inputs with
    new variables of the same type, and returns those (in the same
    order as the original inputs).
    """
    if tag is None:
        tag = ''
    nw_inputs = [safe_new(x, tag) for x in inputs]
    givens = OrderedDict()
    for nw_x, x in izip(nw_inputs, inputs):
        givens[x] = nw_x
    allinputs = theano.gof.graph.inputs(outputs)
    for inp in allinputs:
        if isinstance(inp, theano.Constant):
            givens[inp] = inp.clone()
    nw_outputs = clone(outputs, replace=givens)
    return (nw_inputs, nw_outputs)
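A minimal sketch of what reconstruct_graph achieves, written with plain theano.clone (illustrative names; safe_new from Theano's scan utilities does essentially what x.type(...) does here, plus tagging):

import theano
import theano.tensor as T

x = T.vector('x')
out = T.tanh(x).sum()
nw_x = x.type('x_copy')                       # fresh variable of the same type
nw_out = theano.clone(out, replace={x: nw_x})
f = theano.function([nw_x], nw_out)           # compiled against the new input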
Example 4: __init__

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def __init__(self, incoming, servoing_pol, **kwargs):
    assert isinstance(servoing_pol, TheanoServoingPolicy)
    super(TheanoServoingPolicyLayer, self).__init__(incoming, **kwargs)
    assert len(self.input_shape) == 4 and self.input_shape[1] == 6
    self.action_space = servoing_pol.action_space

    self.sqrt_w_var = self.add_param(np.sqrt(servoing_pol.w).astype(theano.config.floatX),
                                     servoing_pol.w.shape, name='sqrt_w')
    self.sqrt_lambda_var = self.add_param(np.sqrt(servoing_pol.lambda_).astype(theano.config.floatX),
                                          servoing_pol.lambda_.shape, name='sqrt_lambda')
    self.w_var = self.sqrt_w_var ** 2
    self.lambda_var = self.sqrt_lambda_var ** 2

    self.X_var, U_var, self.X_target_var, self.U_lin_var, alpha_var = servoing_pol.input_vars
    w_var, lambda_var = servoing_pol.param_vars
    pi_var = servoing_pol._get_pi_var()
    self.pi_var = theano.clone(pi_var, replace={w_var: self.w_var,
                                                lambda_var: self.lambda_var,
                                                alpha_var: np.array(servoing_pol.alpha, dtype=theano.config.floatX)})
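The sqrt parameterization above keeps the substituted weights nonnegative: the layer learns sqrt_w, and the cloned graph sees sqrt_w ** 2. A standalone sketch of the same clone-based substitution (illustrative names):

import theano
import theano.tensor as T

w = T.dscalar('w')
cost = w * 3.0
sqrt_w = theano.shared(2.0, name='sqrt_w')
cost_pos = theano.clone(cost, replace={w: sqrt_w ** 2})  # w >= 0 by construction
print(cost_pos.eval())  # prints 12.0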
Example 5: _get_jac_vars

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def _get_jac_vars(self):
    if not self.predictor.feature_jacobian_name:
        raise NotImplementedError

    X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

    names = [self.predictor.feature_name,
             self.predictor.feature_jacobian_name,
             self.predictor.next_feature_name]
    vars_ = L.get_output([self.predictor.pred_layers[name]
                          for name in iter_util.flatten_tree(names)],
                         deterministic=True)
    feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

    y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
    y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
    y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                          y_target_var,
                                          alpha_var * y_target_var + (1 - alpha_var) * y_var)
                     for (y_var, y_target_var) in zip(y_vars, y_target_vars)]
    jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
    return jac_vars
Example 6: _get_jac_z_vars

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def _get_jac_z_vars(self):
    if not self.predictor.feature_jacobian_name:
        raise NotImplementedError

    X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

    names = [self.predictor.feature_name,
             self.predictor.feature_jacobian_name,
             self.predictor.next_feature_name]
    vars_ = L.get_output([self.predictor.pred_layers[name]
                          for name in iter_util.flatten_tree(names)],
                         deterministic=True)
    feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

    y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
    y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
    y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                          y_target_var,
                                          alpha_var * y_target_var + (1 - alpha_var) * y_var)
                     for (y_var, y_target_var) in zip(y_vars, y_target_vars)]
    jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]

    y_next_pred_vars = [T.flatten(next_feature_var, outdim=2) for next_feature_var in next_feature_vars]
    y_next_pred_vars = [theano.clone(y_next_pred_var, replace={U_var: U_lin_var})
                        for y_next_pred_var in y_next_pred_vars]

    z_vars = [y_target_var - y_next_pred_var + T.batched_tensordot(jac_var, U_lin_var, axes=(2, 1))
              for (y_target_var, y_next_pred_var, jac_var) in zip(y_target_vars, y_next_pred_vars, jac_vars)]
    return jac_vars, z_vars
Example 7: deep_clone

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def deep_clone(output, replace, **kwargs):
    """
    Like theano.clone, but makes sure to replace in the default_update of
    shared variables as well.
    """
    new_output = list(output)
    default_update_idxs = []
    for idx, v in enumerate(theano.gof.graph.inputs(output)):
        if hasattr(v, "default_update"):
            new_output.append(v.default_update)
            default_update_idxs.append(idx)
    cloned = theano.clone(new_output, replace, **kwargs)
    cloned_output = cloned[:len(output)]
    cloned_default_updates = cloned[len(output):]
    assert len(cloned_default_updates) == len(default_update_idxs)
    cloned_inputs = theano.gof.graph.inputs(cloned_output)
    for idx, update in zip(default_update_idxs, cloned_default_updates):
        v = cloned_inputs[idx]
        assert hasattr(v, "default_update")
        v.default_update = update
    return cloned_output
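The reason this helper exists: theano.clone does not descend into the default_update attribute of shared variables, so a plainly cloned graph still carries the original update expressions. A small sketch of the pattern deep_clone handles (illustrative variables):

import theano
import theano.tensor as T

counter = theano.shared(0.0, name='counter')
counter.default_update = counter + 1.0  # applied whenever counter is used
x = T.scalar('x')
out = x + counter
out2 = theano.clone(out, replace={x: x * 2})
# The counter reachable from out2 still holds the ORIGINAL default_update
# graph; deep_clone above also clones that update and re-attaches it.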
Example 8: clone

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def clone(self, **new_inputs):
    new_obj = utils.copy(self)
    # Reorder the new inputs to match the order of the existing ones,
    # pairing them up by variable name.
    assert len(new_obj.inputs) == len(new_inputs)
    pairs = [(x, new_inputs[x.name]) for x in new_obj.inputs]
    new_obj.inputs = list(new_inputs.values())
    new_obj.out = theano.clone(new_obj.out, replace=pairs)
    if hasattr(new_obj, 'cost'):
        new_obj.cost = theano.clone(new_obj.cost, replace=pairs)
    if hasattr(new_obj, 'grads'):
        new_obj.grads = theano.clone(new_obj.grads, replace=pairs)
    if hasattr(new_obj, 'sample'):
        new_obj.sample = theano.clone(new_obj.sample, replace=pairs)
    return new_obj
Example 9: fuse

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
import numbers

import theano
import theano.tensor as T


def fuse(building_blocks, fuse_dim=4, input_variables=None, entry_expression=None,
         output_expressions=-1, input_dtype='float32'):
    num_blocks = len(building_blocks)
    if isinstance(output_expressions, numbers.Number):
        output_expressions = [output_expressions]
    # Account for negative indices (-1, -2, etc.).
    output_expressions = [oe % num_blocks for oe in output_expressions]
    if fuse_dim == 4:
        fuse_block = T.tensor4
    else:
        fuse_block = T.matrix
    if input_variables is None and entry_expression is None:
        input_variables = fuse_block(dtype=input_dtype)
        entry_expression = input_variables
    current_expression = entry_expression
    outputs = []
    for i, block in enumerate(building_blocks):
        if not hasattr(block, "expression_"):
            block._build_expression()
        current_expression = theano.clone(
            block.expression_,
            replace={block.input_: current_expression},
            strict=False)
        if i in output_expressions:
            outputs.append(current_expression)
    return outputs, input_variables
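A usage sketch for fuse, assuming two hypothetical building blocks that expose the input_ and expression_ attributes the loop above relies on:

import theano.tensor as T

class ScaleBlock(object):
    """Hypothetical block satisfying the interface fuse expects."""
    def __init__(self, factor):
        self.factor = factor
    def _build_expression(self):
        self.input_ = T.matrix('input')
        self.expression_ = self.input_ * self.factor

blocks = [ScaleBlock(2.0), ScaleBlock(3.0)]
outputs, input_var = fuse(blocks, fuse_dim=2)  # chains to: input * 2 * 3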
Example 10: get_output_for

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def get_output_for(self, input, **kwargs):
    return theano.clone(self.pi_var, replace={self.X_var: input[:, :3, :, :],
                                              self.X_target_var: input[:, 3:, :, :],
                                              self.U_lin_var: T.zeros((input.shape[0],) + self.action_space.shape)})
Example 11: fast_jacobian

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
import theano
from theano import ifelse
from theano import tensor as tt
from theano.gof import Variable


def fast_jacobian(expr, wrt, chunk_size=16, func=None):
    """
    Computes the Jacobian by tiling the inputs.
    Copied from https://gist.github.com/aam-at/2b2bc5c35850b553d4ec
    """
    assert isinstance(expr, Variable), \
        "tensor.jacobian expects a Variable as `expr`"
    assert expr.ndim < 2, \
        ("tensor.jacobian expects a 1 dimensional variable as "
         "`expr`. If not, use flatten to make it a vector")

    num_chunks = tt.ceil(1.0 * expr.shape[0] / chunk_size)
    num_chunks = tt.cast(num_chunks, 'int32')
    steps = tt.arange(num_chunks)
    remainder = expr.shape[0] % chunk_size

    def chunk_grad(i):
        """Operates on a subset of the gradient variables."""
        wrt_rep = tt.tile(wrt, (chunk_size, 1))
        if func is not None:
            expr_rep = func(wrt_rep)
        else:
            expr_rep, _ = theano.scan(
                fn=lambda wrt_: theano.clone(expr, {wrt: wrt_}),
                sequences=wrt_rep)
        chunk_expr_grad = tt.roll(
            tt.identity_like(expr_rep),
            i * chunk_size,
            axis=1)
        return tt.grad(cost=None,
                       wrt=wrt_rep,
                       known_grads={
                           expr_rep: chunk_expr_grad
                       })

    grads, _ = theano.scan(chunk_grad, sequences=steps)
    grads = grads.reshape((chunk_size * grads.shape[0], wrt.shape[0]))
    jac = ifelse.ifelse(tt.eq(remainder, 0), grads, grads[:expr.shape[0], :])
    return jac
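A usage sketch, assuming the imports above are in scope and using a simple elementwise expression (at x = 0 the Jacobian of tanh is the identity):

import numpy as np
import theano
import theano.tensor as tt

x = tt.vector('x')
expr = tt.tanh(x)                   # 1-D, as the asserts above require
jac = fast_jacobian(expr, x, chunk_size=4)
f = theano.function([x], jac)
print(f(np.zeros(3, dtype=theano.config.floatX)))  # ~3x3 identity matrix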
Example 12: get_output_for

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def get_output_for(self, input, deterministic=False, **kwargs):
    beta = self.beta
    if not deterministic:
        # Clone beta without sharing inputs, so we can attach a
        # default_update that tracks a running percentile of the input.
        self_beta = theano.clone(self.beta, share_inputs=False)
        input_beta = ttt.percentile(input, self.perc)
        self_beta.default_update = ((1 - self.alpha) * self_beta +
                                    self.alpha * input_beta)
        # Force self_beta (and hence its default_update) into the graph.
        beta += 0 * self_beta
    # Thresholding
    return theano.tensor.nnet.relu(input - beta, 0.0)
Example 13: compute_output

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def compute_output(self, network, in_vw):
    deterministic = network.find_hyperparameter(["deterministic"])
    p = network.find_hyperparameter(["dropout_probability",
                                     "probability",
                                     "p"],
                                    0)
    if deterministic or p == 0:
        network.copy_vw(
            name="default",
            previous_vw=in_vw,
            tags={"output"},
        )
    else:
        rescale_factor = 1 / (1 - p)
        mask_shape = in_vw.shape
        if any(s is None for s in mask_shape):
            # NOTE: this uses a symbolic shape, which can be an issue with
            # theano.clone and random numbers:
            # https://groups.google.com/forum/#!topic/theano-users/P7Mv7Fg0kUs
            warnings.warn("using symbolic shape for dropout mask, "
                          "which can be an issue with theano.clone")
            mask_shape = in_vw.variable.shape
        # TODO: save this state so that we can seed the rng
        srng = MRG_RandomStreams()
        # Set the Bernoulli probability to the complement of the dropout
        # probability, because a draw of 1 means keeping the unit.
        bernoulli_prob = 1 - p
        mask = rescale_factor * srng.binomial(mask_shape,
                                              p=bernoulli_prob,
                                              dtype=fX)
        network.create_vw(
            "default",
            variable=in_vw.variable * mask,
            shape=in_vw.shape,
            tags={"output"},
        )
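The warning above points at a known gotcha: random streams bake their state into the graph, so when the mask shape is itself symbolic, a later theano.clone of the graph can interact badly with the random state (see the linked thread). A minimal sketch of the pattern being warned about (illustrative names):

import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

x = T.matrix('x')
srng = MRG_RandomStreams(seed=42)
mask = srng.binomial(x.shape, p=0.5, dtype='float32')  # symbolic shape
dropped = x * mask
# Cloning `dropped` (e.g. replacing x) copies the graph around the baked-in
# random state, which is the interaction the warning is about.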
Example 14: test_clone

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def test_clone():
    """
    NOTE: if this test eventually passes (e.g. theano fixes the issue),
    deep_clone may no longer be necessary
    """
    _clone_test_case(theano.clone)
Example 15: compute_output

# Required module: import theano [as alias]
# Or: from theano import clone [as alias]
def compute_output(self, network, mu_vw, sigma_vw):
    deterministic = network.find_hyperparameter(["deterministic"], False)
    if deterministic:
        res = mu_vw.variable
    else:
        # TODO: look at the shapes of both mu and sigma
        shape = mu_vw.shape
        if any(s is None for s in shape):
            # NOTE: this uses a symbolic shape, which can be an issue with
            # theano.clone and random numbers:
            # https://groups.google.com/forum/#!topic/theano-users/P7Mv7Fg0kUs
            warnings.warn("using symbolic shape for random number shape, "
                          "which can be an issue with theano.clone")
            shape = mu_vw.variable.shape
        # TODO: save this state so that we can seed the rng
        srng = MRG_RandomStreams()
        res = srng.normal(shape,
                          avg=mu_vw.variable,
                          std=sigma_vw.variable,
                          dtype=fX)
    network.create_vw(
        "default",
        variable=theano.gradient.disconnected_grad(res),
        shape=mu_vw.shape,
        tags={"output"},
    )