This article collects typical usage examples of the Python method theano.printing.Print. If you have been wondering what printing.Print does, how to call it, or want working examples of it, the curated code samples below may help. You can also read further about the theano.printing
module that the method belongs to.
The following presents 10 code examples of printing.Print, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: put_hook
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def put_hook(variable, hook_fn, *args):
r"""Put a hook on a Theano variables.
Ensures that the hook function is executed every time when the value
of the Theano variable is available.
Parameters
----------
variable : :class:`~tensor.TensorVariable`
The variable to put a hook on.
hook_fn : function
The hook function. Should take a single argument: the variable's
value.
\*args : list
Positional arguments to pass to the hook function.
"""
return printing.Print(global_fn=lambda _, x: hook_fn(x, *args))(variable)
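A minimal usage sketch (the hook function, variable names, and data below are invented for illustration, not part of the original source): the hook fires each time the wrapped variable's value is computed.

import numpy
import theano
import theano.tensor as tensor
from theano import printing

def report_mean(value, label):
    # Receives the variable's runtime value on every evaluation.
    print(label, value.mean())

x = tensor.matrix('x')
y = put_hook(tensor.nnet.sigmoid(x), report_mean, 'sigmoid mean:')
f = theano.function([x], y)
f(numpy.ones((2, 3), dtype=theano.config.floatX))  # prints roughly: sigmoid mean: 0.731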
Example 2: print_tensor
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def print_tensor(message, variable):
"""A small helper function that makes printing Theano variables a little bit
easier.
:type message: str
:param message: message, typically the variable name
:type variable: Variable
:param variable: any tensor variable to be printed
:rtype: Variable
:returns: a tensor variable to be used further down the graph in place of
``variable``
"""
print_op = printing.Print(message)
return print_op(variable)
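A short usage sketch (variable names assumed): note that the returned variable has to be used further down the graph, otherwise the print op is never evaluated.

import theano
import theano.tensor as tensor
from theano import printing

v = tensor.vector('v')
v_printed = print_tensor('v =', v)
cost = (v_printed ** 2).sum()     # use the printing version downstream
f = theano.function([v], cost)
f([1.0, 2.0])  # prints something like: v = __str__ = [ 1.  2.]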
Example 3: print_tensor
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def print_tensor(x, message=''):
'''Print the message and the tensor when evaluated and return the same
tensor.
'''
p_op = Print(message)
return p_op(x)
# GRAPH MANIPULATION
Example 4: print_tensor
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def print_tensor(x, message=''):
"""Print the message and the tensor when evaluated and return the same
tensor.
"""
p_op = Print(message)
return p_op(x)
# GRAPH MANIPULATION
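Examples 3 and 4 are the same helper; the docstring quoting style is the only difference. A sketch of a use distinct from Example 2 (all names invented): wiring the print into a gradient computation so its value can be watched during optimization.

import theano
import theano.tensor as tensor
from theano.printing import Print

w = tensor.scalar('w')
loss = (w - 3.0) ** 2
grad = print_tensor(theano.grad(loss, w), message='d loss / d w =')
step = theano.function([w], w - 0.1 * grad)
step(0.0)  # prints the gradient (-6.0) and returns the updated weight, 0.6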
Example 5: multivariate_normal_nohypers
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def multivariate_normal_nohypers(datasets, weights, hyperparams, residuals):
"""
Calculate the posterior likelihood of a multivariate normal distribution.
Uses the plain inverse of the covariances.
DEPRECATED! Currently not used in beat.
Can only be executed within a `with model:` context.
Parameters
----------
datasets : list
of :class:`heart.SeismicDataset` or :class:`heart.GeodeticDataset`
weights : list
of :class:`theano.shared`
Square matrix of the inverse of the covariance matrix as weights
hyperparams : dict
of :class:`theano.`
residuals : list or array of model residuals
Returns
-------
array_like
"""
# Assumed module-level imports in the source project: theano.tensor as tt,
# shared from theano, Print from theano.printing, and a log_2pi constant.
n_t = len(datasets)
logpts = tt.zeros((n_t), 'float64')
for l, data in enumerate(datasets):
    M = tt.cast(shared(
        data.samples, name='nsamples', borrow=True), 'int16')
    # Mahalanobis term of the residuals under the inverse-covariance weights
    maha = residuals[l].dot(weights[l]).dot(residuals[l].T)
    # Print reports the log determinant each time the graph is evaluated
    slogpdet = Print('theano logpdet')(data.covariance.slog_pdet)
    logpts = tt.set_subtensor(
        logpts[l:l + 1],
        (-0.5) * (
            M * log_2pi + slogpdet + maha
        ))
return logpts
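For reference, each entry filled in by the loop is the multivariate normal log-density -0.5 * (M * log(2*pi) + log|C| + r^T C^-1 r), where weights[l] holds the inverse covariance C^-1 and slog_pdet holds log|C|. A NumPy cross-check of that formula on toy data (all names below are hypothetical):

import numpy as np
from scipy.stats import multivariate_normal

C = np.array([[2.0, 0.3], [0.3, 1.0]])   # toy covariance matrix
r = np.array([0.5, -1.2])                # toy residual vector
M = r.size
maha = r.dot(np.linalg.inv(C)).dot(r)
logp = -0.5 * (M * np.log(2 * np.pi) + np.log(np.linalg.det(C)) + maha)
assert np.isclose(logp, multivariate_normal(mean=np.zeros(M), cov=C).logpdf(r))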
Example 6: assert_tensor_eq
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def assert_tensor_eq(result, name1, name2, variable1, variable2):
"""A small helper function that makes it a little bit easier to assert that
two Theano variables are equal.
:type result: Variable
:param result: what the result of the operation should be
:type name1: str
:param name1: name of the first variable
:type name2: str
:param name2: name of the second variable
:type variable1: Variable
:param variable1: the first variable
:type variable2: Variable
:param variable2: the second variable
:rtype: Variable
:returns: a tensor variable that returns the same value as ``result``, and
asserts that ``variable1`` equals ``variable2``
"""
# print_op = printing.Print(name1 + ":")
# variable1 = tensor.switch(tensor.neq(variable1, variable2),
# print_op(variable1),
# variable1)
# print_op = printing.Print(name2 + ":")
# variable2 = tensor.switch(tensor.neq(variable1, variable2),
# print_op(variable2),
# variable2)
assert_op = tensor.opt.Assert(name1 + " != " + name2)
return assert_op(result, tensor.eq(variable1, variable2))
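A brief usage sketch (inputs invented for illustration): the assertion is checked at run time, every time the compiled function is called.

import theano
import theano.tensor as tensor

a = tensor.vector('a')
b = tensor.vector('b')
checked = assert_tensor_eq(a + b, 'a', 'b', a, b)
f = theano.function([a, b], checked)
f([1.0, 2.0], [1.0, 2.0])   # returns array([ 2.,  4.])
f([1.0, 2.0], [9.0, 2.0])   # raises AssertionError with message "a != b"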
Example 7: arg_of_softmax
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def arg_of_softmax(Y_hat):
"""
Given the output of a call to theano.tensor.nnet.softmax,
returns the argument to the softmax (by tracing the Theano
graph).
Parameters
----------
Y_hat : Variable
softmax(Z)
Returns
-------
Z : Variable
The variable that was passed to the Softmax op to create `Y_hat`.
Raises an error if `Y_hat` is not actually the output of a
Softmax.
"""
assert hasattr(Y_hat, 'owner')
owner = Y_hat.owner
assert owner is not None
op = owner.op
if isinstance(op, Print):
assert len(owner.inputs) == 1
Y_hat, = owner.inputs
owner = Y_hat.owner
op = owner.op
if not isinstance(op, T.nnet.Softmax):
raise ValueError("Expected Y_hat to be the output of a softmax, "
"but it appears to be the output of " + str(op) +
" of type " + str(type(op)))
z, = owner.inputs
assert z.ndim == 2
return z
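A minimal sketch of the tracing (names assumed): the function transparently skips one wrapping Print op and then recovers the softmax argument.

import theano.tensor as T
from theano.printing import Print

Z = T.matrix('Z')
Y_hat = T.nnet.softmax(Z)
assert arg_of_softmax(Y_hat) is Z
# A Print op between the softmax and the caller is skipped:
assert arg_of_softmax(Print('Y_hat')(Y_hat)) is Z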
Example 8: arg_of_sigmoid
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def arg_of_sigmoid(Y_hat):
"""
Given the output of a call to theano.tensor.nnet.sigmoid,
returns the argument to the sigmoid (by tracing the Theano
graph).
Parameters
----------
Y_hat : Variable
T.nnet.sigmoid(Z)
Returns
-------
Z : Variable
The variable that was passed to T.nnet.sigmoid to create `Y_hat`.
Raises an error if `Y_hat` is not actually the output of a theano
sigmoid.
"""
assert hasattr(Y_hat, 'owner')
owner = Y_hat.owner
assert owner is not None
op = owner.op
if isinstance(op, Print):
assert len(owner.inputs) == 1
Y_hat, = owner.inputs
owner = Y_hat.owner
op = owner.op
success = False
if isinstance(op, T.Elemwise):
if isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid):
success = True
if not success:
raise TypeError("Expected Y_hat to be the output of a sigmoid, "
"but it appears to be the output of " + str(op) +
" of type " + str(type(op)))
z, = owner.inputs
assert z.ndim == 2
return z
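The same tracing idea for the sigmoid case, plus the failure path (a sketch; names assumed):

import theano.tensor as T

Z = T.matrix('Z')
assert arg_of_sigmoid(T.nnet.sigmoid(Z)) is Z
try:
    arg_of_sigmoid(T.tanh(Z))   # not a sigmoid output
except TypeError as e:
    print(e)                    # "Expected Y_hat to be the output of a sigmoid, ..."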
Example 9: printing
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def printing(x, string=''):
"""Prints the value of a tensor variable
:param x: Tensor variable
:param string: Prefix to print
:return: The same tensor variable as x
"""
return theano.printing.Print(string)(x)
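One-line usage (a sketch; names invented). Note that the helper's name shadows the theano.printing submodule in its defining namespace; the body still works because it resolves printing as an attribute of the theano package.

import theano
import theano.tensor as tensor

x = tensor.scalar('x')
y = printing(x ** 2, 'x squared:')
theano.function([x], y)(3.0)  # prints something like: x squared: __str__ = 9.0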
Example 10: recons_cost
# Required import: from theano import printing [as alias]
# Or alternatively: from theano.printing import Print [as alias]
def recons_cost(self, Y, Y_hat_unmasked, drop_mask_Y, scale):
"""
The cost of reconstructing `Y` as `Y_hat`. Specifically,
the negative log probability.
This cost is for use with multi-prediction training.
Parameters
----------
Y : target space batch
The data labels
Y_hat_unmasked : target space batch
The output of this layer's `mf_update`; the predicted
values of `Y`. Even though the model is only predicting
the dropped values, we take predictions for all the
values here.
drop_mask_Y : 1-D theano tensor
A batch of 0s/1s, with 1s indicating that variables
have been dropped, and should be included in the
reconstruction cost. One indicator per example in the
batch, since each example in this layer only has one
random variable in it.
scale : float
Multiply the cost by this amount.
We need to do this because the visible layer also goes into
the cost. We use the mean over units and examples, so that
the scale of the cost doesn't change too much with batch
size or example size.
We need to multiply this cost by scale to make sure that
it is put on the same scale as the reconstruction cost
for the visible units, i.e., scale should be 1/nvis.
"""
Y_hat = Y_hat_unmasked
assert hasattr(Y_hat, 'owner')
owner = Y_hat.owner
assert owner is not None
op = owner.op
if isinstance(op, Print):
assert len(owner.inputs) == 1
Y_hat, = owner.inputs
owner = Y_hat.owner
op = owner.op
assert isinstance(op, T.nnet.Softmax)
z, = owner.inputs
assert z.ndim == 2
z = z - z.max(axis=1).dimshuffle(0, 'x')  # subtract the row max for numerical stability
log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))  # stable log-softmax
# we use sum and not mean because this is really one variable per row
log_prob_of = (Y * log_prob).sum(axis=1)
masked = log_prob_of * drop_mask_Y
assert masked.ndim == 1
rval = masked.mean() * scale * self.copies
return -rval
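The two lines computing log_prob above are a numerically stable log-softmax: subtracting the row maximum keeps exp() from overflowing without changing the result. A standalone NumPy mirror of the same computation (illustrative only):

import numpy as np

z = np.array([[1000.0, 0.0, -1000.0]])
z = z - z.max(axis=1, keepdims=True)                         # exp() can no longer overflow
log_prob = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
print(log_prob)  # finite values; a naive log(softmax(z)) would overflow to inf/nan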