This article collects typical usage examples of the theano.tensor.identity_like method in Python. If you are wondering how tensor.identity_like is used in practice, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from its containing module, theano.tensor.
Six code examples of the tensor.identity_like method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: batch_jacobian
# Required import: from theano import tensor
# Or: from theano.tensor import identity_like
def batch_jacobian(f, wrt, size=None, *args, **kwargs):
    """Computes the Jacobian of f(x) w.r.t. x in parallel.

    Args:
        f: Symbolic function.
        wrt: Variable(s) to differentiate with respect to.
        size: Expected vector size of f(x).
        *args: Additional positional arguments to pass to `f()`.
        **kwargs: Additional keyword arguments to pass to `f()`.

    Returns:
        Theano tensor.
    """
    if isinstance(wrt, T.TensorVariable):
        if size is None:
            size = f(wrt, *args, **kwargs).shape[-1]
        x_rep = T.tile(wrt, (size, 1))
        y_rep = f(x_rep, *args, **kwargs)
    else:
        if size is None:
            size = f(*wrt, *args, **kwargs).shape[-1]
        x_rep = [T.tile(x, (size, 1)) for x in wrt]
        y_rep = f(*x_rep, *args, **kwargs)
    J = T.grad(
        cost=None,
        wrt=x_rep,
        known_grads={y_rep: T.identity_like(y_rep)},
        disconnected_inputs="ignore",
    )
    return J
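A minimal usage sketch (the explicit size, the element-wise square function, and the numeric input are assumptions added for illustration, not part of the original snippet). For an element-wise function the resulting Jacobian is diagonal, which makes the output easy to check by hand:

import numpy as np
import theano
import theano.tensor as T

x = T.vector("x")
square = lambda v: v ** 2                 # element-wise, so the Jacobian is diag(2x)
J = batch_jacobian(square, x, size=3)     # assume a 3-dimensional input
jac_fn = theano.function([x], J)
print(jac_fn(np.array([1.0, 2.0, 3.0], dtype=theano.config.floatX)))
# expected (up to float precision): [[2, 0, 0], [0, 4, 0], [0, 0, 6]]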
Example 2: get_output_for
# Required import: from theano import tensor
# Or: from theano.tensor import identity_like
def get_output_for(self, inputs, **kwargs):
    """
    Compute diffusion convolution of inputs.
    """
    A = inputs[0]
    X = inputs[1]
    # Normalize by degree.
    A = A / (T.sum(A, 0) + 1.0)
    Apow_list = [T.identity_like(A)]
    for i in range(1, self.parameters.num_hops + 1):
        Apow_list.append(A.dot(Apow_list[-1]))
    Apow = T.stack(Apow_list)
    Apow_dot_X = T.dot(Apow, X)
    Apow_dot_X_times_W = Apow_dot_X * self.W
    out = T.reshape(
        self.nonlinearity(T.mean(Apow_dot_X_times_W, 1)),
        (1, (self.parameters.num_hops + 1) * self.num_features)
    )
    return out
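A standalone sketch of the diffusion-power stack built above, with T.identity_like supplying the hop-0 (identity) term; the adjacency matrix, the num_hops value, and the variable names are assumptions for illustration, not the layer's actual configuration:

import numpy as np
import theano
import theano.tensor as T

A = T.matrix("A")                        # graph adjacency matrix
num_hops = 2
A_norm = A / (T.sum(A, 0) + 1.0)         # degree normalization, as in the layer
powers = [T.identity_like(A_norm)]       # hop 0: the identity matrix
for _ in range(num_hops):
    powers.append(A_norm.dot(powers[-1]))
Apow = T.stack(powers)                   # shape: (num_hops + 1, n, n)

f = theano.function([A], Apow)
print(f(np.ones((3, 3), dtype=theano.config.floatX)).shape)  # (3, 3, 3)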
Example 3: fast_jacobian
# Required import: from theano import tensor
# Or: from theano.tensor import identity_like
def fast_jacobian(expr, wrt, chunk_size=16, func=None):
    '''
    Computes the jacobian by tiling the inputs
    Copied from https://gist.github.com/aam-at/2b2bc5c35850b553d4ec
    '''
    assert isinstance(expr, Variable), \
        "tensor.jacobian expects a Variable as `expr`"
    assert expr.ndim < 2, \
        ("tensor.jacobian expects a 1 dimensional variable as "
         "`expr`. If not use flatten to make it a vector")

    num_chunks = tt.ceil(1.0 * expr.shape[0] / chunk_size)
    num_chunks = tt.cast(num_chunks, 'int32')
    steps = tt.arange(num_chunks)
    remainder = expr.shape[0] % chunk_size

    def chunk_grad(i):
        ''' operates on a subset of the gradient variables '''
        wrt_rep = tt.tile(wrt, (chunk_size, 1))
        if func is not None:
            expr_rep = func(wrt_rep)
        else:
            expr_rep, _ = theano.scan(
                fn=lambda wrt_: theano.clone(expr, {wrt: wrt_}),
                sequences=wrt_rep)
        chunk_expr_grad = tt.roll(
            tt.identity_like(expr_rep),
            i * chunk_size,
            axis=1)
        return tt.grad(cost=None,
                       wrt=wrt_rep,
                       known_grads={
                           expr_rep: chunk_expr_grad
                       })

    grads, _ = theano.scan(chunk_grad, sequences=steps)
    grads = grads.reshape((chunk_size * grads.shape[0], wrt.shape[0]))
    jac = ifelse.ifelse(tt.eq(remainder, 0), grads, grads[:expr.shape[0], :])
    return jac
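A minimal call sketch. It assumes the defining module already has the gist's imports (theano, theano.tensor as tt, ifelse, Variable); the quadratic expression, chunk size, and numeric input below are assumptions added for illustration:

import numpy as np
import theano
import theano.tensor as tt

x = tt.vector("x")
expr = x ** 2                                  # element-wise square; Jacobian is diag(2x)
J = fast_jacobian(expr, x, chunk_size=4)
jac_fn = theano.function([x], J)
print(jac_fn(np.arange(8, dtype=theano.config.floatX)))
# expected: an 8x8 matrix with 2*x on the diagonal and zeros elsewhere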
Example 4: __call__
# Required import: from theano import tensor
# Or: from theano.tensor import identity_like
def __call__(self, loss):
    loss += K.sum(K.square(self.p.dot(self.p.T) - T.identity_like(self.p))) * self.strength
    return loss
Example 5: lda_loss
# Required import: from theano import tensor
# Or: from theano.tensor import identity_like
def lda_loss(n_components, margin):
    """
    The main loss function (inner_lda_objective) is wrapped in this function due to
    the constraints imposed by Keras on objective functions.
    """
    def inner_lda_objective(y_true, y_pred):
        """
        This is the loss function of LDA as introduced in the original paper.
        It is adapted from the original implementation at the following link:
        https://github.com/CPJKU/deep_lda
        Note: it is implemented with Theano tensor operations and does not work on the TensorFlow backend.
        """
        r = 1e-4

        # init groups
        yt = T.cast(y_true.flatten(), "int32")
        groups = numpy_unique(yt)

        def compute_cov(group, Xt, yt):
            Xgt = Xt[T.eq(yt, group).nonzero()[0], :]
            Xgt_bar = Xgt - T.mean(Xgt, axis=0)
            m = T.cast(Xgt_bar.shape[0], 'float32')
            return (1.0 / (m - 1)) * T.dot(Xgt_bar.T, Xgt_bar)

        # scan over groups
        covs_t, updates = theano.scan(fn=compute_cov, outputs_info=None,
                                      sequences=[groups], non_sequences=[y_pred, yt])

        # compute average covariance matrix (within scatter)
        Sw_t = T.mean(covs_t, axis=0)

        # compute total scatter
        Xt_bar = y_pred - T.mean(y_pred, axis=0)
        m = T.cast(Xt_bar.shape[0], 'float32')
        St_t = (1.0 / (m - 1)) * T.dot(Xt_bar.T, Xt_bar)

        # compute between scatter
        Sb_t = St_t - Sw_t

        # cope with numerical instability (regularize)
        Sw_t += T.identity_like(Sw_t) * r

        # return T.cast(T.neq(yt[0], -1), 'float32')*T.nlinalg.trace(T.dot(T.nlinalg.matrix_inverse(St_t), Sb_t))

        # compute generalized eigenvalues
        evals_t = T.slinalg.eigvalsh(Sb_t, Sw_t)

        # take the top n_components eigenvalues
        top_k_evals = evals_t[-n_components:]

        # maximize variance between classes
        # (k smallest eigenvalues below threshold)
        thresh = T.min(top_k_evals) + margin
        top_k_evals = top_k_evals[(top_k_evals <= thresh).nonzero()]
        costs = T.mean(top_k_evals)

        return -costs

    return inner_lda_objective
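A sketch of plugging this loss into a Keras model on the Theano backend. The architecture, layer sizes, and hyperparameters are assumptions for illustration; it also assumes the helpers the snippet relies on (such as numpy_unique) are imported as in the original deep_lda repository:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([
    Dense(64, activation="relu", input_dim=20),
    Dense(10),                      # linear outputs feeding the LDA objective
])
model.compile(optimizer="adam", loss=lda_loss(n_components=9, margin=1.0))
# model.fit(X_train, y_train, ...)  # y_train holds integer class labels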
Example 6: test_grad_W
# Required import: from theano import tensor
# Or: from theano.tensor import identity_like
def test_grad_W(self):
    """tests that the gradient of the log probability with respect to W
    matches my analytical derivation """

    #self.model.set_param_values(self.new_params)

    g = T.grad(self.prob, self.model.W, consider_constant=self.mf_obs.values())

    B = self.model.B
    W = self.model.W
    mean_hsv = self.stats.d['mean_hsv']
    mean_sq_hs = self.stats.d['mean_sq_hs']
    mean_HS = self.mf_obs['H_hat'] * self.mf_obs['S_hat']
    m = mean_HS.shape[0]

    outer_prod = T.dot(mean_HS.T, mean_HS)
    outer_prod.name = 'outer_prod<from_observations>'
    outer = outer_prod / m
    mask = T.identity_like(outer)
    second_hs = (1. - mask) * outer + alloc_diag(mean_sq_hs)

    term1 = (B * mean_hsv).T
    term2 = - B.dimshuffle(0, 'x') * T.dot(W, second_hs)
    analytical = term1 + term2

    f = function([], (g, analytical))
    gv, av = f()

    assert gv.shape == av.shape
    max_diff = np.abs(gv - av).max()

    if max_diff > self.tol:
        print("gv")
        print(gv)
        print("av")
        print(av)
        raise Exception("analytical gradient on W deviates from theano gradient on W by up to " + str(max_diff))
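A self-contained sketch of the same verification pattern, reduced to a toy quadratic cost instead of the S3C model used above (the cost, tolerance, and shapes are assumptions): compute the Theano gradient and compare it element-wise against the hand-derived form.

import numpy as np
import theano
import theano.tensor as T

W = theano.shared(np.random.randn(4, 4).astype(theano.config.floatX), name="W")
cost = 0.5 * T.sum(W ** 2)            # hand derivation: d cost / d W = W
g = T.grad(cost, W)
analytical = W

gv, av = theano.function([], [g, analytical])()
assert gv.shape == av.shape
max_diff = np.abs(gv - av).max()
if max_diff > 1e-6:
    raise Exception("analytical gradient deviates from theano gradient by up to " + str(max_diff))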