本文整理汇总了Python中torch.eye函数的典型用法代码示例。如果您正苦于以下问题:Python eye函数的具体用法?Python eye怎么用?Python eye使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了eye函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: model
def model(self):
    """Model part of a variational GP: samples the latent function values ``f``.

    Samples ``f`` from a multivariate normal prior built from the kernel over
    ``self.X`` (plus jitter), then evaluates the likelihood at ``self.y`` when
    observations are present.

    NOTE(review): relies on the pre-1.0 torch ``Tensor.potrf`` API; modern
    torch replaces it with ``torch.linalg.cholesky`` — confirm target version.
    """
    self.set_mode("model")
    # Variational parameters for the latent function values.
    f_loc = self.get_param("f_loc")
    f_scale_tril = self.get_param("f_scale_tril")

    N = self.X.shape[0]
    # Kernel matrix with jitter added to the diagonal for Cholesky stability.
    Kff = self.kernel(self.X) + (torch.eye(N, out=self.X.new_empty(N, N)) *
                                 self.jitter)
    Lff = Kff.potrf(upper=False)  # lower Cholesky factor of Kff

    zero_loc = self.X.new_zeros(f_loc.shape)
    f_name = param_with_module_name(self.name, "f")
    if self.whiten:
        # Whitened parameterization: the prior is a standard normal and the
        # Cholesky factor is folded into the variational scale instead.
        Id = torch.eye(N, out=self.X.new_empty(N, N))
        pyro.sample(f_name,
                    dist.MultivariateNormal(zero_loc, scale_tril=Id)
                        .independent(zero_loc.dim() - 1))
        f_scale_tril = Lff.matmul(f_scale_tril)
    else:
        pyro.sample(f_name,
                    dist.MultivariateNormal(zero_loc, scale_tril=Lff)
                        .independent(zero_loc.dim() - 1))

    # Marginal variance of f from the (possibly un-whitened) scale factor.
    f_var = f_scale_tril.pow(2).sum(dim=-1)

    if self.whiten:
        # Map the whitened mean back to the original space.
        f_loc = Lff.matmul(f_loc.unsqueeze(-1)).squeeze(-1)
    f_loc = f_loc + self.mean_function(self.X)
    if self.y is None:
        return f_loc, f_var
    else:
        return self.likelihood(f_loc, f_var, self.y)
示例2: model
def model(self):
    """Model part of a sparse variational GP: samples the inducing values ``u``.

    Samples ``u`` at the inducing inputs ``Xu``, then computes the predictive
    mean/variance at ``self.X`` via ``conditional`` and scores ``self.y``
    under the likelihood (rescaled for mini-batching).

    NOTE(review): relies on the pre-1.0 torch ``Tensor.potrf`` API.
    """
    self.set_mode("model")
    Xu = self.get_param("Xu")  # inducing inputs
    u_loc = self.get_param("u_loc")  # variational mean of u
    u_scale_tril = self.get_param("u_scale_tril")  # variational scale of u

    M = Xu.shape[0]
    # Kernel over inducing points, jittered on the diagonal for stability.
    Kuu = self.kernel(Xu) + torch.eye(M, out=Xu.new_empty(M, M)) * self.jitter
    Luu = Kuu.potrf(upper=False)  # lower Cholesky factor of Kuu

    zero_loc = Xu.new_zeros(u_loc.shape)
    u_name = param_with_module_name(self.name, "u")
    if self.whiten:
        # Whitened parameterization: prior on u is a standard normal.
        Id = torch.eye(M, out=Xu.new_empty(M, M))
        pyro.sample(u_name,
                    dist.MultivariateNormal(zero_loc, scale_tril=Id)
                        .independent(zero_loc.dim() - 1))
    else:
        pyro.sample(u_name,
                    dist.MultivariateNormal(zero_loc, scale_tril=Luu)
                        .independent(zero_loc.dim() - 1))

    # Predictive distribution at X conditioned on the inducing values.
    f_loc, f_var = conditional(self.X, Xu, self.kernel, u_loc, u_scale_tril,
                               Luu, full_cov=False, whiten=self.whiten,
                               jitter=self.jitter)

    f_loc = f_loc + self.mean_function(self.X)
    if self.y is None:
        return f_loc, f_var
    else:
        # Rescale the likelihood so a mini-batch behaves like the full data set.
        with poutine.scale(None, self.num_data / self.X.shape[0]):
            return self.likelihood(f_loc, f_var, self.y)
示例3: __init__
def __init__(self):
    """Build three stacked vocab-to-vocab linear layers, each with its
    weight matrix initialized to the identity."""
    super(Tune, self).__init__()
    vocab_size = len(TEXT.vocab)
    identity = torch.eye(vocab_size)
    # linear1, linear2, linear3: identical shape and identity-initialized weights.
    for index in (1, 2, 3):
        layer = nn.Linear(vocab_size, vocab_size)
        layer.weight.data.copy_(identity)
        setattr(self, "linear%d" % index, layer)
示例4: enumerate_support
def enumerate_support(self):
    """Return the full support of a one-hot categorical distribution.

    The result has shape ``(n,) + batch_shape + (n,)`` where each leading
    index selects one one-hot vector of length ``n``.
    """
    probs = self._categorical.probs
    n = self.event_shape[0]
    # Legacy-torch compatibility: wrap in Variable when probs is a Variable.
    if isinstance(probs, Variable):
        eye = Variable(torch.eye(n, out=probs.data.new(n, n)))
    else:
        eye = torch.eye(n, out=probs.new(n, n))
    broadcast_shape = (n,) + (1,) * len(self.batch_shape) + (n,)
    return eye.view(broadcast_shape).expand((n,) + self.batch_shape + (n,))
示例5: test_forward
def test_forward(self):
    """With identity projections, each head's similarity is the dot product
    of the corresponding 2-dim slices of the inputs."""
    # pylint: disable=protected-access
    sim = MultiHeadedSimilarity(num_heads=3, tensor_1_dim=6)
    sim._tensor_1_projection = Parameter(torch.eye(6))
    sim._tensor_2_projection = Parameter(torch.eye(6))
    left = Variable(torch.FloatTensor([[[[1, 1, -1, -1, 0, 1], [-2, 5, 9, -1, 3, 4]]]]))
    right = Variable(torch.FloatTensor([[[[1, 1, 1, 0, 2, 5], [0, 1, -1, -7, 1, 2]]]]))
    output = sim(left, right).data.numpy()
    assert output.shape == (1, 1, 2, 3)
    assert_almost_equal(output, [[[[2, -1, 5], [5, -2, 11]]]])
示例6: btriunpack
def btriunpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True):
    r"""Unpacks the data and pivots from a batched LU factorization (btrifact) of a tensor.

    Returns a tuple indexed by:
      0: The pivots.
      1: The L tensor.
      2: The U tensor.

    Arguments:
        LU_data (Tensor): the packed LU factorization data
        LU_pivots (Tensor): the packed LU factorization pivots (1-indexed, LAPACK style)
        unpack_data (bool): flag indicating if the data should be unpacked
        unpack_pivots (bool): flag indicating if the pivots should be unpacked

    Example::

        >>> A = torch.randn(2, 3, 3)
        >>> A_LU, pivots = A.btrifact()
        >>> P, a_L, a_U = torch.btriunpack(A_LU, pivots)
        >>>
        >>> # test that (P, A_L, A_U) gives LU factorization
        >>> A_ = torch.bmm(P, torch.bmm(A_L, A_U))
        >>> assert torch.equal(A_, A) == True  # can recover A
    """
    nBatch, sz, _ = LU_data.size()

    if unpack_data:
        # FIX: use boolean masks instead of uint8 — indexing with uint8
        # tensors is deprecated in modern torch.
        I_U = torch.triu(torch.ones(sz, sz, dtype=torch.bool,
                                    device=LU_data.device))
        I_U = I_U.unsqueeze(0).expand(nBatch, sz, sz)
        I_L = ~I_U  # strictly lower-triangular part (was `1 - I_U`)
        L = torch.zeros_like(LU_data)
        U = torch.zeros_like(LU_data)
        I_diag = torch.eye(sz, dtype=torch.bool, device=LU_data.device)
        I_diag = I_diag.unsqueeze(0).expand(nBatch, sz, sz)
        L[I_diag] = 1.0  # L has a unit diagonal by convention
        L[I_L] = LU_data[I_L]
        U[I_U] = LU_data[I_U]
    else:
        L = U = None

    if unpack_pivots:
        P = torch.eye(sz).type_as(LU_data).unsqueeze(0).repeat(nBatch, 1, 1)
        for i in range(nBatch):
            for j in range(sz):
                # LAPACK pivots are 1-indexed: row j was swapped with row k,
                # so swap the corresponding columns of the permutation matrix.
                k = int(LU_pivots[i, j]) - 1
                t = P[i, :, j].clone()
                P[i, :, j] = P[i, :, k]
                P[i, :, k] = t
    else:
        P = None

    return P, L, U
示例7: read_test
def read_test(memory):
    """Smoke-test a memory module's read interface with a ones key and a
    one-hot strength vector, printing the inputs and the read result.

    NOTE(review): ``T`` (torch alias), ``M_DIM`` and ``Kr`` are module-level
    globals defined elsewhere in this file.
    """
    print("Memory Reading Test: ")
    key = T.ones(1, M_DIM * Kr)
    strength = T.eye(Kr)[0].view(1, -1)
    print("k tensor: ", key)
    print("b tensor: ", strength)
    print(memory.read(key, strength))
示例8: __init__
def __init__(self, X, y, kernel, Xu, likelihood, mean_function=None,
             latent_shape=None, num_data=None, whiten=False, jitter=1e-6,
             name="SVGP"):
    """Sparse variational GP model.

    :param Xu: initial inducing inputs; registered as a learnable Parameter.
    :param likelihood: likelihood module mapping (f_loc, f_var, y) to a score.
    :param latent_shape: batch shape of the latent function; defaults to the
        batch shape of ``y`` (all dims but the last).
    :param num_data: full data-set size used to rescale mini-batch
        likelihoods; defaults to ``X.shape[0]``.
    :param whiten: if True, use the whitened parameterization of ``u``.
    """
    super(VariationalSparseGP, self).__init__(X, y, kernel, mean_function, jitter,
                                              name)
    self.likelihood = likelihood
    self.num_data = num_data if num_data is not None else self.X.shape[0]
    self.whiten = whiten
    self.Xu = Parameter(Xu)

    # Default the latent shape to y's batch shape (empty when y is absent).
    y_batch_shape = self.y.shape[:-1] if self.y is not None else torch.Size([])
    self.latent_shape = latent_shape if latent_shape is not None else y_batch_shape

    # Variational parameters of u ~ N(u_loc, u_scale_tril @ u_scale_tril.T),
    # initialized to a standard normal (zero mean, identity scale).
    M = self.Xu.shape[0]
    u_loc_shape = self.latent_shape + (M,)
    u_loc = self.Xu.new_zeros(u_loc_shape)
    self.u_loc = Parameter(u_loc)
    u_scale_tril_shape = self.latent_shape + (M, M)
    Id = torch.eye(M, out=self.Xu.new_empty(M, M))
    u_scale_tril = Id.expand(u_scale_tril_shape)
    self.u_scale_tril = Parameter(u_scale_tril)
    # Constrain the scale to remain a valid lower-Cholesky factor.
    self.set_constraint("u_scale_tril", constraints.lower_cholesky)
    self._sample_latent = True
示例9: create_input
def create_input(points, sigma2):
    """Build the (OP, x, Y) graph-network input from a batch of point clouds.

    Stacks four N x N operators per sample into ``OP``: identity, Gaussian
    affinity, diagonal degree, and uniform averaging. Also returns the
    pairwise distance matrix.

    NOTE(review): ``dtype``, ``dim`` and ``normalize`` are module-level
    globals defined elsewhere in this file — confirm before reuse.
    """
    bs, N, _ = points.size()  # points has size bs,N,2
    OP = torch.zeros(bs, N, N, 4).type(dtype)
    # Operator 0: identity.
    E = torch.eye(N).type(dtype).unsqueeze(0).expand(bs, N, N)
    OP[:, :, :, 0] = E
    # Operator 1: Gaussian affinity W = exp(-||xi - xj||^2 / sigma2).
    W = points.unsqueeze(1).expand(bs, N, N, dim) - points.unsqueeze(2).expand(bs, N, N, dim)
    dists2 = (W * W).sum(3)
    dists = torch.sqrt(dists2)
    W = torch.exp(-dists2 / sigma2)
    OP[:, :, :, 1] = W
    # Operator 2: diagonal degree matrix of the affinity graph.
    D = E * W.sum(2, True).expand(bs, N, N)
    OP[:, :, :, 2] = D
    # Operator 3: uniform averaging over all N points.
    U = (torch.ones(N, N).type(dtype) / N).unsqueeze(0).expand(bs, N, N)
    OP[:, :, :, 3] = U
    OP = Variable(OP)
    x = Variable(points)
    Y = Variable(W.clone())
    # Normalize inputs: center per sample and scale by 10x the variance.
    if normalize:
        mu = x.sum(1) / N
        mu_ext = mu.unsqueeze(1).expand_as(x)
        var = ((x - mu_ext) * (x - mu_ext)).sum(1) / N
        var_ext = var.unsqueeze(1).expand_as(x)
        x = x - mu_ext
        x = x / (10 * var_ext)
    return (OP, x, Y), dists
示例10: calculate_distance_term
def calculate_distance_term(means, n_objects, delta_d, norm=2, usegpu=True):
    """means: bs, n_instances, n_filters

    Hinged push-apart term of the discriminative loss: penalizes pairs of
    instance means in each sample that are closer than ``2 * delta_d``.
    """
    bs, n_instances, n_filters = means.size()

    dist_term = 0.0
    for sample_idx in range(bs):
        count = n_objects[sample_idx]
        if count <= 1:
            # A single instance has no pairwise distances to penalize.
            continue

        sample_means = means[sample_idx, :count, :]  # count x n_filters
        expanded_a = sample_means.unsqueeze(1).expand(count, count, n_filters)
        expanded_b = expanded_a.permute(1, 0, 2)
        pairwise_diff = expanded_a - expanded_b  # count x count x n_filters
        pairwise_norm = torch.norm(pairwise_diff, norm, 2)

        # Zero margin on the diagonal so self-distances contribute nothing.
        margin = 2 * delta_d * (1.0 - torch.eye(count))
        if usegpu:
            margin = margin.cuda()
        margin = Variable(margin)

        hinge_sq = torch.clamp(margin - pairwise_norm, min=0.0) ** 2
        dist_term += torch.sum(hinge_sq) / (count * (count - 1))

    return dist_term / bs
示例11: _compute_logdet_and_mahalanobis
def _compute_logdet_and_mahalanobis(self, D, W, y, trace_term=0):
    """
    Calculates log determinant and (squared) Mahalanobis term of covariance
    matrix ``(D + Wt.W)``, where ``D`` is a diagonal matrix, based on the
    "Woodbury matrix identity" and "matrix determinant lemma"::

        inv(D + Wt.W) = inv(D) - inv(D).Wt.inv(I + W.inv(D).Wt).W.inv(D)
        log|D + Wt.W| = log|Id + Wt.inv(D).W| + log|D|
    """
    W_Dinv = W / D  # W.inv(D), exploiting D being diagonal
    M = W.shape[0]
    Id = torch.eye(M, M, out=W.new_empty(M, M))
    # Capacitance matrix K = I + W.inv(D).Wt (small M x M system).
    K = Id + W_Dinv.matmul(W.t())
    # NOTE(review): pre-1.0 torch API; modern torch uses torch.linalg.cholesky.
    L = K.potrf(upper=False)
    if y.dim() == 1:
        W_Dinv_y = W_Dinv.matmul(y)
    elif y.dim() == 2:
        W_Dinv_y = W_Dinv.matmul(y.t())
    else:
        raise NotImplementedError("SparseMultivariateNormal distribution does not support "
                                  "computing log_prob for a tensor with more than 2 dimensionals.")
    # Apply inv(K) to W.inv(D).y via a triangular solve against L.
    Linv_W_Dinv_y = matrix_triangular_solve_compat(W_Dinv_y, L, upper=False)
    if y.dim() == 2:
        Linv_W_Dinv_y = Linv_W_Dinv_y.t()

    # log|K| = 2 * sum(log diag(L)); log|D| = sum(log D) since D is diagonal.
    logdet = 2 * L.diag().log().sum() + D.log().sum()

    # Mahalanobis term split per the Woodbury identity:
    # yt.inv(D).y  -  ||inv(L).W.inv(D).y||^2  (+ optional trace correction).
    mahalanobis1 = (y * y / D).sum(-1)
    mahalanobis2 = (Linv_W_Dinv_y * Linv_W_Dinv_y).sum(-1)
    mahalanobis_squared = mahalanobis1 - mahalanobis2 + trace_term

    return logdet, mahalanobis_squared
示例12: test_forward_backward
def test_forward_backward(self):
    """Forward/backward check of OIMLoss with an identity lookup table.

    With ``lut`` set to the identity and ``scalar=1``, the assertions pin
    that the loss equals ``-sum(diag(log softmax(x)))`` and that the input
    gradient equals ``softmax(x) - I``.
    """
    import torch
    import torch.nn.functional as F
    from torch.autograd import Variable
    from reid.loss import OIMLoss
    criterion = OIMLoss(3, 3, scalar=1.0, size_average=False)
    criterion.lut = torch.eye(3)
    x = Variable(torch.randn(3, 3), requires_grad=True)
    # NOTE(review): torch.range is deprecated — torch.arange (end-exclusive)
    # is the modern replacement.
    y = Variable(torch.range(0, 2).long())
    loss = criterion(x, y)
    loss.backward()
    probs = F.softmax(x)
    # Expected gradient of cross-entropy w.r.t. logits: softmax(x) - one_hot(y).
    grads = probs.data - torch.eye(3)
    abs_diff = torch.abs(grads - x.grad.data)
    self.assertEquals(torch.log(probs).diag().sum(), -loss)
    self.assertTrue(torch.max(abs_diff) < 1e-6)
示例13: torch_eye
def torch_eye(n, m=None, out=None):
    """
    Like `torch.eye()`, but works with cuda tensors.

    :param int n: number of rows.
    :param int m: number of columns; defaults to ``n`` (square identity).
    :param out: optional output tensor (possibly a cuda tensor).
    :returns: an ``n x m`` identity-like matrix.
    """
    if m is None:
        m = n
    try:
        return torch.eye(n, m, out=out)
    except TypeError:
        # Only catch errors due to torch.eye() not being available for cuda tensors.
        module = torch.Tensor.__module__ if out is None else type(out).__module__
        if module != 'torch.cuda':
            raise
        # Build on CPU, then move to the device of `out`.
        Tensor = getattr(torch, torch.Tensor.__name__)
        cpu_out = Tensor(n, m)
        # BUG FIX: the arguments were swapped (`torch.eye(m, n, ...)`), which
        # produced a transposed (m x n) result for non-square requests.
        cuda_out = torch.eye(n, m, out=cpu_out).cuda()
        return cuda_out if out is None else out.copy_(cuda_out)
示例14: get_cat_mapping
def get_cat_mapping(model: infogan.InfoGAN, data_loader: DataLoader):
    """Map each categorical latent code (0-9) to the digit label it most
    often co-occurs with, by accumulating a 10x10 confusion matrix over
    the data loader and taking the argmax per code."""
    one_hot = torch.eye(10)
    confusion = torch.zeros(10, 10)
    for batch, targets in data_loader:
        images = batch.to(model.device).unsqueeze(1).float() / 255.
        cat_logits = model.rec(model.dis(images)[1])[0]
        predicted_codes = cat_logits.cpu().argmax(1)
        # Rows: true labels; columns: predicted categorical codes.
        confusion += one_hot[targets.long()].t() @ one_hot[predicted_codes]
    return confusion.argmax(0).numpy()
示例15: check
def check(self, value):
    """Validate a batch of matrices as lower-Cholesky factors: each must be
    lower triangular with a strictly positive diagonal."""
    n = value.size(-1)
    # Lower-triangular iff the matrix is unchanged by zeroing its upper part.
    matches_tril = (batch_tril(value) == value).view(value.shape[:-2] + (-1,))
    lower_triangular = matches_tril.min(-1)[0]
    # Mask out off-diagonal entries, then require every diagonal entry > 0
    # (off-diagonal products are 0 > -1, so they never fail the test).
    diag_mask = torch.eye(n, n, out=value.new(n, n))
    positive_diagonal = (value * diag_mask > (diag_mask - 1)).min(-1)[0].min(-1)[0]
    return lower_triangular & positive_diagonal