This article collects typical usage examples of the torch.tan method in Python. If you are unsure what torch.tan does or how to call it, the curated code examples below may help. You can also explore further usage examples from the torch module, where this method is defined.
The following presents 13 code examples of torch.tan, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
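Before the collected examples, here is a minimal, self-contained sketch of calling torch.tan directly; angles are given in radians, and values near odd multiples of pi/2 diverge, as expected for the tangent:

import torch

# element-wise tangent of a few angles, in radians
x = torch.tensor([0.0, 0.7854, -0.7854])  # roughly 0, +pi/4, -pi/4
y = torch.tan(x)                           # approximately [0.0, 1.0, -1.0]
print(y)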
Example 1: inv_vecs_Xg_ig
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def inv_vecs_Xg_ig(x):
    """ H = inv(vecs_Xg_ig(x)) """
    t = x.view(-1, 3).norm(p=2, dim=1).view(-1, 1, 1)
    X = mat(x)
    S = X.bmm(X)
    I = torch.eye(3).to(x)

    # Switch between a Taylor expansion (small angles) and the closed form
    e = 0.01
    eta = torch.zeros_like(t)
    s = (t < e)
    c = (s == 0)
    t2 = t[s] ** 2
    eta[s] = ((t2/40 + 1)*t2/42 + 1)*t2/720 + 1/12  # O(t**8)
    eta[c] = (1 - (t[c]/2) / torch.tan(t[c]/2)) / (t[c]**2)

    H = I - 1/2*X + eta*S
    return H.view(*(x.size()[0:-1]), 3, 3)
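A hedged usage sketch for inv_vecs_Xg_ig above. The snippet relies on a mat(x) helper from the surrounding library (presumably the skew-symmetric "hat" operator); the stand-in below is a hypothetical illustration of that assumption, not the library's actual implementation:

import torch

def mat(x):
    # hypothetical stand-in for the library's hat operator: (N, 3) -> (N, 3, 3) skew-symmetric matrices
    x = x.view(-1, 3)
    zero = torch.zeros_like(x[:, 0])
    return torch.stack([
        torch.stack([zero, -x[:, 2], x[:, 1]], dim=1),
        torch.stack([x[:, 2], zero, -x[:, 0]], dim=1),
        torch.stack([-x[:, 1], x[:, 0], zero], dim=1),
    ], dim=1)

w = 0.3 * torch.randn(4, 3)   # a small batch of axis-angle vectors
H = inv_vecs_Xg_ig(w)         # batch of 3x3 matrices, shape (4, 3, 3)
print(H.shape)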
Example 2: forward
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def forward(self, input):
    self.batchgrid = torch.zeros(
        torch.Size([input.size(0)]) + self.grid.size())
    # print(self.batchgrid.size())
    for i in range(input.size(0)):
        self.batchgrid[i, :, :, :] = self.grid
    self.batchgrid = Variable(self.batchgrid)
    # print(self.batchgrid.size())
    input_u = input.view(-1, 1, 1, 1).repeat(1, self.height, self.width, 1)
    # print(input_u.requires_grad, self.batchgrid)
    output0 = self.batchgrid[:, :, :, 0:1]
    output1 = torch.atan(torch.tan(
        np.pi / 2.0 * (self.batchgrid[:, :, :, 1:2]
                       + self.batchgrid[:, :, :, 2:] * input_u[:, :, :, :]))) / (np.pi / 2)
    # print(output0.size(), output1.size())
    output = torch.cat([output0, output1], 3)
    return output
Example 3: perspective
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def perspective(vertices, angle=30.):
    '''
    Compute perspective distortion from a given angle
    '''
    if vertices.ndimension() != 3:
        raise ValueError('vertices Tensor should have 3 dimensions')
    device = vertices.device
    angle = torch.tensor(angle / 180 * math.pi, dtype=torch.float32, device=device)
    angle = angle[None]
    width = torch.tan(angle)
    width = width[:, None]
    z = vertices[:, :, 2]
    x = vertices[:, :, 0] / z / width
    y = vertices[:, :, 1] / z / width
    vertices = torch.stack((x, y, z), dim=2)
    return vertices
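A minimal usage sketch for the perspective function above (it assumes `import math` and `import torch`, as in the source module; the vertex coordinates are invented for illustration):

import math
import torch

# one mesh with 4 vertices in camera space (x, y, z), all in front of the camera (z > 0)
vertices = torch.tensor([[[ 0.5,  0.5, 2.0],
                          [-0.5,  0.5, 2.0],
                          [-0.5, -0.5, 2.0],
                          [ 0.5, -0.5, 2.0]]])
projected = perspective(vertices, angle=30.)
print(projected.shape)  # torch.Size([1, 4, 3])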
Example 4: tan
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def tan(x, out=None):
    """
    Compute tangent element-wise.

    Equivalent to ht.sin(x) / ht.cos(x) element-wise.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the trigonometric tangent.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape.
        If not provided or set to None, a fresh tensor is allocated.

    Returns
    -------
    tangent : ht.DNDarray
        A tensor of the same shape as x, containing the trigonometric tangent of each element in this tensor.

    Examples
    --------
    >>> ht.tan(ht.arange(-6, 7, 2))
    tensor([ 0.29100619, -1.15782128,  2.18503986,  0., -2.18503986,  1.15782128, -0.29100619])
    """
    return local_op(torch.tan, x, out)
Example 5: forward
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def forward(self, input):
    self.batchgrid = torch.zeros(torch.Size([input.size(0)]) + self.grid.size())
    # print(self.batchgrid.size())
    for i in range(input.size(0)):
        self.batchgrid[i, :, :, :] = self.grid
    self.batchgrid = Variable(self.batchgrid)
    # print(self.batchgrid.size())
    input_u = input.view(-1, 1, 1, 1).repeat(1, self.height, self.width, 1)
    # print(input_u.requires_grad, self.batchgrid)
    output0 = self.batchgrid[:, :, :, 0:1]
    output1 = torch.atan(torch.tan(
        np.pi / 2.0 * (self.batchgrid[:, :, :, 1:2] + self.batchgrid[:, :, :, 2:] * input_u[:, :, :, :]))) / (np.pi / 2)
    # print(output0.size(), output1.size())
    output = torch.cat([output0, output1], 3)
    return output
Example 6: SphericalClassToDirs
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def SphericalClassToDirs(x_cls, y_cls, cls_num):
    theta = (x_cls.float() + 0.5) / cls_num * 180 - 90
    phi = (y_cls.float() + 0.5) / cls_num * 180 - 90
    neg_x = theta < 0
    neg_y = phi < 0
    theta = theta.clamp(-90, 90) / 180.0 * np.pi
    phi = phi.clamp(-90, 90) / 180.0 * np.pi
    tan2_phi = pow(torch.tan(phi), 2)
    tan2_theta = pow(torch.tan(theta), 2)
    y = torch.sqrt(tan2_phi / (1 + tan2_phi))
    y[neg_y] = y[neg_y] * -1
    # y = torch.sin(phi)
    z = torch.sqrt((1 - y * y) / (1 + tan2_theta))
    x = z * torch.tan(theta)
    dirs = torch.stack([x, y, z], 1)
    dirs = dirs / dirs.norm(p=2, dim=1, keepdim=True)
    return dirs
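A hedged usage sketch for SphericalClassToDirs above, mapping discretized angle classes back to unit direction vectors; the class indices and cls_num below are made up for illustration, and np is assumed to be NumPy as in the original snippet:

import numpy as np
import torch

x_cls = torch.tensor([0, 8, 15])   # hypothetical horizontal-angle class indices
y_cls = torch.tensor([3, 8, 12])   # hypothetical vertical-angle class indices
dirs = SphericalClassToDirs(x_cls, y_cls, cls_num=16)
print(dirs)                        # three rows, each a unit-norm (x, y, z) direction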
Example 7: test_angular_loss
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def test_angular_loss(self):
    loss_func = AngularLoss(alpha=40)
    embedding_angles = [0, 20, 40, 60, 80]
    embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles],
                              requires_grad=True, dtype=torch.float)  # 2D embeddings
    labels = torch.LongTensor([0, 0, 1, 1, 2])
    loss = loss_func(embeddings, labels)
    loss.backward()

    sq_tan_alpha = torch.tan(torch.tensor(np.radians(40))) ** 2
    triplets = [(0, 1, 2), (0, 1, 3), (0, 1, 4), (1, 0, 2), (1, 0, 3), (1, 0, 4),
                (2, 3, 0), (2, 3, 1), (2, 3, 4), (3, 2, 0), (3, 2, 1), (3, 2, 4)]
    correct_losses = [0, 0, 0, 0]
    for a, p, n in triplets:
        anchor, positive, negative = embeddings[a], embeddings[p], embeddings[n]
        exponent = (4 * sq_tan_alpha * torch.matmul(anchor + positive, negative)
                    - 2 * (1 + sq_tan_alpha) * torch.matmul(anchor, positive))
        correct_losses[a] += torch.exp(exponent)
    total_loss = 0
    for c in correct_losses:
        total_loss += torch.log(1 + c)
    total_loss /= len(correct_losses)
    self.assertTrue(torch.isclose(loss, total_loss.to(torch.float32)))
Example 8: inverse
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def inverse(self, inputs, context=None):
    if torch.min(inputs) < 0 or torch.max(inputs) > 1:
        raise transforms.InputOutsideDomain()
    outputs = torch.tan(np.pi * (inputs - 0.5))
    logabsdet = -utils.sum_except_batch(
        -np.log(np.pi) - torch.log(1 + outputs ** 2)
    )
    return outputs, logabsdet
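The inverse above appears to be the quantile side of a Cauchy-CDF-style squashing transform: torch.tan(np.pi * (inputs - 0.5)) maps (0, 1) onto the real line, and logabsdet accumulates the log-derivative of that map. A self-contained round-trip sketch, independent of the transforms/utils helpers used above:

import numpy as np
import torch

u = torch.rand(5)                            # values in (0, 1)
x = torch.tan(np.pi * (u - 0.5))             # unbounded values (Cauchy quantile)
u_back = torch.atan(x) / np.pi + 0.5         # maps back into (0, 1)
print(torch.allclose(u, u_back, atol=1e-5))  # True, up to floating-point error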
Example 9: fov_weights
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def fov_weights(coords, horizontal_fov=69.4, vertical_fov=42.5):
    half_horizontal_fov = torch.tensor(horizontal_fov / 2)
    half_vertical_fov = torch.tensor(vertical_fov / 2)
    _, __, h, w = coords.size()
    horizontal_center = w / 2
    vertical_center = h / 2
    u_d = (coords[:, 0, :, :] - horizontal_center) / (w / 2) + 1e-8
    v_d = (coords[:, 1, :, :] - vertical_center) / (h / 2) + 1e-8
    r_d = torch.sqrt(u_d * u_d + v_d * v_d)
    r_u_ud = torch.tan(r_d * torch.tan(half_horizontal_fov)) / torch.tan(half_horizontal_fov)
    r_v_ud = torch.tan(r_d * torch.tan(half_vertical_fov)) / torch.tan(half_vertical_fov)
    r_ud = torch.sqrt(r_u_ud * r_u_ud + r_v_ud * r_v_ud)
    dist = (r_d / r_ud).unsqueeze(1)
    dist = torch.abs((dist - torch.mean(dist)) / torch.std(dist))
    return torch.exp(dist * (dist < 3 * torch.std(dist)).type(dist.dtype))
Example 10: aten_tan
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def aten_tan(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.TAN)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError
    return [torch.tan(inp)]
Example 11: tan
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def tan(t):
    """
    Element-wise tangent computed using cross-approximation; see PyTorch's `tan()`.

    :param t: input :class:`Tensor`
    :return: a :class:`Tensor`
    """
    return tn.cross(lambda x: torch.tan(x), tensors=t, verbose=False)
Example 12: compute_loss
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def compute_loss(self, embeddings, labels, indices_tuple):
    anchors, positives, keep_mask, anchor_idx = self.set_stats_get_pairs(embeddings, labels, indices_tuple)
    if anchors is None:
        return self.zero_losses()
    sq_tan_alpha = torch.tan(self.alpha) ** 2
    ap_dot = torch.sum(anchors * positives, dim=1, keepdim=True)
    ap_matmul_embeddings = torch.matmul((anchors + positives), (embeddings.unsqueeze(2)))
    ap_matmul_embeddings = ap_matmul_embeddings.squeeze(2).t()
    final_form = (4 * sq_tan_alpha * ap_matmul_embeddings) - (2 * (1 + sq_tan_alpha) * ap_dot)
    final_form = self.maybe_modify_loss(final_form)
    losses = lmu.logsumexp(final_form, keep_mask=keep_mask, add_one=True)
    return {"loss": {"losses": losses, "indices": anchor_idx, "reduction_type": "element"}}
Example 13: __getitem__
# Required import: import torch [as alias]
# Or: from torch import tan [as alias]
def __getitem__(self, idx):
    """
    Get item
    :param idx:
    :return:
    """
    # History
    history = collections.deque(1.2 * torch.ones(self.history_len) + 0.2 * (torch.rand(self.history_len) - 0.5))

    # Preallocate tensor for the time series
    inp = torch.zeros(self.sample_len, 1)

    # For each time step
    for timestep in range(self.sample_len):
        for _ in range(self.delta_t):
            xtau = history.popleft()
            history.append(self.timeseries)
            self.timeseries = history[-1] + (0.2 * xtau / (1.0 + xtau ** 10) - 0.1 * history[-1]) / self.delta_t
        # end for
        inp[timestep] = self.timeseries
    # end for

    # Inputs: squash the time series through torch.tan
    inputs = torch.tan(inp - 1)

    # Return (input, target) pairs shifted by one step
    return inputs[:-1], inputs[1:]
# end __getitem__
# end MackeyGlassDataset