This article collects typical usage examples of torch.asin in Python. If you are wondering what torch.asin does and how to use it, the curated code examples below may help. You can also explore further usage examples from the torch module, where this method is defined.
The following 10 code examples of torch.asin are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
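As a quick orientation before the examples: torch.asin applies the arcsine element-wise and returns NaN for inputs outside [-1, 1]. A minimal sketch (input values chosen purely for illustration):
import torch

x = torch.tensor([-1.0, 0.0, 0.5, 2.0])
y = torch.asin(x)
print(y)               # ≈ tensor([-1.5708,  0.0000,  0.5236,     nan])
print(torch.isnan(y))  # the out-of-range element 2.0 maps to NaN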
Example 1: arcsin
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def arcsin(x, out=None):
    """
    Return the trigonometric arcsine, element-wise.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the trigonometric arcsine.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
        or set to None, a fresh tensor is allocated.

    Returns
    -------
    arcsin : ht.DNDarray
        A tensor of the same shape as x, containing the trigonometric arcsine of each element in this tensor.
        Input elements outside [-1., 1.] are returned as nan. If out was provided, arcsin is a reference to it.

    Examples
    --------
    >>> ht.arcsin(ht.array([-1., -0., 0.83]))
    tensor([-1.5708,  0.0000,  0.9791])
    """
    return local_op(torch.asin, x, out)
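The out parameter of this wrapper mirrors torch.asin's own out keyword; a torch-level sketch of that pattern (buffer name and values are illustrative):
import torch

x = torch.tensor([-1.0, 0.0, 0.83])
buf = torch.empty_like(x)
torch.asin(x, out=buf)   # result is written into the preallocated buffer
print(buf)               # ≈ tensor([-1.5708, 0.0000, 0.9791]), matching the docstring example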
Example 2: test_with_terminate_on_inf
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def test_with_terminate_on_inf():
    torch.manual_seed(12)
    data = [
        1.0,
        0.8,
        torch.rand(4, 4),
        (1.0 / torch.randint(0, 2, size=(4,)).type(torch.float), torch.tensor(1.234)),
        torch.rand(5),
        torch.asin(torch.randn(4, 4)),
        0.0,
        1.0,
    ]

    def update_fn(engine, batch):
        return batch

    trainer = Engine(update_fn)
    h = TerminateOnNan()
    trainer.add_event_handler(Events.ITERATION_COMPLETED, h)
    trainer.run(data, max_epochs=2)
    assert trainer.state.iteration == 4
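With this seed the run presumably stops at iteration 4 because the fourth batch divides by a randint(0, 2) draw that can contain a zero, producing inf, and TerminateOnNan terminates on infinite values as well as NaN. A standalone sketch of just that mechanism (it does not reproduce the exact RNG state of the test):
import torch

denom = torch.randint(0, 2, size=(4,)).type(torch.float)
batch = 1.0 / denom                 # any zero in denom becomes inf
print(torch.isinf(batch).any())     # an output containing inf triggers TerminateOnNan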
Example 3: __init__
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def __init__(self, size, complex=False, ABCD=None, ortho_init=False):
    """
    Parameters:
        size: size of butterfly matrix
        complex: real or complex matrix
        ABCD: block of [[A, B], [C, D]], of shape (2, 2, size//2) if real or (2, 2, size//2, 2) if complex
        ortho_init: whether the twiddle factors are initialized to be orthogonal (real) or unitary (complex)
    """
    super().__init__()
    assert size % 2 == 0, 'size must be even'
    self.size = size
    self.complex = complex
    self.mul_op = complex_mul if complex else operator.mul
    ABCD_shape = (2, 2, size // 2) if not complex else (2, 2, size // 2, 2)
    scaling = 1.0 / 2 if complex else 1.0 / math.sqrt(2)
    if ABCD is None:
        if not ortho_init:
            self.ABCD = nn.Parameter(torch.randn(ABCD_shape) * scaling)
        else:
            if not complex:
                theta = torch.rand(size // 2) * math.pi * 2
                c, s = torch.cos(theta), torch.sin(theta)
                det = torch.randint(0, 2, (size // 2, ), dtype=c.dtype) * 2 - 1  # Rotation (+1) or reflection (-1)
                self.ABCD = nn.Parameter(torch.stack((torch.stack((det * c, -det * s)),
                                                      torch.stack((s, c)))))
            else:
                # Sampling from the Haar measure on U(2) is a bit subtle.
                # Using the parameterization here: http://home.lu.lv/~sd20008/papers/essays/Random%20unitary%20[paper].pdf
                phi = torch.asin(torch.sqrt(torch.rand(size // 2)))
                c, s = torch.cos(phi), torch.sin(phi)
                alpha, psi, chi = torch.randn(3, size // 2) * math.pi * 2
                A = torch.stack((c * torch.cos(alpha + psi), c * torch.sin(alpha + psi)), dim=-1)
                B = torch.stack((s * torch.cos(alpha + chi), s * torch.sin(alpha + chi)), dim=-1)
                C = torch.stack((-s * torch.cos(alpha - chi), -s * torch.sin(alpha - chi)), dim=-1)
                D = torch.stack((c * torch.cos(alpha - psi), c * torch.sin(alpha - psi)), dim=-1)
                self.ABCD = nn.Parameter(torch.stack((torch.stack((A, B)),
                                                      torch.stack((C, D)))))
    else:
        assert ABCD.shape == ABCD_shape, f'ABCD must have shape {ABCD_shape}'
        self.ABCD = ABCD
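The Haar-measure parameterization above can be sanity-checked numerically: each sampled (phi, alpha, psi, chi) should yield a unitary 2x2 matrix [[A, B], [C, D]]. A standalone sketch of that check, not part of the original class and assuming a PyTorch build with complex tensor support:
import math
import torch

phi = torch.asin(torch.sqrt(torch.rand(())))
c, s = torch.cos(phi), torch.sin(phi)
alpha, psi, chi = torch.rand(3) * math.pi * 2
row0 = torch.stack(( c * torch.exp(1j * (alpha + psi)),  s * torch.exp(1j * (alpha + chi))))
row1 = torch.stack((-s * torch.exp(1j * (alpha - chi)),  c * torch.exp(1j * (alpha - psi))))
U = torch.stack((row0, row1))
# U @ U^H should be the identity: |A|^2 + |B|^2 = cos^2(phi) + sin^2(phi) = 1, and the off-diagonal terms cancel.
print(torch.allclose(U @ U.conj().T, torch.eye(2, dtype=U.dtype), atol=1e-5))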
Example 4: __init__
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def __init__(self, in_size, out_size, bias=True, complex=False, tied_weight=True, increasing_stride=True, ortho_init=False):
    super().__init__()
    self.in_size = in_size
    m = int(math.ceil(math.log2(in_size)))
    size = self.in_size_extended = 1 << m  # Will zero-pad input if in_size is not a power of 2
    self.out_size = out_size
    self.nstack = int(math.ceil(out_size / self.in_size_extended))
    self.complex = complex
    self.tied_weight = tied_weight
    self.increasing_stride = increasing_stride
    self.ortho_init = ortho_init
    twiddle_core_shape = (self.nstack, size - 1) if tied_weight else (self.nstack, m, size // 2)
    if not ortho_init:
        twiddle_shape = twiddle_core_shape + ((2, 2) if not complex else (2, 2, 2))
        scaling = 1.0 / 2 if complex else 1.0 / math.sqrt(2)
        self.twiddle = nn.Parameter(torch.randn(twiddle_shape) * scaling)
    else:
        if not complex:
            theta = torch.rand(twiddle_core_shape) * math.pi * 2
            c, s = torch.cos(theta), torch.sin(theta)
            det = torch.randint(0, 2, twiddle_core_shape, dtype=c.dtype) * 2 - 1  # Rotation (+1) or reflection (-1)
            self.twiddle = nn.Parameter(torch.stack((torch.stack((det * c, -det * s), dim=-1),
                                                     torch.stack((s, c), dim=-1)), dim=-1))
        else:
            # Sampling from the Haar measure on U(2) is a bit subtle.
            # Using the parameterization here: http://home.lu.lv/~sd20008/papers/essays/Random%20unitary%20[paper].pdf
            phi = torch.asin(torch.sqrt(torch.rand(twiddle_core_shape)))
            c, s = torch.cos(phi), torch.sin(phi)
            alpha, psi, chi = torch.randn((3, ) + twiddle_core_shape) * math.pi * 2
            A = torch.stack((c * torch.cos(alpha + psi), c * torch.sin(alpha + psi)), dim=-1)
            B = torch.stack((s * torch.cos(alpha + chi), s * torch.sin(alpha + chi)), dim=-1)
            C = torch.stack((-s * torch.cos(alpha - chi), -s * torch.sin(alpha - chi)), dim=-1)
            D = torch.stack((c * torch.cos(alpha - psi), c * torch.sin(alpha - psi)), dim=-1)
            self.twiddle = nn.Parameter(torch.stack((torch.stack((A, B), dim=-2),
                                                     torch.stack((C, D), dim=-2)), dim=-2))
    if bias:
        bias_shape = (out_size, ) if not complex else (out_size, 2)
        self.bias = nn.Parameter(torch.Tensor(bias_shape))
    else:
        self.register_parameter('bias', None)
    self.reset_parameters()
Example 5: quan_to_angle
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def quan_to_angle(qw, qx, qy, qz):
    rx = torch.atan2(2. * (qw * qx + qy * qz), 1. - 2. * (qx * qx + qy * qy))
    sinp = 2. * (qw * qy - qz * qx)
    sinp = sinp.clamp(-1., 1.)
    ry = torch.asin(sinp)
    rz = torch.atan2(2. * (qw * qz + qx * qy), 1. - 2. * (qy * qy + qz * qz))
    return rx, ry, rz
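A brief usage sketch for quan_to_angle with a hand-built quaternion for a 90-degree rotation about the y-axis (illustrative values; assumes the function above is in scope):
import math
import torch

half = math.radians(90.0) / 2
qw, qx, qy, qz = (torch.tensor(v) for v in (math.cos(half), 0.0, math.sin(half), 0.0))
rx, ry, rz = quan_to_angle(qw, qx, qy, qz)
print(rx.item(), ry.item(), rz.item())   # ≈ 0.0, 1.5708 (π/2 pitch), 0.0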
Example 6: aten_asin
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def aten_asin(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.ASIN)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError

    return [torch.asin(inp)]
Example 7: test_with_terminate_on_nan
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def test_with_terminate_on_nan():
    torch.manual_seed(12)
    data = [1.0, 0.8, (torch.rand(4, 4), torch.rand(4, 4)), torch.rand(5), torch.asin(torch.randn(4, 4)), 0.0, 1.0]

    def update_fn(engine, batch):
        return batch

    trainer = Engine(update_fn)
    h = TerminateOnNan()
    trainer.add_event_handler(Events.ITERATION_COMPLETED, h)
    trainer.run(data, max_epochs=2)
    assert trainer.state.iteration == 5
Example 8: asin
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def asin(t):
    """
    Element-wise arcsine computed using cross-approximation; see PyTorch's `asin()`.

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """
    return tn.cross(lambda x: torch.asin(x), tensors=t, verbose=False)
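A rough usage sketch, under the assumption that this function lives in a tntorch-style module where tn.Tensor wraps a dense torch tensor and .torch() reconstructs it; everything outside the snippet above is an assumption, not confirmed by the source:
import torch
import tntorch as tn   # assumed import behind the tn alias used above

t = tn.Tensor(torch.rand(8, 8, 8))   # assumed constructor: wrap a small dense tensor in tensor-train form
s = asin(t)                          # element-wise arcsine via cross-approximation
# Compare against the dense reference; cross-approximation is approximate, so use a loose tolerance.
print(torch.allclose(s.torch(), torch.asin(t.torch()), atol=1e-3))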
Example 9: qeuler
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def qeuler(q, order, epsilon=0):
    """
    Convert quaternion(s) q to Euler angles.
    Expects a tensor of shape (*, 4), where * denotes any number of dimensions.
    Returns a tensor of shape (*, 3).
    """
    assert q.shape[-1] == 4
    original_shape = list(q.shape)
    original_shape[-1] = 3
    q = q.view(-1, 4)

    q0 = q[:, 0]
    q1 = q[:, 1]
    q2 = q[:, 2]
    q3 = q[:, 3]

    if order == 'xyz':
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
    elif order == 'yzx':
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon))
    elif order == 'zxy':
        x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3))
    elif order == 'xzy':
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon))
    elif order == 'yxz':
        x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon))
        y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
    elif order == 'zyx':
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon))
        z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
    else:
        raise ValueError('Unknown rotation order: ' + order)

    return torch.stack((x, y, z), dim=1).view(original_shape)
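A small usage sketch for qeuler with a hand-picked quaternion, a 90-degree rotation about y in the (w, x, y, z) ordering used above (illustrative values; assumes the function above is in scope):
import math
import torch

half = math.radians(90.0) / 2
q = torch.tensor([[math.cos(half), 0.0, math.sin(half), 0.0]])  # shape (1, 4)
print(qeuler(q, 'xyz'))   # ≈ tensor([[0.0000, 1.5708, 0.0000]]): a π/2 pitch about y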
Example 10: rotmat2euler_torch
# Required imports: import torch [as alias]
# Or: from torch import asin [as alias]
def rotmat2euler_torch(R):
    """
    Converts a rotation matrix to Euler angles.
    Batch pytorch version ported from the corresponding numpy method above.

    :param R: N*3*3
    :return: N*3
    """
    n = R.data.shape[0]
    eul = Variable(torch.zeros(n, 3).float()).cuda()
    idx_spec1 = (R[:, 0, 2] == 1).nonzero().cpu().data.numpy().reshape(-1).tolist()
    idx_spec2 = (R[:, 0, 2] == -1).nonzero().cpu().data.numpy().reshape(-1).tolist()
    if len(idx_spec1) > 0:
        R_spec1 = R[idx_spec1, :, :]
        eul_spec1 = Variable(torch.zeros(len(idx_spec1), 3).float()).cuda()
        eul_spec1[:, 2] = 0
        eul_spec1[:, 1] = -np.pi / 2
        delta = torch.atan2(R_spec1[:, 0, 1], R_spec1[:, 0, 2])
        eul_spec1[:, 0] = delta
        eul[idx_spec1, :] = eul_spec1

    if len(idx_spec2) > 0:
        R_spec2 = R[idx_spec2, :, :]
        eul_spec2 = Variable(torch.zeros(len(idx_spec2), 3).float()).cuda()
        eul_spec2[:, 2] = 0
        eul_spec2[:, 1] = np.pi / 2
        delta = torch.atan2(R_spec2[:, 0, 1], R_spec2[:, 0, 2])
        eul_spec2[:, 0] = delta
        eul[idx_spec2] = eul_spec2

    idx_remain = np.arange(0, n)
    idx_remain = np.setdiff1d(np.setdiff1d(idx_remain, idx_spec1), idx_spec2).tolist()
    if len(idx_remain) > 0:
        R_remain = R[idx_remain, :, :]
        eul_remain = Variable(torch.zeros(len(idx_remain), 3).float()).cuda()
        eul_remain[:, 1] = -torch.asin(R_remain[:, 0, 2])
        eul_remain[:, 0] = torch.atan2(R_remain[:, 1, 2] / torch.cos(eul_remain[:, 1]),
                                       R_remain[:, 2, 2] / torch.cos(eul_remain[:, 1]))
        eul_remain[:, 2] = torch.atan2(R_remain[:, 0, 1] / torch.cos(eul_remain[:, 1]),
                                       R_remain[:, 0, 0] / torch.cos(eul_remain[:, 1]))
        eul[idx_remain, :] = eul_remain

    return eul
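A usage sketch for rotmat2euler_torch with a single rotation about the y-axis; note the function hardcodes .cuda(), so this assumes a GPU is available and that the module-level np/Variable imports used above are in scope:
import math
import torch

theta = 0.3
R = torch.tensor([[[ math.cos(theta), 0.0, math.sin(theta)],
                   [ 0.0,             1.0, 0.0            ],
                   [-math.sin(theta), 0.0, math.cos(theta)]]]).cuda()  # shape (1, 3, 3)
print(rotmat2euler_torch(R))   # ≈ tensor([[ 0.0000, -0.3000,  0.0000]], device='cuda:0')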