This article collects typical usage examples of the torch.sinh method in Python. If you have been wondering what exactly torch.sinh does, how to call it, or where it is used in practice, the curated code examples below may help. You can also explore further usage examples of the torch module this method belongs to.
The following presents 12 code examples of the torch.sinh method, sorted by popularity by default.
Example 1: sinh
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def sinh(x, out=None):
    """
    Return the hyperbolic sine, element-wise.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the hyperbolic sine.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
        or set to None, a fresh tensor is allocated.

    Returns
    -------
    hyperbolic sine : ht.DNDarray
        A tensor of the same shape as x, containing the hyperbolic sine of each element in this tensor.
        If out was provided, the result is a reference to it.

    Examples
    --------
    >>> ht.sinh(ht.arange(-6, 7, 2))
    tensor([-201.7132,  -27.2899,   -3.6269,    0.0000,    3.6269,   27.2899,  201.7132])
    """
    return local_op(torch.sinh, x, out)
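The `local_op` helper is not shown in the snippet. As a rough orientation, here is a minimal, self-contained sketch of the pattern: apply a torch elementwise function to the tensor a container wraps, optionally writing into a preallocated output. `LocalArray` is a stand-in for ht.DNDarray, not Heat's actual class; real Heat additionally handles dtype promotion and the distributed split axis.

import torch

class LocalArray:
    # Stand-in container for ht.DNDarray (assumption, for illustration only).
    def __init__(self, data):
        self.data = torch.as_tensor(data)

def local_op(operation, x, out=None):
    # Apply the torch function to the wrapped process-local tensor.
    result = operation(x.data)
    if out is not None:
        out.data = result
        return out
    return LocalArray(result)

print(local_op(torch.sinh, LocalArray([-1.0, 0.0, 1.0])).data)
# tensor([-1.1752,  0.0000,  1.1752])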
Example 2: exp_map
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def exp_map(x, v):
    # BD, BD -> BD
    tn = tangent_norm(v).unsqueeze(dim=1)
    tn_expand = tn.repeat(1, x.size()[-1])
    result = torch.cosh(tn) * x + torch.sinh(tn) * (v / tn)
    result = torch.where(tn_expand > 0, result, x)  # only update if tangent norm is > 0
    return result
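`tangent_norm` is referenced but not shown. On the hyperboloid model it is typically the Minkowski norm of a tangent vector; the sketch below is an assumption about that helper, not the repository's actual code.

import torch

def tangent_norm(v):
    # Per-row Minkowski inner product <v, v>_L = -v_0*v_0 + sum_i v_i*v_i.
    prod = -v[:, 0] * v[:, 0] + (v[:, 1:] * v[:, 1:]).sum(dim=1)
    # Clamp against rounding error before the square root.
    return torch.sqrt(torch.clamp(prod, min=0.0))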
Example 3: build_tensor
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def build_tensor(K):
    lam = [torch.cosh(K) * 2, torch.sinh(K) * 2]
    T = []
    for i in range(2):
        for j in range(2):
            for k in range(2):
                for l in range(2):
                    if (i + j + k + l) % 2 == 0:
                        T.append(torch.sqrt(lam[i] * lam[j] * lam[k] * lam[l]) / 2.)
                    else:
                        T.append(torch.tensor(0.0, dtype=K.dtype, device=K.device))
    T = torch.stack(T).view(2, 2, 2, 2)
    return T
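Usage sketch: build_tensor constructs the rank-4 local tensor of the 2D Ising model from the character expansion of the Boltzmann weight, with bond weights 2*cosh(K) and 2*sinh(K); entries with an odd index sum vanish by Z2 symmetry. The coupling value below is chosen for illustration.

import torch

K = torch.tensor(0.4407, dtype=torch.float64)  # near the critical coupling ln(1 + sqrt(2)) / 2
T = build_tensor(K)
print(T.shape)        # torch.Size([2, 2, 2, 2])
print(T[0, 0, 0, 0])  # equals 2 * cosh(K)**2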
Example 4: bwd
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
# Also needed here: from typing import Tuple
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
    a, b, c, d, g = NLSQ.get_pseudo_params(params)

    # double needed for stability. No effect on overall speed
    a = a.double()
    b = b.double()
    c = c.double()
    d = d.double()
    g = g.double()
    z = z.double()

    aa = -b * d.pow(2)
    bb = (z - a) * d.pow(2) - 2 * b * d * g
    cc = (z - a) * 2 * d * g - b * (1 + g.pow(2))
    dd = (z - a) * (1 + g.pow(2)) - c

    p = (3 * aa * cc - bb.pow(2)) / (3 * aa.pow(2))
    q = (2 * bb.pow(3) - 9 * aa * bb * cc + 27 * aa.pow(2) * dd) / (27 * aa.pow(3))

    t = -2 * torch.abs(q) / q * torch.sqrt(torch.abs(p) / 3)
    inter_term1 = -3 * torch.abs(q) / (2 * p) * torch.sqrt(3 / torch.abs(p))
    inter_term2 = 1 / 3 * arccosh(torch.abs(inter_term1 - 1) + 1)
    t = t * torch.cosh(inter_term2)

    tpos = -2 * torch.sqrt(torch.abs(p) / 3)
    inter_term1 = 3 * q / (2 * p) * torch.sqrt(3 / torch.abs(p))
    inter_term2 = 1 / 3 * arcsinh(inter_term1)
    tpos = tpos * torch.sinh(inter_term2)

    t[p > 0] = tpos[p > 0]

    z = t - bb / (3 * aa)
    arg = d * z + g
    denom = arg.pow(2) + 1
    logdet = torch.log(b - 2 * c * d * arg / denom.pow(2))

    z = z.float().mul(mask.unsqueeze(2))
    logdet = logdet.float().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1) * -1.0
    return z, logdet
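The `arccosh` and `arcsinh` helpers referenced above are not shown. A common definition, consistent with how they are used here, is sketched below; note that PyTorch >= 1.7 also ships these as torch.acosh and torch.asinh.

import torch

def arccosh(x):
    # Inverse hyperbolic cosine, valid for x >= 1.
    return torch.log(x + torch.sqrt(x.pow(2) - 1))

def arcsinh(x):
    # Inverse hyperbolic sine, valid for all real x.
    return torch.log(x + torch.sqrt(x.pow(2) + 1))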
Example 5: log_map
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def log_map(x, y):
    """Perform the log step."""
    d = dist(x, y)
    return (d / torch.sinh(d)) * (y - torch.cosh(d) * x)
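`dist` is the geodesic distance on the hyperboloid; the sketch below assumes the standard Minkowski formulation, d(x, y) = arccosh(-<x, y>_L), written for points laid out along the last dimension. It is a guess at the missing helper, not the repository's code.

import torch

def dist(x, y):
    # Minkowski inner product <u, v>_L = -u_0*v_0 + sum_i u_i*v_i.
    prod = -x[..., 0] * y[..., 0] + (x[..., 1:] * y[..., 1:]).sum(dim=-1)
    # Clamp to the valid domain of acosh to guard against rounding error.
    return torch.acosh(torch.clamp(-prod, min=1.0))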
Example 6: exp_map
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def exp_map(x, y):
    """Perform the exp step."""
    n = torch.clamp(norm(y), min=EPSILON)
    return torch.cosh(n) * x + (torch.sinh(n) / n) * y
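A quick check that exp_map keeps points on the hyperboloid, i.e. <z, z>_L = -1. The definitions of `norm` and `EPSILON` are not shown in the snippet, so the ones below are assumptions (Minkowski norm of a tangent vector and a small constant).

import torch

EPSILON = 1e-7  # assumed value of the module constant

def norm(y):
    # Minkowski norm of a tangent vector (assumption about the helper).
    prod = -y[..., :1] * y[..., :1] + (y[..., 1:] * y[..., 1:]).sum(dim=-1, keepdim=True)
    return torch.sqrt(torch.clamp(prod, min=0.0))

x = torch.tensor([[1.0, 0.0, 0.0]])  # base point of the hyperboloid
y = torch.tensor([[0.0, 0.3, 0.4]])  # tangent at x, since <x, y>_L == 0
z = exp_map(x, y)
print(-z[0, 0] ** 2 + (z[0, 1:] ** 2).sum())  # tensor(-1.0000)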
Example 7: grad_log_prob
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def grad_log_prob(self, value):
    res = -value / self.scale.pow(2) \
        + (self.dim - 1) * self.c.sqrt() * torch.cosh(self.c.sqrt() * value) / torch.sinh(self.c.sqrt() * value)
    return res
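This is the derivative of the log-density of a hyperbolic radius, log p(r) = -r^2 / (2*scale^2) + (dim - 1) * log(sinh(sqrt(c) * r)) + const. A quick autograd check with assumed standalone values (scale, c, dim are attributes of the distribution object in the original):

import torch

scale, c, dim = torch.tensor(1.0), torch.tensor(0.9), 3
r = torch.tensor(0.7, requires_grad=True)
log_p = -r.pow(2) / (2 * scale.pow(2)) \
        + (dim - 1) * torch.log(torch.sinh(c.sqrt() * r))
log_p.backward()
manual = -r / scale.pow(2) \
         + (dim - 1) * c.sqrt() * torch.cosh(c.sqrt() * r) / torch.sinh(c.sqrt() * r)
print(torch.allclose(r.grad, manual))  # True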
Example 8: logdetexp
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def logdetexp(self, x, y, is_vector=False, keepdim=False):
    d = self.norm(x, y, keepdim=keepdim) if is_vector else self.dist(x, y, keepdim=keepdim)
    return (self.dim - 1) * (torch.sinh(self.c.sqrt() * d) / self.c.sqrt() / d).log()
Example 9: aten_sinh
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
# Also needed here: import tensorrt as trt
def aten_sinh(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.SINH)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError

    return [torch.sinh(inp)]
Example 10: sinh
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def sinh(t):
    """
    Element-wise hyperbolic sine computed using cross-approximation; see PyTorch's `sinh()`.

    :param t: input :class:`Tensor`
    :return: a :class:`Tensor`
    """
    return tn.cross(lambda x: torch.sinh(x), tensors=t, verbose=False)
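Usage sketch, assuming the snippet above lives in tntorch (imported as tn): build a random low-rank TT tensor, apply sinh via cross-approximation, and compare against the dense result.

import torch
import tntorch as tn

t = tn.rand([16, 16, 16], ranks_tt=4)  # random TT tensor
s = sinh(t)                            # the function defined above
print(torch.dist(torch.sinh(t.torch()), s.torch()))  # small approximation error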
Example 11: standard
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def standard(x, nn_outp):
    a, b, c, d, f = NLSq.get_pseudo_params(nn_outp)

    # double needed for stability. No effect on overall speed
    a = a.double()
    b = b.double()
    c = c.double()
    d = d.double()
    f = f.double()
    x = x.double()

    aa = -b * d.pow(2)
    bb = (x - a) * d.pow(2) - 2 * b * d * f
    cc = (x - a) * 2 * d * f - b * (1 + f.pow(2))
    dd = (x - a) * (1 + f.pow(2)) - c

    p = (3 * aa * cc - bb.pow(2)) / (3 * aa.pow(2))
    q = (2 * bb.pow(3) - 9 * aa * bb * cc + 27 * aa.pow(2) * dd) / (27 * aa.pow(3))

    t = -2 * torch.abs(q) / q * torch.sqrt(torch.abs(p) / 3)
    inter_term1 = -3 * torch.abs(q) / (2 * p) * torch.sqrt(3 / torch.abs(p))
    inter_term2 = 1 / 3 * arccosh(torch.abs(inter_term1 - 1) + 1)
    t = t * torch.cosh(inter_term2)

    tpos = -2 * torch.sqrt(torch.abs(p) / 3)
    inter_term1 = 3 * q / (2 * p) * torch.sqrt(3 / torch.abs(p))
    inter_term2 = 1 / 3 * arcsinh(inter_term1)
    tpos = tpos * torch.sinh(inter_term2)

    t[p > 0] = tpos[p > 0]

    y = t - bb / (3 * aa)
    arg = d * y + f
    denom = 1 + arg.pow(2)
    x_new = a + b * y + c / denom

    logdet = -torch.log(b - 2 * c * d * arg / denom.pow(2)).sum(-1)

    y = y.float()
    logdet = logdet.float()
    return y, logdet
Example 12: forward
# Required import: import torch [as alias]
# Or: from torch import sinh [as alias]
def forward(self, x):
    self.stored_input = x

    g_add = self._compute_gate(x, self.G_add, self.bias_add)
    self.stored_gate_add = g_add

    if self.nalu_two_gate:
        g_mul = self._compute_gate(x, self.G_mul, self.bias_mul)
        self.stored_gate_mul = g_mul
        self.writer.add_histogram('gate/add', g_add)
        self.writer.add_histogram('gate/mul', g_mul)
    else:
        g_mul = 1 - g_add
        self.writer.add_histogram('gate', g_add)

    self.writer.add_scalar('gate/mean', torch.mean(g_add), verbose_only=False)

    # a = W x = nac(x)
    a = self.nac_add(x)

    # m = exp(W log(|x| + eps)) = exp(nac(log(|x| + eps)))
    if self.nalu_mul == 'normal':
        m = torch.exp(self.nac_mul(
            torch.log(torch.abs(x) + self.eps)
        ))
    elif self.nalu_mul == 'safe':
        m = torch.exp(self.nac_mul(
            torch.log(torch.abs(x - 1) + 1)
        ))
    elif self.nalu_mul == 'max-safe':
        m = torch.exp(self.nac_mul(
            torch.log(torch.relu(x - 1) + 1)
        ))
    elif self.nalu_mul == 'trig':
        m = torch.sinh(self.nac_mul(
            torch.log(x + (x**2 + 1)**0.5 + self.eps)  # torch.asinh(x) does not exist
        ))
    elif self.nalu_mul == 'mnac':
        m = self.nac_mul(x)
    else:
        raise ValueError(f'Unsupported nalu_mul option ({self.nalu_mul})')

    self.writer.add_histogram('add', a)
    self.writer.add_histogram('mul', m)

    # y = g (*) a + (1 - g) (*) m
    y = g_add * a + g_mul * m
    return y
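One detail worth noting in the 'trig' branch: log(x + (x**2 + 1)**0.5) is exactly the inverse hyperbolic sine, and PyTorch >= 1.7 ships it as torch.asinh, so on current versions the branch could use the built-in (up to the eps term). A quick numerical check:

import torch

x = torch.randn(5)
manual = torch.log(x + (x ** 2 + 1) ** 0.5)
print(torch.allclose(manual, torch.asinh(x)))  # True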