本文整理匯總了Python中torch.log1p方法的典型用法代碼示例。如果您正苦於以下問題:Python torch.log1p方法的具體用法?Python torch.log1p怎麽用?Python torch.log1p使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類torch
的用法示例。
在下文中一共展示了torch.log1p方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: mu_law_encoding
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def mu_law_encoding(
        x: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Apply mu-law companding to a signal.

    See the `Wikipedia entry
    <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ for background.
    The input is assumed to be scaled to ``[-1, 1]``; the output takes
    integer values in ``[0, quantization_channels - 1]``.

    Args:
        x (Tensor): Input signal, scaled to ``[-1, 1]``.
        quantization_channels (int): Number of quantization channels.

    Returns:
        Tensor: Mu-law encoded signal (``int64``).
    """
    # Integer inputs are promoted to float before companding.
    if not x.is_floating_point():
        x = x.to(torch.float)
    mu = torch.tensor(quantization_channels - 1.0, dtype=x.dtype)
    # Compand magnitude logarithmically, keeping the sign.
    companded = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
    # Map [-1, 1] -> [0, mu] and round (+0.5 then truncate) to integers.
    return ((companded + 1) / 2 * mu + 0.5).to(torch.int64)
示例2: mu_law_decoding
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def mu_law_decoding(
        x_mu: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Invert mu-law companding.

    See the `Wikipedia entry
    <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ for background.
    The input is expected to hold values in
    ``[0, quantization_channels - 1]``; the output is scaled to ``[-1, 1]``.

    Args:
        x_mu (Tensor): Mu-law encoded signal.
        quantization_channels (int): Number of quantization channels.

    Returns:
        Tensor: Decoded signal in ``[-1, 1]``.
    """
    # Integer inputs are promoted to float before expansion.
    if not x_mu.is_floating_point():
        x_mu = x_mu.to(torch.float)
    mu = torch.tensor(quantization_channels - 1.0, dtype=x_mu.dtype)
    # Map [0, mu] back to [-1, 1].
    x = (x_mu / mu) * 2 - 1.0
    # Undo the logarithmic companding of the magnitude, keeping the sign.
    return torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
示例3: get_temperature
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def get_temperature(max_value, bound=1-1e-3):
    """Return the temperature t solving sigmoid(t * max_value) = bound.

    Solving for t gives t = logit(bound) / max_value. The result is capped
    at 1: if the solved temperature exceeds 1, the int ``1`` is returned
    instead (via the builtin ``min`` on a one-element tensor).

    :param max_value: largest value in the dataset
    :param bound: target sigmoid output at ``max_value``
    :return: one-element tensor with the temperature, or the int ``1``
    """
    max_value_t = torch.Tensor([max_value])
    bound_t = torch.Tensor([bound])
    # logit(b) = log(b) - log(1 - b); log1p(-b) is the stable log(1 - b).
    raw = (torch.log(bound_t) - torch.log1p(-bound_t)) / max_value_t
    return min(raw, 1)
示例4: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def forward(self, x, sample=True):
    """Bayesian conv2d forward pass using the local reparameterization trick.

    Fix: the original signature was ``forward(self, x)`` but the body read a
    name ``sample`` that was never defined, raising NameError whenever
    ``self.training`` was False. ``sample=True`` is now a keyword parameter
    with a default that preserves the sampling behavior (and matches the
    sibling linear layer's signature).

    Args:
        x: input tensor for ``F.conv2d``.
        sample (bool): when True (or when ``self.training``), draw a noisy
            activation; otherwise return the mean activation.

    Returns:
        Tensor: sampled activation ``act_mu + act_std * eps`` during
        training/sampling, else the deterministic mean ``act_mu``.

    Side effects: caches ``self.W_sigma`` (and ``self.bias_sigma``).
    """
    # softplus(rho) keeps the weight std strictly positive
    self.W_sigma = torch.log1p(torch.exp(self.W_rho))
    if self.use_bias:
        self.bias_sigma = torch.log1p(torch.exp(self.bias_rho))
        bias_var = self.bias_sigma ** 2
    else:
        self.bias_sigma = bias_var = None
    # mean and variance of the pre-activation (local reparameterization)
    act_mu = F.conv2d(
        x, self.W_mu, self.bias_mu, self.stride, self.padding, self.dilation, self.groups)
    act_var = 1e-16 + F.conv2d(
        x ** 2, self.W_sigma ** 2, bias_var, self.stride, self.padding, self.dilation, self.groups)
    act_std = torch.sqrt(act_var)
    if self.training or sample:
        eps = torch.empty(act_mu.size()).normal_(0, 1).to(self.device)
        return act_mu + act_std * eps
    else:
        return act_mu
示例5: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def forward(self, x, sample=True):
    """Bayesian linear forward pass via the local reparameterization trick.

    Computes the mean and variance of the pre-activation from the weight
    posterior parameters, then either samples a noisy activation (when
    training or ``sample`` is True) or returns the mean.

    Side effects: caches ``self.W_sigma`` (and ``self.bias_sigma``).
    """
    # softplus(rho) keeps sigma strictly positive
    self.W_sigma = torch.log1p(torch.exp(self.W_rho))
    if self.use_bias:
        self.bias_sigma = torch.log1p(torch.exp(self.bias_rho))
        bias_var = self.bias_sigma ** 2
    else:
        self.bias_sigma = None
        bias_var = None
    out_mean = F.linear(x, self.W_mu, self.bias_mu)
    # small epsilon guards the sqrt against a zero variance
    out_var = 1e-16 + F.linear(x ** 2, self.W_sigma ** 2, bias_var)
    if not (self.training or sample):
        return out_mean
    noise = torch.empty(out_mean.size()).normal_(0, 1).to(self.device)
    return out_mean + torch.sqrt(out_var) * noise
示例6: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def forward(self, input, sample=True):
    """Bayesian conv2d forward pass with weight-space reparameterization.

    When training or ``sample`` is True, draws weights (and bias, if used)
    from the Gaussian posterior ``mu + sigma * eps``; otherwise uses the
    posterior means deterministically.

    Side effects: caches ``self.W_sigma`` (and ``self.bias_sigma``) on the
    sampling path.
    """
    if not (self.training or sample):
        # deterministic evaluation: plain posterior means
        weight = self.W_mu
        bias = self.bias_mu if self.use_bias else None
    else:
        # softplus(rho) keeps sigma strictly positive
        self.W_sigma = torch.log1p(torch.exp(self.W_rho))
        noise_w = torch.empty(self.W_mu.size()).normal_(0, 1).to(self.device)
        weight = self.W_mu + noise_w * self.W_sigma
        bias = None
        if self.use_bias:
            self.bias_sigma = torch.log1p(torch.exp(self.bias_rho))
            noise_b = torch.empty(self.bias_mu.size()).normal_(0, 1).to(self.device)
            bias = self.bias_mu + noise_b * self.bias_sigma
    return F.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
示例7: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def forward(self, input, sample=True):
    """Bayesian linear forward pass with weight-space reparameterization.

    When training or ``sample`` is True, draws weights (and bias, if used)
    from the Gaussian posterior ``mu + sigma * eps``; otherwise applies the
    posterior means deterministically.

    Side effects: caches ``self.W_sigma`` (and ``self.bias_sigma``) on the
    sampling path.
    """
    if not (self.training or sample):
        # deterministic evaluation: plain posterior means
        weight = self.W_mu
        bias = self.bias_mu if self.use_bias else None
    else:
        # softplus(rho) keeps sigma strictly positive
        self.W_sigma = torch.log1p(torch.exp(self.W_rho))
        noise_w = torch.empty(self.W_mu.size()).normal_(0, 1).to(self.device)
        weight = self.W_mu + noise_w * self.W_sigma
        bias = None
        if self.use_bias:
            self.bias_sigma = torch.log1p(torch.exp(self.bias_rho))
            noise_b = torch.empty(self.bias_mu.size()).normal_(0, 1).to(self.device)
            bias = self.bias_mu + noise_b * self.bias_sigma
    return F.linear(input, weight, bias)
示例8: log1p
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def log1p(x, out=None):
    """Element-wise natural logarithm of ``1 + x``.

    Parameters
    ----------
    x : ht.DNDarray
        Values for which ``log(1 + x)`` is computed.
    out : ht.DNDarray or None, optional
        Buffer for the result; must have a broadcastable shape. When omitted
        or ``None``, a fresh tensor is allocated.

    Returns
    -------
    logarithms : ht.DNDarray
        Tensor of the same shape as ``x`` holding ``log1p`` of each element.
        Inputs below ``-1`` yield ``nan``. If ``out`` was provided, the
        return value is a reference to it.

    Examples
    --------
    >>> ht.log1p(ht.arange(5))
    array([0., 0.69314718, 1.09861229, 1.38629436, 1.60943791])
    """
    # delegate to the element-wise local-operation dispatcher, backed by torch
    return operations.__local_op(torch.log1p, x, out)
示例9: heatmap_loss
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def heatmap_loss(scores, labels, pos_weight=100):
    """Weighted L1 heatmap loss.

    Each element's absolute error is weighted by ``pos_weight`` where the
    label is 1 and by 1 where it is 0, then summed over all elements.

    Args:
        scores: predicted heatmap.
        labels: target heatmap (cast to float; expected 0/1 weighting mask).
        pos_weight: multiplier applied to positive (label == 1) locations.

    Returns:
        Scalar tensor: sum of the weighted per-element L1 errors.
    """
    labels = labels.float()
    per_elem = F.l1_loss(scores, labels, reduction='none')
    # weight = 1 for negatives, pos_weight for positives
    weights = 1. + (pos_weight - 1.) * labels
    return (per_elem * weights).sum()
# def uncertainty_loss(logvar, sqr_dists):
# sqr_dists = sqr_dists.clamp(min=1.+1e-6)
# c = (1 + torch.log(sqr_dists)) / sqr_dists
# loss = torch.log1p(logvar.exp()) / sqr_dists + torch.sigmoid(-logvar) - c
# print('dists', float(sqr_dists.min()), float(sqr_dists.max()))
# print('logvar', float(logvar.min()), float(logvar.max()))
# print('loss', float(loss.min()), float(loss.max()))
# def hook(grad):
# print('grad', float(grad.min()), float(grad.max()), float(grad.sum()))
# logvar.register_hook(hook)
# return loss.mean()
示例10: semantic_loss_exactly_one
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def semantic_loss_exactly_one(log_prob):
    """Semantic loss that encourages a peaked ("exactly one") multinomial.

    Evaluates -log sum_{i=1}^n p_i prod_{j != i} (1 - p_j) entirely in
    log-space for numerical stability.
    Paper: http://web.cs.ucla.edu/~guyvdb/papers/XuICML18.pdf
    Code: https://github.com/UCLA-StarAI/Semantic-Loss/blob/master/semi_supervised/semantic.py

    Parameters:
        log_prob: log probability of a multinomial distribution,
            shape (batch_size, n)
    Returns:
        semantic_loss: shape (batch_size)
    """
    batch_idx = range(log_prob.shape[0])
    argmaxes = log_prob.argmax(dim=-1)
    # For the most likely class, log(1 - p_max) is obtained as a logsumexp
    # over the remaining classes, which stays stable when p_max is near 1.
    masked = log_prob.clone()
    masked[batch_idx, argmaxes] = torch.tensor(float('-inf'))
    top_log_1mprob = torch.logsumexp(masked, dim=-1)
    # All other classes use log1p(-p) directly (masked slot yields log(1)=0
    # and is overwritten below).
    log_1mprob = torch.log1p(-torch.exp(masked))
    log_1mprob[batch_idx, argmaxes] = top_log_1mprob
    # -[ log prod_j (1-p_j) + log sum_i p_i/(1-p_i) ]
    return -(log_1mprob.sum(dim=-1) + torch.logsumexp(log_prob - log_1mprob, dim=-1))
示例11: mulaw_quantize
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def mulaw_quantize(x, quantization_channels=256):
    """Encode signal based on mu-law companding. For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This algorithm assumes the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1.

    Fix: an input that is neither a numpy array nor a torch tensor used to
    fall through both branches and crash with UnboundLocalError on the
    return; it now raises an explicit TypeError.

    Args:
        x (np.ndarray or torch.Tensor): signal scaled to [-1, 1].
        quantization_channels (int): Number of channels. default: 256

    Returns:
        Same container type as ``x`` with integer values in
        [0, quantization_channels - 1].

    Raises:
        TypeError: if ``x`` is neither a numpy array nor a torch tensor.
    """
    mu = quantization_channels - 1
    if isinstance(x, np.ndarray):
        x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
        x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
    elif isinstance(x, (torch.Tensor, torch.LongTensor)):
        # integer tensors are promoted to float before companding
        if isinstance(x, torch.LongTensor):
            x = x.float()
        mu = torch.FloatTensor([mu])
        x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
        x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()
    else:
        raise TypeError(
            "x must be a numpy array or a torch tensor, got %s" % type(x).__name__)
    return x_mu
示例12: inv_mulaw_quantize
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def inv_mulaw_quantize(x_mu, quantization_channels=256, cuda=False):
    """Decode mu-law encoded signal. For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This expects an input with values between 0 and quantization_channels - 1
    and returns a signal scaled between -1 and 1.

    Fix: an input that is neither a numpy array nor a torch tensor used to
    fall through both branches and crash with UnboundLocalError on the
    return; it now raises an explicit TypeError (consistent with
    ``mulaw_quantize``).

    Args:
        x_mu (np.ndarray or torch.Tensor): encoded signal in
            [0, quantization_channels - 1].
        quantization_channels (int): Number of channels. default: 256
        cuda (bool): place the ``mu`` constant on GPU for the torch branch.

    Returns:
        Same container type as ``x_mu``, scaled to [-1, 1].

    Raises:
        TypeError: if ``x_mu`` is neither a numpy array nor a torch tensor.
    """
    mu = quantization_channels - 1.
    if isinstance(x_mu, np.ndarray):
        x = ((x_mu) / mu) * 2 - 1.
        x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
    elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
        # integer tensors are promoted to float before expansion
        if isinstance(x_mu, (torch.LongTensor, torch.cuda.LongTensor)):
            x_mu = x_mu.float()
        if cuda:
            mu = (torch.FloatTensor([mu])).cuda()
        else:
            mu = torch.FloatTensor([mu])
        x = ((x_mu) / mu) * 2 - 1.
        x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.) / mu
    else:
        raise TypeError(
            "x_mu must be a numpy array or a torch tensor, got %s" % type(x_mu).__name__)
    return x
示例13: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def forward(ctx, scale, c, dim):
    """Compute a log-space alternating sum over k = 0..dim-1, in float64,
    returned as float32.

    NOTE(review): takes ``ctx`` as first argument, so this is presumably the
    ``forward`` of a ``torch.autograd.Function`` (decorator/class not visible
    here — confirm). Relies on external helpers ``Constants``, ``rexpand``
    and ``log_sum_exp_signs``; the lgamma/erf structure suggests the
    log-normalizer of a constant-curvature (hyperbolic) normal distribution
    — verify against the defining paper before relying on this description.
    """
    # promote to double precision for the lgamma/erf accumulations below
    scale = scale.double()
    c = c.double()
    # stash detached copies of the inputs for the backward pass
    ctx.scale = scale.clone().detach()
    ctx.c = c.clone().detach()
    ctx.dim = dim
    device = scale.device
    # leading log-prefactor built from cached constants and the inputs
    output = .5 * (Constants.logpi - Constants.log2) + scale.log() -(int(dim) - 1) * (c.log() / 2 + Constants.log2)
    dim = torch.tensor(int(dim)).to(device).double()
    # k = 0..dim-1, broadcast to scale's shape
    k_float = rexpand(torch.arange(int(dim)), *scale.size()).double().to(device)
    # per-k log terms: log C(dim-1, k) via lgamma, a Gaussian exponent,
    # and a log1p(erf(...)) tail factor
    s = torch.lgamma(dim) - torch.lgamma(k_float + 1) - torch.lgamma(dim - k_float) \
        + (dim - 1 - 2 * k_float).pow(2) * c * scale.pow(2) / 2 \
        + torch.log1p(torch.erf((dim - 1 - 2 * k_float) * c.sqrt() * scale / math.sqrt(2)))
    # alternating +1, -1, +1, ... signs of length dim
    signs = torch.tensor([1., -1.]).double().to(device).repeat(((int(dim)+1) // 2)*2)[:int(dim)]
    signs = rexpand(signs, *scale.size())
    # signed log-sum-exp over k; cached for reuse in backward
    ctx.log_sum_term = log_sum_exp_signs(s, signs, dim=0)
    output = output + ctx.log_sum_term
    return output.float()
示例14: mean
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def mean(self):
    """Mean of the distribution, computed in float64 and returned as float32.

    Evaluated as exp(S1 - S2), where S1 and S2 are signed log-sum-exps of
    per-k terms over k = 0..dim-1 (numerator and normalizer respectively).

    NOTE(review): ``variance`` below accesses this as ``self.mean`` without
    calling it, so this is presumably decorated ``@property`` — the decorator
    is outside this view; confirm. Relies on external helpers ``rexpand`` and
    ``log_sum_exp_signs`` and on ``self.c``, ``self.scale``, ``self.dim``,
    ``self.device``.
    """
    # promote parameters to double precision for the lgamma/erf sums
    c = self.c.double()
    scale = self.scale.double()
    dim = torch.tensor(int(self.dim)).double().to(self.device)
    # alternating +1/-1 signs of length dim, broadcast to scale's shape
    signs = torch.tensor([1., -1.]).double().to(self.device).repeat(((self.dim+1) // 2)*2)[:self.dim].unsqueeze(-1).unsqueeze(-1).expand(self.dim, *self.scale.size())
    # k = 0..dim-1, broadcast to scale's shape
    k_float = rexpand(torch.arange(self.dim), *self.scale.size()).double().to(self.device)
    # normalizer terms: log C(dim-1, k) via lgamma, a Gaussian exponent,
    # and a log1p(erf(...)) tail factor
    s2 = torch.lgamma(dim) - torch.lgamma(k_float + 1) - torch.lgamma(dim - k_float) \
        + (dim - 1 - 2 * k_float).pow(2) * c * scale.pow(2) / 2 \
        + torch.log1p(torch.erf((dim - 1 - 2 * k_float) * c.sqrt() * scale / math.sqrt(2)))
    S2 = log_sum_exp_signs(s2, signs, dim=0)
    # numerator argument may be negative, so its sign is tracked separately
    # and folded into the signs of the log-sum-exp
    log_arg = (dim - 1 - 2 * k_float) * c.sqrt() * scale.pow(2) * (1 + torch.erf((dim - 1 - 2 * k_float) * c.sqrt() * scale / math.sqrt(2))) + \
        torch.exp(-(dim - 1 - 2 * k_float).pow(2) * c * scale.pow(2) / 2) * scale * math.sqrt(2 / math.pi)
    log_arg_signs = torch.sign(log_arg)
    s1 = torch.lgamma(dim) - torch.lgamma(k_float + 1) - torch.lgamma(dim - k_float) \
        + (dim - 1 - 2 * k_float).pow(2) * c * scale.pow(2) / 2 \
        + torch.log(log_arg_signs * log_arg)
    S1 = log_sum_exp_signs(s1, signs * log_arg_signs, dim=0)
    # ratio numerator / normalizer, back in linear space
    output = torch.exp(S1 - S2)
    return output.float()
示例15: variance
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import log1p [as 別名]
def variance(self):
    """Variance of the distribution: E[X^2] - (E[X])^2.

    The second moment E[X^2] is computed as exp(S1 - S2) in float64 (S1/S2
    are signed log-sum-exps over k = 0..dim-1), then the squared mean is
    subtracted in float32.

    NOTE(review): subtracts ``self.mean.pow(2)`` without calling ``mean``,
    implying ``mean`` (and presumably this method) are ``@property`` — the
    decorators are outside this view; confirm. Relies on external helpers
    ``rexpand`` and ``log_sum_exp_signs`` and on ``self.c``, ``self.scale``,
    ``self.dim``, ``self.device``.
    """
    # promote parameters to double precision for the lgamma/erf sums
    c = self.c.double()
    scale = self.scale.double()
    dim = torch.tensor(int(self.dim)).double().to(self.device)
    # alternating +1/-1 signs of length dim, broadcast to scale's shape
    signs = torch.tensor([1., -1.]).double().to(self.device).repeat(((int(dim)+1) // 2)*2)[:int(dim)].unsqueeze(-1).unsqueeze(-1).expand(int(dim), *self.scale.size())
    # k = 0..dim-1, broadcast to scale's shape
    k_float = rexpand(torch.arange(self.dim), *self.scale.size()).double().to(self.device)
    # normalizer terms: log C(dim-1, k) via lgamma, a Gaussian exponent,
    # and a log1p(erf(...)) tail factor
    s2 = torch.lgamma(dim) - torch.lgamma(k_float + 1) - torch.lgamma(dim - k_float) \
        + (dim - 1 - 2 * k_float).pow(2) * c * scale.pow(2) / 2 \
        + torch.log1p(torch.erf((dim - 1 - 2 * k_float) * c.sqrt() * scale / math.sqrt(2)))
    S2 = log_sum_exp_signs(s2, signs, dim=0)
    # second-moment numerator argument may be negative; its sign is tracked
    # separately and folded into the signs of the log-sum-exp
    log_arg = (1 + (dim - 1 - 2 * k_float).pow(2) * c * scale.pow(2)) * (1 + torch.erf((dim - 1 - 2 * k_float) * c.sqrt() * scale / math.sqrt(2))) + \
        (dim - 1 - 2 * k_float) * c.sqrt() * torch.exp(-(dim - 1 - 2 * k_float).pow(2) * c * scale.pow(2) / 2) * scale * math.sqrt(2 / math.pi)
    log_arg_signs = torch.sign(log_arg)
    s1 = torch.lgamma(dim) - torch.lgamma(k_float + 1) - torch.lgamma(dim - k_float) \
        + (dim - 1 - 2 * k_float).pow(2) * c * scale.pow(2) / 2 \
        + 2 * scale.log() \
        + torch.log(log_arg_signs * log_arg)
    S1 = log_sum_exp_signs(s1, signs * log_arg_signs, dim=0)
    # E[X^2] in linear space, then subtract the squared mean
    output = torch.exp(S1 - S2)
    output = output.float() - self.mean.pow(2)
    return output