This article collects typical usage examples of the torch.sign method in Python. If you have been wondering how exactly to use torch.sign, or what it can be used for, the curated method examples below may help. You can also explore other usages of the torch module it belongs to.
The following shows 15 code examples of torch.sign, sorted by popularity by default.
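Before the examples, a minimal sketch of what torch.sign itself computes: the element-wise sign of a tensor, returning -1, 0, or 1 (the input values below are purely illustrative):

import torch

x = torch.tensor([-2.5, 0.0, 3.0])
print(torch.sign(x))  # tensor([-1., 0., 1.])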
Example 1: mu_law_encoding
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also requires: from torch import Tensor
def mu_law_encoding(
        x: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Encode signal based on mu-law companding. For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This algorithm assumes the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1.

    Args:
        x (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law encoding
    """
    mu = quantization_channels - 1.0
    if not x.is_floating_point():
        x = x.to(torch.float)
    mu = torch.tensor(mu, dtype=x.dtype)
    x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
    x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
    return x_mu
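A quick usage sketch (assuming the function above is available, e.g. as torchaudio.functional.mu_law_encoding; the waveform values are illustrative):

import torch

waveform = torch.tensor([-1.0, -0.5, 0.0, 0.5, 1.0])        # already scaled to [-1, 1]
codes = mu_law_encoding(waveform, quantization_channels=256)
print(codes)  # integer codes in [0, 255], here tensor([  0,  16, 128, 239, 255])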
Example 2: mu_law_decoding
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also requires: from torch import Tensor
def mu_law_decoding(
        x_mu: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Decode mu-law encoded signal. For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This expects an input with values between 0 and quantization_channels - 1
    and returns a signal scaled between -1 and 1.

    Args:
        x_mu (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law decoding
    """
    mu = quantization_channels - 1.0
    if not x_mu.is_floating_point():
        x_mu = x_mu.to(torch.float)
    mu = torch.tensor(mu, dtype=x_mu.dtype)
    x = (x_mu / mu) * 2 - 1.0
    x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
    return x
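Encoding and decoding are approximately inverse operations; a minimal sanity-check sketch using the two functions above:

import torch

x = torch.rand(100) * 2 - 1                                  # random signal in [-1, 1]
x_rec = mu_law_decoding(mu_law_encoding(x, 256), 256)
print(torch.max(torch.abs(x - x_rec)))                       # small quantization error, on the order of 1e-2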
Example 3: __init__
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also requires: import numpy as np; from scipy import linalg
def __init__(self, in_channel):
    super(InvConv, self).__init__()
    # Start from a random rotation matrix, then LU-decompose it
    # (Glow-style invertible 1x1 convolution parameterization).
    weight = np.random.randn(in_channel, in_channel)
    q, _ = linalg.qr(weight)
    w_p, w_l, w_u = linalg.lu(q.astype(np.float32))
    w_s = np.diag(w_u)                 # diagonal of U, stored separately
    w_u = np.triu(w_u, 1)              # strictly upper-triangular part
    u_mask = np.triu(np.ones_like(w_u), 1)
    l_mask = u_mask.T
    self.register_buffer('w_p', torch.from_numpy(w_p))
    self.register_buffer('u_mask', torch.from_numpy(u_mask))
    self.register_buffer('l_mask', torch.from_numpy(l_mask))
    self.register_buffer('l_eye', torch.eye(l_mask.shape[0]))
    self.register_buffer('s_sign', torch.sign(torch.from_numpy(w_s)))
    self.w_l = torch.nn.Parameter(torch.from_numpy(w_l))
    self.w_s = torch.nn.Parameter(torch.log(1e-7 + torch.abs(torch.from_numpy(w_s))))
    self.w_u = torch.nn.Parameter(torch.from_numpy(w_u))
    self.weight = None
    self.invweight = None
    return
Example 4: lp_pool2d
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also uses helpers from torch.nn.functional (utils._pair, avg_pool2d, relu)
def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
    # type: (Tensor, float, int, Optional[BroadcastingList2[int]], bool) -> Tensor
    r"""Applies a 2D power-average pooling over an input signal composed of
    several input planes. If the sum of all inputs to the power of `p` is
    zero, the gradient is set to zero as well.

    See :class:`~torch.nn.LPPool2d` for details.
    """
    kw, kh = utils._pair(kernel_size)
    if stride is not None:
        stride = torch.jit._unwrap_optional(stride)
        out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
    else:
        out = avg_pool2d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)
    return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1. / norm_type)
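A usage sketch via the public API that this snippet mirrors (torch.nn.functional.lp_pool2d); shapes below are illustrative:

import torch
import torch.nn.functional as F

x = torch.rand(1, 3, 8, 8)
out = F.lp_pool2d(x, norm_type=2, kernel_size=2)   # 2x2 power-average (here L2) pooling
print(out.shape)                                   # torch.Size([1, 3, 4, 4])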
Example 5: BPDA_attack
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also requires: import torch.nn as nn, plus repo-local helpers
# (label2tensor, get_cw_grad, clip_bound, l2_norm, identity_transform)
def BPDA_attack(image, target, model, step_size=1., iterations=10, linf=False, transform_func=identity_transform):
    target = label2tensor(target)
    adv = image.detach().numpy()
    adv = torch.from_numpy(adv)
    adv.requires_grad_()
    for _ in range(iterations):
        adv_def = transform_func(adv)
        adv_def.requires_grad_()
        l2 = nn.MSELoss()
        # reconstruction loss against a zero tensor (MSELoss requires tensor arguments)
        loss = l2(adv_def, torch.zeros_like(adv_def))
        loss.backward()
        g = get_cw_grad(adv_def, image, target, model)
        if linf:
            g = torch.sign(g)
        print(g.numpy().sum())
        adv = adv.detach().numpy() - step_size * g.numpy()
        adv = clip_bound(adv)
        adv = torch.from_numpy(adv)
        adv.requires_grad_()
        if linf:
            print('label', torch.argmax(model(adv)), 'linf', torch.max(torch.abs(adv - image)).detach().numpy())
        else:
            print('label', torch.argmax(model(adv)), 'l2', l2_norm(adv, image))
    return adv.detach().numpy()
Example 6: fgsm
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also requires: import torch.nn.functional as F
def fgsm(model, data, target, eps, cuda=True):
    """Generate an adversarial perturbation using the fast gradient sign method.

    Args:
        model: network under attack
        data: input image to perturb
        target: ground-truth labels for data
        eps: perturbation magnitude (L-infinity budget)
        cuda: move data and target to GPU first
    """
    model.eval()
    if cuda:
        data, target = data.cuda(), target.cuda()
    data.requires_grad = True
    model.zero_grad()
    output = model(data)
    loss = F.cross_entropy(output, target)
    loss.backward(create_graph=False)
    perturbation = eps * torch.sign(data.grad.data)
    x_fgsm = data.data + perturbation
    X_adv = torch.clamp(x_fgsm, torch.min(data.data), torch.max(data.data))
    return X_adv.cpu()
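A minimal usage sketch with a toy classifier (all names below are illustrative, and a recent PyTorch is assumed so that torch.clamp accepts tensor bounds; pass cuda=False when no GPU is available):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))   # toy model, for illustration only
data = torch.rand(4, 1, 28, 28)
target = torch.randint(0, 10, (4,))
x_adv = fgsm(model, data, target, eps=0.1, cuda=False)
print(torch.max(torch.abs(x_adv - data)))   # at most eps (clamping can only shrink it)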
Example 7: _vf_unscale
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
def _vf_unscale(self, scaled_x):
    """Computes the inverse of _vf_scale(x), if vf-rescaling is enabled"""
    if not self.vf_scale_epsilon:
        return scaled_x
    # We need double() otherwise we lose too much precision for low eps
    # values such as 1e-3, due to the eps**2 terms
    scaled_x = scaled_x.double()
    abs_scaled_x = torch.abs(scaled_x)
    eps = self.vf_scale_epsilon
    # TODO: Can this be simplified somehow?
    x = abs_scaled_x / eps - (
        (1 / (2. * (eps ** 2))) *
        torch.sqrt(
            4 * self.vf_scale_epsilon * abs_scaled_x +
            (2. * eps + 1) ** 2)
    ) + \
        (2. * eps + 1) / (2. * (eps ** 2))
    x *= torch.sign(scaled_x)
    # SANITY CHECK to make sure the inverse is working, enable only to
    # test this function
    # assert(torch.all(torch.abs(scaled_x - self._vf_scale(x))<1e-5)), ("_vf_unscale() sanity failed:",(scaled_x, self._vf_scale(x)),(scaled_x == self._vf_scale(x)))
    return x.float()
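For context, a sketch of the forward transform this presumably inverts: the common value-function rescaling h(x) = sign(x)·(sqrt(|x| + 1) − 1) + eps·x. The method name _vf_scale is taken from the commented-out sanity check above, but its exact body is an assumption here:

def _vf_scale(self, x):
    # Squashes value targets; _vf_unscale above is its inverse.
    if not self.vf_scale_epsilon:
        return x
    eps = self.vf_scale_epsilon
    return torch.sign(x) * (torch.sqrt(torch.abs(x) + 1.) - 1.) + eps * x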
Example 8: prune_sign_change
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also requires: import numpy as np
def prune_sign_change(self, reinitialize_unused_to_zero=True, enable_print=False):
    W_flat = self.s_tensor.data.view(-1)
    new_tensor_sign = torch.sign(W_flat)
    mask_flat = self.mask.view(-1)
    mask_indices = torch.nonzero(mask_flat > 0.5).view(-1)
    sign_change_indices = mask_indices[
        ((new_tensor_sign[mask_indices] * self.tensor_sign[mask_indices].to(new_tensor_sign.device)) < -0.5)
        .nonzero().view(-1)]
    mask_flat[sign_change_indices] = 0
    self.reinitialize_unused(reinitialize_unused_to_zero)
    cutoff = sign_change_indices.numel()
    if enable_print:
        print('pruned {} connections'.format(cutoff))
    if self.grown_indices is not None and enable_print:
        overlap = np.intersect1d(sign_change_indices.cpu().numpy(), self.grown_indices.cpu().numpy())
        print('pruned {} ({} %) just grown weights'.format(
            overlap.size,
            overlap.size * 100.0 / self.grown_indices.size(0) if self.grown_indices.size(0) > 0 else 0.0))
    self.tensor_sign = new_tensor_sign
    return sign_change_indices
Example 9: forward
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also requires: import torch.nn.functional as F
def forward(self, input):
    if not self.training:
        return F.linear(input, self.weight, self.bias)
    # Factorised Gaussian noise: f(x) = sign(x) * sqrt(|x|) applied to the
    # per-input and per-output noise vectors.
    torch.randn(self.epsilon_input.size(), out=self.epsilon_input)
    torch.randn(self.epsilon_output.size(), out=self.epsilon_output)
    func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x))
    eps_in = func(self.epsilon_input)
    eps_out = func(self.epsilon_output)
    bias = self.bias
    if bias is not None:
        bias = bias + self.sigma_bias * eps_out.t()
    noise_v = torch.mul(eps_in, eps_out)
    return F.linear(input, self.weight + self.sigma_weight * noise_v, bias)
Example 10: quantizeConvParams
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also assumes module-level helpers Q and C and constants bitsW, bitsG
def quantizeConvParams(self):
    for index in range(self.num_of_params):
        if bitsW == 1:
            n = self.target_modules[index].data[0].nelement()
            s = self.target_modules[index].data.size()
            m = self.target_modules[index].data.norm(1, 3)\
                .sum(2).sum(1).div(n).expand(s)
            m = Q(m, bitsG)
            self.target_modules[index].data.sign()\
                .mul(m, out=self.target_modules[index].data)
        elif bitsW == 2:
            w = self.target_modules[index].data
            n = self.target_modules[index].data[0].nelement()
            s = self.target_modules[index].data.size()
            d = self.target_modules[index].data.norm(1, 3)\
                .sum(2).sum(1).div(n).mul(0.7)
            wt = w
            for col in range(s[0]):
                d_col = d[col, 0, 0, 0]
                wt_neg = w[col, :, :, :].lt(-1.0 * d_col).float().mul(-1)
                wt_pos = w[col, :, :, :].gt(1.0 * d_col).float()
                wt[col, :, :, :] = wt_pos.add(wt_neg)
            wt.mul(1, out=self.target_modules[index].data)
        else:
            self.target_modules[index].data = Q(C(self.target_modules[index].data, bitsW), bitsW)
Example 11: updateQuanGradWeight
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also assumes module-level helpers Q and C and constants bitsW, bitsG
def updateQuanGradWeight(self):
    for index in range(self.num_of_params):
        if bitsW == 1:
            weight = self.target_modules[index].data
            n = weight[0].nelement()
            s = weight.size()
            m = weight.norm(1, 3)\
                .sum(2).sum(1).div(n).expand(s)
            m[weight.lt(-1.0)] = 0
            m[weight.gt(1.0)] = 0
            m = Q(m, bitsG)
            m = m.mul(self.target_modules[index].grad.data)
            m_add = weight.sign().mul(self.target_modules[index].grad.data)
            m_add = m_add.sum(3)\
                .sum(2).sum(1).div(n).expand(s)
            m_add = m_add.mul(weight.sign())
            self.target_modules[index].grad.data = m.add(m_add).mul(1.0 - 1.0 / s[1]).mul(n)
            self.target_modules[index].grad.data = Q(C(self.target_modules[index].grad.data, bitsG), bitsG)
        else:
            self.target_modules[index].grad.data = Q(C(self.target_modules[index].grad.data, bitsG), bitsG)
Example 12: quantizeConvParams
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
# Also assumes module-level helpers Q and C and constants bitsW, bitsG
def quantizeConvParams(self):
    for index in range(self.num_of_params):
        if bitsW == 1:
            n = self.target_modules[index].data[0].nelement()
            s = self.target_modules[index].data.size()
            m = self.target_modules[index].data.norm(1, 3, True)\
                .sum(2, True).sum(1, True).div(n).expand(s)
            m = Q(m, bitsG)
            self.target_modules[index].data = self.target_modules[index].data.sign()\
                .mul(m)
        elif bitsW == 2:
            w = self.target_modules[index].data
            n = self.target_modules[index].data[0].nelement()
            s = self.target_modules[index].data.size()
            d = self.target_modules[index].data.norm(1, 3, True)\
                .sum(2, True).sum(1, True).div(n).mul(0.7)
            wt = w
            for col in range(s[0]):
                d_col = d[col, 0, 0, 0]
                wt_neg = w[col, :, :, :].lt(-1.0 * d_col).float().mul(-1)
                wt_pos = w[col, :, :, :].gt(1.0 * d_col).float()
                wt[col, :, :, :] = wt_pos.add(wt_neg)
            self.target_modules[index].data = wt.mul(1)
        else:
            self.target_modules[index].data = Q(C(self.target_modules[index].data, bitsW), bitsW)
Example 13: exp_deriv_WQR
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
def exp_deriv_WQR(x, kapa, gamma=2, init=0.25/2, size=5):
    res = torch.zeros_like(x)
    res -= kapa * (torch.sign(x) * torch.abs(x - init) + torch.abs(x)) * \
        (x > 0).float() * (x < ((init + init * gamma) / 2)).float()
    res -= kapa * (torch.sign(x) * torch.abs(x + init) + -1 * torch.abs(x)) * \
        (x < 0).float() * (x < ((-init - init * gamma) / 2)).float()
    cur = init
    for _ in range(size - 1):
        previous = cur
        cur *= gamma
        res -= kapa * (torch.sign(x) * torch.abs(x - cur) + torch.abs(x)) * (x > (cur + previous) / 2).float() * (x < (cur + cur * gamma) / 2).float()
        res -= kapa * (torch.sign(x) * torch.abs(x + cur) + torch.abs(x)) * (x < (-cur - previous) / 2).float() * (x > (-cur - cur * gamma) / 2).float()
    return res
Example 14: _quantOpXnor
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
def _quantOpXnor(dim=1):
    class _QuantXNOR(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            mean = torch.mean(input) if dim < 0 else torch.mean(input, dim)
            ctx.save_for_backward(input, mean)
            if dim < 0:
                return torch.sign(input) * mean
            else:
                form_mean = {0: (1, -1), 1: (-1, 1)}[dim]
                return torch.sign(input) * mean.view(form_mean)

        @staticmethod
        def backward(ctx, grad_outputs):
            input, mean = ctx.saved_tensors
            sgn_input = torch.sign(input)
            if dim < 0:
                return sgn_input * torch.mean(grad_outputs * sgn_input) + grad_outputs * mean
            form_mean = {0: (1, -1), 1: (-1, 1)}[dim]
            return sgn_input * torch.mean(grad_outputs * sgn_input, dim, keepdim=True) + grad_outputs * mean.view(form_mean).expand(input.size())

    return _QuantXNOR
Example 15: _quantOpXnor2d
# Required import: import torch [as alias]
# Or: from torch import sign [as alias]
def _quantOpXnor2d(kernel_size, stride=1, padding=1, dilation=1, groups=1, form="NCHW"):
    if form not in ["NHWC", "NCHW"]:
        raise RuntimeError("Unsupported input form")
    if type(kernel_size) != int:
        raise RuntimeError("Only int kernel_size supported (square kernel)")

    class _QuantXNOR2d(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            # Per-pixel scaling factor: channel mean followed by a box filter.
            input_mean_channel = torch.mean(input, 1, keepdim=True)
            kernel = torch.ones(1, 1, kernel_size, kernel_size).to(input.device)
            kernel.data.mul_(1 / (kernel_size ** 2))
            input_mean = torch.nn.functional.conv2d(input_mean_channel, kernel, bias=None, stride=1, padding=1, dilation=1, groups=1)
            input_mean.requires_grad = False
            ctx.save_for_backward(input, input_mean)
            return torch.sign(input) * input_mean

        @staticmethod
        def backward(ctx, grad_outputs):
            raise NotImplementedError("Conv XNor net not implemented !")