This article collects typical usage examples of the torch.pow function in Python. If you have been wondering what torch.pow does, how to call it, and what real-world uses look like, the curated examples below should help.
Fifteen code examples of the pow function follow, sorted by popularity by default.
Example 1: pairwise_distance
def pairwise_distance(x1, x2, p=2, eps=1e-6):
    r"""
    Computes the batchwise pairwise distance between vectors x1, x2:

    .. math::
        \Vert x \Vert _p := \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}

    Args:
        x1: first input tensor
        x2: second input tensor
        p: the norm degree. Default: 2
        eps (float, optional): Small value to avoid division by zero. Default: 1e-6

    Shape:
        - Input: :math:`(N, D)` where `D = vector dimension`
        - Output: :math:`(N, 1)`

    Example::

        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.pairwise_distance(input1, input2, p=2)
        >>> output.backward()
    """
    assert x1.size() == x2.size(), "Input sizes must be equal."
    assert x1.dim() == 2, "Input must be a 2D matrix."
    diff = torch.abs(x1 - x2)
    out = torch.pow(diff + eps, p).sum(dim=1, keepdim=True)
    return torch.pow(out, 1. / p)
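For reference, modern PyTorch ships this computation as torch.nn.functional.pairwise_distance, and autograd.Variable is no longer needed. A minimal comparison sketch (shapes are illustrative; both versions add eps before taking the norm, so the results agree to within roughly eps per element):

import torch
import torch.nn.functional as F

x1 = torch.randn(4, 8)
x2 = torch.randn(4, 8)
d = pairwise_distance(x1, x2, p=2)  # shape (4, 1): one distance per row pair
# the built-in returns shape (4,)
assert torch.allclose(d.squeeze(1), F.pairwise_distance(x1, x2, p=2, eps=1e-6), atol=1e-4)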
Example 2: forward
def forward(self, x, labels):
    """
    Args:
        x: feature matrix with shape (batch_size, feat_dim).
        labels: ground truth labels with shape (batch_size,).
    """
    batch_size = x.size(0)
    # squared distances to every center: ||x||^2 + ||c||^2 - 2 * x . c
    distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
              torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
    distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)  # older releases: distmat.addmm_(1, -2, x, self.centers.t())

    classes = torch.arange(self.num_classes).long()
    if self.use_gpu:
        classes = classes.cuda()
    labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
    mask = labels.eq(classes.expand(batch_size, self.num_classes))

    dist = []
    for i in range(batch_size):
        value = distmat[i][mask[i]]
        value = value.clamp(min=1e-12, max=1e+12)  # for numerical stability
        dist.append(value)
    dist = torch.cat(dist)
    loss = dist.mean()
    return loss
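The forward above depends on attributes (self.centers, self.num_classes, self.use_gpu) that are set up elsewhere in the class. A minimal sketch of a surrounding module, assuming the standard center-loss formulation (constructor arguments are hypothetical):

import torch
import torch.nn as nn

class CenterLoss(nn.Module):
    def __init__(self, num_classes, feat_dim, use_gpu=False):
        super().__init__()
        self.num_classes = num_classes
        self.use_gpu = use_gpu
        # one learnable center per class
        centers = torch.randn(num_classes, feat_dim)
        self.centers = nn.Parameter(centers.cuda() if use_gpu else centers)

    # forward as shown in Example 2

# with the forward pasted in, usage looks like:
# criterion = CenterLoss(num_classes=10, feat_dim=128)
# loss = criterion(torch.randn(32, 128), torch.randint(0, 10, (32,)))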
Example 3: forward
def forward(self, model_output, target, mask, attr):
    pred_seq, pred_attr = model_output
    # input (from model.forward())    (batch_size, max_seq_len, vocab_size)
    # target (from dataloader->labels) (batch_size, max_seq_len)
    # mask  (from dataloader->masks)   (batch_size, max_seq_len)
    if not self.seen:
        print('> in LanguageModelCriterion.forward(input, target, mask):')
        print('  pred_seq', pred_seq.shape)    # (200, 17, 3562)
        print('  pred_attr', pred_attr.shape)  # (200, 1000)
        print('  target', target.shape)        # (200, 17)
        print('  mask', mask.shape)            # (200, 17)
        print('  attr', attr.shape)            # (200, 1000)
        self.seen = True

    # truncate to the same size
    target = target[:, :pred_seq.size(1)]
    mask = mask[:, :pred_seq.size(1)]
    pred_seq = to_contiguous(pred_seq).view(-1, pred_seq.size(2))
    target = to_contiguous(target).view(-1, 1)
    mask = to_contiguous(mask).view(-1, 1)
    # masked negative log-likelihood over the sequence (pred_seq is expected to hold log-probabilities)
    output = -pred_seq.gather(1, target) * mask
    output = torch.sum(output) / torch.sum(mask)

    # attribute loss: L2 norm of the prediction error, averaged over the batch
    bsize = pred_attr.size(0)
    pred_attr = to_contiguous(pred_attr)
    attr = to_contiguous(attr.float())
    attr_loss = torch.pow(torch.sum(torch.pow((pred_attr - attr), 2)), 0.5) / bsize
    output = output + self.attr_weight * attr_loss
    return output
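The sequence term is a masked negative log-likelihood: gather picks out the log-probability of each target token, and the mask zeroes out padded positions before averaging. A toy sketch of just that step:

import torch

logp = torch.log_softmax(torch.randn(2, 5), dim=1)  # 2 flattened timesteps, vocab of 5
target = torch.tensor([[0], [3]])
mask = torch.tensor([[1.], [0.]])                   # second timestep is padding
nll = torch.sum(-logp.gather(1, target) * mask) / torch.sum(mask)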
Example 4: model
def model():
    mu_latent = pyro.sample("mu_latent", dist.normal,
                            self.mu0, torch.pow(self.tau0, -0.5))
    sigma = torch.pow(self.tau, -0.5)
    pyro.observe("obs0", dist.lognormal, self.data[0], mu_latent, sigma)
    pyro.observe("obs1", dist.lognormal, self.data[1], mu_latent, sigma)
    return mu_latent
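Here torch.pow(tau, -0.5) converts a precision tau into a standard deviation, since sigma = 1 / sqrt(tau). A one-line check:

import torch

tau = torch.tensor([4.0])     # precision
sigma = torch.pow(tau, -0.5)  # 1 / sqrt(4) = 0.5
assert torch.isclose(sigma, torch.tensor([0.5]))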
Example 5: updateOutput
def updateOutput(self, input):
    assert input.dim() == 4
    if self.scale is None:
        self.scale = input.new()

    if input.type() == 'torch.cuda.FloatTensor':
        self._backend.SpatialCrossMapLRN_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.scale,
            self.size,
            self.alpha,
            self.beta,
            self.k
        )
    else:
        batchSize = input.size(0)
        channels = input.size(1)
        inputHeight = input.size(2)
        inputWidth = input.size(3)

        self.output.resize_as_(input)
        self.scale.resize_as_(input)

        # use output storage as temporary buffer
        inputSquare = self.output
        torch.pow(input, 2, out=inputSquare)

        prePad = int((self.size - 1) / 2 + 1)
        prePadCrop = channels if prePad > channels else prePad

        scaleFirst = self.scale.select(1, 0)
        scaleFirst.zero_()

        # compute first feature map normalization
        for c in range(prePadCrop):
            scaleFirst.add_(inputSquare.select(1, c))

        # reuse computations for next feature maps normalization
        # by adding the next feature map and removing the previous
        for c in range(1, channels):
            scalePrevious = self.scale.select(1, c - 1)
            scaleCurrent = self.scale.select(1, c)
            scaleCurrent.copy_(scalePrevious)
            if c < channels - prePad + 1:
                squareNext = inputSquare.select(1, c + prePad - 1)
                scaleCurrent.add_(1, squareNext)
            if c > prePad:
                squarePrevious = inputSquare.select(1, c - prePad)
                scaleCurrent.add_(-1, squarePrevious)

        self.scale.mul_(self.alpha / self.size).add_(self.k)

        torch.pow(self.scale, -self.beta, out=self.output)
        self.output.mul_(input)

    return self.output
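This is the CPU path of the legacy Torch cross-map LRN: scale = k + (alpha / size) * (windowed sum of squares across neighboring channels), then output = input * scale^(-beta). Modern PyTorch exposes the same normalization as a built-in module; a minimal sketch:

import torch
import torch.nn as nn

lrn = nn.LocalResponseNorm(size=5, alpha=1e-4, beta=0.75, k=1.0)
y = lrn(torch.randn(2, 16, 8, 8))  # normalized across neighboring channels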
Example 6: model
def model():
    mu_latent = pyro.sample("mu_latent", dist.normal,
                            self.mu0, torch.pow(self.lam0, -0.5))
    pyro.map_data("aaa", self.data,
                  lambda i, x: pyro.observe(
                      "obs_%d" % i, dist.normal,
                      x, mu_latent, torch.pow(self.lam, -0.5)),
                  batch_size=self.batch_size)
    return mu_latent
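Examples 4 and 6 use the old Pyro 0.x API (dist.normal, pyro.observe, pyro.map_data). A rough modern-Pyro (1.x) equivalent of this model, with the self attributes turned into ordinary arguments, might look like this (a sketch, not the original code):

import torch
import pyro
import pyro.distributions as dist

def model(data, mu0, lam0, lam):
    mu_latent = pyro.sample("mu_latent", dist.Normal(mu0, torch.pow(lam0, -0.5)))
    with pyro.plate("aaa", len(data)):
        pyro.sample("obs", dist.Normal(mu_latent, torch.pow(lam, -0.5)), obs=data)
    return mu_latent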
Example 7: mean_dist
def mean_dist(source_points, warped_points, L_pck):
    # compute mean keypoint distance, normalized by the per-sample reference length L_pck
    batch_size = source_points.size(0)
    dist = torch.zeros((batch_size))
    for i in range(batch_size):
        p_src = source_points[i, :]
        p_wrp = warped_points[i, :]
        # valid points are those not padded with -1 in both coordinates
        N_pts = torch.sum(torch.ne(p_src[0, :], -1) * torch.ne(p_src[1, :], -1))
        point_distance = torch.pow(torch.sum(torch.pow(p_src[:, :N_pts] - p_wrp[:, :N_pts], 2), 0), 0.5)
        L_pck_mat = L_pck[i].expand_as(point_distance)
        dist[i] = torch.mean(torch.div(point_distance, L_pck_mat))
    return dist
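Keypoints are laid out as (batch, 2, num_points), with unused slots padded by -1 in both coordinates. A tiny worked sketch with hand-checkable values:

import torch

src = torch.tensor([[[0., 3., -1.],
                     [0., 4., -1.]]])  # one sample, 2 coords, 3 slots; last slot is padding
wrp = torch.tensor([[[1., 3., -1.],
                     [0., 4., -1.]]])
L_pck = torch.tensor([10.0])
# point distances are 1 and 0; normalized by 10 and averaged -> 0.05
print(mean_dist(src, wrp, L_pck))      # tensor([0.0500])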
Example 8: model
def model(*args, **kwargs):
    next_mean = self.mu0
    for k in range(1, self.N + 1):
        latent_dist = dist.Normal(next_mean, torch.pow(self.lambdas[k - 1], -0.5))
        mu_latent = pyro.sample("mu_latent_%d" % k, latent_dist)
        next_mean = mu_latent

    mu_N = next_mean
    for i, x in enumerate(self.data):
        pyro.observe("obs_%d" % i, dist.normal, x, mu_N,
                     torch.pow(self.lambdas[self.N], -0.5))
    return mu_N
Example 9: model
def model(self, reparameterized, difficulty=0.0):
    next_mean = self.loc0
    for k in range(1, self.N + 1):
        latent_dist = dist.Normal(next_mean, torch.pow(self.lambdas[k - 1], -0.5))
        loc_latent = pyro.sample("loc_latent_%d" % k, latent_dist)
        next_mean = loc_latent

    loc_N = next_mean
    with pyro.iarange("data", self.data.size(0)):
        pyro.sample("obs", dist.Normal(loc_N.expand_as(self.data),
                                       torch.pow(self.lambdas[self.N], -0.5).expand_as(self.data)),
                    obs=self.data)
    return loc_N
Example 10: log_norm
def log_norm(x, mu, std):
    """Compute the log pdf of x under a normal distribution
    with mean mu and standard deviation std."""
    x = x.view(-1)
    mu = mu.view(-1)
    std = std.view(-1)
    return -0.5 * torch.log(2 * np.pi * torch.pow(std, 2)) \
        - 0.5 * (1 / torch.pow(std, 2)) * torch.pow((x - mu), 2)
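The return line is the standard normal log density, log N(x; mu, sigma^2) = -0.5 * log(2 * pi * sigma^2) - (x - mu)^2 / (2 * sigma^2), and agrees with torch.distributions. A quick cross-check (the function itself assumes numpy is imported as np):

import torch

x, mu, std = torch.randn(5), torch.zeros(5), torch.ones(5)
assert torch.allclose(log_norm(x, mu, std),
                      torch.distributions.Normal(mu, std).log_prob(x), atol=1e-6)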
Example 11: pck
def pck(source_points, warped_points, L_pck, alpha=0.1):
    # compute percentage of correct keypoints (PCK) at threshold alpha * L_pck
    batch_size = source_points.size(0)
    pck = torch.zeros((batch_size))
    for i in range(batch_size):
        p_src = source_points[i, :]
        p_wrp = warped_points[i, :]
        N_pts = torch.sum(torch.ne(p_src[0, :], -1) * torch.ne(p_src[1, :], -1))
        point_distance = torch.pow(torch.sum(torch.pow(p_src[:, :N_pts] - p_wrp[:, :N_pts], 2), 0), 0.5)
        L_pck_mat = L_pck[i].expand_as(point_distance)
        correct_points = torch.le(point_distance, L_pck_mat * alpha)
        pck[i] = torch.mean(correct_points.float())
    return pck
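pck mirrors mean_dist from Example 7 but thresholds rather than averages: a keypoint counts as correct when its distance is at most alpha * L_pck. Reusing the same hand-checkable tensors:

import torch

src = torch.tensor([[[0., 3., -1.], [0., 4., -1.]]])
wrp = torch.tensor([[[1., 3., -1.], [0., 4., -1.]]])
L_pck = torch.tensor([10.0])
# distances are 1 and 0; the threshold is 10 * 0.05 = 0.5, so one of two points is correct
print(pck(src, wrp, L_pck, alpha=0.05))  # tensor([0.5000])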
Example 12: euclidean_dist
def euclidean_dist(x, y):
    """
    Args:
        x: pytorch Variable, with shape [m, d]
        y: pytorch Variable, with shape [n, d]
    Returns:
        dist: pytorch Variable, with shape [m, n]
    """
    m, n = x.size(0), y.size(0)
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x . y, for all pairs at once
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    dist.addmm_(x, y.t(), beta=1, alpha=-2)  # older releases: dist.addmm_(1, -2, x, y.t())
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist
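The trick here is the expansion ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 * x_i . y_j, evaluated for all pairs in one matrix multiply. On recent PyTorch the result can be cross-checked against torch.cdist:

import torch

x, y = torch.randn(3, 4), torch.randn(5, 4)
assert torch.allclose(euclidean_dist(x, y), torch.cdist(x, y), atol=1e-5)  # cdist defaults to p=2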
Example 13: backward
def backward(self, grad_output):
    input, output = self.saved_tensors
    grad_input = grad_output.new()

    if self._backend is not None:
        self._backend.SpatialCrossMapLRN_updateGradInput(
            self._backend.library_state,
            input,
            grad_output,
            grad_input,
            self.scale,
            output,
            self.size,
            self.alpha,
            self.beta,
            self.k
        )
    else:
        batch_size = input.size(0)
        channels = input.size(1)
        input_height = input.size(2)
        input_width = input.size(3)

        padded_ratio = input.new(channels + self.size - 1, input_height,
                                 input_width)
        accum_ratio = input.new(input_height, input_width)
        cache_ratio_value = 2 * self.alpha * self.beta / self.size
        inversePrePad = int(self.size - (self.size - 1) / 2)

        grad_input.resize_as_(input)
        torch.pow(self.scale, -self.beta, out=grad_input).mul_(grad_output)

        padded_ratio.zero_()
        padded_ratio_center = padded_ratio.narrow(0, inversePrePad,
                                                  channels)
        for n in range(batch_size):
            torch.mul(grad_output[n], output[n], out=padded_ratio_center)
            padded_ratio_center.div_(self.scale[n])
            torch.sum(
                padded_ratio.narrow(0, 0, self.size - 1), 0, keepdim=False, out=accum_ratio)
            for c in range(channels):
                accum_ratio.add_(padded_ratio[c + self.size - 1])
                grad_input[n][c].addcmul_(-cache_ratio_value, input[n][c],
                                          accum_ratio)
                accum_ratio.add_(-1, padded_ratio[c])

    return grad_input
Example 14: test_save_and_load
def test_save_and_load(self):
    lin = pyro.module("mymodule", self.linear_module)
    pyro.module("mymodule2", self.linear_module2)
    x = torch.randn(1, 3)
    myparam = pyro.param("myparam", torch.tensor(1.234 * torch.ones(1), requires_grad=True))

    cost = torch.sum(torch.pow(lin(x), 2.0)) * torch.pow(myparam, 4.0)
    cost.backward()
    params = list(self.linear_module.parameters()) + [myparam]
    optim = torch.optim.Adam(params, lr=.01)
    myparam_copy_stale = copy(pyro.param("myparam").detach().cpu().numpy())

    optim.step()
    myparam_copy = copy(pyro.param("myparam").detach().cpu().numpy())
    param_store_params = copy(pyro.get_param_store()._params)
    param_store_param_to_name = copy(pyro.get_param_store()._param_to_name)
    assert len(list(param_store_params.keys())) == 5
    assert len(list(param_store_param_to_name.values())) == 5

    pyro.get_param_store().save('paramstore.unittest.out')
    pyro.clear_param_store()
    assert len(list(pyro.get_param_store()._params)) == 0
    assert len(list(pyro.get_param_store()._param_to_name)) == 0
    pyro.get_param_store().load('paramstore.unittest.out')

    def modules_are_equal():
        weights_equal = np.sum(np.fabs(self.linear_module3.weight.detach().cpu().numpy() -
                                       self.linear_module.weight.detach().cpu().numpy())) == 0.0
        bias_equal = np.sum(np.fabs(self.linear_module3.bias.detach().cpu().numpy() -
                                    self.linear_module.bias.detach().cpu().numpy())) == 0.0
        return weights_equal and bias_equal

    assert not modules_are_equal()
    pyro.module("mymodule", self.linear_module3, update_module_params=False)
    assert id(self.linear_module3.weight) != id(pyro.param('mymodule$$$weight'))
    assert not modules_are_equal()
    pyro.module("mymodule", self.linear_module3, update_module_params=True)
    assert id(self.linear_module3.weight) == id(pyro.param('mymodule$$$weight'))
    assert modules_are_equal()

    myparam = pyro.param("myparam")
    store = pyro.get_param_store()
    assert myparam_copy_stale != myparam.detach().cpu().numpy()
    assert myparam_copy == myparam.detach().cpu().numpy()
    assert sorted(param_store_params.keys()) == sorted(store._params.keys())
    assert sorted(param_store_param_to_name.values()) == sorted(store._param_to_name.values())
    assert sorted(store._params.keys()) == sorted(store._param_to_name.values())
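Stripped of the module bookkeeping, the save/load round trip this test exercises is short. A minimal sketch of just the ParamStore part:

import torch
import pyro

pyro.clear_param_store()
pyro.param("myparam", torch.ones(2))
pyro.get_param_store().save('paramstore.unittest.out')
pyro.clear_param_store()
pyro.get_param_store().load('paramstore.unittest.out')
assert torch.equal(pyro.param("myparam"), torch.ones(2))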
Example 15: forward
def forward(self, input, label):
    # --------------------------- cos(theta) & phi(theta) ---------------------------
    if self.device_id is None:
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
    else:
        # split the weight matrix across GPUs and concatenate the partial logits
        x = input
        sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0)
        temp_x = x.cuda(self.device_id[0])
        weight = sub_weights[0].cuda(self.device_id[0])
        cosine = F.linear(F.normalize(temp_x), F.normalize(weight))
        for i in range(1, len(self.device_id)):
            temp_x = x.cuda(self.device_id[i])
            weight = sub_weights[i].cuda(self.device_id[i])
            cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1)

    sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
    phi = cosine * self.cos_m - sine * self.sin_m  # cos(theta + m)
    if self.easy_margin:
        phi = torch.where(cosine > 0, phi, cosine)
    else:
        phi = torch.where(cosine > self.th, phi, cosine - self.mm)

    # --------------------------- convert label to one-hot ---------------------------
    one_hot = torch.zeros(cosine.size())
    if self.device_id is not None:
        one_hot = one_hot.cuda(self.device_id[0])
    one_hot.scatter_(1, label.view(-1, 1).long(), 1)

    # torch.where-style selection: out_i = phi_i if one_hot_i else cosine_i
    output = (one_hot * phi) + ((1.0 - one_hot) * cosine)  # you can use torch.where if your torch.__version__ is 0.4
    output *= self.s
    return output
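This is an ArcFace margin head: sine = sqrt(1 - cosine^2), and phi = cosine * cos(m) - sine * sin(m) is cos(theta + m) by the angle-addition formula. The forward relies on constants normally set in __init__; a sketch following the standard ArcFace formulation (class and argument names are assumptions):

import math
import torch
import torch.nn as nn

class ArcMarginHead(nn.Module):
    def __init__(self, in_features, out_features, s=64.0, m=0.50,
                 easy_margin=False, device_id=None):
        super().__init__()
        self.device_id = device_id
        self.s = s
        self.easy_margin = easy_margin
        self.weight = nn.Parameter(torch.randn(out_features, in_features))
        nn.init.xavier_uniform_(self.weight)
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        self.th = math.cos(math.pi - m)      # past this, theta + m would exceed pi
        self.mm = math.sin(math.pi - m) * m  # fallback penalty used past the threshold

    # forward as shown in Example 15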