This article collects typical usage examples of the torch.mul method in Python. If you have been wondering what torch.mul does, how to call it, or what real-world code that uses it looks like, the curated examples below should help; you can also explore further usage examples from the enclosing torch module.
The following 15 code examples of torch.mul are sorted by popularity.
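Before the examples, a quick standalone refresher (not taken from any of the projects below): torch.mul performs element-wise multiplication and follows the usual broadcasting rules, so a scalar or lower-rank operand is expanded automatically.

import torch

a = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
b = torch.tensor([[10.0, 20.0], [30.0, 40.0]])

print(torch.mul(a, b))    # element-wise product; equivalent to a * b
print(torch.mul(a, 0.5))  # a scalar operand is broadcast
print(torch.mul(a, torch.tensor([1.0, -1.0])))  # a (2,) vector is broadcast over the rows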
Example 1: node_forward
# Required imports: import torch; import torch.nn.functional as F
# Alternatively: from torch import mul
def node_forward(self, inputs, child_c, child_h):
    child_h_sum = torch.sum(child_h, dim=0, keepdim=True)
    iou = self.ioux(inputs) + self.iouh(child_h_sum)
    i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
    i, o, u = F.sigmoid(i), F.sigmoid(o), F.tanh(u)
    # one forget gate per child, sharing the same input projection
    f = F.sigmoid(
        self.fh(child_h) +
        self.fx(inputs).repeat(len(child_h), 1)
    )
    fc = torch.mul(f, child_c)
    c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)
    h = torch.mul(o, F.tanh(c))
    return c, h
Example 2: forward
# Required imports: import torch, numpy as np; from torch.autograd import Variable
# Alternatively: from torch import mul
def forward(self, input1):
    self.batchgrid3d = torch.zeros(torch.Size([input1.size(0)]) + self.grid3d.size())
    for i in range(input1.size(0)):
        self.batchgrid3d[i] = self.grid3d
    self.batchgrid3d = Variable(self.batchgrid3d)  # Variable is pre-0.4 legacy API; plain tensors suffice today
    # project the homogeneous grid through the 3x4 transform packed into input1
    x = torch.sum(torch.mul(self.batchgrid3d, input1[:, :, :, 0:4]), 3)
    y = torch.sum(torch.mul(self.batchgrid3d, input1[:, :, :, 4:8]), 3)
    z = torch.sum(torch.mul(self.batchgrid3d, input1[:, :, :, 8:]), 3)
    # convert to spherical coordinates, each normalized to roughly [-1, 1]
    r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5
    theta = torch.acos(z / r) / (np.pi / 2) - 1
    # plain torch.atan(y/x) would lose the quadrant, so correct the x < 0 half-plane:
    phi = torch.atan(y / (x + 1e-5)) + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
    phi = phi / np.pi
    output = torch.cat([theta, phi], 3)
    return output
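A single-point sanity check of the theta/phi mapping, with the normalization conventions read off the code above:

import torch
import numpy as np

x, y, z = torch.tensor([1.0]), torch.tensor([1.0]), torch.tensor([0.0])
r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5
theta = torch.acos(z / r) / (np.pi / 2) - 1
phi = torch.atan(y / (x + 1e-5)) + np.pi * x.lt(0).float() * (y.ge(0).float() - y.lt(0).float())
print(theta.item(), (phi / np.pi).item())  # ≈ 0.0 (equator) and ≈ 0.25 (45° azimuth)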
Example 3: forward
# Required imports: import itertools, torch
# Alternatively: from torch import mul
def forward(self, inputs):
    if len(inputs.shape) != 3:
        raise ValueError(
            "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (len(inputs.shape)))
    inputs = torch.split(inputs, 1, dim=1)  # one (batch, 1, embed_dim) slice per field
    if self.bilinear_type == "all":
        # a single bilinear projection shared by every field pair
        p = [torch.mul(self.bilinear(v_i), v_j)
             for v_i, v_j in itertools.combinations(inputs, 2)]
    elif self.bilinear_type == "each":
        # one bilinear projection per field, applied to the first element of each pair
        p = [torch.mul(self.bilinear[i](inputs[i]), inputs[j])
             for i, j in itertools.combinations(range(len(inputs)), 2)]
    elif self.bilinear_type == "interaction":
        # one bilinear projection per field pair
        p = [torch.mul(bilinear(v[0]), v[1])
             for v, bilinear in zip(itertools.combinations(inputs, 2), self.bilinear)]
    else:
        raise NotImplementedError
    return torch.cat(p, dim=1)
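This is the bilinear field-interaction pattern used in FiBiNET-style models. A standalone sketch of the "all" branch with assumed sizes (4 fields, embedding dimension 8):

import itertools
import torch
import torch.nn as nn

fields, embed_dim = 4, 8
inputs = torch.randn(2, fields, embed_dim)            # (batch, fields, embed_dim)
bilinear = nn.Linear(embed_dim, embed_dim, bias=False)
split = torch.split(inputs, 1, dim=1)                 # 4 tensors of shape (2, 1, 8)
p = [torch.mul(bilinear(v_i), v_j) for v_i, v_j in itertools.combinations(split, 2)]
print(torch.cat(p, dim=1).shape)  # torch.Size([2, 6, 8]); C(4, 2) = 6 pairs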
Example 4: compute_L_inverse
# Required import: import torch
# Alternatively: from torch import mul
def compute_L_inverse(self, X, Y):
    N = X.size()[0]  # number of control points (along dim 0)
    # construct matrix K of TPS kernel values U(r) = r^2 log(r^2)
    Xmat = X.expand(N, N)
    Ymat = Y.expand(N, N)
    P_dist_squared = torch.pow(Xmat - Xmat.transpose(0, 1), 2) + torch.pow(Ymat - Ymat.transpose(0, 1), 2)
    P_dist_squared[P_dist_squared == 0] = 1  # set the diagonal to 1 so log() yields 0 instead of NaN
    K = torch.mul(P_dist_squared, torch.log(P_dist_squared))
    if self.reg_factor != 0:
        K += torch.eye(K.size(0), K.size(1)) * self.reg_factor
    # construct matrix L from K and the affine part P = [1 | X | Y]
    O = torch.FloatTensor(N, 1).fill_(1)
    Z = torch.FloatTensor(3, 3).fill_(0)
    P = torch.cat((O, X, Y), 1)
    L = torch.cat((torch.cat((K, P), 1), torch.cat((P.transpose(0, 1), Z), 1)), 0)
    Li = torch.inverse(L)
    if self.use_cuda:
        Li = Li.cuda()
    return Li
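A hypothetical standalone run of the same thin-plate-spline construction (random control points, reg_factor omitted) to see the shapes:

import torch

N = 5
X, Y = torch.rand(N, 1), torch.rand(N, 1)
Xmat, Ymat = X.expand(N, N), Y.expand(N, N)
P_dist_squared = (Xmat - Xmat.t())**2 + (Ymat - Ymat.t())**2
P_dist_squared[P_dist_squared == 0] = 1          # U(0) = 0, since 1 * log(1) = 0
K = torch.mul(P_dist_squared, torch.log(P_dist_squared))
P = torch.cat((torch.ones(N, 1), X, Y), 1)
L = torch.cat((torch.cat((K, P), 1),
               torch.cat((P.t(), torch.zeros(3, 3)), 1)), 0)
print(torch.inverse(L).shape)  # torch.Size([8, 8]) for N = 5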
Example 5: generate
# Required imports: import torch; import torch.nn.functional as F
# Alternatively: from torch import mul
def generate(self, target_layer):
    fmaps = self._find(self.fmap_pool, target_layer)
    grads = self._find(self.grad_pool, target_layer)
    weights = F.adaptive_avg_pool2d(grads, 1)  # global-average-pool the gradients into per-channel weights
    gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)
    gcam = F.relu(gcam)
    gcam = F.interpolate(
        gcam, self.image_shape, mode="bilinear", align_corners=False
    )
    # normalize each map to [0, 1]
    B, C, H, W = gcam.shape
    gcam = gcam.view(B, -1)
    gcam -= gcam.min(dim=1, keepdim=True)[0]
    gcam /= gcam.max(dim=1, keepdim=True)[0]
    gcam = gcam.view(B, C, H, W)
    return gcam
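The torch.mul call is the heart of Grad-CAM: each feature-map channel is scaled by its pooled gradient, then the channels are summed. With random stand-ins for the activations and gradients:

import torch
import torch.nn.functional as F

fmaps = torch.randn(1, 64, 7, 7)           # feature maps of the target layer
grads = torch.randn(1, 64, 7, 7)           # gradients w.r.t. those maps
weights = F.adaptive_avg_pool2d(grads, 1)  # (1, 64, 1, 1) channel weights
gcam = F.relu(torch.mul(fmaps, weights).sum(dim=1, keepdim=True))
print(gcam.shape)  # torch.Size([1, 1, 7, 7])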
Example 6: forward
# Required import: import torch
# Alternatively: from torch import mul
def forward(self, x, hc=None):
    if hc is None:
        hc = (self.init_hidden(x), self.init_hidden(x))
    h, c = hc
    gate_x = self.fc_xh(x)  # input-to-hidden projections for all four gates at once
    gate_h = self.fc_hh(h)  # hidden-to-hidden projections
    x_i, x_f, x_c, x_o = gate_x.chunk(self.num_chunks, 1)
    h_i, h_f, h_c, h_o = gate_h.chunk(self.num_chunks, 1)
    inputgate = torch.sigmoid(x_i + h_i)
    forgetgate = torch.sigmoid(x_f + h_f)
    cellgate = torch.tanh(x_c + h_c)
    outputgate = torch.sigmoid(x_o + h_o)
    c_ = torch.mul(forgetgate, c) + torch.mul(inputgate, cellgate)
    h_ = torch.mul(outputgate, torch.tanh(c_))
    return h_, c_
Example 7: _private_mul
# Required import: import torch
# Alternatively: from torch import mul
def _private_mul(self, other, equation: str):
    """Abstractly multiplies two tensors.

    Args:
        self: an AdditiveSharingTensor
        other: another AdditiveSharingTensor
        equation: a string representation of the equation to be computed in
            Einstein summation form
    """
    # check that the operation is either mul or matmul
    assert equation == "mul" or equation == "matmul"
    cmd = getattr(torch, equation)
    assert isinstance(other, AdditiveSharingTensor)
    assert len(self.child) == len(other.child)
    if self.crypto_provider is None:
        raise AttributeError("For multiplication a crypto_provider must be passed.")
    shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field, self.dtype)
    return shares
Example 8: pow
# Required import: import torch
# Alternatively: from torch import mul
def pow(self, power):
    """
    Compute an integer power of a number iteratively using mul
    (square-and-multiply):
    - if the power is odd, fold the current base into the result
    - halve the power and square the base, repeating until the power reaches 0
    """
    base = self
    result = 1
    while power > 0:
        # if power is odd, multiply the result by the current base
        if power % 2 == 1:
            result = result * base
        # halve the power and square the base
        power = power // 2
        base = base * base
    return result
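A quick sanity check of the square-and-multiply loop against Python's built-in **, using plain integers:

for base in (2, 3, 7):
    for power in (0, 1, 5, 10):
        result, b, p = 1, base, power
        while p > 0:
            if p % 2 == 1:
                result = result * b
            p //= 2
            b = b * b
        assert result == base ** power
print("square-and-multiply matches ** for all tested cases")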
Example 9: weighted_cross_entropy_loss
# Required import: import torch
# Alternatively: from torch import mul
def weighted_cross_entropy_loss(prediction, label, output_mask=False):
    criterion = torch.nn.CrossEntropyLoss(reduction="none")  # reduce=False is the deprecated spelling
    label = torch.squeeze(label.long(), dim=0)
    nch = prediction.shape[1]
    label[label >= nch] = 0  # clamp out-of-range labels to background
    cost = criterion(prediction, label)
    # re-weight so positives and negatives contribute in inverse proportion to their frequency
    mask = (label != 0).float()
    num_positive = torch.sum(mask).float()
    num_negative = mask.numel() - num_positive
    mask[mask == 1] = num_negative / (num_positive + num_negative)
    mask[mask == 0] = num_positive / (num_positive + num_negative)
    cost = torch.mul(cost, mask)
    if output_mask:
        return torch.sum(cost), (label != 0)
    else:
        return torch.sum(cost)
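Assuming the function above is in scope, a hypothetical call with random logits and labels (1 image, 5 classes, 8x8 pixels):

import torch

prediction = torch.randn(1, 5, 8, 8)            # per-class logits
label = torch.randint(0, 5, (1, 1, 8, 8))       # the leading dim is squeezed inside
loss = weighted_cross_entropy_loss(prediction, label)
print(loss.item())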
Example 10: compute_L_inverse
# Required import: import torch
# Alternatively: from torch import mul
def compute_L_inverse(self, X, Y):
    # identical to Example 4 except that no regularization term is added to K
    N = X.size()[0]  # number of control points (along dim 0)
    # construct matrix K
    Xmat = X.expand(N, N)
    Ymat = Y.expand(N, N)
    P_dist_squared = torch.pow(Xmat - Xmat.transpose(0, 1), 2) + torch.pow(Ymat - Ymat.transpose(0, 1), 2)
    P_dist_squared[P_dist_squared == 0] = 1  # set the diagonal to 1 so log() yields 0 instead of NaN
    K = torch.mul(P_dist_squared, torch.log(P_dist_squared))
    # construct matrix L
    O = torch.FloatTensor(N, 1).fill_(1)
    Z = torch.FloatTensor(3, 3).fill_(0)
    P = torch.cat((O, X, Y), 1)
    L = torch.cat((torch.cat((K, P), 1), torch.cat((P.transpose(0, 1), Z), 1)), 0)
    Li = torch.inverse(L)
    if self.use_cuda:
        Li = Li.cuda()
    return Li
Example 11: build_bow_rep
# Required import: import torch
# Alternatively: from torch import mul
def build_bow_rep(self, lat_code, n_sample):
    batch_sz = lat_code.size()[0]
    tup = self.estimate_param(latent_code=lat_code)
    mean = tup['mean']
    logvar = tup['logvar']
    kld = self.compute_KLD(tup)
    if n_sample == 1:
        eps = self.sample_cell(batch_size=batch_sz)
        vec = torch.mul(torch.exp(logvar), eps) + mean  # reparameterization: scale the noise, shift by the mean
        return tup, kld, vec
    vecs = []
    for ns in range(n_sample):
        eps = self.sample_cell(batch_size=batch_sz)
        vec = torch.mul(torch.exp(logvar), eps) + mean
        vecs.append(vec)
    vecs = torch.cat(vecs, dim=0)
    return tup, kld, vecs
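In isolation, the sampling line is the reparameterization trick: deterministic mean/variance outputs combined with external noise so gradients can flow through the sample. (Note this code scales by exp(logvar); many VAE implementations use exp(0.5 * logvar), i.e. the standard deviation.) A minimal sketch with placeholder tensors:

import torch

mean = torch.randn(4, 10)   # placeholder encoder outputs
logvar = torch.zeros(4, 10)
eps = torch.randn(4, 10)    # eps ~ N(0, I), as sample_cell presumably provides
vec = torch.mul(torch.exp(logvar), eps) + mean
print(vec.shape)  # torch.Size([4, 10])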
Example 12: forward
# Required imports: import torch; import torch.nn.functional as F
# Alternatively: from torch import mul
def forward(self, lvec, rvec):
    mult_dist = torch.mul(lvec, rvec)             # element-wise product of the two sentence vectors
    abs_dist = torch.abs(torch.add(lvec, -rvec))  # element-wise absolute difference
    vec_dist = torch.cat((mult_dist, abs_dist), 1)
    out = F.sigmoid(self.wh(vec_dist))
    out = F.log_softmax(self.wp(out), dim=1)
    return out
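The two distance features can be exercised standalone; for sentence vectors lvec and rvec, the element-wise product captures per-dimension agreement while the absolute difference captures divergence:

import torch

lvec, rvec = torch.randn(2, 150), torch.randn(2, 150)  # sizes are placeholders
vec_dist = torch.cat((torch.mul(lvec, rvec), torch.abs(lvec - rvec)), 1)
print(vec_dist.shape)  # torch.Size([2, 300])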
Example 13: _signed_sqrt
# Required import: import torch
# Alternatively: from torch import mul
def _signed_sqrt(self, x):
    # square root of the magnitude, with the original sign restored
    x = torch.mul(x.sign(), torch.sqrt(x.abs() + self.thresh))
    return x
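A quick standalone check of the transform (thresh stands in for self.thresh; its value here is an assumption):

import torch

thresh = 1e-8
x = torch.tensor([-4.0, 0.0, 9.0])
print(torch.mul(x.sign(), torch.sqrt(x.abs() + thresh)))  # ≈ tensor([-2., 0., 3.])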
Example 14: forward
# Required import: import torch
# Alternatively: from torch import mul
# Note: the torch.fft()/torch.ifft() function calls below are the legacy real/imaginary-pair
# API removed in PyTorch 1.8; on modern PyTorch use the torch.fft module with complex tensors.
def forward(self, x):
    bsn = 1
    batchSize, dim, h, w = x.data.shape
    x_flat = x.permute(0, 2, 3, 1).contiguous().view(-1, dim)  # (batchSize * h * w, dim)
    y = torch.ones(batchSize, self.output_dim, device=x.device)

    for img in range(batchSize // bsn):
        segLen = bsn * h * w
        upper = batchSize * h * w
        interLarge = torch.arange(img * segLen, min(upper, (img + 1) * segLen), dtype=torch.long)
        interSmall = torch.arange(img * bsn, min(upper, (img + 1) * bsn), dtype=torch.long)
        batch_x = x_flat[interLarge, :]

        # count sketches of the two feature projections, FFT'd to turn circular convolution into products
        sketch1 = batch_x.mm(self.sparseM[0].to(x.device)).unsqueeze(2)
        sketch1 = torch.fft(torch.cat((sketch1, torch.zeros(sketch1.size(), device=x.device)), dim=2), 1)
        sketch2 = batch_x.mm(self.sparseM[1].to(x.device)).unsqueeze(2)
        sketch2 = torch.fft(torch.cat((sketch2, torch.zeros(sketch2.size(), device=x.device)), dim=2), 1)

        # complex multiplication in the frequency domain, then inverse FFT
        Re = sketch1[:, :, 0].mul(sketch2[:, :, 0]) - sketch1[:, :, 1].mul(sketch2[:, :, 1])
        Im = sketch1[:, :, 0].mul(sketch2[:, :, 1]) + sketch1[:, :, 1].mul(sketch2[:, :, 0])
        tmp_y = torch.ifft(torch.cat((Re.unsqueeze(2), Im.unsqueeze(2)), dim=2), 1)[:, :, 0]

        y[interSmall, :] = tmp_y.view(torch.numel(interSmall), h, w, self.output_dim).sum(dim=1).sum(dim=1)

    y = self._signed_sqrt(y)
    y = self._l2norm(y)
    return y
Example 15: forward
# Required imports: import torch; import torch.nn.functional as F
# Alternatively: from torch import mul
def forward(self, x, lengths=None):
    # x: (batch, sentence, embed_dim)
    score = self.non_linear(self.fc(x))
    score = F.softmax(score, dim=-1)
    weights = torch.mul(x, score.unsqueeze(-1).expand_as(x))
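The source snippet is cut off after the weighting step. A hypothetical, self-contained completion (the layer sizes, the squeeze, and the final sum over the sentence dimension are assumptions) that pools the weighted tokens into one vector per sentence:

import torch
import torch.nn as nn
import torch.nn.functional as F

class AttnPool(nn.Module):
    def __init__(self, embed_dim):
        super().__init__()
        self.fc = nn.Linear(embed_dim, 1)  # one attention logit per token
        self.non_linear = nn.Tanh()

    def forward(self, x):
        score = self.non_linear(self.fc(x)).squeeze(-1)  # (batch, sentence)
        score = F.softmax(score, dim=-1)                 # normalize over tokens
        weights = torch.mul(x, score.unsqueeze(-1).expand_as(x))
        return weights.sum(dim=1)                        # (batch, embed_dim)

pool = AttnPool(embed_dim=32)
print(pool(torch.randn(4, 12, 32)).shape)  # torch.Size([4, 32])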