This article collects typical usage examples of the torch.add method in Python. Wondering what torch.add does, or how to use it? The curated code examples below may help. You can also explore further usage examples from the torch module, to which this method belongs.
The following shows 15 code examples of torch.add, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
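Before diving into the examples, here is a minimal, self-contained sketch of the torch.add API itself (values are illustrative):

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([10.0, 20.0, 30.0])

print(torch.add(a, b))           # tensor([11., 22., 33.])  -- same as a + b
print(torch.add(a, b, alpha=2))  # tensor([21., 42., 63.])  -- a + alpha * b
print(torch.add(a, 1))           # tensor([2., 3., 4.])     -- scalars broadcast

Note the alpha keyword scales the second operand before the addition; several examples below instead emulate subtraction by passing a negated second operand.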
Example 1: _transform
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
# This snippet also uses: from torch.autograd import Variable
def _transform(x, mat, maxmin):
    # Split the 6-dof parameters into rotation and translation parts
    rot = mat[:, 0:3]
    trans = mat[:, 3:6]
    x = x.contiguous().view(-1, x.size()[1], x.size()[2] * x.size()[3])

    max_val, min_val = maxmin[:, 0], maxmin[:, 1]
    max_val, min_val = max_val.contiguous().view(-1, 1), min_val.contiguous().view(-1, 1)
    max_val, min_val = max_val.repeat(1, 3), min_val.repeat(1, 3)

    # _trans_rot is a helper defined elsewhere in transform_cnn.py
    trans, rot = _trans_rot(trans, rot)

    x1 = torch.matmul(rot, x)
    min_val1 = torch.cat((min_val, Variable(min_val.data.new(min_val.size()[0], 1).fill_(1))), dim=-1)
    min_val1 = min_val1.unsqueeze(-1)
    min_val1 = torch.matmul(trans, min_val1)

    # Min-max rescaling: (v - min) / (max - min), with subtraction written as torch.add(a, -b)
    min_val = torch.div(torch.add(torch.matmul(rot, min_val1).squeeze(-1), -min_val),
                        torch.add(max_val, -min_val))
    min_val = min_val.mul_(255)

    x = torch.add(x1, min_val.unsqueeze(-1))
    x = x.contiguous().view(-1, 3, 224, 224)
    return x
Developer ID: microsoft, Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition, Lines: 26, Source: transform_cnn.py
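Both torch.add calls inside the torch.div above are subtractions written as additions of a negated tensor; together they implement min-max scaling (v - min) / (max - min), then rescale to the 0-255 range. A minimal sketch of that idiom with illustrative values:

import torch

v = torch.tensor([2.0, 5.0, 8.0])
min_val, max_val = torch.tensor(2.0), torch.tensor(8.0)

# torch.add(a, -b) is a - b; combined with torch.div this is min-max scaling
scaled = torch.div(torch.add(v, -min_val), torch.add(max_val, -min_val))
print(scaled * 255)  # tensor([  0.0000, 127.5000, 255.0000])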
Example 2: get_loss
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
# This snippet also uses: from torch.autograd import Variable
def get_loss(pred, y, criterion, mtr, a=0.5):
    """
    Calculate the loss: cross-entropy plus a weighted orthogonality penalty.
    :param pred: predicted value
    :param y: actual value
    :param criterion: nn.CrossEntropyLoss
    :param mtr: beta matrix
    :param a: weight of the orthogonality penalty
    """
    mtr_t = torch.transpose(mtr, 1, 2)
    aa = torch.bmm(mtr, mtr_t)
    loss_fn = 0
    for i in range(aa.size()[0]):
        # aa[i] - I, written as an addition of the negated identity matrix
        aai = torch.add(aa[i], Variable(torch.neg(torch.eye(mtr.size()[1]))))
        loss_fn += torch.trace(torch.mul(aai, aai).data)
    loss_fn /= aa.size()[0]
    loss = torch.add(criterion(pred, y), Variable(torch.FloatTensor([loss_fn * a])))
    return loss
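The loop above accumulates the orthogonality penalty ||A·Aᵀ − I||²_F one batch element at a time. A vectorized sketch of the same quantity (ortho_penalty and its internals are illustrative, not from the original repository):

import torch

def ortho_penalty(mtr):
    # mtr: (batch, rows, cols); penalize deviation of mtr @ mtr^T from the identity
    aa = torch.bmm(mtr, mtr.transpose(1, 2))
    eye = torch.eye(mtr.size(1), device=mtr.device).expand_as(aa)
    diff = torch.add(aa, -eye)  # aa - I, spelled with torch.add
    return diff.pow(2).sum(dim=(1, 2)).mean()

penalty = ortho_penalty(torch.randn(4, 5, 16))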
Example 3: forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
# This snippet also uses: import torch.nn.functional as F
def forward(self, x):
    if not self.equalInOut:
        # Shared pre-activation feeds both the residual branch and the shortcut
        x = self.relu1(self.bn1(x))
    else:
        out = self.relu1(self.bn1(x))
    if self.equalInOut:
        out = self.relu2(self.bn2(self.conv1(out)))
    else:
        out = self.relu2(self.bn2(self.conv1(x)))
    if self.droprate > 0:
        out = F.dropout(out, p=self.droprate, training=self.training)
    out = self.conv2(out)
    # Residual connection: project the shortcut when channel counts differ
    if not self.equalInOut:
        return torch.add(self.convShortcut(x), out)
    else:
        return torch.add(x, out)
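The torch.add here is the standard residual connection: shortcut + out (Examples 4 and 13 below reuse the same pattern). A minimal self-contained sketch of a residual block (TinyResidual and its sizes are hypothetical):

import torch
import torch.nn as nn

class TinyResidual(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        return torch.add(x, self.conv(x))  # identity shortcut + conv branch

block = TinyResidual(8)
y = block(torch.randn(2, 8, 16, 16))  # output shape preserved: (2, 8, 16, 16)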
Example 4: forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
def forward(self, x):
    residual = x
    if self.norm is not None:
        out = self.bn(self.conv1(x))
    else:
        out = self.conv1(x)
    if self.activation is not None:
        out = self.act(out)
    if self.norm is not None:
        out = self.bn(self.conv2(out))
    else:
        out = self.conv2(out)
    # Skip connection applied before the final activation
    out = torch.add(out, residual)
    if self.activation is not None:
        out = self.act(out)
    return out
Example 5: test_train
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
def test_train(self):
    self._metric.train()
    calls = [[torch.FloatTensor([0.0]), torch.LongTensor([0])],
             [torch.FloatTensor([0.0, 0.1, 0.2, 0.3]), torch.LongTensor([0, 1, 2, 3])]]
    for i in range(len(self._states)):
        self._metric.process(self._states[i])
    self.assertEqual(2, len(self._metric_function.call_args_list))
    for i in range(len(self._metric_function.call_args_list)):
        # Note the trailing parentheses: .all() evaluates the reduction,
        # whereas a bare .all is a method reference and always truthy
        self.assertTrue(torch.eq(self._metric_function.call_args_list[i][0][0], calls[i][0]).all())
        self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[i][0][1], -calls[i][1])), 1e-12).all())
    self._metric_function.reset_mock()
    self._metric.process_final({})
    self.assertEqual(self._metric_function.call_count, 1)
    self.assertTrue(torch.eq(self._metric_function.call_args_list[0][0][1], torch.LongTensor([0, 1, 2, 3, 4])).all())
    self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[0][0][0], -torch.FloatTensor([0.0, 0.1, 0.2, 0.3, 0.4]))), 1e-12).all())
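The assertions above check approximate equality by writing a - b as torch.add(a, -b). A sketch of the same check alongside the modern torch.allclose equivalent (tensors here are illustrative):

import torch

a = torch.FloatTensor([0.0, 0.1, 0.2])
b = torch.FloatTensor([0.0, 0.1, 0.2])

# Pattern from the test: |a - b| < tol, element-wise
close = torch.lt(torch.abs(torch.add(a, -b)), 1e-12).all()
# Modern idiom for the same intent
assert close.item() and torch.allclose(a, b, atol=1e-12)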
Example 6: inner_forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
def inner_forward(self, st_inp, first_dimension_size):
    """Implements the forward pass layers of the algorithm."""
    x = self.bn0(st_inp)   # 2d batch norm over the feature dimension
    x = self.inp_drop(x)   # [b, 1, 2*hidden_size_2, hidden_size_1]
    x = self.conv2d_1(x)   # [b, 32, 2*hidden_size_2-3+1, hidden_size_1-3+1]
    x = self.bn1(x)        # 2d batch normalization across the feature dimension
    x = torch.relu(x)
    x = self.feat_drop(x)
    x = x.view(first_dimension_size, -1)  # flatten => [b, 32*(2*hidden_size_2-3+1)*(hidden_size_1-3+1)]
    x = self.fc(x)         # dense layer => [b, k]
    x = self.hidden_drop(x)
    if self.training:
        x = self.bn2(x)    # batch normalization across the last axis
    x = torch.relu(x)
    x = torch.matmul(x, self.transpose(self.ent_embeddings.weight))  # [b, k] * [k, tot_ent] => [b, tot_ent]
    x = torch.add(x, self.b.weight)  # add a per-entity bias value
    return torch.sigmoid(x)  # sigmoid activation
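The final torch.add(x, self.b.weight) is a broadcast bias addition over the per-entity scores. A shape-only sketch of those last three lines (all sizes and names below are made up for illustration):

import torch

b, k, tot_ent = 4, 8, 100
x = torch.randn(b, k)                     # hidden representation
ent_embeddings = torch.randn(tot_ent, k)  # one embedding per entity
bias = torch.zeros(tot_ent)               # one bias per entity

scores = torch.matmul(x, ent_embeddings.t())  # [b, k] @ [k, tot_ent] => [b, tot_ent]
scores = torch.add(scores, bias)              # bias broadcasts across the batch
probs = torch.sigmoid(scores)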
Example 7: _hook_tensor
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
def _hook_tensor(hook_self):
    """Hooks the function torch.tensor()

    We need to do this separately from hooking the class because internally
    torch does not pick up the change to add the args

    Args:
        hook_self: the hook itself
    """
    if "native_tensor" not in dir(hook_self.torch):
        hook_self.torch.native_tensor = hook_self.torch.tensor

    def new_tensor(*args, owner=None, id=None, register=True, **kwargs):
        current_tensor = hook_self.torch.native_tensor(*args, **kwargs)
        _apply_args(hook_self, current_tensor, owner, id)
        if register:
            current_tensor.owner.register_obj(current_tensor)
        return current_tensor

    hook_self.torch.tensor = new_tensor
Example 8: forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
def forward(self, x):
    if self.filter_size > 0:
        return self.layers(x)  # image, conv, batchnorm, relu
    else:
        # Broadcast add: (10, 3, 1, 32, 32) + (1, 3, 128, 32, 32) --> (10, 3, 128, 32, 32)
        y = torch.add(x.unsqueeze(2), self.noise * self.level)
        if self.debug:
            print_values(x, self.noise, y, self.unique_masks)
        y = y.view(-1, self.in_channels * self.nmasks, self.input_size, self.input_size)
        y = self.layers(y)
        if self.mix_maps:
            y = self.mix_layers(y)
        return y  # image, perturb, (relu?), conv1x1, batchnorm, relu + mix_maps (conv1x1, batchnorm, relu)
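The torch.add at the top of the else branch relies on broadcasting to combine a fixed bank of per-channel noise masks with every sample in the batch. A standalone sketch of just that step, using the sizes from the comment (the noise tensor here is randomly generated for illustration):

import torch

x = torch.randn(10, 3, 32, 32)          # batch of images
noise = torch.randn(1, 3, 128, 32, 32)  # 128 fixed noise masks per channel
level = 0.1

# (10, 3, 1, 32, 32) + (1, 3, 128, 32, 32) --> (10, 3, 128, 32, 32)
y = torch.add(x.unsqueeze(2), noise * level)
y = y.view(-1, 3 * 128, 32, 32)  # fold the masks into the channel dimension
print(y.shape)                   # torch.Size([10, 384, 32, 32])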
Example 9: forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
# This snippet also uses: import torch.nn as nn
def forward(self, x):
    self._reset_state()
    x = self.sub_mean(x)
    # For PyTorch 0.4.0, use self.upsample(x) instead of the interpolate call below
    inter_res = nn.functional.interpolate(x, scale_factor=self.upscale_factor, mode='bilinear',
                                          align_corners=False)
    x = self.conv_in(x)
    x = self.feat_in(x)
    outs = []
    for _ in range(self.num_steps):
        h = self.block(x)
        # Global residual learning: add the bilinearly upsampled input back in
        h = torch.add(inter_res, self.conv_out(self.out(h)))
        h = self.add_mean(h)
        outs.append(h)
    return outs  # the output of every time step
Example 10: forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
def forward(self, input_tensors):
    assert len(input_tensors) == 3
    aspect_i = input_tensors[2]
    sentence = self.word_rep(input_tensors)
    length = sentence.size()[0]
    output, hidden = self.rnn_p(sentence)
    hidden = hidden[0].view(1, -1)
    output = output.view(output.size()[0], -1)
    # Expand the aspect embedding so it can be concatenated with every time step
    aspect_embedding = self.AE(aspect_i)
    aspect_embedding = aspect_embedding.view(1, -1)
    aspect_embedding = aspect_embedding.expand(length, -1)
    # Attention over the RNN outputs, conditioned on the aspect
    # (torch.tanh replaces the deprecated F.tanh)
    M = torch.tanh(torch.cat((self.W_h(output), self.W_v(aspect_embedding)), dim=1))
    weights = self.attn_softmax(self.w(M)).t()
    r = torch.matmul(weights, output)
    # Final sentence representation: tanh(W_p r + W_x h)
    r = torch.tanh(torch.add(self.W_p(r), self.W_x(hidden)))
    decoded = self.decoder_p(r)
    return decoded
Example 11: test_global_avg_pool_module
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
# This snippet also uses: import crypten
def test_global_avg_pool_module(self):
    """
    Tests the global average pool module with fixed 4-d test tensors
    """
    # construct basic input: five scalar-shifted copies of the base tensor,
    # whose shifts (-2..2) average back to the base tensor
    base_tensor = torch.Tensor([[2, 1], [3, 0]])
    all_init = []
    for i in range(-2, 3):
        all_init.append(torch.add(base_tensor, i))
    init_tensor = torch.stack(all_init, dim=2)
    init_tensor = init_tensor.unsqueeze(-1)
    reference = base_tensor.unsqueeze(-1).unsqueeze(-1)

    # create module
    encr_module = crypten.nn.GlobalAveragePool().encrypt()
    self.assertTrue(encr_module.encrypted, "module not encrypted")

    # check correctness for a variety of input sizes
    for i in range(1, 10):
        input = init_tensor.repeat(1, 1, i, i)
        encr_input = crypten.cryptensor(input)
        encr_output = encr_module(encr_input)
        self._check(encr_output, reference, "GlobalAveragePool failed")
Example 12: forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
def forward(self, input, ker_code):
    B, C, H, W = input.size()   # I_LR batch
    B_h, C_h = ker_code.size()  # Batch, Len=10
    # Stretch the kernel code into a spatial kernel map
    ker_code_exp = ker_code.view((B_h, C_h, 1, 1)).expand((B_h, C_h, H, W))
    fea_bef = self.conv3(self.relu_conv2(self.conv2(self.relu_conv1(self.conv1(input)))))
    fea_in = fea_bef
    for i in range(self.num_blocks):
        fea_in = self.__getattr__('SFT-residual' + str(i + 1))(fea_in, ker_code_exp)
    fea_mid = fea_in
    # fea_in = self.sft_branch((fea_in, ker_code_exp))
    # Long skip connection around the SFT-residual blocks
    fea_add = torch.add(fea_mid, fea_bef)
    fea = self.upscale(self.conv_mid(self.sft(fea_add, ker_code_exp)))
    out = self.conv_output(fea)
    return torch.clamp(out, min=self.min, max=self.max)
Example 13: forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
# This snippet also uses: import torch.nn.functional as F
def forward(self, x):
    if not self.equalInOut:
        x = self.relu1(self.bn1(x))
    else:
        out = self.relu1(self.bn1(x))
    out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
    if self.droprate > 0:
        out = F.dropout(out, p=self.droprate, training=self.training)
    out = self.conv2(out)
    # Residual add; project the shortcut when the channel counts differ
    return torch.add(x if self.equalInOut else self.convShortcut(x), out)
Example 14: forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
def forward(self, inputs):
    out1 = inputs
    out2 = self.skip(inputs)
    # Element-wise sum of the identity and skip branches
    return torch.add(out1, out2)
Example 15: forward
# Required imports: import torch [as alias]
# Or: from torch import add [as alias]
# This snippet also uses: import torch.nn.functional as F
def forward(self, x):
    """
    :param x: Float tensor of size ``(batch_size, num_fields, embedding_size)``
    """
    embed_x_abs = torch.abs(x)  # element-wise absolute value of the input
    embed_x_afn = torch.add(embed_x_abs, 1e-7)  # epsilon shift away from exact zero
    # Logarithmic transformation (torch.log1p here, inverted by torch.expm1 below)
    embed_x_log = torch.log1p(embed_x_afn)
    lnn_out = torch.matmul(self.weight, embed_x_log)
    if self.bias is not None:
        lnn_out += self.bias
    lnn_exp = torch.expm1(lnn_out)
    output = F.relu(lnn_exp).contiguous().view(-1, self.lnn_output_dim)
    return output
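Here torch.add(embed_x_abs, 1e-7) shifts the absolute values by a tiny epsilon so the logarithmic transform never operates on an exact zero, and log1p/expm1 form a numerically stable inverse pair. A small numeric sketch of that transform pair (values are illustrative):

import torch

v = torch.tensor([0.0, 0.5, 2.0])
v_safe = torch.add(torch.abs(v), 1e-7)  # epsilon shift away from exact zero
v_log = torch.log1p(v_safe)             # log(1 + v), stable for small v
v_back = torch.expm1(v_log)             # exp(v) - 1 inverts log1p
print(torch.allclose(v_back, v_safe))   # True, up to float error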