This article collects typical usage examples of the torch.add function in Python. If you have been wondering how exactly torch.add is used, what it is for, or what real calls to it look like, the curated code examples below may help.
Fifteen code examples of the add function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
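Before the collected examples, here is a minimal, self-contained sketch of the common calling patterns of torch.add; the tensors and values below are made up purely for illustration:

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([10.0, 20.0, 30.0])

# element-wise addition: tensor([11., 22., 33.])
c = torch.add(a, b)

# scaled addition, d = a + 0.5 * b: tensor([ 6., 12., 18.])
d = torch.add(a, b, alpha=0.5)

# scalars and broadcastable shapes work too: tensor([2., 3., 4.])
e = torch.add(a, 1)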
Example 1: __call__
def __call__(self, image_batch, theta_aff, theta_aff_tps, use_cuda=True):
    sampling_grid_aff = self.affTnf(image_batch=None,
                                    theta_batch=theta_aff.view(-1, 2, 3),
                                    return_sampling_grid=True,
                                    return_warped_image=False)
    sampling_grid_aff_tps = self.tpsTnf(image_batch=None,
                                        theta_batch=theta_aff_tps,
                                        return_sampling_grid=True,
                                        return_warped_image=False)
    if self.padding_crop_factor is not None:
        sampling_grid_aff_tps = sampling_grid_aff_tps * self.padding_crop_factor
    # put a 1e10 value in regions out of bounds of sampling_grid_aff
    in_bound_mask_aff = ((sampling_grid_aff[:, :, :, 0] > -1) * (sampling_grid_aff[:, :, :, 0] < 1) *
                         (sampling_grid_aff[:, :, :, 1] > -1) * (sampling_grid_aff[:, :, :, 1] < 1)).unsqueeze(3)
    in_bound_mask_aff = in_bound_mask_aff.expand_as(sampling_grid_aff)
    sampling_grid_aff = torch.mul(in_bound_mask_aff.float(), sampling_grid_aff)
    sampling_grid_aff = torch.add((in_bound_mask_aff.float() - 1) * (1e10), sampling_grid_aff)
    # compose transformations
    sampling_grid_aff_tps_comp = F.grid_sample(sampling_grid_aff.transpose(2, 3).transpose(1, 2),
                                               sampling_grid_aff_tps).transpose(1, 2).transpose(2, 3)
    # put a 1e10 value in regions out of bounds of sampling_grid_aff_tps_comp
    in_bound_mask_aff_tps = ((sampling_grid_aff_tps[:, :, :, 0] > -1) * (sampling_grid_aff_tps[:, :, :, 0] < 1) *
                             (sampling_grid_aff_tps[:, :, :, 1] > -1) * (sampling_grid_aff_tps[:, :, :, 1] < 1)).unsqueeze(3)
    in_bound_mask_aff_tps = in_bound_mask_aff_tps.expand_as(sampling_grid_aff_tps_comp)
    sampling_grid_aff_tps_comp = torch.mul(in_bound_mask_aff_tps.float(), sampling_grid_aff_tps_comp)
    sampling_grid_aff_tps_comp = torch.add((in_bound_mask_aff_tps.float() - 1) * (1e10), sampling_grid_aff_tps_comp)
    # sample the transformed image
    warped_image_batch = F.grid_sample(image_batch, sampling_grid_aff_tps_comp)
    return warped_image_batch
Example 2: forward
def forward(self, img, att_size=14):
    x0 = self.conv(img)
    x = self.pool_mil(x0)
    x = x.squeeze(2).squeeze(2)
    x = self.l1(x)
    x1 = torch.add(torch.mul(x.view(x.size(0), 1000, -1), -1), 1)
    cumprod = torch.cumprod(x1, 2)
    out = torch.max(x, torch.add(torch.mul(cumprod[:, :, -1], -1), 1))
    return out
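A side note on the pattern used here and in Example 3 below: torch.add(torch.mul(t, -1), 1) is just an explicit spelling of 1 - t, and combined with torch.cumprod it implements a noisy-OR style multiple-instance pooling. A minimal sketch of that reading, with made-up per-instance probabilities:

import torch

p = torch.tensor([[0.2, 0.9, 0.5]])             # made-up per-instance probabilities

one_minus_p = torch.add(torch.mul(p, -1), 1)     # same as 1 - p
assert torch.allclose(one_minus_p, 1 - p)

# noisy-OR pooling: 1 - prod(1 - p_i), the quantity both forward methods take an element-wise max with
pooled = torch.add(torch.mul(torch.cumprod(one_minus_p, dim=-1)[..., -1], -1), 1)
assert torch.allclose(pooled, 1 - (1 - p).prod(dim=-1))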
Example 3: forward
def forward(self, x):
    x0 = self.conv.forward(x.float())
    x = self.pool_mil(x0)
    x = x.squeeze(2).squeeze(2)
    x1 = torch.add(torch.mul(x0.view(x.size(0), 1000, -1), -1), 1)
    cumprod = torch.cumprod(x1, 2)
    out = torch.max(x, torch.add(torch.mul(cumprod[:, :, -1], -1), 1))
    # out = F.softmax(out)
    return out
Example 4: match
def match(self, passage_encoders, question_encoders, wq_matrix, wp_matrix, fw=True):
    '''
    passage_encoders  (pn_steps, batch, hidden_size)
    question_encoders (qn_steps, batch, hidden_size)
    wq_matrix         (qn_steps, batch, hidden_size)
    wp_matrix         (pn_steps, batch, hidden_size)
    '''
    if fw:
        match_lstm = self.fw_match_lstm
        start = 0
        end = passage_encoders.size(0)
        stride = 1
    else:
        match_lstm = self.bw_match_lstm
        start = passage_encoders.size(0) - 1
        end = -1
        stride = -1
    hx = Variable(torch.zeros(passage_encoders.size(1), self.hidden_size)).cuda()
    cx = Variable(torch.zeros(passage_encoders.size(1), self.hidden_size)).cuda()
    match_encoders = [0 for i in range(passage_encoders.size(0))]
    for i in range(start, end, stride):
        wphp = wp_matrix[i]
        wrhr = self.whr_net(hx)
        _sum = torch.add(wphp, wrhr)  # batch, hidden_size
        _sum = _sum.expand(wq_matrix.size(0), wq_matrix.size(1), self.hidden_size)  # qn_steps, batch, hidden_size
        g = self.tanh(torch.add(wq_matrix, _sum))  # qn_steps, batch, hidden_size
        g = torch.transpose(g, 0, 1)  # batch, qn_steps, hidden_size
        wg = self.w_net(g)  # batch, qn_steps, 1
        wg = wg.squeeze(-1)  # batch, qn_steps
        alpha = wg  # batch, qn_steps
        alpha = self.softmax(alpha).view(alpha.size(0), 1, alpha.size(1))  # batch, 1, qn_steps
        attentionv = torch.bmm(alpha, question_encoders.transpose(0, 1))  # batch, 1, hidden_size
        attentionv = attentionv.squeeze(1)  # batch, hidden_size
        inp = torch.cat([passage_encoders[i], attentionv], -1)
        hx, cx = match_lstm(inp, (hx, cx))  # batch, hidden_size
        match_encoders[i] = hx.view(1, hx.size(0), -1)
    match_encoders = torch.cat(match_encoders)
    return match_encoders
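One detail worth noting in the attention step above: torch.add(wq_matrix, _sum) relies on the (batch, hidden_size) sum having been explicitly expand()-ed to (qn_steps, batch, hidden_size) first. In current PyTorch, torch.add broadcasts over the leading dimension automatically, so the explicit expand and plain broadcasting give the same result; a small sketch with made-up sizes:

import torch

qn_steps, batch, hidden = 4, 2, 8
wq_matrix = torch.randn(qn_steps, batch, hidden)
_sum = torch.randn(batch, hidden)

g_expand = torch.add(wq_matrix, _sum.expand(qn_steps, batch, hidden))  # explicit expand, as above
g_broadcast = torch.add(wq_matrix, _sum)                               # implicit broadcasting
assert torch.allclose(g_expand, g_broadcast)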
Example 5: updateOutput
def updateOutput(self, input):
    self.output.resize_(1)
    assert input[0].dim() == 2
    if self.diff is None:
        self.diff = input[0].new()
    torch.add(input[0], -1, input[1], out=self.diff).abs_()
    self.output.resize_(input[0].size(0))
    self.output.zero_()
    self.output.add_(self.diff.pow_(self.norm).sum(1, keepdim=False))
    self.output.pow_(1. / self.norm)
    return self.output
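Example 5 uses the older three-argument form torch.add(input, value, other, out=...), which computes input + value * other into out. In current PyTorch releases the same thing is written with the alpha keyword; a minimal sketch of the modern equivalent with made-up inputs:

import torch

a = torch.tensor([1.0, 2.0])
b = torch.tensor([3.0, 5.0])
diff = torch.empty_like(a)

# modern equivalent of the legacy torch.add(a, -1, b, out=diff): diff = a + (-1) * b
torch.add(a, b, alpha=-1, out=diff)
diff.abs_()  # -> tensor([2., 3.])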
Example 6: test_train
def test_train(self):
    self._metric.train()
    calls = [[torch.FloatTensor([0.0]), torch.LongTensor([0])],
             [torch.FloatTensor([0.0, 0.1, 0.2, 0.3]), torch.LongTensor([0, 1, 2, 3])]]
    for i in range(len(self._states)):
        self._metric.process(self._states[i])
    self.assertEqual(2, len(self._metric_function.call_args_list))
    for i in range(len(self._metric_function.call_args_list)):
        self.assertTrue(torch.eq(self._metric_function.call_args_list[i][0][0], calls[i][0]).all())
        self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[i][0][1], -calls[i][1])), 1e-12).all())
    self._metric_function.reset_mock()
    self._metric.process_final({})
    self._metric_function.assert_called_once()
    self.assertTrue(torch.eq(self._metric_function.call_args_list[0][0][1], torch.LongTensor([0, 1, 2, 3, 4])).all())
    self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[0][0][0], -torch.FloatTensor([0.0, 0.1, 0.2, 0.3, 0.4]))), 1e-12).all())
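In this test, torch.lt(torch.abs(torch.add(a, -b)), 1e-12) builds an element-wise "equal within a tolerance" check out of torch.add. Current PyTorch ships torch.isclose and torch.allclose for the same purpose; a small sketch of the equivalence with made-up tensors:

import torch

a = torch.tensor([0.0, 0.1, 0.2])
b = torch.tensor([0.0, 0.1, 0.2])

close_mask = torch.lt(torch.abs(torch.add(a, -b)), 1e-12)  # pattern used in the test above
assert bool(close_mask.all())
assert torch.allclose(a, b, atol=1e-12)                    # the more idiomatic spelling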
Example 7: unit_test
def unit_test(args):
    ''' test different (kinds of) predicate detectors '''
    print("Torch uninitialized 5x3 matrix:")
    x_t = torch.Tensor(5, 3)
    print(x_t)
    print("Torch randomly initialized 5x3 matrix X:")
    x_t = torch.rand(5, 3)
    if args.verbose:
        print(x_t)
        print("size:", x_t.size())
    print("Torch randomly initialized 5x3 matrix Y:")
    y_t = torch.rand(5, 3)
    if args.verbose:
        print(y_t)
    print("X + Y:")
    z_t = torch.add(x_t, y_t)
    print(z_t)
    print("slice (X + Y)[:, 1]:")
    print(z_t[:, 1])
    num_wrong = 0
    print("unit_test: num_tests:", 1,
          " num_wrong:", num_wrong, " -- ", "FAIL" if num_wrong else "PASS")
Example 8: test_remote_var_binary_methods
def test_remote_var_binary_methods(self):
    ''' Unit tests for methods mentioned on issue 1385
    https://github.com/OpenMined/PySyft/issues/1385'''
    hook = TorchHook(verbose=False)
    local = hook.local_worker
    remote = VirtualWorker(hook, 1)
    local.add_worker(remote)
    x = Var(torch.FloatTensor([1, 2, 3, 4])).send(remote)
    y = Var(torch.FloatTensor([[1, 2, 3, 4]])).send(remote)
    z = torch.matmul(x, y.t())
    assert torch.equal(z.get(), Var(torch.FloatTensor([30])))
    z = torch.add(x, y)
    assert torch.equal(z.get(), Var(torch.FloatTensor([[2, 4, 6, 8]])))
    x = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
    y = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
    z = torch.cross(x, y, dim=1)
    assert torch.equal(z.get(), Var(torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]])))
    x = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
    y = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
    z = torch.dist(x, y)
    assert torch.equal(z.get(), Var(torch.FloatTensor([0.])))
    x = Var(torch.FloatTensor([1, 2, 3])).send(remote)
    y = Var(torch.FloatTensor([1, 2, 3])).send(remote)
    z = torch.dot(x, y)
    print(torch.equal(z.get(), Var(torch.FloatTensor([14]))))
    z = torch.eq(x, y)
    assert torch.equal(z.get(), Var(torch.ByteTensor([1, 1, 1])))
    z = torch.ge(x, y)
    assert torch.equal(z.get(), Var(torch.ByteTensor([1, 1, 1])))
Example 9: test_local_var_binary_methods
def test_local_var_binary_methods(self):
    ''' Unit tests for methods mentioned on issue 1385
    https://github.com/OpenMined/PySyft/issues/1385'''
    x = torch.FloatTensor([1, 2, 3, 4])
    y = torch.FloatTensor([[1, 2, 3, 4]])
    z = torch.matmul(x, y.t())
    assert torch.equal(z, torch.FloatTensor([30]))
    z = torch.add(x, y)
    assert torch.equal(z, torch.FloatTensor([[2, 4, 6, 8]]))
    x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
    y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
    z = torch.cross(x, y, dim=1)
    assert torch.equal(z, torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]]))
    x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
    y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
    z = torch.dist(x, y)
    t = torch.FloatTensor([z])
    assert torch.equal(t, torch.FloatTensor([0.]))
    x = torch.FloatTensor([1, 2, 3])
    y = torch.FloatTensor([1, 2, 3])
    z = torch.dot(x, y)
    t = torch.FloatTensor([z])
    assert torch.equal(t, torch.FloatTensor([14]))
    z = torch.eq(x, y)
    assert torch.equal(z, torch.ByteTensor([1, 1, 1]))
    z = torch.ge(x, y)
    assert torch.equal(z, torch.ByteTensor([1, 1, 1]))
Example 10: forward
def forward(self, x):
    x = self.embed(x)
    x = self.dropout(x)
    # x = x.view(len(x), x.size(1), -1)
    # x = embed.view(len(x), embed.size(1), -1)
    bilstm_out, self.hidden = self.bilstm(x, self.hidden)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    # bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2))
    bilstm_out = bilstm_out.squeeze(2)
    hidden2lable = self.hidden2label1(F.tanh(bilstm_out))
    gate_layer = F.sigmoid(self.gate_layer(bilstm_out))
    # calculate highway layer values
    gate_hidden_layer = torch.mul(hidden2lable, gate_layer)
    # written as below it would also run, but it does not match the Highway Networks formula
    # gate_input = torch.mul((1 - gate_layer), hidden2lable)
    gate_input = torch.mul((1 - gate_layer), bilstm_out)
    highway_output = torch.add(gate_hidden_layer, gate_input)
    logit = self.logit_layer(highway_output)
    return logit
Author: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 26, Source file: model_HighWay_BiLSTM_1.py
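For reference, the combination computed at the end of Example 10 follows the Highway Networks form y = g * H(x) + (1 - g) * x: the gate multiplies the transformed features and its complement multiplies the untransformed ones. A generic restatement of that combination with hypothetical names, as a sketch only:

import torch

def highway_combine(transformed, carried, gate):
    # gate * transformed + (1 - gate) * carried
    return torch.add(torch.mul(gate, transformed), torch.mul(1 - gate, carried))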
Example 11: fade_in_layer
def fade_in_layer(self, x, alpha):
    for l in self.layers:
        x = l(x)
    x_new = self.next_block(x)
    x = self.toRGB(x)
    x_new = self.new_toRGB(x_new)
    return torch.add(x.mul(1.0 - alpha), x_new.mul(alpha))
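The fade-in above blends the old and new RGB outputs linearly, which is exactly what torch.lerp expresses; a tiny sketch of the equivalence with random stand-in tensors:

import torch

old_rgb = torch.randn(1, 3, 32, 32)
new_rgb = torch.randn(1, 3, 32, 32)
alpha = 0.3

blended = torch.add(old_rgb.mul(1.0 - alpha), new_rgb.mul(alpha))  # as in fade_in_layer
assert torch.allclose(blended, torch.lerp(old_rgb, new_rgb, alpha))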
Example 12: forward
def forward(self, context_ids, doc_ids, target_noise_ids):
    """Sparse computation of scores (unnormalized log probabilities)
    that should be passed to the negative sampling loss.

    Parameters
    ----------
    context_ids: torch.Tensor of size (batch_size, num_context_words)
        Vocabulary indices of context words.
    doc_ids: torch.Tensor of size (batch_size,)
        Document indices of paragraphs.
    target_noise_ids: torch.Tensor of size (batch_size, num_noise_words + 1)
        Vocabulary indices of target and noise words. The first element in
        each row is the ground truth index (i.e. the target); the other
        elements are indices of samples from the noise distribution.

    Returns
    -------
    autograd.Variable of size (batch_size, num_noise_words + 1)
    """
    # combine a paragraph vector with word vectors of
    # input (context) words
    x = torch.add(
        self._D[doc_ids, :], torch.sum(self._W[context_ids, :], dim=1))
    # sparse computation of scores (unnormalized log probabilities)
    # for negative sampling
    return torch.bmm(
        x.unsqueeze(1),
        self._O[:, target_noise_ids].permute(1, 0, 2)).squeeze()
Example 13: forward
def forward(self, title, pg):
    r_gate = F.sigmoid(self.wrx(title) + self.wrh(pg))
    i_gate = F.sigmoid(self.wix(title) + self.wih(pg))
    n_gate = F.tanh(self.wnx(title) + torch.mul(r_gate, self.wnh(pg)))
    result = torch.mul(i_gate, pg) + torch.mul(torch.add(-i_gate, 1), n_gate)
    return result
Example 14: get_loss
def get_loss(self, image_a_pred, image_b_pred, mask_a, mask_b):
    loss = 0
    # get the nonzero indices
    mask_a_indices_flat = torch.nonzero(mask_a)
    mask_b_indices_flat = torch.nonzero(mask_b)
    if len(mask_a_indices_flat) == 0:
        return Variable(torch.cuda.LongTensor([0]), requires_grad=True)
    if len(mask_b_indices_flat) == 0:
        return Variable(torch.cuda.LongTensor([0]), requires_grad=True)
    # take 5000 random pixel samples of the object, using the mask
    num_samples = 10000
    rand_numbers_a = (torch.rand(num_samples) * len(mask_a_indices_flat)).cuda()
    rand_indices_a = Variable(torch.floor(rand_numbers_a).type(torch.cuda.LongTensor), requires_grad=False)
    randomized_mask_a_indices_flat = torch.index_select(mask_a_indices_flat, 0, rand_indices_a).squeeze(1)
    rand_numbers_b = (torch.rand(num_samples) * len(mask_b_indices_flat)).cuda()
    rand_indices_b = Variable(torch.floor(rand_numbers_b).type(torch.cuda.LongTensor), requires_grad=False)
    randomized_mask_b_indices_flat = torch.index_select(mask_b_indices_flat, 0, rand_indices_b).squeeze(1)
    # index into the image and get descriptors
    M_margin = 0.5  # margin parameter
    random_img_a_object_descriptors = torch.index_select(image_a_pred, 1, randomized_mask_a_indices_flat)
    random_img_b_object_descriptors = torch.index_select(image_b_pred, 1, randomized_mask_b_indices_flat)
    pixel_wise_loss = (random_img_a_object_descriptors - random_img_b_object_descriptors).pow(2).sum(dim=2)
    pixel_wise_loss = torch.add(pixel_wise_loss, -2 * M_margin)
    zeros_vec = torch.zeros_like(pixel_wise_loss)
    loss += torch.max(zeros_vec, pixel_wise_loss).sum()
    return loss
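The last two tensor operations in Example 14 implement a hinge: torch.add(pixel_wise_loss, -2 * M_margin) shifts the squared distances by the margin term, and the element-wise max with zeros keeps only the positive part. A compact sketch of the same computation with made-up distances:

import torch

d = torch.tensor([0.2, 1.5, 3.0])  # made-up squared descriptor distances
M_margin = 0.5

hinge = torch.max(torch.zeros_like(d), torch.add(d, -2 * M_margin))  # as above
assert torch.allclose(hinge, torch.clamp(d - 2 * M_margin, min=0))   # equivalent clamp form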
Example 15: forward
def forward(self, x):
    if not self.equalInOut:
        x = self.relu1(self.bn1(x))
    else:
        out = self.relu1(self.bn1(x))
    out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
    if self.droprate > 0:
        out = F.dropout(out, p=self.droprate, training=self.training)
    out = self.conv2(out)
    return torch.add(x if self.equalInOut else self.convShortcut(x), out)
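Finally, the torch.add at the end of Example 15 is the residual (skip) connection of the block; it performs the same element-wise addition as the + operator, as this trivial sketch with random stand-in tensors shows:

import torch

shortcut = torch.randn(1, 16, 8, 8)
out = torch.randn(1, 16, 8, 8)
assert torch.equal(torch.add(shortcut, out), shortcut + out)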