This article collects typical usage examples of the Python method torch.lt. If you have been wondering what torch.lt does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the torch module, where this method is defined.
Below are 15 code examples of torch.lt, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
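Before the collected examples, here is a minimal, self-contained illustration of what torch.lt does: it compares two tensors (or a tensor and a scalar) elementwise and returns a boolean mask that is True where the left operand is strictly smaller. This snippet is added purely for orientation and is not one of the collected examples.

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([2.0, 2.0, 2.0])
print(torch.lt(a, b))    # tensor([ True, False, False])
print(torch.lt(a, 2.5))  # tensor([ True,  True, False])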
Example 1: test_train
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def test_train(self):
    self._metric.train()
    calls = [[torch.FloatTensor([0.0]), torch.LongTensor([0])],
             [torch.FloatTensor([0.0, 0.1, 0.2, 0.3]), torch.LongTensor([0, 1, 2, 3])]]
    for i in range(len(self._states)):
        self._metric.process(self._states[i])
    self.assertEqual(2, len(self._metric_function.call_args_list))
    for i in range(len(self._metric_function.call_args_list)):
        self.assertTrue(torch.eq(self._metric_function.call_args_list[i][0][0], calls[i][0]).all())
        self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[i][0][1], -calls[i][1])), 1e-12).all())
    self._metric_function.reset_mock()
    self._metric.process_final({})
    self.assertEqual(self._metric_function.call_count, 1)
    self.assertTrue(torch.eq(self._metric_function.call_args_list[0][0][1], torch.LongTensor([0, 1, 2, 3, 4])).all())
    self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[0][0][0], -torch.FloatTensor([0.0, 0.1, 0.2, 0.3, 0.4]))), 1e-12).all())
Example 2: regenerate_cache
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def regenerate_cache(self):
    """
    Resamples the big matrix and resets the counter of the total
    number of elements in the returned masks.
    """
    low_size = int(self.resolution * self.max_size)
    low_pattern = self.rng.uniform(0, 1, size=(low_size, low_size)) * 255
    low_pattern = torch.from_numpy(low_pattern.astype('float32'))
    pattern = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(self.max_size, Image.BICUBIC),
        transforms.ToTensor(),
    ])(low_pattern[None])[0]
    pattern = torch.lt(pattern, self.density).byte()
    self.pattern = pattern.byte()
    self.points_used = 0
Example 3: test_terner_connect_sto_forward
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def test_terner_connect_sto_forward():
    x = torch.Tensor([1, 0, 0.45, -1, -0.9]).view(1, -1)
    results = list()
    for i in range(1000):
        temp_result = TernaryConnectStochastic.apply(x)
        # The tensor must contain only -1, 0, 1 values
        is_ternary = (torch.lt(torch.abs(temp_result - 1), 1e-8)
                      | torch.lt(torch.abs(temp_result), 1e-8)
                      | torch.lt(torch.abs(temp_result + 1), 1e-8))
        assert torch.all(is_ternary)
        results.append(temp_result)
    result = torch.cat(results, 0)
    result = torch.sum(result, 0) / 1000
    assert equals(
        result,
        torch.Tensor([1, 0, 0.45, -1, -0.9]).view(1, -1),
        5e-2)
Example 4: intersectionAndUnion
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def intersectionAndUnion(batch_data, pred, numClass):
    (imgs, segs, infos) = batch_data
    _, preds = torch.max(pred.data.cpu(), dim=1)
    # compute area of intersection
    intersect = preds.clone()
    intersect[torch.ne(preds, segs)] = -1
    area_intersect = torch.histc(intersect.float(),
                                 bins=numClass,
                                 min=0,
                                 max=numClass - 1)
    # compute area of union
    preds[torch.lt(segs, 0)] = -1
    area_pred = torch.histc(preds.float(),
                            bins=numClass,
                            min=0,
                            max=numClass - 1)
    area_lab = torch.histc(segs.float(),
                           bins=numClass,
                           min=0,
                           max=numClass - 1)
    area_union = area_pred + area_lab - area_intersect
    return area_intersect, area_union
Example 5: _jit_linear_cg_updates
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def _jit_linear_cg_updates(
    result, alpha, residual_inner_prod, eps, beta, residual, precond_residual, mul_storage, is_zero, curr_conjugate_vec
):
    # Update result
    # result_{k} = result_{k-1} + alpha_{k} p_vec_{k-1}
    result = torch.addcmul(result, alpha, curr_conjugate_vec, out=result)
    # beta_{k} = (precon_residual{k}^T r_vec_{k}) / (precon_residual{k-1}^T r_vec_{k-1})
    beta.resize_as_(residual_inner_prod).copy_(residual_inner_prod)
    torch.mul(residual, precond_residual, out=mul_storage)
    torch.sum(mul_storage, -2, keepdim=True, out=residual_inner_prod)
    # Do a safe division here: entries where beta < eps are treated as zero
    torch.lt(beta, eps, out=is_zero)
    beta.masked_fill_(is_zero, 1)
    torch.div(residual_inner_prod, beta, out=beta)
    beta.masked_fill_(is_zero, 0)
    # Update curr_conjugate_vec
    # curr_conjugate_vec_{k} = precon_residual{k} + beta_{k} curr_conjugate_vec_{k-1}
    curr_conjugate_vec.mul_(beta).add_(precond_residual)
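The "safe division" in the example above is a general pattern: torch.lt flags near-zero denominators, they are temporarily replaced by 1 so the division never produces inf or NaN, and the corresponding quotients are zeroed afterwards. A minimal standalone sketch of the same idea follows; the function name and tensors are illustrative and not taken from the original source.

import torch

def safe_div(numerator, denominator, eps=1e-10):
    # Flag entries whose denominator is (near) zero.
    is_zero = torch.lt(denominator.abs(), eps)
    # Replace them with 1 so the division is well defined ...
    safe_denominator = denominator.masked_fill(is_zero, 1.0)
    out = numerator / safe_denominator
    # ... then zero out the corresponding quotients.
    return out.masked_fill(is_zero, 0.0)

print(safe_div(torch.tensor([1.0, 2.0, 3.0]), torch.tensor([2.0, 0.0, 4.0])))
# tensor([0.5000, 0.0000, 0.7500])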
Example 6: check_monitor_top_k
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def check_monitor_top_k(self, current):
    less_than_k_models = len(self.best_k_models) < self.save_top_k
    if less_than_k_models:
        return True
    if not isinstance(current, torch.Tensor):
        rank_zero_warn(
            f'{current} is supposed to be a `torch.Tensor`. Saving checkpoint may not work correctly.'
            f' HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning
        )
        current = torch.tensor(current)
    monitor_op = {
        "min": torch.lt,
        "max": torch.gt,
    }[self.mode]
    return monitor_op(current, self.best_k_models[self.kth_best_model_path])
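Selecting torch.lt or torch.gt from a dict is a compact way of making the "is this metric better?" test configurable. A minimal sketch of the same idea outside of PyTorch Lightning; the function and argument names here are illustrative only.

import torch

def is_improvement(current, best, mode="min"):
    # torch.lt for metrics that should decrease (e.g. loss),
    # torch.gt for metrics that should increase (e.g. accuracy).
    monitor_op = {"min": torch.lt, "max": torch.gt}[mode]
    return bool(monitor_op(torch.tensor(current), torch.tensor(best)))

print(is_improvement(0.31, 0.35, mode="min"))  # True: the loss went down
print(is_improvement(0.81, 0.85, mode="max"))  # False: accuracy did not improve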
Example 7: reward_ddpg_A
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def reward_ddpg_A(solution, use_cuda):
    """
    Count the number of consecutively correctly sorted entries for each sample
    in the minibatch, starting from the beginning.
    It is very hard to randomly find enough non-zero samples - the reward is very sparse!
    solution is a FloatTensor of dim [batch, n]
    """
    (batch_size, n, m) = solution.size()
    n_correctly_sorted = Variable(torch.zeros(batch_size, 1), requires_grad=False)
    mask = Variable(torch.ones(batch_size, 1).byte(), requires_grad=False)
    if use_cuda:
        n_correctly_sorted = n_correctly_sorted.cuda()
        mask = mask.cuda()
    for i in range(1, m):
        res = torch.lt(solution[:, :, i-1], solution[:, :, i])
        mask.data &= res.data
        n_correctly_sorted[mask] += 1
    return torch.div(n_correctly_sorted, m - 1)
Example 8: reward_ddpg_B
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def reward_ddpg_B(solution, use_cuda):
    """
    Count the number of (non-consecutively) correctly sorted entries, starting from the beginning.
    Tends to converge to [0, 2, 4, 6, 8, 1, 3, 5, 7, 9].
    solution is a FloatTensor of dim [batch, n]
    """
    (batch_size, n, m) = solution.size()
    n_correctly_sorted = Variable(torch.zeros(batch_size, 1), requires_grad=False)
    if use_cuda:
        n_correctly_sorted = n_correctly_sorted.cuda()
    for i in range(1, m):
        res = torch.lt(solution[:, :, i-1], solution[:, :, i])
        n_correctly_sorted += res.float()
    return torch.div(n_correctly_sorted, m - 1)
Example 9: get_morph
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def get_morph(batch):
    # Not very nice, but we do not have access to the value coming from the opt.gpuid command line parameter here.
    use_cuda = batch.src[0].is_cuda
    # morph_index = batch.morph.data.transpose(0, 1)  # [seqLen x batch_size] ==> [batch_size x seqLen]
    # morph_voc = batch.dataset.fields['morph'].vocab.stoi
    morph_index = batch.morph.view((batch.src[0].data.size()[0], 6, batch.src[0].data.size()[1]))
    morph_index = morph_index.permute(2, 0, 1).contiguous()
    # morph_index = torch.LongTensor(morph_index)
    morph_mask = torch.lt(torch.eq(morph_index, 1), 1).float()
    # morph_index = autograd.Variable(morph_index)
    # morph_mask = autograd.Variable(torch.FloatTensor(morph_mask), requires_grad=False)
    if use_cuda:
        morph_index = morph_index.cuda()
        morph_mask = morph_mask.cuda()
    return morph_index, morph_mask
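The double comparison torch.lt(torch.eq(morph_index, 1), 1) in the example above is simply a logical NOT over the padding mask: the result is 1 wherever the index differs from the padding id 1. A small sketch showing the equivalence; the tensor values are invented for illustration.

import torch

morph_index = torch.tensor([[3, 1, 5], [1, 2, 1]])
mask_a = torch.lt(torch.eq(morph_index, 1), 1).float()
mask_b = torch.ne(morph_index, 1).float()
print(torch.equal(mask_a, mask_b))  # True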
Example 10: wrapper_gmask
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def wrapper_gmask(opt):
    # batchsize should be 1 for mask_global
    mask_global = torch.ByteTensor(1, 1, opt.fineSize, opt.fineSize)
    res = 0.06  # the lower it is, the more continuous the output will be. 0.01 is too small and 0.1 is too large
    density = 0.25
    MAX_SIZE = 350
    maxPartition = 30
    low_pattern = torch.rand(1, 1, int(res * MAX_SIZE), int(res * MAX_SIZE)).mul(255)
    pattern = F.interpolate(low_pattern, (MAX_SIZE, MAX_SIZE), mode='bilinear').detach()
    low_pattern = None
    pattern.div_(255)
    pattern = torch.lt(pattern, density).byte()  # 25% 1s and 75% 0s
    pattern = torch.squeeze(pattern).byte()
    gMask_opts = {}
    gMask_opts['pattern'] = pattern
    gMask_opts['MAX_SIZE'] = MAX_SIZE
    gMask_opts['fineSize'] = opt.fineSize
    gMask_opts['maxPartition'] = maxPartition
    gMask_opts['mask_global'] = mask_global
    return create_gMask(gMask_opts)  # create an initial random mask
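As used above, thresholding a uniform pattern in [0, 1] with torch.lt keeps roughly a `density` fraction of pixels as ones. A quick standalone sanity check of that property (not part of the original repository):

import torch

density = 0.25
pattern = torch.rand(1, 1, 350, 350)
mask = torch.lt(pattern, density)
print(mask.float().mean().item())  # roughly 0.25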
Example 11: forward
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def forward(self, words, frequent_tuning=False):
    if frequent_tuning and self.training:
        padding_mask = words.eq(0).long()
        # Fine-tuned embeddings for the N most frequent words
        fine_tune_mask = torch.lt(words, self.threshold_index) * padding_mask.eq(0)  # < threshold_index
        fine_tune_words = words * fine_tune_mask.long()
        fine_tune_embedded = self.fine_tune_word_embedding(fine_tune_words)
        fine_tune_embedded = f.masked_zero(fine_tune_embedded, fine_tune_mask)
        # Fixed embeddings for the less frequent words
        fixed_mask = torch.ge(words, self.threshold_index)  # >= threshold_index
        fixed_embedded = self.fixed_word_embedding(words).detach()  # Fixed
        fixed_embedded = f.masked_zero(fixed_embedded, fixed_mask)
        embedded_words = fine_tune_embedded + fixed_embedded
    else:
        embedded_words = self.fixed_word_embedding(words)
    return self.dropout(embedded_words)
Example 12: maxOfTwo
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def maxOfTwo(x, y):
    z = x.clone()
    maskYLarger = torch.lt(x, y)
    z[maskYLarger.detach()] = y[maskYLarger.detach()]
    return z
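maxOfTwo builds the elementwise maximum by masking with torch.lt; in current PyTorch the same result can be obtained without cloning, for example via torch.where or torch.max. A small illustrative sketch, not taken from the original repository:

import torch

x = torch.tensor([1.0, 5.0, 3.0])
y = torch.tensor([4.0, 2.0, 3.0])

# torch.where with a torch.lt mask ...
z1 = torch.where(torch.lt(x, y), y, x)
# ... is equivalent to the built-in elementwise maximum.
z2 = torch.max(x, y)
print(z1, z2)  # tensor([4., 5., 3.]) tensor([4., 5., 3.])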
Example 13: forward
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def forward(self, estDisp, gtDisp):
    if not torch.is_tensor(gtDisp):
        raise TypeError('ground truth disparity map is expected to be tensor, got {}'.format(type(gtDisp)))
    if not torch.is_tensor(estDisp):
        raise TypeError('estimated disparity map is expected to be tensor, got {}'.format(type(estDisp)))
    assert estDisp.shape == gtDisp.shape

    if gtDisp.dim() == 2:  # single image, H x W
        h, w = gtDisp.size(0), gtDisp.size(1)
        gtDisp = gtDisp.view(1, 1, h, w)
        estDisp = estDisp.view(1, 1, h, w)
    if gtDisp.dim() == 3:  # multi image, B x H x W
        b, h, w = gtDisp.size(0), gtDisp.size(1), gtDisp.size(2)
        gtDisp = gtDisp.view(b, 1, h, w)
        estDisp = estDisp.view(b, 1, h, w)
    if gtDisp.dim() == 4:
        if gtDisp.size(1) == 1:  # multi image, B x 1 x H x W
            self.gtDisp = gtDisp
            self.estDisp = estDisp
        else:
            raise ValueError('2nd dimension size should be 1, got {}'.format(gtDisp.size(1)))

    confidence_gt_label = torch.lt(torch.abs(self.estDisp - self.gtDisp), self.theta).type_as(self.gtDisp)
    return confidence_gt_label
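For intuition, the torch.lt comparison above turns the absolute disparity error into a binary confidence label: 1 where the estimate lies within theta of the ground truth, 0 elsewhere. A tiny worked example with invented values:

import torch

estDisp = torch.tensor([[10.2, 33.0], [7.9, 12.5]])
gtDisp = torch.tensor([[10.0, 30.0], [8.0, 12.0]])
theta = 1.0
confidence = torch.lt(torch.abs(estDisp - gtDisp), theta).type_as(gtDisp)
print(confidence)  # tensor([[1., 0.], [1., 1.]])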
Example 14: getProb
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def getProb(self):
    # |d - d_{gt}| < variance, [BatchSize, maxDisp, Height, Width]
    probability = torch.lt(torch.abs(self.disp_sample - self.gtDisp), self.variance).type_as(self.gtDisp)
    return probability
Example 15: test_validate
# Required module import: import torch [as alias]
# Or alternatively: from torch import lt [as alias]
def test_validate(self):
    self._metric.eval()
    for i in range(len(self._states)):
        self._metric.process(self._states[i])
    self._metric_function.assert_not_called()
    self._metric.process_final_validate({})
    self.assertEqual(self._metric_function.call_count, 1)
    self.assertTrue(torch.eq(self._metric_function.call_args_list[0][0][1], torch.LongTensor([0, 1, 2, 3, 4])).all())
    self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[0][0][0], -torch.FloatTensor([0.0, 0.1, 0.2, 0.3, 0.4]))), 1e-12).all())