

Python torch.lt Method Code Examples

This article collects and summarizes typical usage examples of the torch.lt method in Python. If you are wondering what exactly torch.lt does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the torch module in which this method lives.


The following presents 15 code examples of the torch.lt method, sorted by popularity by default.
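Before the per-project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what torch.lt computes: an element-wise "less than" comparison that returns a mask with the same shape as its inputs. Note that older PyTorch releases return a ByteTensor (uint8) from torch.lt, which is why several examples below call .byte() on the result; recent releases return a BoolTensor.

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([2.0, 2.0, 2.0])

mask = torch.lt(a, b)         # elements where a < b: [True, False, False]
same = a < b                  # the operator form is equivalent to torch.lt(a, b)
count = mask.sum().item()     # how many entries of a are strictly below b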

Example 1: test_train

# Required import: import torch
# Or: from torch import lt
def test_train(self):
        self._metric.train()
        calls = [[torch.FloatTensor([0.0]), torch.LongTensor([0])],
                 [torch.FloatTensor([0.0, 0.1, 0.2, 0.3]), torch.LongTensor([0, 1, 2, 3])]]
        for i in range(len(self._states)):
            self._metric.process(self._states[i])
        self.assertEqual(2, len(self._metric_function.call_args_list))
        for i in range(len(self._metric_function.call_args_list)):
            self.assertTrue(torch.eq(self._metric_function.call_args_list[i][0][0], calls[i][0]).all())
            self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[i][0][1], -calls[i][1])), 1e-12).all())
        self._metric_function.reset_mock()
        self._metric.process_final({})

        self.assertEqual(self._metric_function.call_count, 1)
        self.assertTrue(torch.eq(self._metric_function.call_args_list[0][0][1], torch.LongTensor([0, 1, 2, 3, 4])).all())
        self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[0][0][0], -torch.FloatTensor([0.0, 0.1, 0.2, 0.3, 0.4]))), 1e-12).all())
Developer: pytorchbearer, Project: torchbearer, Lines of code: 18, Source file: test_wrappers.py

Example 2: regenerate_cache

# Required import: import torch
# Or: from torch import lt
def regenerate_cache(self):
        """
        Resamples the big matrix and resets the counter of the total
        number of elements in the returned masks.
        """
        low_size = int(self.resolution * self.max_size)
        low_pattern = self.rng.uniform(0, 1, size=(low_size, low_size)) * 255
        low_pattern = torch.from_numpy(low_pattern.astype('float32'))
        pattern = transforms.Compose([
                        transforms.ToPILImage(),
                        transforms.Resize(self.max_size, Image.BICUBIC),
                        transforms.ToTensor(),
        ])(low_pattern[None])[0]
        pattern = torch.lt(pattern, self.density).byte()
        self.pattern = pattern.byte()
        self.points_used = 0 
Developer: tigvarts, Project: vaeac, Lines of code: 18, Source file: mask_generators.py

Example 3: test_terner_connect_sto_forward

# Required import: import torch
# Or: from torch import lt
def test_terner_connect_sto_forward():
    x = torch.Tensor([1,0,0.45,-1,-0.9]).view(1,-1)

    results = list()
    for i in range(1000):
        temp_result = TernaryConnectStochastic.apply(x)
        # Tensor must have only -1, 0, 1 values
        assert torch.all(torch.lt(torch.abs(temp_result - 1), 1e-8)
                         | torch.lt(torch.abs(temp_result), 1e-8)
                         | torch.lt(torch.abs(temp_result + 1), 1e-8))
        results.append(temp_result) 

    result = torch.cat(results, 0)
    result = torch.sum(result, 0) / 1000
    
    assert equals(
        result,
        torch.Tensor([1,0,0.45,-1,-0.9]).view(1,-1),
        5e-2) 
Developer: Enderdead, Project: Pytorch_Quantize_impls, Lines of code: 19, Source file: function_test.py

Example 4: intersectionAndUnion

# Required import: import torch
# Or: from torch import lt
def intersectionAndUnion(batch_data, pred, numClass):
    (imgs, segs, infos) = batch_data
    _, preds = torch.max(pred.data.cpu(), dim=1)

    # compute area intersection
    intersect = preds.clone()
    intersect[torch.ne(preds, segs)] = -1

    area_intersect = torch.histc(intersect.float(),
                                 bins=numClass,
                                 min=0,
                                 max=numClass - 1)

    # compute area union:
    preds[torch.lt(segs, 0)] = -1
    area_pred = torch.histc(preds.float(),
                            bins=numClass,
                            min=0,
                            max=numClass - 1)
    area_lab = torch.histc(segs.float(),
                           bins=numClass,
                           min=0,
                           max=numClass - 1)
    area_union = area_pred + area_lab - area_intersect
    return area_intersect, area_union 
Developer: soeaver, Project: pytorch-priv, Lines of code: 27, Source file: eval.py

Example 5: _jit_linear_cg_updates

# Required import: import torch
# Or: from torch import lt
def _jit_linear_cg_updates(
    result, alpha, residual_inner_prod, eps, beta, residual, precond_residual, mul_storage, is_zero, curr_conjugate_vec
):
    # # Update result
    # # result_{k} = result_{k-1} + alpha_{k} p_vec_{k-1}
    result = torch.addcmul(result, alpha, curr_conjugate_vec, out=result)

    # beta_{k} = (precon_residual{k}^T r_vec_{k}) / (precon_residual{k-1}^T r_vec_{k-1})
    beta.resize_as_(residual_inner_prod).copy_(residual_inner_prod)
    torch.mul(residual, precond_residual, out=mul_storage)
    torch.sum(mul_storage, -2, keepdim=True, out=residual_inner_prod)

    # Do a safe division here
    torch.lt(beta, eps, out=is_zero)
    beta.masked_fill_(is_zero, 1)
    torch.div(residual_inner_prod, beta, out=beta)
    beta.masked_fill_(is_zero, 0)

    # Update curr_conjugate_vec
    # curr_conjugate_vec_{k} = precon_residual{k} + beta_{k} curr_conjugate_vec_{k-1}
    curr_conjugate_vec.mul_(beta).add_(precond_residual) 
Developer: cornellius-gp, Project: gpytorch, Lines of code: 23, Source file: linear_cg.py
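The snippet above uses torch.lt for a "safe division": denominator entries below eps are flagged with torch.lt, temporarily replaced by 1 so the division cannot produce inf or nan, and the corresponding quotients are then zeroed. A minimal standalone sketch of that pattern, outside the conjugate-gradient context and with hypothetical tensor names:

import torch

num = torch.tensor([1.0, 2.0, 3.0])
den = torch.tensor([4.0, 0.0, 1e-12])
eps = 1e-10

is_zero = torch.lt(den, eps)              # mask of (near-)zero denominators
safe_den = den.masked_fill(is_zero, 1.0)  # avoid dividing by zero
out = num / safe_den
out = out.masked_fill(is_zero, 0.0)       # define the masked quotients as 0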

Example 6: check_monitor_top_k

# Required import: import torch
# Or: from torch import lt
def check_monitor_top_k(self, current):
        less_than_k_models = len(self.best_k_models) < self.save_top_k
        if less_than_k_models:
            return True

        if not isinstance(current, torch.Tensor):
            rank_zero_warn(
                f'{current} is supposed to be a `torch.Tensor`. Saving checkpoint may not work correctly.'
                f' HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning
            )
            current = torch.tensor(current)

        monitor_op = {
            "min": torch.lt,
            "max": torch.gt,
        }[self.mode]

        return monitor_op(current, self.best_k_models[self.kth_best_model_path]) 
Developer: PyTorchLightning, Project: pytorch-lightning, Lines of code: 20, Source file: model_checkpoint.py

Example 7: reward_ddpg_A

# Required import: import torch
# Or: from torch import lt
def reward_ddpg_A(solution, use_cuda):
    """
    Count number of consecutively correctly sorted for each sample in minibatch
    starting from beginning
    
    Very hard to randomly find enough non-zero samples - reward is very sparse!

    solution is FloatTensor of dim [batch,n]
    """
    (batch_size, n, m) = solution.size()
    n_correctly_sorted = Variable(torch.zeros(batch_size, 1), requires_grad=False)
    mask = Variable(torch.ones(batch_size, 1).byte(), requires_grad=False)
    if use_cuda:
        n_correctly_sorted = n_correctly_sorted.cuda()
        mask = mask.cuda()

    for i in range(1, m):
        res = torch.lt(solution[:,:,i-1], solution[:,:,i])
        mask.data &= res.data
        n_correctly_sorted[mask] += 1

    return torch.div(n_correctly_sorted, m - 1) 
Developer: pemami4911, Project: sinkhorn-policy-gradient.pytorch, Lines of code: 24, Source file: sorting_task.py

Example 8: reward_ddpg_B

# Required import: import torch
# Or: from torch import lt
def reward_ddpg_B(solution, use_cuda):
    """
    Count number of (nonconsecutively) correctly sorted starting from beginning
    
    Tends to converge to [0,2,4,6,8,1,3,5,7,9]

    solution is FloatTensor of dim [batch,n]
    """
    (batch_size, n, m) = solution.size()
    n_correctly_sorted = Variable(torch.zeros(batch_size, 1), requires_grad=False)
    
    if use_cuda:
        n_correctly_sorted = n_correctly_sorted.cuda()

    for i in range(1, m):
        res = torch.lt(solution[:,:,i-1], solution[:,:,i])
        n_correctly_sorted += res.float()

    return torch.div(n_correctly_sorted, m - 1) 
Developer: pemami4911, Project: sinkhorn-policy-gradient.pytorch, Lines of code: 21, Source file: sorting_task.py

Example 9: get_morph

# Required import: import torch
# Or: from torch import lt
def get_morph(batch):

    # Not very nice, but we do not have access to the value coming from the opt.gpuid command line parameter here.
    use_cuda = batch.src[0].is_cuda

    # morph_index = batch.morph.data.transpose(0, 1)  # [ seqLen x batch_size ] ==> [ batch_size x seqLen ]

    # morph_voc = batch.dataset.fields['morph'].vocab.stoi

    morph_index = batch.morph.view((batch.src[0].data.size()[0], 6, batch.src[0].data.size()[1]))
    morph_index = morph_index.permute(2, 0, 1).contiguous()

    # morph_index = torch.LongTensor(morph_index)
    morph_mask = torch.lt(torch.eq(morph_index, 1), 1).float()
    # morph_index = autograd.Variable(morph_index)
    # morph_mask = autograd.Variable(torch.FloatTensor(morph_mask), requires_grad=False)
    if use_cuda:
        morph_index = morph_index.cuda()
        morph_mask = morph_mask.cuda()

    return morph_index, morph_mask 
Developer: diegma, Project: graph-2-text, Lines of code: 25, Source file: IO.py

Example 10: wrapper_gmask

# Required import: import torch
# Or: from torch import lt
def wrapper_gmask(opt):
    # batchsize should be 1 for mask_global
    mask_global = torch.ByteTensor(1, 1, \
                                        opt.fineSize, opt.fineSize)

    res = 0.06  # the lower it is, the more continuous the output will be. 0.01 is too small and 0.1 is too large
    density = 0.25
    MAX_SIZE = 350
    maxPartition = 30
    low_pattern = torch.rand(1, 1, int(res * MAX_SIZE), int(res * MAX_SIZE)).mul(255)
    pattern = F.interpolate(low_pattern, (MAX_SIZE, MAX_SIZE), mode='bilinear').detach()
    low_pattern = None
    pattern.div_(255)
    pattern = torch.lt(pattern, density).byte()  # 25% 1s and 75% 0s
    pattern = torch.squeeze(pattern).byte()

    gMask_opts = {}
    gMask_opts['pattern'] = pattern
    gMask_opts['MAX_SIZE'] = MAX_SIZE
    gMask_opts['fineSize'] = opt.fineSize
    gMask_opts['maxPartition'] = maxPartition
    gMask_opts['mask_global'] = mask_global
    return create_gMask(gMask_opts)  # create an initial random mask. 
Developer: Zhaoyi-Yan, Project: Shift-Net_pytorch, Lines of code: 25, Source file: util.py

Example 11: forward

# Required import: import torch
# Or: from torch import lt
def forward(self, words, frequent_tuning=False):
        if frequent_tuning and self.training:

            padding_mask = words.eq(0).long()

            # Fine-tuning - N the most frequent
            fine_tune_mask = torch.lt(words, self.threshold_index) * padding_mask.eq(
                0
            )  # < threshold_index
            fine_tune_words = words * fine_tune_mask.long()

            fine_tune_embedded = self.fine_tune_word_embedding(fine_tune_words)
            fine_tune_embedded = f.masked_zero(fine_tune_embedded, fine_tune_mask)

            # Fixed - under N frequent
            fixed_mask = torch.ge(words, self.threshold_index)  # >= threshold_index

            fixed_embedeed = self.fixed_word_embedding(words).detach()  # Fixed
            fixed_embedeed = f.masked_zero(fixed_embedeed, fixed_mask)

            embedded_words = fine_tune_embedded + fixed_embedeed
        else:
            embedded_words = self.fixed_word_embedding(words)

        return self.dropout(embedded_words) 
Developer: naver, Project: claf, Lines of code: 27, Source file: frequent_word_embedding.py

Example 12: maxOfTwo

# Required import: import torch
# Or: from torch import lt
def maxOfTwo(x, y):
    z = x.clone()
    maskYLarger = torch.lt(x, y)
    z[maskYLarger.detach()] = y[maskYLarger.detach()]
    return z 
Developer: JunjH, Project: Visualizing-CNNs-for-monocular-depth-estimation, Lines of code: 7, Source file: util.py
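As an aside (not part of the original project): the same element-wise maximum can be obtained directly with the built-in torch.max(x, y). The values are identical, though gradient routing at exact ties may differ slightly from the masked-assignment form above.

import torch

x = torch.tensor([1.0, 5.0, 3.0])
y = torch.tensor([2.0, 4.0, 3.0])
z = torch.max(x, y)   # tensor([2., 5., 3.]), same values as maxOfTwo(x, y)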

Example 13: forward

# Required import: import torch
# Or: from torch import lt
def forward(self, estDisp, gtDisp):

        if not torch.is_tensor(gtDisp):
            raise TypeError('ground truth disparity map is expected to be tensor, got {}'.format(type(gtDisp)))
        if not torch.is_tensor(estDisp):
            raise TypeError('estimated disparity map is expected to be tensor, got {}'.format(type(estDisp)))

        assert estDisp.shape == gtDisp.shape

        if gtDisp.dim() == 2:  # single image H x W
            h, w = gtDisp.size(0), gtDisp.size(1)
            gtDisp = gtDisp.view(1, 1, h, w)
            estDisp = estDisp.view(1, 1, h, w)

        if gtDisp.dim() == 3:  # multi image B x H x W
            b, h, w = gtDisp.size(0), gtDisp.size(1), gtDisp.size(2)
            gtDisp = gtDisp.view(b, 1, h, w)
            estDisp = estDisp.view(b, 1, h, w)

        if gtDisp.dim() == 4:
            if gtDisp.size(1) == 1:  # multi image B x 1 x H x W
                self.gtDisp = gtDisp
                self.estDisp = estDisp
            else:
                raise ValueError('2nd dimension size should be 1, got {}'.format(gtDisp.size(1)))

        confidence_gt_label = torch.lt(torch.abs(self.estDisp - self.gtDisp), self.theta).type_as(self.gtDisp)

        return confidence_gt_label 
Developer: DeepMotionAIResearch, Project: DenseMatchingBenchmark, Lines of code: 31, Source file: gen_conf.py

Example 14: getProb

# Required import: import torch
# Or: from torch import lt
def getProb(self):
        # |d - d{gt}| < variance, [BatchSize, maxDisp, Height, Width]
        probability = torch.lt(torch.abs(self.disp_sample - self.gtDisp), self.variance).type_as(self.gtDisp)

        return probability 
Developer: DeepMotionAIResearch, Project: DenseMatchingBenchmark, Lines of code: 7, Source file: disp2prob.py

Example 15: test_validate

# Required import: import torch
# Or: from torch import lt
def test_validate(self):
        self._metric.eval()
        for i in range(len(self._states)):
            self._metric.process(self._states[i])
        self._metric_function.assert_not_called()
        self._metric.process_final_validate({})

        self.assertEqual(self._metric_function.call_count, 1)
        self.assertTrue(torch.eq(self._metric_function.call_args_list[0][0][1], torch.LongTensor([0, 1, 2, 3, 4])).all())
        self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[0][0][0], -torch.FloatTensor([0.0, 0.1, 0.2, 0.3, 0.4]))), 1e-12).all())
Developer: pytorchbearer, Project: torchbearer, Lines of code: 12, Source file: test_wrappers.py


Note: The torch.lt method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code; do not reproduce without permission.