

Python torch.le Method Code Examples

This article collects typical usage examples of the Python method torch.le. If you are wondering what torch.le does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the torch module.


A total of 15 code examples of torch.le are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code samples.
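
Before the examples, here is a minimal, self-contained sketch of what torch.le computes: an element-wise a <= b comparison, where the second operand can be a tensor with a broadcastable shape or a plain scalar. The values below are made up purely for illustration; note that recent PyTorch versions return a bool tensor, while older versions (as in several of the examples below) returned uint8.

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([2.0, 2.0, 2.0])

# Element-wise comparison against another tensor
print(torch.le(a, b))    # tensor([ True,  True, False])

# The second operand may also be a scalar, broadcast over a
print(torch.le(a, 2.0))  # tensor([ True,  True, False])

# Operator form, equivalent to torch.le
print(a <= b)            # tensor([ True,  True, False])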

Example 1: preProc2

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
# This example also uses Variable (from torch.autograd import Variable)
def preProc2(x):
    # Access the global variables
    global P, expP, negExpP
    P = P.type_as(x)
    expP = expP.type_as(x)
    negExpP = negExpP.type_as(x)

    # Output buffer initialized to zeros; filled in below according to the two conditions
    z = Variable(torch.zeros(x.size())).type_as(x)
    absX = torch.abs(x)
    cond1 = torch.gt(absX, negExpP)
    cond2 = torch.le(absX, negExpP)
    if (torch.sum(cond1) > 0).data.all():
        x1 = torch.sign(x[cond1])
        z[cond1] = x1
    if (torch.sum(cond2) > 0).data.all():
        x2 = x[cond2]*expP
        z[cond2] = x2
    return z 
Author: gitabcworld, Project: FewShotLearning, Lines: 21, Source: lstmhelper.py

Example 2: loss_per_level

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def loss_per_level(self, estDisp, gtDisp, label):
        N, C, H, W = estDisp.shape
        scaled_gtDisp = gtDisp
        scale = 1.0
        if gtDisp.shape[-2] != H or gtDisp.shape[-1] != W:
            # compute scale per level and scale gtDisp
            scale = gtDisp.shape[-1] / (W * 1.0)
            scaled_gtDisp = gtDisp / scale
            scaled_gtDisp = self.scale_func(scaled_gtDisp, (H, W))

        # mask for valid disparity
        # (start disparity, max disparity / scale)
        # Attention: the invalid disparity of KITTI is set as 0, be sure to mask it out
        mask = (scaled_gtDisp > self.start_disp) & (scaled_gtDisp < (self.max_disp / scale))
        if mask.sum() < 1.0:
            print('Relative loss: no point has a disparity within ({},{})!'.format(self.start_disp,
                                                                                    self.max_disp / scale))
            loss = (torch.abs(estDisp - scaled_gtDisp) * mask.float()).mean()
            return loss

        # relative loss
        valid_pixel_number = mask.float().sum()
        diff = scaled_gtDisp[mask] - estDisp[mask]
        label = label[mask]
        # values too large for torch.exp() are not suitable for soft margin loss
        # select elements with absolute value greater than 66
        over_large_mask = torch.gt(torch.abs(diff), 66)
        over_large_diff = diff[over_large_mask]
        # select elements with absolute value less than or equal to 66
        proper_mask = torch.le(torch.abs(diff), 66)
        proper_diff = diff[proper_mask]
        # generate labels for soft margin loss
        label = label[proper_mask]
        loss = F.soft_margin_loss(proper_diff, label, reduction='sum') + torch.abs(over_large_diff).sum()
        loss = loss / valid_pixel_number

        return loss 
Author: DeepMotionAIResearch, Project: DenseMatchingBenchmark, Lines: 39, Source: relative_loss.py

Example 3: pck

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def pck(source_points,warped_points,L_pck,alpha=0.1):
    # compute the percentage of correct keypoints (PCK)
    batch_size=source_points.size(0)
    pck=torch.zeros((batch_size))
    for i in range(batch_size):
        p_src = source_points[i,:]
        p_wrp = warped_points[i,:]
        N_pts = torch.sum(torch.ne(p_src[0,:],-1)*torch.ne(p_src[1,:],-1))
        point_distance = torch.pow(torch.sum(torch.pow(p_src[:,:N_pts]-p_wrp[:,:N_pts],2),0),0.5)
        L_pck_mat = L_pck[i].expand_as(point_distance)
        correct_points = torch.le(point_distance,L_pck_mat*alpha)
        pck[i]=torch.mean(correct_points.float())
    return pck 
Author: ignacio-rocco, Project: weakalign, Lines: 15, Source: eval_util.py

Example 4: distance_bin

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def distance_bin(self, mention_distance):
        bins = torch.zeros(mention_distance.size()).byte().to(self.device)
        rg = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 7], [8, 15], [16, 31], [32, 63], [64, 300]]
        for t, k in enumerate(rg):
            i, j = k[0], k[1]
            b = torch.LongTensor([i]).unsqueeze(-1).expand(mention_distance.size()).to(self.device)
            m1 = torch.ge(mention_distance, b)
            e = torch.LongTensor([j]).unsqueeze(-1).expand(mention_distance.size()).to(self.device)
            m2 = torch.le(mention_distance, e)
            bins = bins + (t + 1) * (m1 & m2)
        return bins.long() 
Author: fastnlp, Project: fastNLP, Lines: 13, Source: model_re.py

Example 5: _siamese_metrics

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def _siamese_metrics(output, label, margin=1):

        l2_dist_tensor = torch.from_numpy(output.data.cpu().numpy())
        label_tensor = torch.from_numpy(label.data.cpu().numpy())

        # Distance
        is_pos = torch.ByteTensor()
        POS_LABEL = 1
        NEG_LABEL = 0
        torch.eq(label_tensor, POS_LABEL, out=is_pos)  # y==1
        pos_dist = 0 if len(l2_dist_tensor[is_pos]) == 0 else l2_dist_tensor[is_pos].mean()
        neg_dist = 0 if len(l2_dist_tensor[~is_pos]) == 0 else l2_dist_tensor[~is_pos].mean()
        # print('same dis : diff dis  {} : {}'.format(l2_dist_tensor[is_pos == 0].mean(), l2_dist_tensor[is_pos].mean()))

        # accuracy
        pred_pos_flags = torch.ByteTensor()
        torch.le(l2_dist_tensor, margin, out=pred_pos_flags)  # predict positive where distance <= margin

        cur_score = torch.FloatTensor(label.size(0))
        cur_score.fill_(NEG_LABEL)
        cur_score[pred_pos_flags] = POS_LABEL

        label_tensor_ = label_tensor.type(torch.FloatTensor)
        accuracy = torch.eq(cur_score, label_tensor_).sum() / label_tensor.size(0)

        metrics = {
            'accuracy': accuracy,
            'pos_dist': pos_dist,
            'neg_dist': neg_dist,
        }
        return metrics 
Author: Erotemic, Project: ibeis, Lines: 33, Source: netmath.py

Example 6: NegativeLogLoss

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def NegativeLogLoss(y_pred, y_true):
    """
    Shape:
        - y_pred:    batch x time
        - y_true:    batch
    """
    y_true_onehot = to_one_hot(y_true.unsqueeze(-1), y_pred.size(1))
    P = y_true_onehot.squeeze(-1) * y_pred  # batch x time
    P = torch.sum(P, dim=1)  # batch
    gt_zero = torch.gt(P, 0.0).float()  # batch
    epsilon = torch.le(P, 0.0).float() * 1e-8  # batch
    log_P = torch.log(P + epsilon) * gt_zero  # batch
    output = -log_P  # batch
    return output 
Author: xingdi-eric-yuan, Project: qait_public, Lines: 16, Source: layers.py

Example 7: forward

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def forward(self, y_pred, y_true):
        _assert_no_grad(y_true)
        P = y_true.float() * y_pred  # batch x time x class
        P = torch.sum(P, dim=1)  # batch x class
        gt_zero = torch.gt(P, 0.0).float()  # batch x class
        epsilon = torch.le(P, 0.0).float() * _eps  # batch x class
        log_P = torch.log(P + epsilon) * gt_zero  # batch x class
        sum_log_P = torch.sum(log_P, dim=1)  # n_b
        return -sum_log_P 
Author: xingdi-eric-yuan, Project: MatchLSTM-PyTorch, Lines: 11, Source: generic.py

Example 8: neg_log_obj

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def neg_log_obj(self, words, word_seq_lens, batch_context_emb, chars, char_seq_lens, adj_matrixs, adjs_in, adjs_out, graphs, dep_label_adj, batch_dep_heads, tags, batch_dep_label, trees=None):
        features = self.neural_scoring(words, word_seq_lens, batch_context_emb, chars, char_seq_lens, adj_matrixs, adjs_in, adjs_out, graphs, dep_label_adj, batch_dep_heads, batch_dep_label, trees)

        all_scores = self.calculate_all_scores(features)

        batch_size = words.size(0)
        sent_len = words.size(1)

        maskTemp = torch.arange(1, sent_len + 1, dtype=torch.long).view(1, sent_len).expand(batch_size, sent_len).to(self.device)
        mask = torch.le(maskTemp, word_seq_lens.view(batch_size, 1).expand(batch_size, sent_len)).to(self.device)

        unlabed_score = self.forward_unlabeled(all_scores, word_seq_lens, mask)
        labeled_score = self.forward_labeled(all_scores, word_seq_lens, tags, mask)
        return unlabed_score - labeled_score 
Author: allanj, Project: ner_with_dependency, Lines: 16, Source: lstmcrf.py

Example 9: _compute_loss

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def _compute_loss(self, prediction_tensor, target_tensor, weights=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors] tensor
        representing the value of the loss function.
    """
    diff = prediction_tensor - target_tensor
    if self._code_weights is not None:
      code_weights = self._code_weights.type_as(prediction_tensor).to(target_tensor.device)
      diff = code_weights.view(1, 1, -1) * diff
    abs_diff = torch.abs(diff)
    abs_diff_lt_1 = torch.le(abs_diff, 1 / (self._sigma**2)).type_as(abs_diff)
    loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * self._sigma, 2) \
      + (abs_diff - 0.5 / (self._sigma**2)) * (1. - abs_diff_lt_1)
    if self._codewise:
      anchorwise_smooth_l1norm = loss
      if weights is not None:
        anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
    else:
      anchorwise_smooth_l1norm = torch.sum(loss, 2)#  * weights
      if weights is not None:
        anchorwise_smooth_l1norm *= weights
    return anchorwise_smooth_l1norm 
Author: traveller59, Project: second.pytorch, Lines: 33, Source: losses.py

Example 10: le

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def le(t1, t2):
    """
    Element-wise rich less than or equal comparison between values from operand t1 with respect to values of
    operand t2 (i.e. t1 <= t2), not commutative.
    Takes the first and second operand (scalar or tensor) whose elements are to be compared as argument.

    Parameters
    ----------
    t1: tensor or scalar
       The first operand to be compared less than or equal to second operand
    t2: tensor or scalar
       The second operand to be compared greater than or equal to first operand

    Returns
    -------
    result: ht.DNDarray
       A uint8-tensor holding 1 for all elements in which values of t1 are less than or equal to values of t2,
       0 for all other elements

    Examples
    -------
    >>> import heat as ht
    >>> T1 = ht.float32([[1, 2],[3, 4]])
    >>> ht.le(T1, 3.0)
    tensor([[1, 1],
            [1, 0]], dtype=torch.uint8)

    >>> T2 = ht.float32([[2, 2], [2, 2]])
    >>> ht.le(T1, T2)
    tensor([[1, 1],
            [0, 0]], dtype=torch.uint8)
    """
    return operations.__binary_op(torch.le, t1, t2) 
Author: helmholtz-analytics, Project: heat, Lines: 35, Source: relational.py

Example 11: compute

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def compute(self, left, right) -> torch.Tensor:
        return torch.le(left, right) 
Author: Heerozh, Project: spectre, Lines: 4, Source: filter.py

Example 12: _bound_logvar_lookup

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def _bound_logvar_lookup(self):
        self.logvar_lookup.weight.data[torch.le(self.logvar_lookup.weight, self.logvar_bound)] = self.logvar_bound 
Author: yjlolo, Project: vae-audio, Lines: 4, Source: base_model.py

Example 13: test_random_uniform_boundaries

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def test_random_uniform_boundaries(dtype):
  lb = 1.2
  ub = 4.8
  backend = pytorch_backend.PyTorchBackend()
  a = backend.random_uniform((4, 4), seed=10, dtype=dtype)
  b = backend.random_uniform((4, 4), (lb, ub), seed=10, dtype=dtype)
  assert (torch.ge(a, 0).byte().all() and torch.le(a, 1).byte().all() and
          torch.ge(b, lb).byte().all() and torch.le(b, ub).byte().all()) 
Author: google, Project: TensorNetwork, Lines: 10, Source: pytorch_backend_test.py

Example 14: stable_cosine_distance

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def stable_cosine_distance(a, b, squared=True):
    """Computes the pairwise distance matrix with numerical stability."""
    mat = torch.cat([a, b])

    pairwise_distances_squared = torch.add(
        mat.pow(2).sum(dim=1, keepdim=True).expand(mat.size(0), -1),
        torch.t(mat).pow(2).sum(dim=0, keepdim=True).expand(mat.size(0), -1)
    ) - 2 * (torch.mm(mat, torch.t(mat)))

    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = torch.clamp(pairwise_distances_squared, min=0.0)

    # Get the mask where the zero distances are at.
    error_mask = torch.le(pairwise_distances_squared, 0.0)

    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        pairwise_distances = torch.sqrt(pairwise_distances_squared + error_mask.float() * 1e-16)

    # Undo conditionally adding 1e-16.
    pairwise_distances = torch.mul(pairwise_distances, (error_mask == False).float())

    # Explicitly set diagonals to zero.
    mask_offdiagonals = 1 - torch.eye(*pairwise_distances.size(), device=pairwise_distances.device)
    pairwise_distances = torch.mul(pairwise_distances, mask_offdiagonals)

    return pairwise_distances[:a.shape[0], a.shape[0]:] 
Author: arthurdouillard, Project: incremental_learning.pytorch, Lines: 31, Source: distance.py

Example 15: _pairwise_distance

# Required import: import torch [as alias]
# Or: from torch import le [as alias]
def _pairwise_distance(a, squared=False):
    """Computes the pairwise distance matrix with numerical stability."""
    pairwise_distances_squared = torch.add(
        a.pow(2).sum(dim=1, keepdim=True).expand(a.size(0), -1),
        torch.t(a).pow(2).sum(dim=0, keepdim=True).expand(a.size(0), -1)
    ) - 2 * (torch.mm(a, torch.t(a)))

    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = torch.clamp(pairwise_distances_squared, min=0.0)

    # Get the mask where the zero distances are at.
    error_mask = torch.le(pairwise_distances_squared, 0.0)

    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        pairwise_distances = torch.sqrt(pairwise_distances_squared + error_mask.float() * 1e-16)

    # Undo conditionally adding 1e-16.
    pairwise_distances = torch.mul(pairwise_distances, (error_mask == False).float())

    # Explicitly set diagonals to zero.
    mask_offdiagonals = 1 - torch.eye(*pairwise_distances.size(), device=pairwise_distances.device)
    pairwise_distances = torch.mul(pairwise_distances, mask_offdiagonals)

    return pairwise_distances 
Author: arthurdouillard, Project: incremental_learning.pytorch, Lines: 29, Source: metrics.py


Note: The torch.le examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult each project's license before redistributing or reusing the code. Do not republish this article without permission.