

Python torch.sqrt Method Code Examples

This article collects typical usage examples of the torch.sqrt method in Python. If you are wondering how exactly to use torch.sqrt, or are looking for real-world examples of it, the hand-picked code samples below should help. You can also explore further usage examples of the torch module that this method belongs to.


The following shows a total of 15 code examples of the torch.sqrt method; by default they are ordered by popularity.
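Before looking at the project snippets, here is a minimal, self-contained sketch of what torch.sqrt itself does (the tensor values are made up purely for illustration):

import torch

# torch.sqrt computes the element-wise square root of a tensor.
x = torch.tensor([1.0, 4.0, 9.0])
print(torch.sqrt(x))                # tensor([1., 2., 3.])

# Negative entries produce NaN, so clamp first if that matters for your use case.
y = torch.tensor([-1.0, 0.25])
print(torch.sqrt(y.clamp(min=0)))   # tensor([0.0000, 0.5000])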

Example 1: _attn

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
# This snippet also relies on: import math; import torch.nn as nn
def _attn(self, q, k, v, sequence_mask):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))

        b_subset = self.b[:, :, :w.size(-2), :w.size(-1)]

        if sequence_mask is not None:
            b_subset = b_subset * sequence_mask.view(
                sequence_mask.size(0), 1, -1)
            b_subset = b_subset.permute(1, 0, 2, 3)

        w = w * b_subset + -1e9 * (1 - b_subset)
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        return torch.matmul(w, v) 
Developer: atcbosselut, Project: comet-commonsense, Lines: 18, Source: gpt.py

Example 2: centerness_target

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def centerness_target(self, pos_bbox_targets):
        """Compute centerness targets.

        Args:
            pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape
                (num_pos, 4)

        Returns:
            Tensor: Centerness target.
        """
        # only calculate pos centerness targets, otherwise there may be nan
        left_right = pos_bbox_targets[:, [0, 2]]
        top_bottom = pos_bbox_targets[:, [1, 3]]
        centerness_targets = (
            left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
                top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
        return torch.sqrt(centerness_targets) 
Developer: open-mmlab, Project: mmdetection, Lines: 19, Source: fcos_head.py
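For intuition, the following sketch applies the same centerness formula to two hypothetical (left, top, right, bottom) target rows; the numbers are invented for illustration and are not from mmdetection:

import torch

# Hypothetical positive bbox targets: distances to the left, top, right and bottom edges.
pos_bbox_targets = torch.tensor([[10., 10., 10., 10.],    # point exactly at the box center
                                 [ 2., 10., 18., 10.]])   # point off-center horizontally

left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness = torch.sqrt(
    (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
    (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
print(centerness)  # tensor([1.0000, 0.3333]) -- only a perfectly centered point scores 1.0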

Example 3: centerness_target

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def centerness_target(self, anchors, bbox_targets):
        # only calculate pos centerness targets, otherwise there may be nan
        gts = self.bbox_coder.decode(anchors, bbox_targets)
        anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
        anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
        l_ = anchors_cx - gts[:, 0]
        t_ = anchors_cy - gts[:, 1]
        r_ = gts[:, 2] - anchors_cx
        b_ = gts[:, 3] - anchors_cy

        left_right = torch.stack([l_, r_], dim=1)
        top_bottom = torch.stack([t_, b_], dim=1)
        centerness = torch.sqrt(
            (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
            (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
        assert not torch.isnan(centerness).any()
        return centerness 
Developer: open-mmlab, Project: mmdetection, Lines: 19, Source: atss_head.py

Example 4: map_roi_levels

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def map_roi_levels(self, rois, num_levels):
        """Map rois to corresponding feature levels by scales.

        - scale < finest_scale * 2: level 0
        - finest_scale * 2 <= scale < finest_scale * 4: level 1
        - finest_scale * 4 <= scale < finest_scale * 8: level 2
        - scale >= finest_scale * 8: level 3

        Args:
            rois (Tensor): Input RoIs, shape (k, 5).
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
Developer: open-mmlab, Project: mmdetection, Lines: 22, Source: single_level_roi_extractor.py
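As a quick sanity check of the mapping, the sketch below uses finest_scale = 56 (assumed to be the extractor's default) and a few made-up RoIs in (batch_idx, x1, y1, x2, y2) format:

import torch

finest_scale = 56   # assumed default value
num_levels = 4

rois = torch.tensor([[0.,   0.,   0.,  32.,  32.],    # scale  32 -> level 0
                     [0.,   0.,   0., 128., 128.],    # scale 128 -> level 1
                     [0.,   0.,   0., 300., 300.]])   # scale 300 -> level 2

scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
print(target_lvls)  # tensor([0, 1, 2])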

Example 5: batch_norm

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def batch_norm(is_training, X, gamma, beta, moving_mean, moving_var, eps, momentum):
    # Training mode and prediction mode follow different logic
    if not is_training:
        # In prediction mode, normalize directly with the passed-in moving mean and variance
        X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)
    else:
        assert len(X.shape) in (2, 4)
        if len(X.shape) == 2:
            # Fully connected layer: 2-D input, compute mean and variance along the feature dimension
            mean = X.mean(dim=0)
            var = ((X - mean) ** 2).mean(dim=0)
        else:
            # Convolutional layer: compute per-channel mean and variance over the batch and spatial dimensions
            mean = X.mean(dim=0, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
            var = ((X - mean) ** 2).mean(dim=0, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
        # In training mode, normalize with the current batch mean and variance
        X_hat = (X - mean) / torch.sqrt(var + eps)
        # Update the moving averages of the mean and variance
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta  # scale and shift
    return Y, moving_mean, moving_var 
Developer: wdxtub, Project: deep-learning-note, Lines: 24, Source: 25_batch_normalization_raw.py
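Because this batch_norm function is standalone, it can be exercised directly once the definition above is in scope; the shapes and hyper-parameters below are arbitrary and only meant to show the calling convention:

import torch

num_features = 3
X = torch.randn(8, num_features)                       # fully connected style input
gamma, beta = torch.ones(num_features), torch.zeros(num_features)
moving_mean, moving_var = torch.zeros(num_features), torch.ones(num_features)

Y, moving_mean, moving_var = batch_norm(
    True, X, gamma, beta, moving_mean, moving_var, eps=1e-5, momentum=0.9)
print(Y.mean(dim=0))                  # per-feature mean close to 0
print(Y.var(dim=0, unbiased=False))   # per-feature variance close to 1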

Example 6: fuse_conv_and_bn

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def fuse_conv_and_bn(conv, bn):
    # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    with torch.no_grad():
        # init
        fusedconv = torch.nn.Conv2d(conv.in_channels,
                                    conv.out_channels,
                                    kernel_size=conv.kernel_size,
                                    stride=conv.stride,
                                    padding=conv.padding,
                                    bias=True)

        # prepare filters
        w_conv = conv.weight.clone().view(conv.out_channels, -1)
        w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
        fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))

        # prepare spatial bias
        if conv.bias is not None:
            b_conv = conv.bias
        else:
            b_conv = torch.zeros(conv.weight.size(0))
        b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
        fusedconv.bias.copy_(b_conv + b_bn)

        return fusedconv 
Developer: zbyuan, Project: pruning_yolov3, Lines: 27, Source: torch_utils.py
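A minimal numerical check of the fusion, assuming the fuse_conv_and_bn function above is in scope and using a small, randomly initialized Conv2d/BatchNorm2d pair whose running statistics are first populated and then frozen (all shapes here are made up):

import torch

conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
bn = torch.nn.BatchNorm2d(8)

# Populate non-trivial running statistics, then switch to eval mode so they are fixed.
bn.train()
with torch.no_grad():
    bn(conv(torch.randn(4, 3, 16, 16)))
bn.eval()

fused = fuse_conv_and_bn(conv, bn)

x = torch.randn(1, 3, 16, 16)
with torch.no_grad():
    print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # expected: True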

Example 7: forward

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
# This snippet also relies on: import numpy as np; from torch.autograd import Variable
def forward(self, input1):
        self.batchgrid3d = torch.zeros(torch.Size([input1.size(0)]) + self.grid3d.size())

        for i in range(input1.size(0)):
            self.batchgrid3d[i] = self.grid3d

        self.batchgrid3d = Variable(self.batchgrid3d)
        #print(self.batchgrid3d)

        x = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,0:4]), 3)
        y = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,4:8]), 3)
        z = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,8:]), 3)
        #print(x)
        r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5

        #print(r)
        theta = torch.acos(z/r)/(np.pi/2)  - 1
        #phi = torch.atan(y/x)
        phi = torch.atan(y/(x + 1e-5))  + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
        phi = phi/np.pi


        output = torch.cat([theta,phi], 3)

        return output 
Developer: guoruoqian, Project: cascade-rcnn_Pytorch, Lines: 27, Source: gridgen.py

Example 8: evo_norm

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def evo_norm(x, prefix, running_var, v, weight, bias,
             training, momentum, eps=0.1, groups=32):
    if prefix == 'b0':
        if training:
            var = torch.var(x, dim=(0, 2, 3), keepdim=True)
            running_var.mul_(momentum)
            running_var.add_((1 - momentum) * var)
        else:
            var = running_var
        if v is not None:
            den = torch.max((var + eps).sqrt(), v * x + instance_std(x, eps))
            x = x / den * weight + bias
        else:
            x = x * weight + bias
    else:
        if v is not None:
            x = x * torch.sigmoid(v * x) / group_std(x,
                                                     groups, eps) * weight + bias
        else:
            x = x * weight + bias

    return x 
Developer: PistonY, Project: torch-toolbox, Lines: 24, Source: functional.py

Example 9: forward

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
# This snippet also relies on: from torch.autograd import Variable
def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        if self.convert_to_onnx:
            x = self.classifier[0](x)

            # manually perform 1d batchnorm, caffe2 currently requires a resize,
            # which is hard to squeeze into the exported network
            bn_1d = self.classifier[1]
            numerator = (x - Variable(bn_1d.running_mean))
            denominator = Variable(torch.sqrt(bn_1d.running_var + bn_1d.eps))
            x = numerator/denominator*Variable(bn_1d.weight.data) + Variable(bn_1d.bias.data)

            x = self.classifier[2](x)
            x = self.classifier[3](x)
            x = self.classifier[4](x)
            return x
        else:
            x = self.classifier(x)
            return x 
Developer: alexfjw, Project: prunnable-layers-pytorch, Lines: 22, Source: models.py
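The manual 1-D batch norm above mirrors what nn.BatchNorm1d does in eval mode; a short sketch with made-up shapes that checks the two against each other:

import torch

bn = torch.nn.BatchNorm1d(4)

# Populate running statistics, then freeze them.
bn.train()
with torch.no_grad():
    bn(torch.randn(16, 4))
bn.eval()

x = torch.randn(8, 4)
with torch.no_grad():
    manual = (x - bn.running_mean) / torch.sqrt(bn.running_var + bn.eps) * bn.weight + bn.bias
    print(torch.allclose(manual, bn(x), atol=1e-6))  # expected: True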

Example 10: cond_samples

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
# This snippet also relies on: import torch as t; import torchvision as tv (plus the project's uncond_samples helper)
def cond_samples(f, replay_buffer, args, device, fresh=False):
    sqrt = lambda x: int(t.sqrt(t.Tensor([x])))
    plot = lambda p, x: tv.utils.save_image(t.clamp(x, -1, 1), p, normalize=True, nrow=sqrt(x.size(0)))

    if fresh:
        replay_buffer = uncond_samples(f, args, device, save=False)
    n_it = replay_buffer.size(0) // 100
    all_y = []
    for i in range(n_it):
        x = replay_buffer[i * 100: (i + 1) * 100].to(device)
        y = f.classify(x).max(1)[1]
        all_y.append(y)

    all_y = t.cat(all_y, 0)
    each_class = [replay_buffer[all_y == l] for l in range(10)]
    print([len(c) for c in each_class])
    for i in range(100):
        this_im = []
        for l in range(10):
            this_l = each_class[l][i * 10: (i + 1) * 10]
            this_im.append(this_l)
        this_im = t.cat(this_im, 0)
        if this_im.size(0) > 0:
            plot('{}/samples_{}.png'.format(args.save_dir, i), this_im)
        print(i) 
Developer: wgrathwohl, Project: JEM, Lines: 27, Source: eval_wrn_ebm.py

Example 11: forward

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def forward(self, x, y):
        means = torch.mean(x, dim=(2, 3))
        m = torch.mean(means, dim=-1, keepdim=True)
        v = torch.var(means, dim=-1, keepdim=True)
        means = (means - m) / (torch.sqrt(v + 1e-5))
        h = self.instance_norm(x)

        if self.bias:
            gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
        else:
            gamma, alpha = self.embed(y).chunk(2, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h
        return out 
Developer: wgrathwohl, Project: JEM, Lines: 18, Source: norms.py

Example 12: forward

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
# This snippet also relies on the project's compiled extension (_ext) and typing's Any/Tuple for the type comment
def forward(ctx, unknown, known):
        # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
            Find the three nearest neighbors of unknown in known
        Parameters
        ----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the query points (unknown features)
        known : torch.Tensor
            (B, m, 3) tensor of the reference points (known features)

        Returns
        -------
        dist : torch.Tensor
            (B, n, 3) l2 distance to the three nearest neighbors
        idx : torch.Tensor
            (B, n, 3) index of 3 nearest neighbors
        """
        dist2, idx = _ext.three_nn(unknown, known)

        return torch.sqrt(dist2), idx 
Developer: zaiweizhang, Project: H3DNet, Lines: 23, Source: pointnet2_utils.py
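The compiled _ext.three_nn kernel is project-specific; as a rough, CPU-only stand-in for experimentation, the same three-nearest-neighbor lookup can be sketched with torch.cdist and torch.topk (an illustrative equivalent, not the H3DNet implementation):

import torch

def three_nn_reference(unknown, known):
    # unknown: (B, n, 3) query points, known: (B, m, 3) reference points
    dist = torch.cdist(unknown, known)                          # (B, n, m) pairwise L2 distances
    dist3, idx3 = torch.topk(dist, k=3, dim=-1, largest=False)  # 3 smallest per query point
    return dist3, idx3                                          # each of shape (B, n, 3)

unknown = torch.randn(2, 5, 3)
known = torch.randn(2, 8, 3)
dist, idx = three_nn_reference(unknown, known)
print(dist.shape, idx.shape)  # torch.Size([2, 5, 3]) torch.Size([2, 5, 3])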

Example 13: sample

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def sample(verts, faces, num=10000, ret_choice = False):
    dist_uni = torch.distributions.Uniform(torch.tensor([0.0]).cuda(), torch.tensor([1.0]).cuda())
    x1,x2,x3 = torch.split(torch.index_select(verts, 0, faces[:,0]) - torch.index_select(verts, 0, faces[:,1]), 1, dim = 1)
    y1,y2,y3 = torch.split(torch.index_select(verts, 0, faces[:,1]) - torch.index_select(verts, 0, faces[:,2]), 1, dim = 1)
    a = (x2*y3 - x3*y2)**2
    b = (x3*y1 - x1*y3)**2
    c = (x1*y2 - x2*y1)**2
    Areas = torch.sqrt(a+b+c)/2
    Areas = Areas / torch.sum(Areas)
    cat_dist = torch.distributions.Categorical(Areas.view(-1))
    choices = cat_dist.sample_n(num)
    select_faces = faces[choices]
    xs = torch.index_select(verts, 0,select_faces[:,0])
    ys = torch.index_select(verts, 0,select_faces[:,1])
    zs = torch.index_select(verts, 0,select_faces[:,2])
    u = torch.sqrt(dist_uni.sample_n(num))
    v = dist_uni.sample_n(num)
    points = (1- u)*xs + (u*(1-v))*ys + u*v*zs
    if ret_choice:
        return points, choices
    else:
        return points 
Developer: nv-tlabs, Project: DIB-R, Lines: 24, Source: check_chamfer.py

Example 14: tforward

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def tforward(self, disp, edge=None):
    self.sobel=self.sobel.to(disp.device)

    if edge is not None:
      grad = self.sobel(disp)
      grad = torch.sqrt(grad[:,0:1,...]**2 + grad[:,1:2,...]**2 + 1e-8)
      pdf = (1-edge)/self.b0 * torch.exp(-torch.abs(grad)/self.b0) + \
            edge/self.b1 * torch.exp(-torch.abs(grad)/self.b1)
      val = torch.mean(-torch.log(pdf.clamp(min=1e-4)))
    else:
      # on qifeng's data we don't have ambient info
      # therefore we supress edge everywhere
      grad = self.sobel(disp)
      grad = torch.sqrt(grad[:,0:1,...]**2 + grad[:,1:2,...]**2 + 1e-8)
      grad= torch.clamp(grad, 0, 1.0)
      val = torch.mean(grad)

    return val 
Developer: autonomousvision, Project: connecting_the_dots, Lines: 20, Source: networks.py
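self.sobel here is a gradient-filter module from the project; the pattern of combining its two output channels with torch.sqrt can be reproduced with plain F.conv2d and standard Sobel kernels (the kernels and shapes below are assumptions, not the original module):

import torch
import torch.nn.functional as F

# Standard 3x3 Sobel kernels for a single-channel map.
sobel_x = torch.tensor([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]]).view(1, 1, 3, 3)
sobel_y = sobel_x.transpose(2, 3)

disp = torch.randn(1, 1, 32, 32)              # made-up disparity map
gx = F.conv2d(disp, sobel_x, padding=1)
gy = F.conv2d(disp, sobel_y, padding=1)
grad = torch.sqrt(gx ** 2 + gy ** 2 + 1e-8)   # same gradient-magnitude computation as in tforward
print(grad.shape)  # torch.Size([1, 1, 32, 32])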

Example 15: map_roi_levels

# Required import: import torch [as alias]
# Or: from torch import sqrt [as alias]
def map_roi_levels(self, rois, num_levels):
        """Map rois to corresponding feature levels by scales.

        - scale < finest_scale: level 0
        - finest_scale <= scale < finest_scale * 2: level 1
        - finest_scale * 2 <= scale < finest_scale * 4: level 2
        - scale >= finest_scale * 4: level 3

        Args:
            rois (Tensor): Input RoIs, shape (k, 5).
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
Developer: dingjiansw101, Project: AerialDetection, Lines: 22, Source: single_level.py


Note: The torch.sqrt method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or using the code; do not repost without permission.