

Python nn.UpsamplingBilinear2d Method Code Examples

This article collects typical usage examples of the Python method torch.nn.UpsamplingBilinear2d. If you are wondering what nn.UpsamplingBilinear2d does, how to use it, or want to see it in working code, the curated examples below should help. You can also explore further usage examples from torch.nn, the module this method belongs to.


Fifteen code examples of nn.UpsamplingBilinear2d are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
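Before the examples, a minimal orientation sketch may help (a hedged illustration, not taken from any of the projects below): nn.UpsamplingBilinear2d can be constructed with either an explicit target size or a scale_factor.

import torch
from torch import nn

x = torch.randn(1, 3, 32, 32)  # (batch, channels, height, width)

# Construct with a scale factor: spatial dimensions are doubled.
up_by_factor = nn.UpsamplingBilinear2d(scale_factor=2)
print(up_by_factor(x).shape)  # torch.Size([1, 3, 64, 64])

# Construct with an explicit target size.
up_to_size = nn.UpsamplingBilinear2d(size=(48, 40))
print(up_to_size(x).shape)  # torch.Size([1, 3, 48, 40])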

Example 1: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def __init__(self, alignsize=8, reddim=32, loadweight=True, model=None, downsample=4):
        super(crop_model_multi_scale_shared, self).__init__()

        if model == 'shufflenetv2':
            self.Feat_ext = shufflenetv2_base(loadweight, downsample)
            self.DimRed = nn.Conv2d(812, reddim, kernel_size=1, padding=0)
        elif model == 'mobilenetv2':
            self.Feat_ext = mobilenetv2_base(loadweight, downsample)
            self.DimRed = nn.Conv2d(448, reddim, kernel_size=1, padding=0)
        elif model == 'vgg16':
            self.Feat_ext = vgg_base(loadweight, downsample)
            self.DimRed = nn.Conv2d(1536, reddim, kernel_size=1, padding=0)
        elif model == 'resnet50':
            self.Feat_ext = resnet50_base(loadweight, downsample)
            self.DimRed = nn.Conv2d(3584, reddim, kernel_size=1, padding=0)

        # Half- and double-resolution bilinear resamplers for the multi-scale branch.
        self.downsample2 = nn.UpsamplingBilinear2d(scale_factor=1.0 / 2.0)
        self.upsample2 = nn.UpsamplingBilinear2d(scale_factor=2.0)
        self.RoIAlign = RoIAlignAvg(alignsize, alignsize, 1.0 / 2 ** downsample)
        self.RoDAlign = RoDAlignAvg(alignsize, alignsize, 1.0 / 2 ** downsample)
        self.FC_layers = fc_layers(reddim * 2, alignsize)
Author: HuiZeng, Project: Grid-Anchor-based-Image-Cropping-Pytorch, Lines: 23, Source: croppingModel.py

Example 2: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def forward(self, x):
        input_size = x.size()[2]
        # Build the resamplers on the fly so their target sizes track the input.
        self.interp1 = nn.UpsamplingBilinear2d(size=(int(input_size * 0.75) + 1, int(input_size * 0.75) + 1))
        self.interp2 = nn.UpsamplingBilinear2d(size=(int(input_size * 0.5) + 1, int(input_size * 0.5) + 1))
        self.interp3 = nn.UpsamplingBilinear2d(size=(outS(input_size), outS(input_size)))

        out = []
        x2 = self.interp1(x)
        x3 = self.interp2(x)

        out.append(self.Scale(x))                   # 1.0x
        out.append(self.interp3(self.Scale(x2)))    # 0.75x
        out.append(self.interp3(self.Scale(x3)))    # 0.5x

        x2Out_interp = out[1]
        x3Out_interp = out[2]
        temp1 = torch.max(out[0], x2Out_interp)
        out.append(torch.max(temp1, x3Out_interp))
        return out
Author: Achilleas, Project: pytorch-mri-segmentation-3D, Lines: 22, Source: deeplab_resnet_2D.py
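Examples 2 to 4 rebuild their interpolation modules inside forward so that the target sizes can track the input resolution. Note that recent PyTorch versions deprecate nn.UpsamplingBilinear2d in favor of the functional API; below is a minimal sketch of the equivalent dynamic-size pattern (a hedged rewrite for illustration, not code from the projects above).

import torch.nn.functional as F

def multi_scale_inputs(x):
    # F.interpolate(..., mode='bilinear', align_corners=True) matches
    # nn.UpsamplingBilinear2d, without constructing a module per call.
    input_size = x.size(2)
    x075 = F.interpolate(x, size=(int(input_size * 0.75) + 1,) * 2,
                         mode='bilinear', align_corners=True)
    x05 = F.interpolate(x, size=(int(input_size * 0.5) + 1,) * 2,
                        mode='bilinear', align_corners=True)
    return x075, x05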

Example 3: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def forward(self, x):
        input_size_1 = x.size()[2]
        input_size_2 = x.size()[3]

        self.interp1 = nn.UpsamplingBilinear2d(size=(int(input_size_1 * 0.75) + 1, int(input_size_2 * 0.75) + 1))
        self.interp2 = nn.UpsamplingBilinear2d(size=(int(input_size_1 * 0.5) + 1, int(input_size_2 * 0.5) + 1))
        self.interp3 = nn.UpsamplingBilinear2d(size=(outS(input_size_1), outS(input_size_2)))

        out = []
        x2 = self.interp1(x)
        x3 = self.interp2(x)
        out.append(self.Scale(x))                   # for the original scale
        out.append(self.interp3(self.Scale(x2)))    # for the 0.75x scale
        out.append(self.interp3(self.Scale(x3)))    # for the 0.5x scale

        x2Out_interp = out[1]
        x3Out_interp = out[2]
        temp1 = torch.max(out[0], x2Out_interp)
        out.append(torch.max(temp1, x3Out_interp))
        return out
Author: omkar13, Project: MaskTrack, Lines: 27, Source: deeplab_resnet.py

Example 4: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def forward(self, x):
        input_size = x.size()[2]
        self.interp1 = nn.UpsamplingBilinear2d(size=(int(input_size * 0.75) + 1, int(input_size * 0.75) + 1))
        self.interp2 = nn.UpsamplingBilinear2d(size=(int(input_size * 0.5) + 1, int(input_size * 0.5) + 1))
        self.interp3 = nn.UpsamplingBilinear2d(size=(outS(input_size), outS(input_size)))
        out = []
        x2 = self.interp1(x)
        x3 = self.interp2(x)
        out.append(self.Scale(x))                   # for the original scale
        out.append(self.interp3(self.Scale(x2)))    # for the 0.75x scale
        out.append(self.Scale(x3))                  # for the 0.5x scale (interpolated below)

        x2Out_interp = out[1]
        x3Out_interp = self.interp3(out[2])
        temp1 = torch.max(out[0], x2Out_interp)
        out.append(torch.max(temp1, x3Out_interp))
        return out
Author: isht7, Project: pytorch-deeplab-resnet, Lines: 20, Source: deeplab_resnet.py

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def __init__(self, alignsize=8, reddim=32, loadweight=True, model=None, downsample=4):
        super(crop_model_multi_scale_individual, self).__init__()

        if model == 'shufflenetv2':
            self.Feat_ext1 = shufflenetv2_base(loadweight, downsample)
            self.Feat_ext2 = shufflenetv2_base(loadweight, downsample)
            self.Feat_ext3 = shufflenetv2_base(loadweight, downsample)
            self.DimRed = nn.Conv2d(232, reddim, kernel_size=1, padding=0)
        elif model == 'mobilenetv2':
            self.Feat_ext1 = mobilenetv2_base(loadweight, downsample)
            self.Feat_ext2 = mobilenetv2_base(loadweight, downsample)
            self.Feat_ext3 = mobilenetv2_base(loadweight, downsample)
            self.DimRed = nn.Conv2d(96, reddim, kernel_size=1, padding=0)
        elif model == 'vgg16':
            self.Feat_ext1 = vgg_base(loadweight, downsample)
            self.Feat_ext2 = vgg_base(loadweight, downsample)
            self.Feat_ext3 = vgg_base(loadweight, downsample)
            self.DimRed = nn.Conv2d(512, reddim, kernel_size=1, padding=0)

        self.downsample2 = nn.UpsamplingBilinear2d(scale_factor=1.0 / 2.0)
        self.upsample2 = nn.UpsamplingBilinear2d(scale_factor=2.0)
        self.RoIAlign = RoIAlignAvg(alignsize, alignsize, 1.0 / 2 ** downsample)
        self.RoDAlign = RoDAlignAvg(alignsize, alignsize, 1.0 / 2 ** downsample)
        self.FC_layers = fc_layers(reddim * 2, alignsize)
Author: lld533, Project: Grid-Anchor-based-Image-Cropping-Pytorch, Lines: 26, Source: croppingModel.py

Example 6: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def __init__(self, inchannel, outchannel, upsampling=False, end=False):
        """
        Reverse Vgg19_bn block
        :param inchannel: input channels
        :param outchannel: output channels
        :param upsampling: if True, append a bilinear upsampling module
        :param end: if True, omit the trailing activation and batch norm
        """
        super(ReVggBlock, self).__init__()

        model = []
        model += [nn.ReplicationPad2d(1)]
        model += [nn.Conv2d(inchannel, outchannel, 3)]

        if upsampling:
            model += [nn.UpsamplingBilinear2d(scale_factor=2)]

        if not end:
            model += [nn.LeakyReLU(inplace=True), nn.BatchNorm2d(outchannel)]

        self.model = nn.Sequential(*model)
Author: CM-BF, Project: FeatureFlow, Lines: 23, Source: model.py

Example 7: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def forward(self, in_left, in_up):
        """
        Monocular Relative Depth Perception with Web Stereo Data Supervision,
        Figure 4
        """
        x_left = self.conv_trans_left(in_left)
        x_left = self.bn_trans_left(x_left)
        x_left = self.resConv_left(x_left)

        x_up = self.conv_trans_up(in_up)
        x_up = self.bn_trans_up(x_up)

        x = x_left + x_up

        x = self.resConv_down(x)
        # Instantiate the upsampler inline and apply it immediately.
        x = nn.UpsamplingBilinear2d(scale_factor=2)(x)

        return x
Author: princeton-vl, Project: YouTube3D, Lines: 21, Source: ReDWebNet.py

Example 8: convert_network_prediction_to_depthmap

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
# (This example also requires: import numpy as np)
def convert_network_prediction_to_depthmap(netpred, cropbox, output_nc=51):

    x1, y1, x2, y2 = cropbox[:4]

    crop_w = (x2 - x1)
    crop_h = (y2 - y1)
    upsampler = nn.UpsamplingBilinear2d(size=(int(crop_h), int(crop_w)))
    final_prediction = upsampler(netpred)
    final_prediction = np.argmax(final_prediction.cpu().data.numpy(), axis=1)[0, :, :]

    estimated_mask = np.zeros_like(final_prediction)
    I, J = (final_prediction > 0).nonzero()
    estimated_mask[I, J] = 1

    bins = np.linspace(-0.5, 0.5, output_nc - 1)
    estimated_depthmap = bins[final_prediction - 1]

    return estimated_depthmap, estimated_mask 
Author: krematas, Project: soccerontable, Lines: 20, Source: postprocess.py
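For context, here is a hedged usage sketch of Example 8 (the random tensor stands in for a real network output; shapes follow the function's indexing, with netpred as (1, output_nc, h, w) class scores and cropbox as pixel coordinates (x1, y1, x2, y2)).

import torch

netpred = torch.randn(1, 51, 32, 32)   # hypothetical 51-way score map
cropbox = (10, 20, 90, 120)            # 80 px wide, 100 px tall

depthmap, mask = convert_network_prediction_to_depthmap(netpred, cropbox)
print(depthmap.shape, mask.shape)      # (100, 80) (100, 80)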

Example 9: select_mask_logistic_loss

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
# (This example also requires: import torch; import torch.nn.functional as F;
#  from torch.autograd import Variable)
def select_mask_logistic_loss(p_m, mask, weight, o_sz=63, g_sz=127):
    weight = weight.view(-1)
    pos = Variable(weight.data.eq(1).nonzero().squeeze())
    if pos.nelement() == 0: return p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0

    if len(p_m.shape) == 4:
        p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 1, o_sz, o_sz)
        p_m = torch.index_select(p_m, 0, pos)
        p_m = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])(p_m)
        p_m = p_m.view(-1, g_sz * g_sz)
    else:
        p_m = torch.index_select(p_m, 0, pos)

    mask_uf = F.unfold(mask, (g_sz, g_sz), padding=0, stride=8)
    mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, g_sz * g_sz)

    mask_uf = torch.index_select(mask_uf, 0, pos)
    loss = F.soft_margin_loss(p_m, mask_uf)
    iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)
    return loss, iou_m, iou_5, iou_7 
Author: foolwood, Project: SiamMask, Lines: 22, Source: siammask_sharp.py

Example 10: select_mask_logistic_loss

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
# (This example also requires: import torch; import torch.nn.functional as F;
#  from torch.autograd import Variable)
def select_mask_logistic_loss(p_m, mask, weight, o_sz=63, g_sz=127):
    weight = weight.view(-1)
    pos = Variable(weight.data.eq(1).nonzero().squeeze())
    if pos.nelement() == 0: return p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0

    p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 1, o_sz, o_sz)
    p_m = torch.index_select(p_m, 0, pos)
    p_m = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])(p_m)
    p_m = p_m.view(-1, g_sz * g_sz)

    mask_uf = F.unfold(mask, (g_sz, g_sz), padding=32, stride=8)
    mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, g_sz * g_sz)

    mask_uf = torch.index_select(mask_uf, 0, pos)
    loss = F.soft_margin_loss(p_m, mask_uf)
    iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)
    return loss, iou_m, iou_5, iou_7 
Author: foolwood, Project: SiamMask, Lines: 19, Source: siammask.py

Example 11: resize_label_batch

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
# (This example also requires: import numpy as np; import torch;
#  from torch.autograd import Variable)
def resize_label_batch(label, size):
    # label arrives as (H, W, 1, N); rearrange to (N, 1, H, W) for the resampler,
    # then restore the original layout at the target resolution.
    label_resized = np.zeros((size, size, 1, label.shape[3]))
    interp = nn.UpsamplingBilinear2d(size=(size, size))
    labelVar = Variable(torch.from_numpy(label.transpose(3, 2, 0, 1)))
    label_resized[:, :, :, :] = interp(labelVar).data.numpy().transpose(2, 3, 1, 0)

    return label_resized
Author: Achilleas, Project: pytorch-mri-segmentation-3D, Lines: 9, Source: train_deeplab2D.py
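A hedged usage sketch of Example 11 (the random array stands in for real label maps; the expected layout is (H, W, 1, N), and the dtype must be floating point for bilinear interpolation to apply):

import numpy as np

label = np.random.rand(321, 321, 1, 4).astype(np.float32)  # hypothetical batch of 4
resized = resize_label_batch(label, 41)
print(resized.shape)  # (41, 41, 1, 4)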

Example 12: assureRatio

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def assureRatio(img):
    """Ensure imgH <= imgW."""
    b, c, h, w = img.size()
    if h > w:
        main = nn.UpsamplingBilinear2d(size=(h, h), scale_factor=None)
        img = main(img)
    return img 
Author: zzzDavid, Project: ICDAR-2019-SROIE, Lines: 9, Source: utils.py
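A quick check of Example 12's behavior (shapes are illustrative): only images taller than they are wide get resized, and the result is square.

import torch

tall = torch.randn(2, 1, 48, 32)  # h > w, so it is upsampled to (48, 48)
wide = torch.randn(2, 1, 32, 48)  # h <= w, returned unchanged
print(assureRatio(tall).shape)    # torch.Size([2, 1, 48, 48])
print(assureRatio(wide).shape)    # torch.Size([2, 1, 32, 48])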

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def __init__(self, in_size, out_size, is_deconv):
        super(unetUp, self).__init__()
        self.conv = unetConv2(in_size, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)

        # initialise the blocks
        for m in self.children():
            if m.__class__.__name__.find('unetConv2') != -1: continue
            init_weights(m, init_type='kaiming') 
Author: ozan-oktay, Project: Attention-Gated-Networks, Lines: 14, Source: utils.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def __init__(self, in_size, out_size, is_deconv):
        super(unetUp, self).__init__()
        self.conv = unetConv2(in_size, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2) 
Author: zhechen, Project: PLARD, Lines: 9, Source: utils.py
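Examples 13 and 14 use is_deconv to choose between a learned transposed convolution and parameter-free bilinear upsampling. Both double the spatial size, but they differ in channel handling and learnable parameters; here is a standalone sketch of the trade-off (illustrative shapes, not the repositories' unetUp).

import torch
from torch import nn

x = torch.randn(1, 64, 16, 16)

# Learned upsampling: doubles the resolution and maps 64 -> 32 channels.
deconv = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)
print(deconv(x).shape)  # torch.Size([1, 32, 32, 32])

# Parameter-free bilinear upsampling: resolution doubles, channels unchanged,
# so the convolution that follows must accept 64 input channels instead.
bilinear = nn.UpsamplingBilinear2d(scale_factor=2)
print(bilinear(x).shape)  # torch.Size([1, 64, 32, 32])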

Example 15: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import UpsamplingBilinear2d [as alias]
def forward(self, input):
        '''
        :param input: Input RGB Image
        :return: down-sampled image (pyramid-based approach)
        '''
        for pool in self.pool:
            input = pool(input)
        return input

#
#
#
# class C3_FineNet(nn.Module):
#     def __init__(self, classes=20):
#         super().__init__()
#
#         self.detail = nn.Sequential(
#             # nn.Conv2d(kernel_size=3, stride=2, padding=1, in_channels=3, out_channels=basic_3,bias=False),
#             CBR(3, basic_3, 3, 2),
#             AdvancedC3(basic_3, basic_3, add=True),
#             nn.BatchNorm2d(basic_3, eps=1e-03),
#         )
#         self.classifier = nn.Sequential(
#                                         nn.PReLU(basic_3),
#                                         nn.UpsamplingBilinear2d(scale_factor=2),
#                                         nn.Conv2d(kernel_size=(1, 1), in_channels=basic_3, out_channels=classes,bias=False),
#                                         )
#
#     def forward(self, input):
#         '''
#         :param input: RGB image
#         :return: transformed feature map
#         '''
#
#         classifier = self.classifier(self.detail(input))
#         return classifier 
Author: clovaai, Project: ext_portrait_segmentation, Lines: 38, Source: extremeC3.py
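Example 15's forward only shows the loop over self.pool; the pooling layers themselves are defined elsewhere in the file. Below is a self-contained sketch under the assumption that each stage is a stride-2 average pool (an assumption for illustration, not the repository's exact definition).

import torch
from torch import nn

class InputPyramid(nn.Module):
    # Hypothetical stand-in for the module that owns Example 15's forward.
    def __init__(self, sampling_times):
        super().__init__()
        self.pool = nn.ModuleList(
            nn.AvgPool2d(3, stride=2, padding=1) for _ in range(sampling_times)
        )

    def forward(self, input):
        for pool in self.pool:
            input = pool(input)
        return input

x = torch.randn(1, 3, 64, 64)
print(InputPyramid(2)(x).shape)  # torch.Size([1, 3, 16, 16])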


Note: The torch.nn.UpsamplingBilinear2d examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce this article without permission.