This article collects typical usage examples of the Python method torch.nn.functional.upsample. If you have been asking yourself how functional.upsample works, how to call it, or what real uses of it look like, the hand-picked code examples below should help. You can also browse further usage examples from the containing module, torch.nn.functional.
Below are 15 code examples of functional.upsample, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
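One note before the examples: F.upsample has been deprecated since PyTorch 0.4.1 in favor of F.interpolate, which accepts the same size/scale_factor/mode arguments. A minimal sketch of the modern equivalent (tensor shape chosen arbitrarily):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)
# Legacy call (emits a deprecation warning on recent PyTorch):
#   out = F.upsample(x, scale_factor=2, mode='bilinear')
# Modern equivalent:
out = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
print(out.shape)  # torch.Size([1, 3, 64, 64])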
Example 1: _upsample_add
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def _upsample_add(self, x, y):
    '''Upsample x and add it to y.

    Args:
      x: (Variable) top feature map to be upsampled.
      y: (Variable) lateral feature map.

    Returns:
      (Variable) summed feature map.

    Note: in PyTorch, when the input size is odd, the feature map upsampled
    with `F.upsample(..., scale_factor=2, mode='nearest')` may not match the
    lateral feature map size, e.g.
      original input size: [N,_,15,15] ->
      conv2d feature map size: [N,_,8,8] ->
      upsampled feature map size: [N,_,16,16]
    So we use bilinear upsampling, which supports arbitrary output sizes.
    '''
    _, _, H, W = y.size()
    return F.upsample(x, size=(H, W), mode='bilinear') + y
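The odd-size pitfall described in the docstring is easy to reproduce; a quick sketch with shapes chosen to match the docstring's example:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 8, 15, 15)                     # odd spatial size
y = nn.Conv2d(8, 8, kernel_size=3, stride=2, padding=1)(x)
print(y.shape)                                    # torch.Size([1, 8, 8, 8])
up = F.interpolate(y, scale_factor=2, mode='nearest')
print(up.shape)                                   # torch.Size([1, 8, 16, 16]) -- not 15x15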
Example 2: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def forward(self, x):
    output = self.firstconv(x)
    output = self.layer1(output)
    output_raw = self.layer2(output)
    output = self.layer3(output_raw)
    output_skip = self.layer4(output)
    # Spatial pyramid branches: each pools at a different scale, then is
    # upsampled back to output_skip's size before concatenation.
    output_branch1 = self.branch1(output_skip)
    output_branch1 = F.upsample(output_branch1, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear')
    output_branch2 = self.branch2(output_skip)
    output_branch2 = F.upsample(output_branch2, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear')
    output_branch3 = self.branch3(output_skip)
    output_branch3 = F.upsample(output_branch3, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear')
    output_branch4 = self.branch4(output_skip)
    output_branch4 = F.upsample(output_branch4, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear')
    output_feature = torch.cat((output_raw, output_skip, output_branch4, output_branch3, output_branch2, output_branch1), 1)
    output_feature = self.lastconv(output_feature)
    return output_feature
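For context, one of the pyramid branches this forward pass assumes might be defined as below. The pooling window and channel counts are assumptions, in the spirit of spatial-pyramid-pooling feature extractors such as PSMNet's, not taken from the source:

import torch.nn as nn

# Hypothetical branch (assigned to self.branch1 in the model's __init__);
# the 64x64 pooling window and the 128->32 channel reduction are assumptions.
branch1 = nn.Sequential(
    nn.AvgPool2d((64, 64), stride=(64, 64)),
    nn.Conv2d(128, 32, kernel_size=1, bias=False),
    nn.BatchNorm2d(32),
    nn.ReLU(inplace=True),
)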
Example 3: _forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def _forward(self, level, inp):
    # Upper branch
    up1 = inp
    up1 = self._modules['b1_' + str(level)](up1)
    # Lower branch
    low1 = F.avg_pool2d(inp, 2, stride=2)
    low1 = self._modules['b2_' + str(level)](low1)
    if level > 1:
        low2 = self._forward(level - 1, low1)
    else:
        low2 = low1
        low2 = self._modules['b2_plus_' + str(level)](low2)
    low3 = low2
    low3 = self._modules['b3_' + str(level)](low3)
    up2 = F.upsample(low3, scale_factor=2, mode='nearest')
    return up1 + up2
Example 4: _concatenation
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def _concatenation(self, x, g):
    input_size = x.size()
    batch_size = input_size[0]
    assert batch_size == g.size(0)

    # theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)
    # phi   => (b, g_d) -> (b, i_c)
    theta_x = self.theta(x)
    theta_x_size = theta_x.size()

    # g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')
    # Relu(theta_x + phi_g + bias) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)
    phi_g = F.upsample(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
    f = F.relu(theta_x + phi_g, inplace=True)

    # psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3)
    sigm_psi_f = F.sigmoid(self.psi(f))

    # upsample the attentions and multiply
    sigm_psi_f = F.upsample(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
    y = sigm_psi_f.expand_as(x) * x
    W_y = self.W(y)
    return W_y, sigm_psi_f
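This method relies on theta, phi, psi, W and upsample_mode being set up elsewhere in the class. A minimal sketch of what that setup might look like for the 2D case; the layer shapes and the sub_sample stride are assumptions modeled on typical grid-attention blocks, not taken from the source:

import torch.nn as nn

class GridAttentionBlock2D(nn.Module):
    # Hypothetical setup; channel sizes and the downsampling stride are assumptions.
    def __init__(self, in_channels, gating_channels, inter_channels, sub_sample=2):
        super().__init__()
        self.upsample_mode = 'bilinear'
        # theta: strided conv bringing x down to the gating resolution
        self.theta = nn.Conv2d(in_channels, inter_channels,
                               kernel_size=sub_sample, stride=sub_sample, bias=False)
        # phi: projects the gating signal to the shared embedding dimension
        self.phi = nn.Conv2d(gating_channels, inter_channels, kernel_size=1)
        # psi: collapses the embedding to a single attention channel
        self.psi = nn.Conv2d(inter_channels, 1, kernel_size=1)
        # W: output transform applied to the gated features
        self.W = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=1),
            nn.BatchNorm2d(in_channels),
        )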
Example 5: _concatenation_debug
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def _concatenation_debug(self, x, g):
    input_size = x.size()
    batch_size = input_size[0]
    assert batch_size == g.size(0)

    # theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)
    # phi   => (b, g_d) -> (b, i_c)
    theta_x = self.theta(x)
    theta_x_size = theta_x.size()

    # g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')
    # softplus(theta_x + phi_g) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)
    phi_g = F.upsample(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
    f = F.softplus(theta_x + phi_g)

    # psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3)
    sigm_psi_f = F.sigmoid(self.psi(f))

    # upsample the attentions and multiply
    sigm_psi_f = F.upsample(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
    y = sigm_psi_f.expand_as(x) * x
    W_y = self.W(y)
    return W_y, sigm_psi_f
Example 6: _concatenation_residual
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def _concatenation_residual(self, x, g):
    input_size = x.size()
    batch_size = input_size[0]
    assert batch_size == g.size(0)

    # theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)
    # phi   => (b, g_d) -> (b, i_c)
    theta_x = self.theta(x)
    theta_x_size = theta_x.size()

    # g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')
    # Relu(theta_x + phi_g + bias) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)
    phi_g = F.upsample(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
    f = F.relu(theta_x + phi_g, inplace=True)

    # psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3); softmax over spatial positions
    f = self.psi(f).view(batch_size, 1, -1)
    sigm_psi_f = F.softmax(f, dim=2).view(batch_size, 1, *theta_x.size()[2:])

    # upsample the attentions and multiply
    sigm_psi_f = F.upsample(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
    y = sigm_psi_f.expand_as(x) * x
    W_y = self.W(y)
    return W_y, sigm_psi_f
Example 7: losses_pyramid0
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def losses_pyramid0(self, disp_gt, disps, scale_disps, flag_smooth=False):
    count = len(scale_disps)
    _, _, h, w = disp_gt.shape
    loss = 0
    for i in range(count):
        level = scale_disps[i]
        weight = self.weight_levels[level]
        if weight <= 0:
            continue
        if level > 0:
            # Upsample the coarse prediction by 2**level, then crop to the
            # ground-truth size in case of odd dimensions.
            pred = F.upsample(disps[i], scale_factor=2**level, mode='bilinear')[:, :, :h, :w]
        else:
            pred = disps[i]
        loss = loss + self.lossfun(disp_gt, pred, flag_smooth, factor=1) * weight
    return loss

# losses for depthmono/common/Cap_ds_lr
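The upsample-then-crop idiom in the level > 0 branch handles ground truth with odd dimensions; a quick sketch with arbitrary shapes:

import torch
import torch.nn.functional as F

disp_gt = torch.randn(2, 1, 250, 250)        # full-resolution ground truth
disp_lvl2 = torch.randn(2, 1, 63, 63)        # level-2 prediction (roughly 1/4 scale)
up = F.interpolate(disp_lvl2, scale_factor=2**2, mode='bilinear', align_corners=False)
print(up.shape)                               # torch.Size([2, 1, 252, 252])
pred = up[:, :, :250, :250]                   # crop back to the ground-truth size
print(pred.shape)                             # torch.Size([2, 1, 250, 250])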
Example 8: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def forward(self, x):
    encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
    encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
    center = self.center(encoder5)
    dec5 = self.dec5(center, encoder5)
    dec4 = self.dec4(dec5, encoder4)
    dec3 = self.dec3(dec4, encoder3)
    dec2 = self.dec2(dec3, encoder2)
    dec1 = self.dec1(dec2)
    if self.use_hypercolumn:
        # Hypercolumn: upsample every decoder stage to full resolution
        # and concatenate along the channel dimension.
        dec1 = torch.cat([dec1,
                          F.upsample(dec2, scale_factor=2, mode='bilinear'),
                          F.upsample(dec3, scale_factor=4, mode='bilinear'),
                          F.upsample(dec4, scale_factor=8, mode='bilinear'),
                          F.upsample(dec5, scale_factor=16, mode='bilinear'),
                          ], 1)
    return self.final(dec1)
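A shape sketch of the hypercolumn concatenation; the channel counts and resolutions below are assumptions chosen for illustration:

import torch
import torch.nn.functional as F

# Hypothetical decoder outputs at full, 1/2, 1/4, 1/8 and 1/16 resolution.
dec1 = torch.randn(1, 32, 128, 128)
dec2 = torch.randn(1, 32, 64, 64)
dec3 = torch.randn(1, 64, 32, 32)
dec4 = torch.randn(1, 128, 16, 16)
dec5 = torch.randn(1, 256, 8, 8)
hyper = torch.cat([dec1] + [
    F.interpolate(d, scale_factor=s, mode='bilinear', align_corners=False)
    for d, s in [(dec2, 2), (dec3, 4), (dec4, 8), (dec5, 16)]
], dim=1)
print(hyper.shape)  # torch.Size([1, 512, 128, 128])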
Example 9: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def forward(self, x, d=None):
    encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
    encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
    center = self.center(encoder5)
    dec5 = self.dec5(center, encoder5)
    dec4 = self.dec4(dec5, encoder4)
    dec3 = self.dec3(dec4, encoder3)
    dec2 = self.dec2(dec3, encoder2)
    dec1 = self.dec1(dec2)
    if self.use_hypercolumn:
        dec1 = torch.cat([dec1,
                          F.upsample(dec2, scale_factor=2, mode='bilinear'),
                          F.upsample(dec3, scale_factor=4, mode='bilinear'),
                          F.upsample(dec4, scale_factor=8, mode='bilinear'),
                          F.upsample(dec5, scale_factor=16, mode='bilinear'),
                          ], 1)
    # Same as Example 8, plus a depth-channel excitation on the fused features.
    depth_channel_excitation = self.depth_channel_excitation(dec1, d)
    return self.final(depth_channel_excitation)
Example 10: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def forward(self, x):
    encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
    encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
    psp = self.psp(encoder5)
    up4 = self.up4(psp)
    up3 = self.up3(up4)
    up2 = self.up2(up3)
    up1 = self.up1(up2)
    if self.use_hypercolumn:
        hypercolumn = torch.cat([up1,
                                 F.upsample(up2, scale_factor=2, mode='bilinear'),
                                 F.upsample(up3, scale_factor=4, mode='bilinear'),
                                 F.upsample(up4, scale_factor=8, mode='bilinear'),
                                 ], 1)
        drop = F.dropout2d(hypercolumn, p=self.dropout_2d)
    else:
        drop = F.dropout2d(up4, p=self.dropout_2d)
    return self.final(drop)
Example 11: _upsample_product
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def _upsample_product(self, x, y):
    '''Upsample x and multiply it with y elementwise.

    Args:
      x: (Variable) top feature map to be upsampled.
      y: (Variable) lateral feature map.

    Returns:
      (Variable) product feature map.

    Note: in PyTorch, when the input size is odd, the feature map upsampled
    with `F.upsample(..., scale_factor=2, mode='nearest')` may not match the
    lateral feature map size, e.g.
      original input size: [N,_,15,15] ->
      conv2d feature map size: [N,_,8,8] ->
      upsampled feature map size: [N,_,16,16]
    So we use bilinear upsampling, which supports arbitrary output sizes.
    '''
    # align_corners defaults to False since PyTorch 0.4.0 but was effectively
    # True in 0.3.x; the original code targeted 0.3.1, so align_corners=True
    # preserves its behavior.
    return y * F.interpolate(
        x, size=y.shape[2:], mode="bilinear", align_corners=True)
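The align_corners caveat noted in the comment is easy to observe; the two settings sample on different pixel grids:

import torch
import torch.nn.functional as F

x = torch.arange(4.).view(1, 1, 2, 2)
a = F.interpolate(x, size=(4, 4), mode='bilinear', align_corners=True)
b = F.interpolate(x, size=(4, 4), mode='bilinear', align_corners=False)
print(torch.allclose(a, b))  # False: the corner pixels are mapped differently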
Example 12: _forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def _forward(self, level, inp):
    # Upper branch
    up1 = inp
    up1 = self._modules['b1_' + str(level)](up1)
    up1 = self.dropout(up1)
    # Lower branch
    low1 = F.max_pool2d(inp, 2, stride=2)
    low1 = self._modules['b2_' + str(level)](low1)
    if level > 1:
        low2 = self._forward(level - 1, low1)
    else:
        low2 = low1
        low2 = self._modules['b2_plus_' + str(level)](low2)
    low3 = low2
    low3 = self._modules['b3_' + str(level)](low3)
    up1size = up1.size()
    rescale_size = (up1size[2], up1size[3])
    # Bilinear upsampling to up1's exact size avoids the off-by-one mismatch
    # that nearest upsampling with scale_factor=2 can produce on odd sizes.
    up2 = F.upsample(low3, size=rescale_size, mode='bilinear')
    return up1 + up2
Example 13: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def __init__(self, in_channel, out_channel, kernel_size=[3, 3],
             padding=1, stride=1, n_class=None, bn=True,
             activation=F.relu, upsample=True, self_attention=False):
    super().__init__()
    self.conv = spectral_init(nn.Conv2d(in_channel, out_channel,
                                        kernel_size, stride, padding,
                                        bias=False if bn else True))
    self.upsample = upsample
    self.activation = activation
    self.bn = bn
    if bn:
        self.norm = ConditionalNorm(out_channel, n_class)
    self.self_attention = self_attention
    if self_attention:
        self.attention = SelfAttention(out_channel, 1)
Example 14: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def forward(self, input, class_id=None):
    out = input
    if self.upsample:
        out = F.upsample(out, scale_factor=2)
    out = self.conv(out)
    if self.bn:
        out = self.norm(out, class_id)
    if self.activation is not None:
        out = self.activation(out)
    if self.self_attention:
        out = self.attention(out)
    return out
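Examples 13 and 14 together form a GAN-style generator block; spectral_init, ConditionalNorm and SelfAttention are project-local helpers not shown here. A hypothetical usage sketch, assuming the enclosing class is named GBlock (the name and shapes are assumptions):

import torch

# Hypothetical: GBlock is the class whose __init__/forward are shown above.
block = GBlock(in_channel=256, out_channel=128, n_class=10)
x = torch.randn(8, 256, 16, 16)
class_id = torch.randint(0, 10, (8,))
out = block(x, class_id)
print(out.shape)  # torch.Size([8, 128, 32, 32]) -- spatially upsampled by 2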
Example 15: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import upsample [as alias]
def forward(self, x, labels, th=1.0):
    x_size = x.size()
    x = self.layer0(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    x = self.final(x)
    # Upsample the logits back to the input resolution.
    x = F.upsample(x, x_size[2:], mode='bilinear')
    if labels is not None:
        losses, total_valid_pixel = self.mceloss(x, labels, th=th)
        classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([x], labels, self.num_classes)
        # Need to perform this operation for MultiGPU
        classwise_pixel_acc = Variable(torch.FloatTensor([classwise_pixel_acc]).cuda())
        classwise_gtpixels = Variable(torch.FloatTensor([classwise_gtpixels]).cuda())
        classwise_predpixels = Variable(torch.FloatTensor([classwise_predpixels]).cuda())
        return x, losses, classwise_pixel_acc, classwise_gtpixels, classwise_predpixels, total_valid_pixel
    else:
        return x
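One modernization note: Variable has been merged into Tensor since PyTorch 0.4, so the wrapping above can be written more simply. A minimal sketch (the value is a placeholder):

import torch

value = 0.95  # e.g. a per-class statistic computed as a Python float
t = torch.tensor([value], dtype=torch.float32,
                 device='cuda' if torch.cuda.is_available() else 'cpu')
# ...replaces Variable(torch.FloatTensor([value]).cuda()) on PyTorch >= 0.4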