This article collects typical usage examples of the torch.nn.functional.pad method in Python. If you have been struggling with questions such as what exactly functional.pad does, how to use it, or where to find examples of it, then congratulations: the curated examples below may help. You can also explore the other methods of the torch.nn.functional module.
The following shows 15 code examples of functional.pad, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: forward
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def forward(self, x1, x2):
x1 = self.up(x1)
    # input is NCHW; pad x1 so its spatial size matches x2's
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX//2,
diffY // 2, diffY - diffY//2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
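Here self.up and self.conv belong to the surrounding U-Net up-block. The centering arithmetic alone can be checked standalone; a minimal sketch with made-up shapes:

import torch
import torch.nn.functional as F

# Pad x1 on both sides so its H/W match x2 before concatenation.
x1 = torch.randn(1, 64, 28, 28)   # upsampled decoder feature map (hypothetical)
x2 = torch.randn(1, 64, 31, 31)   # skip-connection feature map (hypothetical)
diffY = x2.size(2) - x1.size(2)   # 3 -> pad 1 on top, 2 on bottom
diffX = x2.size(3) - x1.size(3)
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
                diffY // 2, diffY - diffY // 2))
assert x1.shape[2:] == x2.shape[2:]
x = torch.cat([x2, x1], dim=1)    # (1, 128, 31, 31)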
Example 2: forward
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def forward(self, x, offset):
# To fix an assert error in deform_conv_cuda.cpp:128
# input image is smaller than kernel
input_pad = (
x.size(2) < self.kernel_size[0] or x.size(3) < self.kernel_size[1])
if input_pad:
pad_h = max(self.kernel_size[0] - x.size(2), 0)
pad_w = max(self.kernel_size[1] - x.size(3), 0)
x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous()
offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant',
0).contiguous()
out = deform_conv(x, offset, self.weight, self.stride, self.padding,
self.dilation, self.groups, self.deformable_groups)
if input_pad:
out = out[:, :, :out.size(2) - pad_h, :out.size(3) -
pad_w].contiguous()
return out
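deform_conv and the kernel_size/stride attributes come from the surrounding mmdetection-style module, so the snippet is not runnable on its own. The pad-then-crop pattern itself can be sketched with a plain conv2d (hypothetical shapes):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 2, 5)        # input smaller than a 3x3 kernel in H
weight = torch.randn(8, 3, 3, 3)
kh, kw = weight.shape[2:]
pad_h = max(kh - x.size(2), 0)     # 1
pad_w = max(kw - x.size(3), 0)     # 0
if pad_h or pad_w:
    x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0)
out = F.conv2d(x, weight, padding=1)
if pad_h or pad_w:
    # crop the rows/columns that only exist because of the safety padding
    out = out[:, :, :out.size(2) - pad_h, :out.size(3) - pad_w]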
Example 3: max_pool1d
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def max_pool1d(inputs, kernel_size, stride=1, padding='same'):
'''
inputs: [N, T, C]
outputs: [N, T // stride, C]
'''
inputs = inputs.transpose(1, 2) # [N, C, T]
if padding == 'same':
left = (kernel_size - 1) // 2
right = (kernel_size - 1) - left
pad = (left, right)
else:
pad = (0, 0)
inputs = F.pad(inputs, pad)
outputs = F.max_pool1d(inputs, kernel_size, stride) # [N, C, T]
outputs = outputs.transpose(1, 2) # [N, T, C]
return outputs
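A quick sanity check of the function above (assumes the imports listed for this example); with 'same' padding and stride 1, the time dimension is preserved:

import torch

x = torch.randn(2, 10, 4)          # [N, T, C]
y = max_pool1d(x, kernel_size=3)   # stride 1, 'same' padding
print(y.shape)                     # torch.Size([2, 10, 4])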
Example 4: pad_image
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def pad_image(image, target_size):
"""
Parameters
----------
image : numpy.ndarray
shape [batch_size, c, h, w]
target_size : tuple or list
Description
-----------
Pad an image up to the target size.
"""
rows_missing = target_size[0] - image.shape[2]
cols_missing = target_size[1] - image.shape[3]
padded_img = F.pad(image, (0, cols_missing, 0, rows_missing), 'constant')
return padded_img
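Usage sketch with made-up sizes; the zeros go on the bottom and right:

import torch

img = torch.randn(1, 3, 90, 110)
padded = pad_image(img, target_size=(128, 128))
print(padded.shape)   # torch.Size([1, 3, 128, 128])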
Example 5: __init__
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def __init__(self,in_dim,out_dim,stride=1,op="A"):
super(BasicBlock,self).__init__()
self.subconv_1 = nn.Sequential(
nn.Conv2d(in_dim,out_dim,3,stride,1,bias=False),
nn.BatchNorm2d(out_dim),
nn.ReLU(inplace=True),)
self.subconv_2 = nn.Sequential(
nn.Conv2d(out_dim,out_dim,3,1,1,bias=False),
nn.BatchNorm2d(out_dim))
if in_dim == out_dim and stride == 1:
self.downsample = nn.Sequential()
elif op == 'A':
        self.downsample = LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, out_dim // 4, out_dim // 4), "constant", 0))
elif op == 'B':
self.downsample = nn.Sequential(
nn.Conv2d(in_dim,out_dim,1,stride,0,bias=False),
nn.BatchNorm2d(out_dim),
)
    else:
        raise ValueError(f"unknown downsample op: {op}")
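Option 'A' is the parameter-free shortcut from the ResNet paper: subsample H and W by strided slicing, then zero-pad the channel dimension (the fifth and sixth values passed to F.pad act on dim 1 of a 4-D tensor). In isolation, with hypothetical dims:

import torch
import torch.nn.functional as F

x = torch.randn(2, 16, 32, 32)
out_dim = 32
# subsample H and W by 2, then zero-pad channels from 16 up to 32
shortcut = F.pad(x[:, :, ::2, ::2],
                 (0, 0, 0, 0, out_dim // 4, out_dim // 4), "constant", 0)
print(shortcut.shape)   # torch.Size([2, 32, 16, 16])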
Example 6: __init__
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def __init__(self,in_dim,out_dim,stride=1,op="A"):
super(BasicBlock,self).__init__()
self.subconv_1 = nn.Sequential(
nn.Conv2d(in_dim,out_dim,3,stride,1,bias=False),
nn.BatchNorm2d(out_dim),
nn.ReLU(inplace=True),)
self.subconv_2 = nn.Sequential(
nn.Conv2d(out_dim,out_dim,3,1,1,bias=False),
nn.BatchNorm2d(out_dim))
if in_dim == out_dim and stride == 1:
self.downsample = nn.Sequential()
elif op == 'A':
        self.downsample = LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, out_dim // 4, out_dim // 4), "constant", 0))
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_dim,out_dim,1,stride,0,bias=False),
nn.BatchNorm2d(out_dim),
)
Example 7: forward
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def forward(self, x):
# compute 'same' padding
(batch, channel, t, h, w) = x.size()
    # expected output sizes under 'same' padding (informational only)
    out_t = np.ceil(float(t) / float(self.stride[0]))
    out_h = np.ceil(float(h) / float(self.stride[1]))
    out_w = np.ceil(float(w) / float(self.stride[2]))
pad_t = self.compute_pad(0, t)
pad_h = self.compute_pad(1, h)
pad_w = self.compute_pad(2, w)
pad_t_f = pad_t // 2
pad_t_b = pad_t - pad_t_f
pad_h_f = pad_h // 2
pad_h_b = pad_h - pad_h_f
pad_w_f = pad_w // 2
pad_w_b = pad_w - pad_w_f
pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
x = F.pad(x, pad)
return super(MaxPool3dSamePadding, self).forward(x)
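compute_pad is defined elsewhere on the class. In the I3D reference code it returns the total TF-style 'same' padding for one dimension; a plausible sketch (an assumption, not part of this excerpt):

def compute_pad(self, dim, s):
    # total padding so that output size = ceil(s / stride), as in TF 'same' pooling
    if s % self.stride[dim] == 0:
        return max(self.kernel_size[dim] - self.stride[dim], 0)
    return max(self.kernel_size[dim] - (s % self.stride[dim]), 0)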
Example 8: forward
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def forward(self, x):
# compute 'same' padding
(batch, channel, t, h, w) = x.size()
    # expected output sizes under 'same' padding (informational only)
    out_t = np.ceil(float(t) / float(self.stride[0]))
    out_h = np.ceil(float(h) / float(self.stride[1]))
    out_w = np.ceil(float(w) / float(self.stride[2]))
pad_t = self.compute_pad(0, t)
pad_h = self.compute_pad(1, h)
pad_w = self.compute_pad(2, w)
pad_t_f = pad_t // 2
pad_t_b = pad_t - pad_t_f
pad_h_f = pad_h // 2
pad_h_b = pad_h - pad_h_f
pad_w_f = pad_w // 2
pad_w_b = pad_w - pad_w_f
pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
x = F.pad(x, pad)
return super(MaxPool3dSamePadding, self).forward(x)
Example 9: __init__
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1),
             stride=(1, 1, 1), padding=0, activation_fn=F.relu,
             use_batch_norm=True, use_bias=False, name='unit_3d'):
"""Initializes Unit3D module."""
super(Unit3D, self).__init__()
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
self.name = name
self.padding = padding
    # We always want padding=0 here; padding is applied dynamically,
    # based on the input size, in the forward function.
    self.conv3d = nn.Conv3d(in_channels=in_channels,
                            out_channels=self._output_channels,
                            kernel_size=self._kernel_shape,
                            stride=self._stride,
                            padding=0,
                            bias=self._use_bias)
if self._use_batch_norm:
self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01)
Example 10: padding3d
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def padding3d(tensor, filter, mode='constant'):
    """
    Input shape (BN, C, T, H, W); filter shape (C_out, C_in, fT, fH, fW).
    Emulates TensorFlow-style 'same' padding for a stride-1 conv3d.
    """
    it, ih, iw = tensor.shape[2:]
    ft, fh, fw = filter.shape[2:]
    # total padding needed to keep the input size at stride 1: kernel - 1
    pt = max(0, (it - 1) + (ft - 1) + 1 - it)
    ph = max(0, (ih - 1) + (fh - 1) + 1 - ih)
    pw = max(0, (iw - 1) + (fw - 1) + 1 - iw)
    oddt = (pt % 2 != 0)
    oddh = (ph % 2 != 0)
    oddw = (pw % 2 != 0)
    if any([oddt, oddh, oddw]):
        # F.pad orders pad pairs from the last dim backwards: (W, H, T)
        pad = [0, int(oddw), 0, int(oddh), 0, int(oddt)]
        tensor = F.pad(tensor, pad, mode=mode)
    padding = (pt // 2, ph // 2, pw // 2)
    tensor = F.conv3d(tensor, filter, padding=padding)
    return tensor
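Usage sketch for the function above (made-up shapes); with odd kernel sizes the padding is symmetric and the output keeps the input size:

import torch

x = torch.randn(1, 4, 8, 16, 16)   # (BN, C, T, H, W)
w = torch.randn(4, 4, 3, 3, 3)     # stride-1 conv3d weight
y = padding3d(x, w)
print(y.shape)                     # torch.Size([1, 4, 8, 16, 16])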
Example 11: test
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def test(imgL, imgR, disp_true):
model.eval()
if args.cuda:
imgL, imgR, disp_true = imgL.cuda(), imgR.cuda(), disp_true.cuda()
#---------
mask = disp_true < 192
#----
    if imgL.shape[2] % 16 != 0:
        times = imgL.shape[2] // 16
        top_pad = (times + 1) * 16 - imgL.shape[2]
    else:
        top_pad = 0
    if imgL.shape[3] % 16 != 0:
        times = imgL.shape[3] // 16
        right_pad = (times + 1) * 16 - imgL.shape[3]
    else:
        right_pad = 0
    imgL = F.pad(imgL, (0, right_pad, top_pad, 0))
    imgR = F.pad(imgR, (0, right_pad, top_pad, 0))
with torch.no_grad():
output3 = model(imgL,imgR)
output3 = torch.squeeze(output3)
    if top_pad != 0:
        img = output3[:, top_pad:, :]
    else:
        img = output3
    if right_pad != 0:
        img = img[:, :, :-right_pad]
    if len(disp_true[mask]) == 0:
        return torch.tensor(0.)
    loss = F.l1_loss(img[mask], disp_true[mask])  # mean end-point error
    return loss.detach().cpu()
Example 12: stack_pad_tensor
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def stack_pad_tensor(tensor_list):
max_len = max([t.size(0) for t in tensor_list])
    for i, tensor in enumerate(tensor_list):
        pad_len = max_len - tensor.size(0)
        tensor_list[i] = F.pad(tensor, (0, 0, 0, pad_len))
return torch.stack(tensor_list, dim=0)
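Usage sketch: shorter tensors are zero-padded along dim 0 to the longest length before stacking:

import torch

batch = [torch.randn(3, 8), torch.randn(5, 8), torch.randn(2, 8)]
stacked = stack_pad_tensor(batch)
print(stacked.shape)   # torch.Size([3, 5, 8])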
Example 13: forward
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def forward(self, x):
# pre-context
avg_x = F.adaptive_avg_pool2d(x, output_size=1)
avg_x = self.pre_context(avg_x)
avg_x = avg_x.expand_as(x)
x = x + avg_x
# switch
avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
switch = self.switch(avg_x)
# sac
weight = self._get_weight(self.weight)
if self.use_deform:
offset = self.offset_s(avg_x)
out_s = deform_conv(x, offset, weight, self.stride, self.padding,
self.dilation, self.groups, 1)
else:
out_s = super().conv2d_forward(x, weight)
ori_p = self.padding
ori_d = self.dilation
self.padding = tuple(3 * p for p in self.padding)
self.dilation = tuple(3 * d for d in self.dilation)
weight = weight + self.weight_diff
if self.use_deform:
offset = self.offset_l(avg_x)
out_l = deform_conv(x, offset, weight, self.stride, self.padding,
self.dilation, self.groups, 1)
else:
out_l = super().conv2d_forward(x, weight)
out = switch * out_s + (1 - switch) * out_l
self.padding = ori_p
self.dilation = ori_d
# post-context
avg_x = F.adaptive_avg_pool2d(out, output_size=1)
avg_x = self.post_context(avg_x)
avg_x = avg_x.expand_as(out)
out = out + avg_x
return out
Example 14: _output_size
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def _output_size(input, weight, padding, dilation, stride):
channels = weight.size(0)
output_size = (input.size(0), channels)
for d in range(input.dim() - 2):
in_size = input.size(d + 2)
pad = padding[d]
kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
stride_ = stride[d]
output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
if not all(map(lambda s: s > 0, output_size)):
raise ValueError('convolution input is too small (output would be '
f'{"x".join(map(str, output_size))})')
return output_size
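Usage sketch (hypothetical 3x3 convolution, stride 2, padding 1):

import torch

x = torch.randn(2, 3, 32, 32)
w = torch.randn(16, 3, 3, 3)
print(_output_size(x, w, (1, 1), (1, 1), (2, 2)))   # (2, 16, 16, 16)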
Example 15: __init__
# Required imports: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pad [as alias]
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='same'):
'''
inputs: [N, T, C_in]
outputs: [N, T, C_out]
'''
super().__init__()
if padding == 'same':
left = (kernel_size - 1) // 2
right = (kernel_size - 1) - left
self.pad = (left, right)
else:
self.pad = (0, 0)
self.conv1d = nn.Conv1d(in_channels, out_channels, kernel_size, stride)
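The excerpt stops at __init__; since the Conv1d is created without padding, forward presumably applies self.pad first. A plausible completion (an assumption, not shown in the source):

def forward(self, x):
    # x: [N, T, C_in] -> Conv1d expects [N, C, T]
    x = x.transpose(1, 2)
    x = F.pad(x, self.pad)    # manual 'same' (or no-op) padding
    x = self.conv1d(x)
    return x.transpose(1, 2)  # back to [N, T, C_out]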