This article collects typical usage examples of torch.nn.ConstantPad2d in Python. If you have been wondering what nn.ConstantPad2d does, how to use it, or what real examples look like, the curated code samples below should help. You can also explore further usage examples from the module it belongs to, torch.nn.
Below are 15 code examples of nn.ConstantPad2d, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python code examples.
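As a quick reference before the examples: nn.ConstantPad2d(padding, value) pads the last two dimensions of its input with a constant, and the 4-tuple padding is ordered (left, right, top, bottom). A minimal sketch, with shapes chosen for illustration:

import torch
from torch import nn

# Pad one column on the right and one row at the bottom with zeros.
pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
x = torch.randn(1, 3, 13, 13)  # (N, C, H, W)
print(pad(x).shape)            # torch.Size([1, 3, 14, 14])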
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def __init__(self, config_channels, anchors, num_cls, channels=16):
    nn.Module.__init__(self)
    layers = []
    bn = config_channels.config.getboolean('batch_norm', 'enable')
    for _ in range(5):
        layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
        layers.append(nn.MaxPool2d(kernel_size=2))
        channels *= 2
    layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
    layers.append(nn.ConstantPad2d((0, 1, 0, 1), float(np.finfo(np.float32).min)))
    layers.append(nn.MaxPool2d(kernel_size=2, stride=1))
    channels *= 2
    for _ in range(2):
        layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
    layers.append(Conv2d(config_channels.channels, model.output_channels(len(anchors), num_cls), 1, bn=False, act=False))
    self.layers = nn.Sequential(*layers)
    self.init()
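Note the pad value here: the most negative float32, so the padded right column and bottom row can never win the following max pooling. Together with nn.MaxPool2d(kernel_size=2, stride=1) this keeps the spatial size unchanged, the usual trick for the last pooling stage of tiny-YOLO-style detectors. A shape check, with a 13x13 grid assumed for illustration:

import numpy as np
import torch
from torch import nn

pool = nn.Sequential(
    nn.ConstantPad2d((0, 1, 0, 1), float(np.finfo(np.float32).min)),
    nn.MaxPool2d(kernel_size=2, stride=1),
)
x = torch.randn(1, 256, 13, 13)
print(pool(x).shape)  # torch.Size([1, 256, 13, 13]) -- size preserved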
Example 2: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def __init__(self, in_c, out_c, conv_num=2):
    super().__init__()
    additional_conv = []
    layer_length = 4
    for i in range(1, conv_num + 1):
        additional_conv += [
            nn.ConstantPad2d((2, 1, 2, 1), 0),
            nn.ConvTranspose2d(out_c, out_c, kernel_size=4, stride=1, padding=3, bias=False),
            nn.BatchNorm2d(out_c, eps=0.001, momentum=0.001),
            nn.ReLU(inplace=True)
        ]
    self.main = nn.Sequential(
        # nn.ConstantPad2d((0, 1, 0, 1), 0),
        nn.ConvTranspose2d(in_c, out_c, kernel_size=4, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(out_c, eps=0.001, momentum=0.001),
        nn.ReLU(inplace=True),
        *additional_conv
    )
Example 3: _make_conv_block
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def _make_conv_block(
        cls,
        in_channels: int,
        out_channels: int,
        kernel_size: tuple
) -> nn.Module:
    """Make conv block."""
    return nn.Sequential(
        nn.ConstantPad2d(
            (0, kernel_size[1] - 1, 0, kernel_size[0] - 1), 0
        ),
        nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size
        ),
        nn.ReLU()
    )
Example 4: _make_conv_pool_block
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def _make_conv_pool_block(
        cls,
        in_channels: int,
        out_channels: int,
        kernel_size: tuple,
        activation: nn.Module,
        pool_size: tuple,
) -> nn.Module:
    """Make conv pool block."""
    return nn.Sequential(
        # Same padding
        nn.ConstantPad2d(
            (0, kernel_size[1] - 1, 0, kernel_size[0] - 1), 0
        ),
        nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size
        ),
        activation,
        nn.MaxPool2d(kernel_size=pool_size)
    )
Example 5: _make_conv_pool_block
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def _make_conv_pool_block(
        cls,
        in_channels: int,
        out_channels: int,
        kernel_size: tuple,
        activation: nn.Module
) -> nn.Module:
    """Make conv pool block."""
    return nn.Sequential(
        # Same padding
        nn.ConstantPad2d(
            (0, kernel_size[1] - 1, 0, kernel_size[0] - 1), 0
        ),
        nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size
        ),
        activation
    )
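Examples 3 to 5 share the same pattern: for a kernel of size (h, w), padding (0, w - 1, 0, h - 1) before an unpadded Conv2d is asymmetric 'same' padding, with all of the extra context added on the right and bottom so the output keeps the input's height and width. A quick check, with channel counts assumed for illustration:

import torch
from torch import nn

kernel_size = (3, 5)
block = nn.Sequential(
    nn.ConstantPad2d((0, kernel_size[1] - 1, 0, kernel_size[0] - 1), 0),
    nn.Conv2d(in_channels=4, out_channels=8, kernel_size=kernel_size),
)
x = torch.randn(2, 4, 10, 20)
print(block(x).shape)  # torch.Size([2, 8, 10, 20]) -- H and W unchanged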
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def __init__(self, in_dim, out_dim=None, kernel_size=3, mask='B'):
    super(GatedMaskedConv2d, self).__init__()
    if out_dim is None:
        out_dim = in_dim
    self.dim = out_dim
    self.size = kernel_size
    self.mask = mask
    pad = self.size // 2

    # vertical stack
    self.v_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(pad + 1, self.size))
    self.v_pad1 = nn.ConstantPad2d((pad, pad, pad, 0), 0)
    self.v_pad2 = nn.ConstantPad2d((0, 0, 1, 0), 0)
    self.vh_conv = nn.Conv2d(2 * self.dim, 2 * self.dim, kernel_size=1)

    # horizontal stack
    self.h_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(1, pad + 1))
    self.h_pad1 = nn.ConstantPad2d((self.size // 2, 0, 0, 0), 0)
    self.h_pad2 = nn.ConstantPad2d((1, 0, 0, 0), 0)
    self.h_conv_res = nn.Conv2d(self.dim, self.dim, 1)
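The padding here builds the causal receptive field of a gated PixelCNN-style layer: the vertical stack pads left, right, and top only, so its (pad + 1, size) convolution sees the current row and the rows above it, while the horizontal stack pads only to the left for the pixels before the current one. A shape sketch for the vertical branch, with sizes assumed for illustration:

import torch
from torch import nn

size, dim = 3, 8
pad = size // 2
v_pad1 = nn.ConstantPad2d((pad, pad, pad, 0), 0)               # no bottom padding
v_conv = nn.Conv2d(dim, 2 * dim, kernel_size=(pad + 1, size))
x = torch.randn(1, dim, 16, 16)
print(v_conv(v_pad1(x)).shape)  # torch.Size([1, 16, 16, 16]) -- H, W preserved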
Example 7: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def __init__(self, in_channels, out_channels=1, min_depth=0.5):
    """
    Initializes an InvDepth object.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    min_depth : float
        Minimum depth value to calculate
    """
    super().__init__()
    self.min_depth = min_depth
    self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1)
    self.pad = nn.ConstantPad2d([1] * 4, value=0)
    self.activ = nn.Sigmoid()
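Since conv1 is declared without padding, the separate nn.ConstantPad2d([1] * 4) (note that a list works as well as a tuple) supplies the one-pixel border. Assuming the pad runs before the convolution in forward, the usual order, the spatial size is preserved. A sketch with in_channels=32 assumed:

import torch
from torch import nn

conv1 = nn.Conv2d(32, 1, kernel_size=3, stride=1)
pad = nn.ConstantPad2d([1] * 4, value=0)  # one pixel on every side
x = torch.randn(1, 32, 64, 64)
print(conv1(pad(x)).shape)  # torch.Size([1, 1, 64, 64])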
Example 8: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def __init__(self, C_in, C_out, stride, affine, track_running_stats):
    super(FactorizedReduce, self).__init__()
    self.stride = stride
    self.C_in = C_in
    self.C_out = C_out
    self.relu = nn.ReLU(inplace=False)
    if stride == 2:
        # assert C_out % 2 == 0, 'C_out : {:}'.format(C_out)
        C_outs = [C_out // 2, C_out - C_out // 2]
        self.convs = nn.ModuleList()
        for i in range(2):
            self.convs.append(nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False))
        self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
    elif stride == 1:
        self.conv = nn.Conv2d(C_in, C_out, 1, stride=stride, padding=0, bias=False)
    else:
        raise ValueError('Invalid stride : {:}'.format(stride))
    self.bn = nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats)
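The forward pass is not part of this excerpt, but in the NAS-style implementations this FactorizedReduce pattern comes from, the (0, 1, 0, 1) pad exists so the second stride-2 convolution can read a one-pixel-shifted view of the input without shrinking it. A hedged sketch of how these pieces are typically combined (an assumption, not code from this example):

def forward(self, x):
    if self.stride == 2:
        x = self.relu(x)
        y = self.pad(x)  # pad right/bottom so the shifted slice keeps the full size
        out = torch.cat([self.convs[0](x), self.convs[1](y[:, :, 1:, 1:])], dim=1)
    else:
        out = self.conv(self.relu(x))
    return self.bn(out)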
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def __init__(self, C_in, C_out, stride, affine=True):
    super(FactorizedReduce, self).__init__()
    self.stride = stride
    self.C_in = C_in
    self.C_out = C_out
    self.relu = nn.ReLU(inplace=False)
    if stride == 2:
        # assert C_out % 2 == 0, 'C_out : {:}'.format(C_out)
        C_outs = [C_out // 2, C_out - C_out // 2]
        self.convs = nn.ModuleList()
        for i in range(2):
            self.convs.append(nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False))
        self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
    elif stride == 4:
        assert C_out % 4 == 0, 'C_out : {:}'.format(C_out)
        self.convs = nn.ModuleList()
        for i in range(4):
            self.convs.append(nn.Conv2d(C_in, C_out // 4, 1, stride=stride, padding=0, bias=False))
        self.pad = nn.ConstantPad2d((0, 3, 0, 3), 0)
    else:
        raise ValueError('Invalid stride : {:}'.format(stride))
    self.bn = nn.BatchNorm2d(C_out, affine=affine)
Example 10: rebuild_features_average
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def rebuild_features_average(self, features, mask, target_edges):
    self.prepare_groups(features, mask)
    fe = torch.matmul(features.squeeze(-1), self.groups)
    occurrences = torch.sum(self.groups, 0).expand(fe.shape)
    fe = fe / occurrences
    padding_b = target_edges - fe.shape[1]
    if padding_b > 0:
        padding_b = ConstantPad2d((0, padding_b, 0, 0), 0)
        fe = padding_b(fe)
    return fe
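Here ConstantPad2d pads a plain feature tensor rather than an image batch: constant padding only touches the last two dimensions, so (0, padding_b, 0, 0) appends zero columns until the edge dimension reaches target_edges (examples 11 and 12 below pad rows, and both dimensions, the same way). A minimal sketch with assumed sizes:

import torch
from torch.nn import ConstantPad2d

fe = torch.randn(1, 16, 140)  # (batch, channels, edges)
target_edges = 150
pad = ConstantPad2d((0, target_edges - fe.shape[-1], 0, 0), 0)
print(pad(fe).shape)          # torch.Size([1, 16, 150])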
Example 11: prepare_groups
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def prepare_groups(self, features, mask):
    tensor_mask = torch.from_numpy(mask)
    self.groups = torch.clamp(self.groups[tensor_mask, :], 0, 1).transpose_(1, 0)
    padding_a = features.shape[1] - self.groups.shape[0]
    if padding_a > 0:
        padding_a = ConstantPad2d((0, 0, 0, padding_a), 0)
        self.groups = padding_a(self.groups)
Example 12: pad_groups
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def pad_groups(self, group, unroll_start):
    start, end = group.shape
    padding_rows = unroll_start - start
    padding_cols = self.unroll_target - end
    if padding_rows != 0 or padding_cols != 0:
        padding = nn.ConstantPad2d((0, padding_cols, 0, padding_rows), 0)
        group = padding(group)
    return group
Example 13: get_pad_operation
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def get_pad_operation(self):
    if self.op in ['Conv2d']:
        lr = self.dilation[1] * (self.kernel_size[1] // 2)
        hw = self.dilation[0] * (self.kernel_size[0] // 2)
        self.pad_op = nn.ConstantPad2d((lr, lr, hw, hw), 0)
    if self.op in ['Conv3d']:
        lr = self.dilation[2] * (self.kernel_size[2] // 2)
        hw = self.dilation[1] * (self.kernel_size[1] // 2)
        fb = self.dilation[0] * (self.kernel_size[0] // 2)  # (front, back) => depth dimension
        self.pad_op = nn.ConstantPad3d((lr, lr, hw, hw, fb, fb), 0)
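For odd kernels, dilation * (kernel_size // 2) per side is exactly the 'same' padding of a dilated convolution, since the effective kernel extent is dilation * (kernel_size - 1) + 1. A quick check of the Conv2d branch, with channel and grid sizes assumed:

import torch
from torch import nn

kernel_size, dilation = (3, 3), (2, 2)
lr = dilation[1] * (kernel_size[1] // 2)
hw = dilation[0] * (kernel_size[0] // 2)
block = nn.Sequential(
    nn.ConstantPad2d((lr, lr, hw, hw), 0),
    nn.Conv2d(4, 4, kernel_size=kernel_size, dilation=dilation),
)
x = torch.randn(1, 4, 17, 17)
print(block(x).shape)  # torch.Size([1, 4, 17, 17]) -- size preserved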
Example 14: padding_same_conv2d
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def padding_same_conv2d(input_size, in_c, out_c, kernel_size=4, stride=1):
    output_size = input_size // stride
    padding_num = stride * (output_size - 1) - input_size + kernel_size
    if padding_num % 2 == 0:
        return nn.Sequential(nn.Conv2d(in_c, out_c, kernel_size=kernel_size, stride=stride, padding=padding_num // 2, bias=False))
    else:
        return nn.Sequential(
            nn.ConstantPad2d((padding_num // 2, padding_num // 2 + 1, padding_num // 2, padding_num // 2 + 1), 0),
            nn.Conv2d(in_c, out_c, kernel_size=kernel_size, stride=stride, padding=0, bias=False)
        )
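When padding_num is odd, this even-kernel 'same' convolution cannot be expressed with Conv2d's symmetric padding argument alone, hence the explicit asymmetric pad with one extra pixel on the right and bottom. Usage, with sizes assumed for illustration:

import torch

# kernel_size=4, stride=1 on a 32x32 input: padding_num = 3 (odd),
# so the helper builds nn.ConstantPad2d((1, 2, 1, 2), 0) before the conv.
conv = padding_same_conv2d(input_size=32, in_c=3, out_c=8, kernel_size=4, stride=1)
x = torch.randn(1, 3, 32, 32)
print(conv(x).shape)  # torch.Size([1, 8, 32, 32])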
Example 15: _init_modules
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad2d [as alias]
def _init_modules(self):
    vgg = models.vgg16()
    if self.pretrained:
        print("Loading pretrained weights from %s" % (self.model_path))
        state_dict = torch.load(self.model_path)
        vgg.load_state_dict({k: v for k, v in state_dict.items() if k in vgg.state_dict()})

    vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])

    # not using the last maxpool layer
    self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1])

    # Fix the layers before conv3:
    for layer in range(10):
        for p in self.RCNN_base[layer].parameters():
            p.requires_grad = False

    # self.RCNN_base = _RCNN_base(vgg.features, self.classes, self.dout_base_model)

    self.RCNN_top = vgg.classifier
    self.RCNN_cls_score = nn.Linear(4096, self.n_classes)

    # self.stu_feature_adap = nn.Sequential(nn.Conv2d(512, 1024, kernel_size=3, padding=1),
    #                                       nn.ReLU(),
    #                                       nn.ConstantPad2d((0, 1, 0, 1), 0.))
    #
    # self.stu_mask_pad = nn.ConstantPad2d((0, 1, 0, 1), 0.)

    if self.class_agnostic:
        self.RCNN_bbox_pred = nn.Linear(4096, 4)
    else:
        self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes)