This article collects typical usage examples of the Python method torch.nn.modules.conv._ConvNd. If you are wondering what conv._ConvNd does, how to use it, or where to find examples of it in practice, the curated code samples below may help. You can also explore further usages of the module it belongs to, torch.nn.modules.conv.
Seven code examples of conv._ConvNd are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: init_module
# Required import: from torch.nn.modules import conv [as alias]
# Or: from torch.nn.modules.conv import _ConvNd [as alias]
def init_module(module, nonlinearity=None):
    """Initializes PyTorch modules.

    Args:
        module (torch.nn.Module): module to initialize
        nonlinearity (str, optional): nonlinearity to initialize convolutions for
    """
    for m in module.modules():
        if isinstance(m, _ConvNd):
            nn.init.kaiming_normal_(m.weight.data, mode='fan_out', nonlinearity=nonlinearity)
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
            m.weight.data.fill_(1.0)
            m.bias.data.zero_()
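A minimal usage sketch, assuming the standard imports the example relies on (the small network below is illustrative, not from the original project). Note that kaiming_normal_ needs a valid nonlinearity string, so pass e.g. 'relu' rather than leaving the default None:

import torch.nn as nn
from torch.nn.modules.conv import _ConvNd

net = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(),
)
init_module(net, nonlinearity='relu')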
Example 2: __init__
# Required import: from torch.nn.modules import conv [as alias]
# Or: from torch.nn.modules.conv import _ConvNd [as alias]
def __init__(self, in_channels, out_channels, kernel_size, pos_fn='softplus',
             init_method='k', stride=1, padding=0, dilation=1, groups=1, bias=True):
    # Call the _ConvNd constructor (transposed=False, output_padding=0)
    super(NConv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
                                  padding, dilation, False, 0, groups, bias)
    self.eps = 1e-20
    self.pos_fn = pos_fn
    self.init_method = init_method
    # Initialize weights and bias
    self.init_parameters()
    if self.pos_fn is not None:
        EnforcePos.apply(self, 'weight', pos_fn)
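This __init__ belongs to NConv2d, a _ConvNd subclass (EnforcePos and init_parameters are defined elsewhere in the same project and are not shown). For orientation, a minimal self-contained sketch of a _ConvNd subclass; the class name is made up, and the trailing 'zeros' argument is an assumption that holds on recent PyTorch, where _ConvNd also requires a padding_mode argument and tuple-valued geometry arguments:

import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair

class PlainConv2d(_ConvNd):
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, bias=True):
        # Conv2d normalizes geometry args with _pair; a 2D subclass should too
        super(PlainConv2d, self).__init__(
            in_channels, out_channels, _pair(kernel_size), _pair(stride),
            _pair(padding), _pair(dilation),
            False, _pair(0),        # transposed=False, output_padding=0
            groups, bias, 'zeros')  # padding_mode (required on recent PyTorch)

    def forward(self, x):
        # delegate to the functional conv using the registered parameters
        return F.conv2d(x, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)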
Example 3: _get_conv
# Required import: from torch.nn.modules import conv [as alias]
# Or: from torch.nn.modules.conv import _ConvNd [as alias]
def _get_conv():
    if TORCH_VERSION == 'parrots':
        from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
    else:
        from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
    return _ConvNd, _ConvTransposeMixin
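TORCH_VERSION is a module-level constant in the project this comes from, distinguishing the parrots framework from stock PyTorch. A sketch of a call site (note that _ConvTransposeMixin exists in older PyTorch releases but was later replaced by _ConvTransposeNd, so this import is version-dependent):

_ConvNd, _ConvTransposeMixin = _get_conv()

def is_conv(module):
    # example helper: matches Conv1d/2d/3d and their transposed variants
    return isinstance(module, _ConvNd)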
Example 4: init_parameters
# Required import: from torch.nn.modules import conv [as alias]
# Or: from torch.nn.modules.conv import _ConvNd [as alias]
def init_parameters(net):
    for m in net.modules():
        if isinstance(m, _ConvNd):
            init.kaiming_normal_(m.weight, 0, 'fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            # Kaiming initialisation for linear layers
            init.normal_(m.weight, 0, sqrt(2.0 / m.weight.size(0)))
            if m.bias is not None:
                init.normal_(m.bias, 0, sqrt(2.0 / m.bias.size(0)))
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            if m.bias is not None:
                init.constant_(m.bias, 0)
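The snippet leans on imports it does not show; a plausible set inferred from the call sites, plus a call on a small model (the model itself is illustrative):

from math import sqrt

import torch.nn as nn
from torch.nn import init
from torch.nn.modules.conv import _ConvNd

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8),
                    nn.Flatten(), nn.Linear(8 * 30 * 30, 10))
init_parameters(net)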
Example 5: count_convNd
# Required import: from torch.nn.modules import conv [as alias]
# Or: from torch.nn.modules.conv import _ConvNd [as alias]
def count_convNd(m: _ConvNd, x: (torch.Tensor,), y: torch.Tensor):
    x = x[0]
    kernel_ops = torch.zeros(m.weight.size()[2:]).numel()  # Kw x Kh
    bias_ops = 1 if m.bias is not None else 0
    # N x Cout x H x W x (Cin x Kw x Kh + bias)
    total_ops = y.nelement() * (m.in_channels // m.groups * kernel_ops + bias_ops)
    m.total_ops += torch.DoubleTensor([int(total_ops)])
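This counter follows the forward-hook signature (module, input, output) used by FLOP-counting tools such as thop; registering it by hand looks like the sketch below, where creating the total_ops accumulator ourselves is an assumption (thop sets it up internally):

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 16, kernel_size=3, padding=1)
conv.total_ops = torch.DoubleTensor([0])  # accumulator read by count_convNd
conv.register_forward_hook(count_convNd)
_ = conv(torch.randn(1, 3, 32, 32))
print(conv.total_ops)  # 1*16*32*32 * (3*3*3 + 1) = 458752 ops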
Example 6: count_convNd_ver2
# Required import: from torch.nn.modules import conv [as alias]
# Or: from torch.nn.modules.conv import _ConvNd [as alias]
def count_convNd_ver2(m: _ConvNd, x: (torch.Tensor,), y: torch.Tensor):
    x = x[0]
    # N x H x W (exclude Cout)
    output_size = torch.zeros((y.size()[:1] + y.size()[2:])).numel()
    # Cout x Cin x Kw x Kh
    kernel_ops = m.weight.nelement()
    if m.bias is not None:
        # Cout x 1
        kernel_ops += m.bias.nelement()
    # N x H x W x Cout x (Cin x Kw x Kh + bias)
    m.total_ops += torch.DoubleTensor([int(output_size * kernel_ops)])
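Despite the different factorization, this yields the same total as count_convNd: weight.nelement() already equals Cout x (Cin/groups) x Kw x Kh, so the groups division is folded in. A quick check (the shapes are arbitrary):

import torch
import torch.nn as nn

conv = nn.Conv2d(8, 8, 3, groups=4, padding=1)
x = torch.randn(2, 8, 16, 16)
y = conv(x)
for counter in (count_convNd, count_convNd_ver2):
    conv.total_ops = torch.DoubleTensor([0])
    counter(conv, (x,), y)
    print(conv.total_ops)  # same value from both counters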
Example 7: init_weights
# Required import: from torch.nn.modules import conv [as alias]
# Or: from torch.nn.modules.conv import _ConvNd [as alias]
def init_weights(model, conv='kaiming', batchnorm='normal', linear='kaiming', lstm='kaiming'):
    """
    :param model: PyTorch model (an nn.Module)
    :param conv: 'kaiming' or 'xavier'
    :param batchnorm: 'normal' or 'constant'
    :param linear: 'kaiming' or 'xavier'
    :param lstm: 'kaiming' or 'xavier'
    """
    for m in model.modules():
        if isinstance(m, _ConvNd):
            if conv == 'kaiming':
                initer.kaiming_normal_(m.weight)
            elif conv == 'xavier':
                initer.xavier_normal_(m.weight)
            else:
                raise ValueError("init type of conv error.\n")
            if m.bias is not None:
                initer.constant_(m.bias, 0)
        elif isinstance(m, _BatchNorm):
            if batchnorm == 'normal':
                initer.normal_(m.weight, 1.0, 0.02)
            elif batchnorm == 'constant':
                initer.constant_(m.weight, 1.0)
            else:
                raise ValueError("init type of batchnorm error.\n")
            initer.constant_(m.bias, 0.0)
        elif isinstance(m, nn.Linear):
            if linear == 'kaiming':
                initer.kaiming_normal_(m.weight)
            elif linear == 'xavier':
                initer.xavier_normal_(m.weight)
            else:
                raise ValueError("init type of linear error.\n")
            if m.bias is not None:
                initer.constant_(m.bias, 0)
        elif isinstance(m, nn.LSTM):
            for name, param in m.named_parameters():
                if 'weight' in name:
                    if lstm == 'kaiming':
                        initer.kaiming_normal_(param)
                    elif lstm == 'xavier':
                        initer.xavier_normal_(param)
                    else:
                        raise ValueError("init type of lstm error.\n")
                elif 'bias' in name:
                    initer.constant_(param, 0)
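The helper assumes initer is an alias for torch.nn.init and that _BatchNorm comes from torch.nn.modules.batchnorm (both inferred from the call sites); a usage sketch with an illustrative model:

import torch.nn as nn
import torch.nn.init as initer
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.conv import _ConvNd

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8),
                      nn.Flatten(), nn.Linear(8 * 30 * 30, 10))
init_weights(model, conv='xavier', batchnorm='constant')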