This article collects typical usage examples of the Python method torch.nn.init.constant. If you are unsure what init.constant does, how to call it, or what real code that uses it looks like, the curated examples below should help; you can also read further about the containing module, torch.nn.init.
The sections below present 14 code examples of init.constant, ordered by popularity. Note that init.constant was deprecated in PyTorch 0.4 in favour of the in-place variant init.constant_; the examples keep the older spelling as written.
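Before the examples, here is a minimal self-contained sketch of the call itself (the layer is arbitrary; the legacy name still works on modern PyTorch but emits a deprecation warning):

import torch.nn as nn
from torch.nn import init

layer = nn.Linear(8, 4)
init.constant(layer.bias, 0)   # legacy spelling, deprecated since PyTorch 0.4
init.constant_(layer.bias, 0)  # modern in-place equivalent
print(layer.bias)              # a zero vector of length 4, with requires_grad=True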
Example 1: conv
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, bias=False, transposed=False):
    if transposed:
        layer = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=1,
                                   output_padding=1, dilation=dilation, bias=bias)
        # Bilinear interpolation init
        w = torch.Tensor(kernel_size, kernel_size)
        centre = stride - 1 if kernel_size % 2 == 1 else stride - 0.5
        for y in range(kernel_size):
            for x in range(kernel_size):
                w[y, x] = (1 - abs((x - centre) / stride)) * (1 - abs((y - centre) / stride))
        layer.weight.data.copy_(w.div(in_planes).repeat(in_planes, out_planes, 1, 1))
    else:
        # Space-preserving padding: for odd kernel sizes, output spatial size equals input size at stride 1
        padding = (kernel_size + 2 * (dilation - 1)) // 2
        layer = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,
                          dilation=dilation, bias=bias)
    if bias:
        init.constant(layer.bias, 0)
    return layer
# The source also defines a companion bn() helper that returns a 2D batch-normalisation
# layer; it is used by Example 2, and a sketch of it follows that example.
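A quick shape check for the helper above (a sketch; the channel counts and input size are arbitrary):

import torch
import torch.nn as nn
from torch.nn import init

x = torch.randn(1, 16, 32, 32)
down = conv(16, 32, bias=True)                # bias is zero-initialised via init.constant
up = conv(32, 16, stride=2, transposed=True)  # bilinear-initialised learnable upsampling
print(down(x).shape)      # torch.Size([1, 32, 32, 32]) -- spatial size preserved
print(up(down(x)).shape)  # torch.Size([1, 16, 64, 64]) -- spatial size doubled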
Example 2: __init__
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def __init__(self, num_classes, pretrained_net):
    super().__init__()
    self.pretrained_net = pretrained_net
    self.relu = nn.ReLU(inplace=True)
    self.conv5 = conv(512, 256, stride=2, transposed=True)
    self.bn5 = bn(256)
    self.conv6 = conv(256, 128, stride=2, transposed=True)
    self.bn6 = bn(128)
    self.conv7 = conv(128, 64, stride=2, transposed=True)
    self.bn7 = bn(64)
    self.conv8 = conv(64, 64, stride=2, transposed=True)
    self.bn8 = bn(64)
    self.conv9 = conv(64, 32, stride=2, transposed=True)
    self.bn9 = bn(32)
    self.conv10 = conv(32, num_classes, kernel_size=7)
    init.constant(self.conv10.weight, 0)  # Zero init
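Example 2 relies on a bn helper that is not shown in this excerpt. A minimal sketch consistent with the comment at the end of Example 1 (an assumption, not the original code):

import torch.nn as nn

def bn(planes):
    # Returns a 2D batch-normalisation layer, matching how Example 2 calls it
    return nn.BatchNorm2d(planes)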
Example 3: __init__
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def __init__(self, dim, dropout=0.2, slope=0.0):
    super(SDAE, self).__init__()
    self.in_dim = dim[0]
    self.nlayers = len(dim) - 1
    self.reluslope = slope
    self.enc, self.dec = [], []
    for i in range(self.nlayers):
        self.enc.append(nn.Linear(dim[i], dim[i + 1]))
        setattr(self, 'enc_{}'.format(i), self.enc[-1])
        self.dec.append(nn.Linear(dim[i + 1], dim[i]))
        setattr(self, 'dec_{}'.format(i), self.dec[-1])
    self.base = []
    for i in range(self.nlayers):
        self.base.append(nn.Sequential(*self.enc[:i]))
    self.dropmodule1 = nn.Dropout(p=dropout)
    self.dropmodule2 = nn.Dropout(p=dropout)
    self.loss = nn.MSELoss(size_average=True)  # size_average is deprecated; reduction='mean' is the modern spelling
    # initialization
    for m in self.modules():
        if isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-2)
            if m.bias is not None:  # test the parameter itself; .data is never None
                init.constant(m.bias, 0)
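A quick instantiation check for the class above (a sketch; the layer widths are arbitrary, and the forward pass is not part of this excerpt):

model = SDAE([784, 256, 64])
# init.constant zeroed every Linear bias in __init__
assert all(bool((m.bias == 0).all()) for m in model.modules() if isinstance(m, nn.Linear))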
Example 4: init_params
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # truth-testing a multi-element tensor raises an error
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example 5: reset_parameters
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def reset_parameters(self):
    """
    Initialize parameters following the way proposed in the paper.
    """
    # The input-to-hidden weight matrix is initialized orthogonally.
    init.orthogonal(self.weight_ih.data)
    # The hidden-to-hidden weight matrix is initialized as an identity
    # matrix (tiled once per LSTM gate).
    weight_hh_data = torch.eye(self.hidden_size)
    weight_hh_data = weight_hh_data.repeat(1, 4)
    self.weight_hh.data.set_(weight_hh_data)
    # The bias is just set to zero vectors.
    init.constant(self.bias.data, val=0)
    # Initialization of BN parameters.
    self.bn_ih.reset_parameters()
    self.bn_hh.reset_parameters()
    self.bn_c.reset_parameters()
    self.bn_ih.bias.data.fill_(0)
    self.bn_hh.bias.data.fill_(0)
    self.bn_ih.weight.data.fill_(0.1)
    self.bn_hh.weight.data.fill_(0.1)
    self.bn_c.weight.data.fill_(0.1)
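This method comes from a batch-normalised LSTM cell; the repeat(1, 4) call implies one column block per LSTM gate. A minimal, hypothetical skeleton of the enclosing cell with just enough state for reset_parameters to run (the class name and shapes are assumptions inferred from the excerpt):

import torch
import torch.nn as nn

class CellSkeleton(nn.Module):
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.hidden_size = hidden_size
        # One column block per LSTM gate, hence the (rows, 4 * hidden_size) layout
        self.weight_ih = nn.Parameter(torch.empty(input_size, 4 * hidden_size))
        self.weight_hh = nn.Parameter(torch.empty(hidden_size, 4 * hidden_size))
        self.bias = nn.Parameter(torch.empty(4 * hidden_size))
        self.bn_ih = nn.BatchNorm1d(4 * hidden_size)
        self.bn_hh = nn.BatchNorm1d(4 * hidden_size)
        self.bn_c = nn.BatchNorm1d(hidden_size)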
Example 6: __init__
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def __init__(self, dim, slope=0.0):
    super(extractSDAE, self).__init__()
    self.in_dim = dim[0]
    self.nlayers = len(dim) - 1
    self.reluslope = slope
    self.enc, self.dec = [], []
    for i in range(self.nlayers):
        self.enc.append(nn.Linear(dim[i], dim[i + 1]))
        setattr(self, 'enc_{}'.format(i), self.enc[-1])
        self.dec.append(nn.Linear(dim[i + 1], dim[i]))
        setattr(self, 'dec_{}'.format(i), self.dec[-1])
    self.base = []
    for i in range(self.nlayers):
        self.base.append(nn.Sequential(*self.enc[:i]))
    # initialization
    for m in self.modules():
        if isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-2)
            if m.bias is not None:  # test the parameter itself; .data is never None
                init.constant(m.bias, 0)
Example 7: reset_params
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def reset_params(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_in')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm1d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.kaiming_normal(m.weight, mode='fan_in')
            if m.bias is not None:
                init.constant(m.bias, 0)
Example 8: weights_init
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
# Also requires: import math
def weights_init(init_type='xavier'):
    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
            if init_type == 'normal':
                init.normal(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
        elif classname.find('Norm') == 0:
            if hasattr(m, 'weight') and m.weight is not None:
                init.constant(m.weight.data, 1.0)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
    return init_fun
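The factory above returns a hook for nn.Module.apply; a typical call looks like this (the network is arbitrary):

net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.Conv2d(16, 16, 3))
net.apply(weights_init('kaiming'))
# Each Conv layer now has Kaiming-initialised weights and zero biases.
# Note that the Norm branch only matches class names that start with 'Norm',
# so nn.BatchNorm2d ('BatchNorm2d') would not be touched by this function.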
Example 9: conv
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, bias=False,
         transposed=False):
    """
    Returns 2D convolutional layer with space-preserving padding
    """
    if transposed:
        layer = nn.ConvTranspose2d(
            in_planes, out_planes, kernel_size=kernel_size, stride=stride,
            padding=1, output_padding=1, dilation=dilation, bias=bias)
        # Bilinear interpolation init
        w = torch.Tensor(kernel_size, kernel_size)
        centre = stride - 1 if kernel_size % 2 == 1 else stride - 0.5
        for y in range(kernel_size):
            for x in range(kernel_size):
                w[y, x] = (1 - abs((x - centre) / stride)) * (1 - abs((y - centre) / stride))
        layer.weight.data.copy_(
            w.div(in_planes).repeat(in_planes, out_planes, 1, 1))
    else:
        padding = (kernel_size + 2 * (dilation - 1)) // 2
        layer = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                          stride=stride, padding=padding, dilation=dilation,
                          bias=bias)
    if bias:
        init.constant(layer.bias, 0)
    return layer
Example 10: conv_init
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
# Also requires: import numpy as np
def conv_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform(m.weight, gain=np.sqrt(2))
        if m.bias is not None:  # guard against layers created with bias=False
            init.constant(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant(m.weight, 1)
        init.constant(m.bias, 0)
Example 11: init_weights_xavier
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def init_weights_xavier(model):
    if isinstance(model, nn.Conv2d):
        init.xavier_normal(model.weight)
        if model.bias is not None:  # guard against layers created with bias=False
            init.constant(model.bias, 0)
Example 12: init_weights_he
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def init_weights_he(model):
    if isinstance(model, nn.Conv2d):
        init.kaiming_normal(model.weight)
        if model.bias is not None:  # guard against layers created with bias=False
            init.constant(model.bias, 0)
Example 13: reset_parameters
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def reset_parameters(self):
    init.constant(self.weight, self.gamma)
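This two-line method typically lives in a scale layer such as SSD's L2Norm, where a single learned per-channel scale starts at a fixed value. A minimal sketch of a plausible enclosing module (an assumption; only weight and gamma are implied by the excerpt, and the class name is hypothetical):

import torch
import torch.nn as nn
from torch.nn import init

class ScaleLayer(nn.Module):
    def __init__(self, n_channels, gamma=20.0):
        super().__init__()
        self.gamma = gamma
        self.weight = nn.Parameter(torch.empty(n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant(self.weight, self.gamma)  # every channel scale starts at gamma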
Example 14: init_weights
# Required import: from torch.nn import init [as alias]
# Alternatively: from torch.nn.init import constant [as alias]
def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            init.normal(m.weight.data, 0.0, gain)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal(m.weight.data, 1.0, gain)
            init.constant(m.bias.data, 0.0)
    # Note: init_type is unused in this excerpt; weights are always drawn from N(0, gain).
    print('Network initialized with weights sampled from N(0,{}).'.format(gain))
    net.apply(init_func)