本文整理汇总了Python中torch.nn.init.normal方法的典型用法代码示例。如果您正苦于以下问题:Python init.normal方法的具体用法?Python init.normal怎么用?Python init.normal使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.nn.init
的用法示例。
在下文中一共展示了init.normal方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: define_D
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def define_D(input_nc, ndf, which_model_netD,
             n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):
    """Build, optionally move to GPU, and weight-initialize a discriminator.

    Args:
        input_nc: number of input channels.
        ndf: base number of discriminator filters.
        which_model_netD: 'basic' (fixed 3-layer) or 'n_layers' (depth
            chosen via ``n_layers_D``).
        norm: normalization layer name resolved by ``get_norm_layer``.
        use_sigmoid: append a sigmoid to the discriminator output.
        init_type: weight-initialization scheme passed to ``init_weights``.
        gpu_ids: GPU device ids; the net is placed on the first one if any.

    Raises:
        NotImplementedError: for an unrecognized ``which_model_netD``.
    """
    on_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if on_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        # 'basic' hard-codes a depth of 3 layers.
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' %
                                  which_model_netD)
    if on_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
示例2: reset_parameters
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def reset_parameters(self,
                     init_shared=lambda x: torch.nn.init.normal_(x, std=0.1),
                     init_importance=lambda x: torch.nn.init.normal_(x, std=0.0005)):
    """Reset the trainable parameters of the embedding tables.

    Args:
        init_shared: callable initializing the shared embedding weight in
            place; defaults to N(0, 0.1).  (``normal_`` replaces the removed
            ``torch.nn.init.normal`` alias; it mutates and returns its
            argument, so the Parameter object survives the assignment.)
        init_importance: callable initializing the importance weights in
            place; defaults to N(0, 0.0005).
    """
    def set_constant_row(parameters, iRow=0, value=0):
        """Return `parameters` with row `iRow` set to the constant `value`."""
        data = parameters.data
        data[iRow, :] = value
        return torch.nn.Parameter(data, requires_grad=parameters.requires_grad)
    if self.seed is not None:
        # Seed both RNGs only when a seed was supplied; the original called
        # np.random.seed(self.seed) unconditionally, and np.random.seed(None)
        # silently re-randomizes numpy's RNG.
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
    self.shared_embeddings.weight = init_shared(self.shared_embeddings.weight)
    self.importance_weights.weight = init_importance(self.importance_weights.weight)
    if self.padding_idx is not None:
        # Zero the padding row.  Bug fix: the original always zeroed row 0
        # even though the guard checks self.padding_idx; pass the actual
        # padding row index (identical behavior when padding_idx == 0).
        self.shared_embeddings.weight = set_constant_row(
            self.shared_embeddings.weight, iRow=self.padding_idx)
        self.importance_weights.weight = set_constant_row(
            self.importance_weights.weight, iRow=self.padding_idx)
    self.shared_embeddings.weight.requires_grad = self.train_sharedEmbed
    self.importance_weights.weight.requires_grad = self.train_weight
示例3: define_D
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def define_D(input_nc, ndf, which_model_netD,
             n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):
    """Construct a discriminator, optionally move it to GPU, and initialize it.

    Args:
        input_nc: number of input channels.
        ndf: base number of discriminator filters.
        which_model_netD: 'basic' (fixed 3-layer), 'n_layers' (depth via
            ``n_layers_D``), or 'pixel' (1x1 per-pixel discriminator).
        norm: normalization layer name resolved by ``get_norm_layer``.
        use_sigmoid: append a sigmoid to the discriminator output.
        init_type: weight-initialization scheme passed to ``init_weights``.
        gpu_ids: GPU device ids; the net is placed on the first one if any.

    Raises:
        NotImplementedError: for an unrecognized ``which_model_netD``.
    """
    on_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if on_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'pixel':
        netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer,
                                  use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' %
                                  which_model_netD)
    if on_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
示例4: init_params
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def init_params(net):
    """Initialize every layer of `net` in place.

    Conv2d weights get Kaiming-normal (fan-out) init, BatchNorm2d becomes the
    identity transform (weight=1, bias=0), and Linear weights are drawn from
    N(0, 1e-3).  All existing biases are zeroed.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # normal/constant/kaiming_normal (no underscore) were removed
            # from torch.nn.init; use the in-place underscore variants.
            init.kaiming_normal_(m.weight, mode='fan_out')
            # Bug fix: `if m.bias:` raises RuntimeError on a multi-element
            # tensor; the correct presence check is `is not None`.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
#_, term_width = os.popen('stty size', 'r').read().split()
# term_width = int(term_width)
示例5: __init__
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def __init__(self, dim, dropout=0.2, slope=0.0):
    """Stacked autoencoder with mirrored encoder/decoder Linear layers.

    Args:
        dim: list of layer widths; ``dim[0]`` is the input dimension and each
            adjacent pair defines one encoder (and mirrored decoder) layer.
        dropout: dropout probability for the two dropout modules.
        slope: negative slope stored for the (leaky-)ReLU activations.
    """
    super(SDAE, self).__init__()
    self.in_dim = dim[0]
    self.nlayers = len(dim) - 1
    self.reluslope = slope
    self.enc, self.dec = [], []
    for i in range(self.nlayers):
        # Register each layer under a stable attribute name so nn.Module
        # tracks its parameters (plain lists are invisible to it).
        self.enc.append(nn.Linear(dim[i], dim[i+1]))
        setattr(self, 'enc_{}'.format(i), self.enc[-1])
        self.dec.append(nn.Linear(dim[i+1], dim[i]))
        setattr(self, 'dec_{}'.format(i), self.dec[-1])
    self.base = []
    for i in range(self.nlayers):
        # base[i] = the first i encoder layers as one feed-forward stack.
        self.base.append(nn.Sequential(*self.enc[:i]))
    self.dropmodule1 = nn.Dropout(p=dropout)
    self.dropmodule2 = nn.Dropout(p=dropout)
    # size_average=True is the deprecated spelling of reduction='mean'.
    self.loss = nn.MSELoss(reduction='mean')
    # initialization
    for m in self.modules():
        if isinstance(m, nn.Linear):
            # normal/constant (no underscore) were removed from torch.nn.init.
            init.normal_(m.weight, std=1e-2)
            # Bug fix: `m.bias.data is not None` is always True (data is a
            # tensor); check the bias attribute itself.
            if m.bias is not None:
                init.constant_(m.bias, 0)
示例6: __init__
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def __init__(self, dim, slope=0.0):
    """Encoder/decoder stack mirroring SDAE but without dropout or a loss.

    Args:
        dim: list of layer widths; ``dim[0]`` is the input dimension and each
            adjacent pair defines one encoder (and mirrored decoder) layer.
        slope: negative slope stored for the (leaky-)ReLU activations.
    """
    super(extractSDAE, self).__init__()
    self.in_dim = dim[0]
    self.nlayers = len(dim) - 1
    self.reluslope = slope
    self.enc, self.dec = [], []
    for i in range(self.nlayers):
        # Register each layer under a stable attribute name so nn.Module
        # tracks its parameters (plain lists are invisible to it).
        self.enc.append(nn.Linear(dim[i], dim[i+1]))
        setattr(self, 'enc_{}'.format(i), self.enc[-1])
        self.dec.append(nn.Linear(dim[i+1], dim[i]))
        setattr(self, 'dec_{}'.format(i), self.dec[-1])
    self.base = []
    for i in range(self.nlayers):
        # base[i] = the first i encoder layers as one feed-forward stack.
        self.base.append(nn.Sequential(*self.enc[:i]))
    # initialization
    for m in self.modules():
        if isinstance(m, nn.Linear):
            # normal/constant (no underscore) were removed from torch.nn.init.
            init.normal_(m.weight, std=1e-2)
            # Bug fix: `m.bias.data is not None` is always True (data is a
            # tensor); check the bias attribute itself.
            if m.bias is not None:
                init.constant_(m.bias, 0)
示例7: __init__
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def __init__(self, input_nc, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_dropout=False):
    """Downsampling convolutional feature extractor.

    NOTE(review): the two trailing 3x3 convolutions hard-code 512 channels,
    which assumes the downsampling loop has reached 512 (true for the default
    ngf=64, n_layers=3) — confirm before using other configurations.
    `use_dropout` is accepted but not used in this constructor.
    """
    super(FeatureExtraction, self).__init__()
    layers = [nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1),
              nn.ReLU(True),
              norm_layer(ngf)]
    for i in range(n_layers):
        # Double the channel count at each downsampling stage, capped at 512.
        in_ngf = 2**i * ngf if 2**i * ngf < 512 else 512
        out_ngf = 2**(i+1) * ngf if 2**i * ngf < 512 else 512
        layers += [nn.Conv2d(in_ngf, out_ngf, kernel_size=4, stride=2, padding=1),
                   nn.ReLU(True),
                   norm_layer(out_ngf)]
    layers += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
               nn.ReLU(True),
               norm_layer(512),
               nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
               nn.ReLU(True)]
    self.model = nn.Sequential(*layers)
    init_weights(self.model, init_type='normal')
示例8: weights_init
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def weights_init(init_type='xavier'):
    """Return a per-module initializer suitable for ``net.apply(...)``.

    Args:
        init_type: 'normal' (N(0, 0.02)), 'xavier', 'kaiming', 'orthogonal',
            or 'default' (leave the module's own initialization in place).

    Raises:
        AssertionError: for an unrecognized ``init_type`` — only at apply
        time, on the first matching Conv/Linear module.
    """
    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
            # The non-underscore init functions (normal, xavier_normal, ...)
            # were removed from torch.nn.init; use the in-place variants.
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif (classname.find('Norm') == 0):
            # Matches classes whose name *starts with* 'Norm' — note this
            # does not catch e.g. 'BatchNorm2d'.
            if hasattr(m, 'weight') and m.weight is not None:
                init.constant_(m.weight.data, 1.0)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
    return init_fun
示例9: define_G
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[]):
    """Build a generator network, optionally move it to GPU, and initialize it.

    Args:
        input_nc: number of input channels.
        output_nc: number of output channels.
        ngf: base number of generator filters.
        which_model_netG: only 'reflrmnetwork' is supported.
        norm: normalization layer name resolved by ``get_norm_layer``.
        use_dropout: forwarded to the generator constructor.
        init_type: weight-initialization scheme passed to ``init_weights``.
        gpu_ids: GPU device ids; the net is placed on the first one if any.

    Raises:
        NotImplementedError: for an unrecognized ``which_model_netG``.
    """
    on_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if on_gpu:
        assert(torch.cuda.is_available())
    # Guard clause: reject unknown names up front instead of an if/else chain.
    if which_model_netG != 'reflrmnetwork':
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
    netG = ReflRmNetwork(input_nc, output_nc, ngf, norm_layer=norm_layer,
                         use_dropout=use_dropout, gpu_ids=gpu_ids)
    if on_gpu:
        netG.cuda(gpu_ids[0])
    init_weights(netG, init_type=init_type)
    return netG
示例10: init_params
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def init_params(net):
    """Initialize every layer of `net` in place.

    Conv2d weights get Kaiming-normal (fan-out) init, BatchNorm2d becomes the
    identity transform (weight=1, bias=0), and Linear weights are drawn from
    N(0, 1e-3).  All existing biases are zeroed.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # normal/constant/kaiming_normal (no underscore) were removed
            # from torch.nn.init; use the in-place underscore variants.
            init.kaiming_normal_(m.weight, mode='fan_out')
            # Bug fix: `if m.bias:` raises RuntimeError on a multi-element
            # tensor; the correct presence check is `is not None`.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
示例11: generate_dataset
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def generate_dataset(true_w, true_b):
    """Synthesize a linear-regression dataset with Gaussian noise.

    Draws 1000 samples with features ~ N(0, 1), computes labels as
    ``true_w[0]*x0 + true_w[1]*x1 + true_b`` plus N(0, 0.01) noise, shows a
    scatter plot of the second feature against the labels, and returns
    ``(features, labels)``.  Relies on the module-level ``num_inputs`` and
    ``plt``.
    """
    num_examples = 1000
    features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)),
                            dtype=torch.float)
    # Ground-truth linear labels.
    labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
    # Add small observation noise.
    noise = np.random.normal(0, 0.01, size=labels.size())
    labels = labels + torch.tensor(noise, dtype=torch.float)
    # Visualize feature 1 against the labels.
    plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
    plt.show()
    return features, labels
示例12: init_weights
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def init_weights(net, init_type='normal', gain=0.02):
    """Initialize `net` in place: Conv/Linear weights from N(0, gain),
    BatchNorm2d weights from N(1, gain); matched biases are zeroed.

    NOTE(review): `init_type` is accepted but never consulted — every module
    gets the normal initializer regardless; confirm whether other schemes
    were intended before relying on the parameter.
    """
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            # normal/constant (no underscore) were removed from torch.nn.init;
            # use the in-place underscore variants.
            init.normal_(m.weight.data, 0.0, gain)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    print('Network initialized with weights sampled from N(0,0.02).')
    net.apply(init_func)
示例13: reset_params
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def reset_params(self):
    """Re-initialize all parameters of this module in place.

    Conv2d weights get Kaiming-normal (fan-out) init, BatchNorm2d becomes the
    identity transform (weight=1, bias=0), and Linear weights are drawn from
    N(0, 0.001).  All existing biases are zeroed.
    """
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            # normal/constant/kaiming_normal (no underscore) were removed
            # from torch.nn.init; use the in-place underscore variants.
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=0.001)
            if m.bias is not None:
                init.constant_(m.bias, 0)
示例14: weights_init_normal
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def weights_init_normal(m):
    """``net.apply`` hook: draw Conv/Linear weights from N(0, 0.02) and
    BatchNorm weights from N(1, 0.02); BatchNorm biases are zeroed."""
    classname = m.__class__.__name__
    # normal/constant (no underscore) were removed from torch.nn.init;
    # use the in-place underscore variants.
    if classname.find('Conv') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
示例15: weights_init_xavier
# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import normal [as 别名]
def weights_init_xavier(m):
    """``net.apply`` hook: Xavier-normal init for Conv/Linear weights and
    N(1, 0.02) for BatchNorm weights; BatchNorm biases are zeroed."""
    classname = m.__class__.__name__
    # xavier_normal/normal/constant (no underscore) were removed from
    # torch.nn.init; use the in-place underscore variants.
    if classname.find('Conv') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)