This article collects typical usage examples of the torch.nn.init.xavier_normal_ function in Python. If you have been wondering what init.xavier_normal_ does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore the containing module, torch.nn.init, for related initializers.
The sections below present 15 code examples of init.xavier_normal_, sorted by popularity by default.
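For reference before the examples: init.xavier_normal_(tensor, gain=1.0) fills its argument in place with values drawn from N(0, std^2), where std = gain * sqrt(2 / (fan_in + fan_out)), and requires a tensor with at least two dimensions. A minimal sketch using only standard PyTorch (the shape is illustrative):

import torch
from torch.nn import init

w = torch.empty(256, 128)         # fan_out = 256, fan_in = 128
init.xavier_normal_(w, gain=1.0)  # in place; also returns the same tensor
print(w.std())                    # roughly sqrt(2 / 384) ≈ 0.072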
Example 1: __init__
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def __init__(self, batch=32, bidirectional=True):
    super(BLSTM, self).__init__()
    self.bidirectional = bidirectional
    self.hidden = self.init_hidden(batch)
    # note: hard-codes bidirectional=True; the constructor flag is unused here
    self.lstm = nn.LSTM(257, 50, num_layers=2, bidirectional=True)
    self.fc = nn.Linear(50 * 2, 1)

    ## Weights initialization
    def _weights_init(m):
        # isinstance needs a tuple of types; the original
        # `nn.Conv2d or nn.Linear or ...` expression only ever tested nn.Conv2d.
        # Recurrent layers (nn.GRU, nn.LSTM) expose per-gate parameters rather
        # than a single .weight, so they need separate handling (see the
        # sketch after this example).
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            init.xavier_normal_(m.weight)
            m.bias.data.zero_()
        elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
            m.weight.data.fill_(1)
            m.bias.data.zero_()

    self.apply(_weights_init)
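The original also listed nn.GRU and nn.LSTM in the isinstance check, but recurrent modules have no single .weight tensor: their parameters are named weight_ih_l0, weight_hh_l0, and so on, so init.xavier_normal_(m.weight) would raise AttributeError. A minimal sketch of one common workaround, iterating over named_parameters (layer sizes taken from the example above):

import torch.nn as nn
from torch.nn import init

lstm = nn.LSTM(257, 50, num_layers=2, bidirectional=True)
for name, param in lstm.named_parameters():
    if 'weight' in name:      # weight_ih_l*, weight_hh_l* (2-D)
        init.xavier_normal_(param)
    elif 'bias' in name:      # bias_ih_l*, bias_hh_l* (1-D)
        param.data.zero_()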
Example 2: __init__
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def __init__(self, dims, classifier_net=None):
    [x_dim, self.y_dim, z_dim, h_dim] = dims
    super(DeepGenerativeModel, self).__init__([x_dim, z_dim, h_dim])

    self.encoder = Encoder([x_dim + self.y_dim, h_dim, z_dim])
    self.decoder = Decoder([z_dim + self.y_dim, list(reversed(h_dim)), x_dim])
    if classifier_net is None:
        self.classifier = Classifier(net=None, dims=[x_dim, h_dim[0], self.y_dim])
    else:
        self.classifier = Classifier(classifier_net)

    # Init linear layers
    for m in self.modules():
        if isinstance(m, nn.Linear):
            init.xavier_normal_(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()
Example 3: __init__
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def __init__(self, dims):
    super(VariationalAutoencoder, self).__init__()
    [x_dim, z_dim, h_dim] = dims
    self.z_dim = z_dim
    self.flow = None

    self.encoder = Encoder([x_dim, h_dim, z_dim])
    self.decoder = Decoder([z_dim, list(reversed(h_dim)), x_dim])
    self.kl_divergence = 0

    # Init linear layers
    for m in self.modules():
        if isinstance(m, nn.Linear):
            init.xavier_normal_(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()
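Both of the last two examples rely on nn.Module.modules(), which yields the module itself and every submodule recursively, so the loop reaches Linear layers nested inside Encoder and Decoder. A minimal self-contained sketch of the same pattern (the Tiny class is hypothetical):

import torch.nn as nn
from torch.nn import init

class Tiny(nn.Module):
    def __init__(self):
        super().__init__()
        self.block = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
        # modules() recurses into self.block, so the nested Linear is found
        for m in self.modules():
            if isinstance(m, nn.Linear):
                init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()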
Example 4: init_weights
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def init_weights(net, init_type, init_gain):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            # note: this variant leaves Conv/Linear biases at their default init
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    net.apply(init_func)
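A hedged usage sketch; the small network below is hypothetical and exists only to show the call. net.apply visits every submodule, and init_func dispatches on the class name:

import torch.nn as nn

net = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3),  # matched by 'Conv'
    nn.BatchNorm2d(16),               # matched by 'BatchNorm2d'
    nn.AdaptiveAvgPool2d(1),
    nn.Flatten(),
    nn.Linear(16, 10),                # matched by 'Linear'
)
init_weights(net, init_type='xavier', init_gain=0.02)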
Example 5: weights_init
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def weights_init(m):
    '''
    Code from https://gist.github.com/jeasinema/ed9236ce743c8efaf30fa2ff732749f5
    Usage:
        model = Model()
        model.apply(weights_init)
    '''
    if isinstance(m, nn.Linear):
        init.xavier_normal_(m.weight.data)
        init.normal_(m.bias.data)
    elif isinstance(m, nn.GRUCell):
        # orthogonal init for 2-D recurrent weights, normal for 1-D biases
        for param in m.parameters():
            if len(param.shape) >= 2:
                init.orthogonal_(param.data)
            else:
                init.normal_(param.data)
Example 6: init_splitted
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def init_splitted(layer, nb_clusters, sz_embedding):
    # initialize the split embedding parts separately
    from math import ceil
    for c in range(nb_clusters):
        i = torch.arange(
            c * ceil(sz_embedding / nb_clusters),
            # cut off remaining indices, e.g. if > embedding size
            min((c + 1) * ceil(sz_embedding / nb_clusters), sz_embedding)
        ).long()
        _layer = torch.nn.Linear(layer.weight.shape[1], len(i))
        layer.weight.data[i] = xavier_normal_(_layer.weight.data, gain=1)
        layer.bias.data[i] = _layer.bias.data
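Routing the initialization through a temporary Linear of the slice's own size makes fan_out reflect the sub-embedding dimension rather than the full embedding, which is the point of splitting. A hedged usage sketch, assuming a 64-dimensional embedding head split into 4 clusters (the shapes are illustrative):

import torch

embedding_head = torch.nn.Linear(512, 64)  # backbone features -> embedding
init_splitted(embedding_head, nb_clusters=4, sz_embedding=64)
# each 16-row slice is now initialized as if it were its own Linear(512, 16)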
Example 7: init_weights
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):
        # this will apply to each layer
        classname = m.__class__.__name__
        # match on 'Conv' (class names are e.g. 'Conv2d'); the original
        # lowercase 'conv' would never match the built-in layer names
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')  # good for ReLU
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    # print('initialize network with %s' % init_type)
    net.apply(init_func)
Example 8: __init__
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1, use_residual=True):
    super(MultiHeadAttention, self).__init__()
    self.n_head = n_head
    self.d_k = d_k
    self.d_v = d_v

    # uninitialized per-head projection tensors; filled in below
    self.w_qs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
    self.w_ks = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
    self.w_vs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_v))

    self.attention = ScaledDotProductAttention(d_model)
    self.layer_norm = LayerNormalization(d_model)
    self.proj = BottleLinear(n_head * d_v, d_model)
    self.dropout = nn.Dropout(dropout)
    self.use_residual = use_residual

    init.xavier_normal_(self.w_qs)
    init.xavier_normal_(self.w_ks)
    init.xavier_normal_(self.w_vs)
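xavier_normal_ also accepts tensors with more than two dimensions. PyTorch then computes fan_in = size(1) * prod(remaining dims) and fan_out = size(0) * prod(remaining dims), so the per-head shape matters. A minimal check, with sizes assumed for illustration:

import torch
from torch.nn import init

w_qs = torch.empty(8, 512, 64)  # (n_head, d_model, d_k)
init.xavier_normal_(w_qs)
# fan_in = 512 * 64, fan_out = 8 * 64
# std = sqrt(2 / (512*64 + 8*64)) ≈ 0.0078
print(w_qs.std())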
Example 9: init_weights
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)
Example 10: __init__
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
    super(MultiHeadAttention, self).__init__()
    self.n_head = n_head
    self.d_k = d_k
    self.d_v = d_v

    self.w_qs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
    self.w_ks = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
    self.w_vs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_v))

    self.attention = ScaledDotProductAttention(d_model)
    self.layer_norm = LayerNormalization(d_model)
    self.proj = nn.Linear(n_head * d_v, d_model)
    init.xavier_normal_(self.proj.weight)
    self.dropout = nn.Dropout(dropout)

    init.xavier_normal_(self.w_qs)
    init.xavier_normal_(self.w_ks)
    init.xavier_normal_(self.w_vs)
Example 11: init_weights
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)
Example 12: init_weights
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):
        # this will apply to each layer
        classname = m.__class__.__name__
        # match on 'Conv' (class names are e.g. 'Conv2d'); the original
        # lowercase 'conv' would never match the built-in layer names
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')  # good for ReLU
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    # print('initialize network with %s' % init_type)
    net.apply(init_func)

############################################
# save checkpoint and resume
############################################
Example 13: initialize_weight
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def initialize_weight(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            # init.normal_(m.weight, std=0.01)
            if m.bias is not None:
                init.constant_(m.bias, 0.0)
        else:
            # zero any other module's weight; skip modules without one
            try:
                init.constant_(m.weight, 0.0)
            except AttributeError:
                pass
Example 14: initilization
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def initilization(self):  # sic: the method name is misspelled in the source repo
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            # init.normal_(m.weight, std=0.01)
            if m.bias is not None:
                init.constant_(m.bias, 0.0)
        else:
            # zero any other module's weight; skip modules without one
            try:
                init.constant_(m.weight, 0.0)
            except AttributeError:
                pass
Example 15: setup_layers
# Required import: from torch.nn import init
# Or equivalently: from torch.nn.init import xavier_normal_
def setup_layers(self):
    """
    Adding base layers and deep signed GraphSAGE layers.
    Assigning regression parameters if the model is not a single-layer model.
    """
    self.nodes = range(self.X.shape[0])
    self.neurons = self.args.layers
    self.layers = len(self.neurons)
    self.positive_base_aggregator = SignedSAGEConvolutionBase(self.X.shape[1] * 2,
                                                              self.neurons[0]).to(self.device)
    self.negative_base_aggregator = SignedSAGEConvolutionBase(self.X.shape[1] * 2,
                                                              self.neurons[0]).to(self.device)
    self.positive_aggregators = []
    self.negative_aggregators = []
    for i in range(1, self.layers):
        self.positive_aggregators.append(SignedSAGEConvolutionDeep(3 * self.neurons[i - 1],
                                                                   self.neurons[i]).to(self.device))
        self.negative_aggregators.append(SignedSAGEConvolutionDeep(3 * self.neurons[i - 1],
                                                                   self.neurons[i]).to(self.device))
    self.positive_aggregators = ListModule(*self.positive_aggregators)
    self.negative_aggregators = ListModule(*self.negative_aggregators)
    self.regression_weights = Parameter(torch.Tensor(4 * self.neurons[-1], 3))
    init.xavier_normal_(self.regression_weights)
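The regression_weights pattern generalizes: torch.Tensor(rows, cols) allocates uninitialized memory, so a raw Parameter must be filled explicitly before use, and xavier_normal_ accepts it directly since it has two dimensions. A minimal standalone sketch with an assumed layer width of 64:

import torch
from torch.nn import Parameter, init

regression_weights = Parameter(torch.Tensor(4 * 64, 3))  # uninitialized memory
init.xavier_normal_(regression_weights)                  # now N(0, std^2), std from fan_in/fan_out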