This article collects typical usage examples of the Python method torch.nn.init.xavier_uniform_. If you have been wondering what exactly init.xavier_uniform_ does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples from the module it belongs to, torch.nn.init.
The following presents 15 code examples of the init.xavier_uniform_ method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
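Before the examples, a minimal standalone sketch of the basic call (the layer shape is arbitrary, chosen only for illustration):

import torch.nn as nn
from torch.nn import init

# Xavier/Glorot uniform initialization fills a tensor in place with values drawn
# from U(-a, a), where a = gain * sqrt(6 / (fan_in + fan_out)).
layer = nn.Linear(128, 64)          # arbitrary shape, for illustration only
init.xavier_uniform_(layer.weight)  # trailing underscore: operates in place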
Example 1: _nn_embed
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def _nn_embed(self, embed_dict, words_dict):
    """
    :param embed_dict: mapping from word to its pre-trained embedding vector
    :param words_dict: mapping from word to its vocabulary index
    """
    print("loading pre-trained embeddings by nn.Embedding, with Xavier init for out-of-vocabulary words.")
    embed = nn.Embedding(int(self.words_count), int(self.dim))
    init.xavier_uniform_(embed.weight.data)
    embeddings = np.array(embed.weight.data)
    for word in words_dict:
        if word in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
            self.exact_count += 1
        elif word.lower() in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
            self.fuzzy_count += 1
        else:
            self.oov_count += 1
    embeddings[self.padID] = 0
    final_embed = torch.from_numpy(embeddings).float()
    return final_embed
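The returned tensor is typically loaded into the model's embedding layer afterwards; a plausible follow-up (the variable names here are illustrative, not from the original class):

# Hypothetical usage: copy the assembled matrix into the model's embedding layer.
embedding = nn.Embedding(words_count, dim)
embedding.weight.data.copy_(final_embed)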
Example 2: __init__
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
    super(MultiHeadGraphAttention, self).__init__()
    self.n_head = n_head
    self.w = Parameter(torch.Tensor(n_head, f_in, f_out))
    self.a_src = Parameter(torch.Tensor(n_head, f_out, 1))
    self.a_dst = Parameter(torch.Tensor(n_head, f_out, 1))
    self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
    self.softmax = nn.Softmax(dim=-1)
    self.dropout = nn.Dropout(attn_dropout)
    if bias:
        self.bias = Parameter(torch.Tensor(f_out))
        init.constant_(self.bias, 0)
    else:
        self.register_parameter('bias', None)
    init.xavier_uniform_(self.w)
    init.xavier_uniform_(self.a_src)
    init.xavier_uniform_(self.a_dst)
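A brief instantiation sketch (the hyperparameters below are invented for illustration; the layer's forward signature is not shown in this snippet):

# Hypothetical instantiation: 4 heads projecting 64-d node features to 16-d.
# Each head gets its own (64, 16) slice of self.w, Xavier-initialized above.
gat_layer = MultiHeadGraphAttention(n_head=4, f_in=64, f_out=16, attn_dropout=0.1)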
Example 3: make_model
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def make_model(src_vocab, tgt_vocab, N=6,
               dim_model=512, dim_ff=2048, h=8, dropout=0.1, universal=False):
    if universal:
        return make_universal_model(src_vocab, tgt_vocab, dim_model, dim_ff, h, dropout)
    c = copy.deepcopy
    attn = MultiHeadAttention(h, dim_model)
    ff = PositionwiseFeedForward(dim_model, dim_ff)
    pos_enc = PositionalEncoding(dim_model, dropout)
    encoder = Encoder(EncoderLayer(dim_model, c(attn), c(ff), dropout), N)
    decoder = Decoder(DecoderLayer(dim_model, c(attn), c(attn), c(ff), dropout), N)
    src_embed = Embeddings(src_vocab, dim_model)
    tgt_embed = Embeddings(tgt_vocab, dim_model)
    generator = Generator(dim_model, tgt_vocab)
    model = Transformer(
        encoder, decoder, src_embed, tgt_embed, pos_enc, generator, h, dim_model // h)
    # Xavier init for all matrix-shaped parameters
    for p in model.parameters():
        if p.dim() > 1:
            INIT.xavier_uniform_(p)
    return model
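The `p.dim() > 1` guard matters: Xavier initialization needs both a fan-in and a fan-out, which are undefined for 1-D parameters such as biases. A standalone demonstration (not from the repository):

import torch
from torch.nn import init

w = torch.empty(512, 512)
init.xavier_uniform_(w)    # fine: fan_in and fan_out are both defined
b = torch.empty(512)
# init.xavier_uniform_(b)  # would raise ValueError: fan in/out cannot be computed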
Example 4: make_universal_model
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def make_universal_model(src_vocab, tgt_vocab, dim_model=512, dim_ff=2048, h=8, dropout=0.1):
    c = copy.deepcopy
    attn = MultiHeadAttention(h, dim_model)
    ff = PositionwiseFeedForward(dim_model, dim_ff)
    pos_enc = PositionalEncoding(dim_model, dropout)
    time_enc = PositionalEncoding(dim_model, dropout)
    encoder = UEncoder(EncoderLayer(dim_model, c(attn), c(ff), dropout))
    decoder = UDecoder(DecoderLayer(dim_model, c(attn), c(attn), c(ff), dropout))
    src_embed = Embeddings(src_vocab, dim_model)
    tgt_embed = Embeddings(tgt_vocab, dim_model)
    generator = Generator(dim_model, tgt_vocab)
    model = UTransformer(
        encoder, decoder, src_embed, tgt_embed, pos_enc, time_enc, generator, h, dim_model // h)
    # Xavier init for all matrix-shaped parameters
    for p in model.parameters():
        if p.dim() > 1:
            INIT.xavier_uniform_(p)
    return model
Example 5: __init__
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def __init__(self, embedding_tokens, embedding_features, lstm_features, drop=0.0):
    super(TextProcessor, self).__init__()
    self.embedding = nn.Embedding(embedding_tokens, embedding_features, padding_idx=0)
    self.drop = nn.Dropout(drop)
    self.tanh = nn.Tanh()
    # note: despite the attribute name, this is a GRU
    self.lstm = nn.GRU(input_size=embedding_features,
                       hidden_size=lstm_features,
                       num_layers=1)
    self.features = lstm_features
    self._init_lstm(self.lstm.weight_ih_l0)
    self._init_lstm(self.lstm.weight_hh_l0)
    self.lstm.bias_ih_l0.data.zero_()
    self.lstm.bias_hh_l0.data.zero_()
    init.xavier_uniform_(self.embedding.weight)
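The `_init_lstm` helper is defined elsewhere in TextProcessor and is not shown here; a plausible implementation, assuming it applies Xavier initialization to each gate's slice of the stacked GRU weight matrix:

def _init_lstm(self, weight):
    # nn.GRU stacks the weights of its 3 gates along dim 0;
    # initialize each gate's slice separately.
    for w in weight.chunk(3, 0):
        init.xavier_uniform_(w)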
Example 6: init_weights
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def init_weights(self):
    self.param_count = 0
    for module in self.modules():
        if (isinstance(module, nn.Conv2d)
                or isinstance(module, nn.Linear)
                or isinstance(module, nn.Embedding)):
            if self.init == 'ortho':
                init.orthogonal_(module.weight)
            elif self.init == 'N02':
                init.normal_(module.weight, 0, 0.02)
            elif self.init in ['glorot', 'xavier']:
                init.xavier_uniform_(module.weight)
            else:
                print('Init style not recognized...')
            self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print("Param count for G's initialized parameters: %d" % self.param_count)
# Note on this forward function: we pass in a y vector which has
# already been passed through G.shared to enable easy class-wise
# interpolation later. If we passed in the one-hot and then ran it through
# G.shared in this forward function, it would be harder to handle.
# NOTE: The z vs y dichotomy here is for compatibility with not-y
Example 7: init_weights
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def init_weights(self):
    self.param_count = 0
    for module in self.modules():
        if (isinstance(module, nn.Conv2d)
                or isinstance(module, nn.Linear)
                or isinstance(module, nn.Embedding)):
            if self.init == 'ortho':
                init.orthogonal_(module.weight)
            elif self.init == 'N02':
                init.normal_(module.weight, 0, 0.02)
            elif self.init in ['glorot', 'xavier']:
                init.xavier_uniform_(module.weight)
            else:
                print('Init style not recognized...')
            self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print("Param count for G's initialized parameters: %d" % self.param_count)
# Note on this forward function: we pass in a y vector which has
# already been passed through G.shared to enable easy class-wise
# interpolation later. If we passed in the one-hot and then ran it through
# G.shared in this forward function, it would be harder to handle.
Example 8: initialize_weights
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def initialize_weights(modules, method='xavier'):
    for m in modules:
        if isinstance(m, nn.Conv2d):
            if m.bias is not None:
                m.bias.data.zero_()
            if method == 'xavier':
                init.xavier_uniform_(m.weight)
            elif method == 'kaiming':
                init.kaiming_uniform_(m.weight)
        if isinstance(m, nn.ConvTranspose2d):
            if m.bias is not None:
                m.bias.data.zero_()
            if method == 'xavier':
                init.xavier_uniform_(m.weight)
            elif method == 'kaiming':
                init.kaiming_uniform_(m.weight)
Example 9: __init__
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def __init__(self, set_encoder, set_decoder, input_encoder=None):
    """
    In the auto-encoder setting, don't pass an input_encoder, because the target
    set and mask are assumed to be the input. In the general prediction setting,
    all three must be passed.
    """
    super().__init__()
    self.set_encoder = set_encoder
    self.input_encoder = input_encoder
    self.set_decoder = set_decoder
    for m in self.modules():
        if (
            isinstance(m, nn.Linear)
            or isinstance(m, nn.Conv2d)
            or isinstance(m, nn.Conv1d)
        ):
            init.xavier_uniform_(m.weight)
            if m.bias is not None:
                m.bias.data.zero_()
Example 10: init_param
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def init_param(self):
    if self.upsample:
        init.xavier_uniform_(self.W_branch.data, gain=init.calculate_gain('relu'))
    stdv = 1. / math.sqrt(self.out_feature)
    self.bias.data.uniform_(-stdv, stdv)
Example 11: __init__
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def __init__(self, in_features, out_features, bias=True):
    super(BatchGraphConvolution, self).__init__()
    self.in_features = in_features
    self.out_features = out_features
    self.weight = Parameter(torch.Tensor(in_features, out_features))
    if bias:
        self.bias = Parameter(torch.Tensor(out_features))
        init.constant_(self.bias, 0)
    else:
        self.register_parameter('bias', None)
    init.xavier_uniform_(self.weight)
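The class's forward isn't shown; for context, a batched graph convolution typically projects the node features and then aggregates them with the adjacency matrix. A sketch under that assumption (not the class's actual forward):

def forward(self, x, adj):
    # x: (batch, num_nodes, in_features); adj: (batch, num_nodes, num_nodes)
    support = torch.matmul(x, self.weight)   # (batch, num_nodes, out_features)
    output = torch.bmm(adj, support)         # aggregate neighbor features
    if self.bias is not None:
        output = output + self.bias
    return output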
Example 12: _initialize
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def _initialize(self):
    init.xavier_uniform_(self.c1.weight.data, gain=math.sqrt(2))
    init.xavier_uniform_(self.c2.weight.data, gain=math.sqrt(2))
    if self.learnable_sc:
        init.xavier_uniform_(self.c_sc.weight.data)
Example 13: _initialize
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def _initialize(self):
    init.xavier_uniform_(self.l7.weight.data)
    optional_l_y = getattr(self, 'l_y', None)
    if optional_l_y is not None:
        init.xavier_uniform_(optional_l_y.weight.data)
Example 14: _initialize
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def _initialize(self):
    init.xavier_uniform_(self.l6.weight.data)
    optional_l_y = getattr(self, 'l_y', None)
    if optional_l_y is not None:
        init.xavier_uniform_(optional_l_y.weight.data)
Example 15: _initialize
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import xavier_uniform_ [as alias]
def _initialize(self):
    init.xavier_uniform_(self.c1.weight.tensor, gain=math.sqrt(2))
    init.xavier_uniform_(self.c2.weight.tensor, gain=math.sqrt(2))
    if self.learnable_sc:
        init.xavier_uniform_(self.c_sc.weight.tensor, gain=1)