This article collects typical usage examples of the Python method torch.nn.init.orthogonal. If you have been wondering what exactly init.orthogonal does, or how to use it in practice, the curated code examples below may help. You can also explore further usage examples from its containing module, torch.nn.init.
Below are 15 code examples of init.orthogonal, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
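Before the examples, here is a minimal sketch of what the method does: it fills a tensor with at least two dimensions with a (semi-)orthogonal matrix. Note that since PyTorch 0.4 the name init.orthogonal is deprecated in favor of the in-place variant init.orthogonal_; the examples below all use the older spelling.

import torch
from torch.nn import init

# A square weight initialized orthogonally satisfies W @ W.T ≈ I.
w = torch.empty(4, 4)
init.orthogonal_(w)  # on older releases: init.orthogonal(w)
print(torch.allclose(w @ w.t(), torch.eye(4), atol=1e-5))  # True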
Example 1: weights_init
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def weights_init(init_type='xavier'):
    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
            if init_type == 'normal':
                init.normal(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
        elif classname.find('Norm') == 0:
            if hasattr(m, 'weight') and m.weight is not None:
                init.constant(m.weight.data, 1.0)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
    return init_fun
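Example 1 is a factory: it returns a closure that Module.apply then calls on every submodule, so one function handles Conv, Linear, and Norm layers uniformly. A hedged usage sketch (the toy network below is illustrative only):

import math
import torch.nn as nn
from torch.nn import init

net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))
# Orthogonal conv weights, unit Norm scales, zero biases throughout.
net.apply(weights_init('orthogonal'))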
Example 2: reset_parameters
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def reset_parameters(self):
    """
    Initialize parameters following the way proposed in the paper.
    """
    # The input-to-hidden weight matrix is initialized orthogonally.
    init.orthogonal(self.weight_ih.data)
    # The hidden-to-hidden weight matrix is initialized as an identity
    # matrix.
    weight_hh_data = torch.eye(self.hidden_size)
    weight_hh_data = weight_hh_data.repeat(1, 4)
    self.weight_hh.data.set_(weight_hh_data)
    # The bias is just set to zero vectors.
    init.constant(self.bias.data, val=0)
    # Initialization of BN parameters.
    self.bn_ih.reset_parameters()
    self.bn_hh.reset_parameters()
    self.bn_c.reset_parameters()
    self.bn_ih.bias.data.fill_(0)
    self.bn_hh.bias.data.fill_(0)
    self.bn_ih.weight.data.fill_(0.1)
    self.bn_hh.weight.data.fill_(0.1)
    self.bn_c.weight.data.fill_(0.1)
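The repeat(1, 4) call tiles the identity along the column axis, producing a (hidden_size, 4 * hidden_size) matrix: one identity block per LSTM gate, in a layout where this cell presumably computes h @ weight_hh (the transposed convention relative to nn.LSTM). A quick shape check, assuming hidden_size = 3:

import torch

eye = torch.eye(3)
tiled = eye.repeat(1, 4)
print(tiled.shape)  # torch.Size([3, 12]) -- four identity blocks side by side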
Example 3: reset_parameters
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def reset_parameters(self):
    init.orthogonal(self.W_i)
    init.orthogonal(self.U_i)
    init.orthogonal(self.U_i_p)
    init.orthogonal(self.W_f)
    init.orthogonal(self.U_f)
    init.orthogonal(self.U_f_p)
    init.orthogonal(self.W_c)
    init.orthogonal(self.U_c)
    init.orthogonal(self.U_c_p)
    init.orthogonal(self.W_o)
    init.orthogonal(self.U_o)
    init.orthogonal(self.U_o_p)
    self.b_i.data.fill_(0.)
    self.b_c.data.fill_(0.)
    self.b_o.data.fill_(0.)
    # The forget biases are set to 1.
    self.b_f.data.fill_(1.)
    self.b_f_p.data.fill_(1.)
Example 4: weights_init
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def weights_init(init_type='xavier'):
    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
            if init_type == 'normal':
                init.normal(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
    return init_fun
Example 5: __init__
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def __init__(self, vocab_dict, dropout_rate, embed_dim, hidden_dim, bidirectional=True):
    super(AOA, self).__init__()
    self.vocab_dict = vocab_dict
    self.hidden_dim = hidden_dim
    self.embed_dim = embed_dim
    self.dropout_rate = dropout_rate
    self.embedding = nn.Embedding(vocab_dict.size(),
                                  self.embed_dim,
                                  padding_idx=PAD)
    self.embedding.weight.data.uniform_(-0.05, 0.05)
    input_size = self.embed_dim
    self.gru = nn.GRU(input_size, hidden_size=self.hidden_dim, dropout=dropout_rate,
                      bidirectional=bidirectional, batch_first=True)
    # Orthogonally initialize every matrix-shaped (2-D) GRU parameter;
    # 1-D bias vectors keep their default initialization.
    for weight in self.gru.parameters():
        if len(weight.size()) > 1:
            init.orthogonal(weight.data)
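A note on that loop: nn.GRU exposes 2-D weight matrices (weight_ih_l0, weight_hh_l0, and their reverse-direction counterparts) alongside 1-D bias vectors, so the dimensionality test orthogonally initializes exactly the weights and skips the biases. A quick sketch to see which parameters qualify:

import torch.nn as nn

gru = nn.GRU(10, 20, bidirectional=True, batch_first=True)
for name, p in gru.named_parameters():
    print(name, tuple(p.shape), 'orthogonal' if p.dim() > 1 else 'skipped')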
Example 6: __init__
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def __init__(self, vocab_dict, dropout_rate, embed_dim, hidden_dim, bidirectional=True):
    super(AoAReader, self).__init__()
    self.vocab_dict = vocab_dict
    self.hidden_dim = hidden_dim
    self.embed_dim = embed_dim
    self.dropout_rate = dropout_rate
    self.embedding = nn.Embedding(vocab_dict.size(),
                                  self.embed_dim,
                                  padding_idx=Constants.PAD)
    self.embedding.weight.data.uniform_(-0.05, 0.05)
    input_size = self.embed_dim
    self.gru = nn.GRU(input_size, hidden_size=self.hidden_dim, dropout=dropout_rate,
                      bidirectional=bidirectional, batch_first=True)
    # try independent gru
    #self.query_gru = nn.GRU(input_size, hidden_size=self.hidden_dim, dropout=dropout_rate,
    #                        bidirectional=bidirectional, batch_first=True)
    for weight in self.gru.parameters():
        if len(weight.size()) > 1:
            init.orthogonal(weight.data)
Example 7: reset_parameters
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def reset_parameters(self):
    """
    Initialize parameters following the way proposed in the paper.
    """
    init.orthogonal(self.weight_ih.data)
    init.orthogonal(self.alpha_weight_ih.data)
    weight_hh_data = torch.eye(self.hidden_size)
    weight_hh_data = weight_hh_data.repeat(1, 3)
    self.weight_hh.data.set_(weight_hh_data)
    alpha_weight_hh_data = torch.eye(self.hidden_size)
    alpha_weight_hh_data = alpha_weight_hh_data.repeat(1, 1)  # a no-op, kept for symmetry
    self.alpha_weight_hh.data.set_(alpha_weight_hh_data)
    # The biases are just set to zero vectors.
    if self.use_bias:
        init.constant(self.bias.data, val=0)
        init.constant(self.alpha_bias.data, val=0)
Example 8: reset_parameters
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def reset_parameters(self):
    if self.use_leaf_rnn:
        init.kaiming_normal(self.leaf_rnn_cell.weight_ih.data)
        init.orthogonal(self.leaf_rnn_cell.weight_hh.data)
        init.constant(self.leaf_rnn_cell.bias_ih.data, val=0)
        init.constant(self.leaf_rnn_cell.bias_hh.data, val=0)
        # Set the forget-gate bias to 1.
        self.leaf_rnn_cell.bias_ih.data.chunk(4)[1].fill_(1)
        if self.bidirectional:
            init.kaiming_normal(self.leaf_rnn_cell_bw.weight_ih.data)
            init.orthogonal(self.leaf_rnn_cell_bw.weight_hh.data)
            init.constant(self.leaf_rnn_cell_bw.bias_ih.data, val=0)
            init.constant(self.leaf_rnn_cell_bw.bias_hh.data, val=0)
            # Set the forget-gate bias to 1.
            self.leaf_rnn_cell_bw.bias_ih.data.chunk(4)[1].fill_(1)
    else:
        init.kaiming_normal(self.word_linear.weight.data)
        init.constant(self.word_linear.bias.data, val=0)
    self.treelstm_layer.reset_parameters()
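The chunk(4)[1] trick works because PyTorch's LSTMCell stores its bias as a single vector of length 4 * hidden_size, with the gates concatenated in the order (input, forget, cell, output); chunking into four views and filling the second therefore touches only the forget-gate slice. A small sketch, assuming hidden_size = 2:

import torch

bias = torch.zeros(4 * 2)   # layout: [i | f | g | o]
bias.chunk(4)[1].fill_(1)   # chunk returns views, so this mutates bias in place
print(bias)                 # tensor([0., 0., 1., 1., 0., 0., 0., 0.])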
Example 9: init_weights
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def init_weights(net, init_type='normal'):
    print('initialization method [%s]' % init_type)
    if init_type == 'normal':
        net.apply(weights_init_normal)
    elif init_type == 'xavier':
        net.apply(weights_init_xavier)
    elif init_type == 'kaiming':
        net.apply(weights_init_kaiming)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)

# __init__: load dataset
# __call__: training the CNN defined by CGP list
Example 10: init_weights
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def init_weights(net, init_type='normal'):
    print('initialization method [%s]' % init_type)
    if init_type == 'normal':
        net.apply(weights_init_normal)
    elif init_type == 'xavier':
        net.apply(weights_init_xavier)
    elif init_type == 'kaiming':
        net.apply(weights_init_kaiming)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)

# __init__: load dataset
# __call__: training the CNN defined by CGP list
Example 11: init_weights
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def init_weights(self):
    for name, parameter in itertools.chain(self.gru_f.named_parameters(), self.gru_b.named_parameters()):
        if name.startswith('weight'):
            init.orthogonal(parameter.data)
        elif name.startswith('bias'):
            parameter.data.zero_()
        else:
            raise ValueError('Unknown parameter type: {}'.format(name))
Example 12: weights_init_orthogonal
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
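Unlike the closure in Example 1, this is a plain function meant to be passed directly to Module.apply; the init_weights dispatchers in Examples 9, 10, 13, and 15 call it that way. A hedged usage sketch (the toy network is illustrative only):

import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
net.apply(weights_init_orthogonal)  # orthogonal conv weights, N(1.0, 0.02) BN scales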
Example 13: init_weights
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def init_weights(net, init_type='normal'):
    # print('initialization method [%s]' % init_type)
    if init_type == 'normal':
        net.apply(weights_init_normal)
    elif init_type == 'xavier':
        net.apply(weights_init_xavier)
    elif init_type == 'kaiming':
        net.apply(weights_init_kaiming)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
Example 14: weights_init_orthogonal
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example 15: init_weights
# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import orthogonal [as alias]
def init_weights(net, init_type='normal'):
    print('initialization method [%s]' % init_type)
    if init_type == 'normal':
        net.apply(weights_init_normal)
    elif init_type == 'xavier':
        net.apply(weights_init_xavier)
    elif init_type == 'kaiming':
        net.apply(weights_init_kaiming)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
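All of the examples above use the pre-0.4 initializer names (init.orthogonal, init.constant, init.normal, and so on), which emit deprecation warnings on current PyTorch; the supported spellings end in an underscore. A minimal modernized sketch of the dispatch target above, under that assumption (the function name here is hypothetical):

import torch.nn as nn
from torch.nn import init

def weights_init_orthogonal_modern(m):
    # Hypothetical modern equivalent of weights_init_orthogonal above.
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        init.orthogonal_(m.weight)
    elif isinstance(m, nn.BatchNorm2d):
        init.normal_(m.weight, 1.0, 0.02)
        init.constant_(m.bias, 0.0)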