This page collects typical usage examples of Python's weight_drop.WeightDrop. If you are unsure what weight_drop.WeightDrop does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples from the weight_drop module in which it is defined.

The following shows 5 code examples of weight_drop.WeightDrop, ordered by popularity.
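Before the extracted examples, here is a minimal usage sketch. It is hypothetical: the wrapped module, sizes, and dropout value are illustrative; only the WeightDrop(module, weights, dropout=...) call pattern comes from the examples below.

import torch
from weight_drop import WeightDrop

# Wrap an LSTM so DropConnect is applied to its hidden-to-hidden
# weight matrix ('weight_hh_l0') on each forward pass during training.
lstm = torch.nn.LSTM(16, 16)
wd_lstm = WeightDrop(lstm, ['weight_hh_l0'], dropout=0.5)

x = torch.randn(35, 8, 16)        # (seq_len, batch, input_size)
output, (h_n, c_n) = wd_lstm(x)   # same call signature as the wrapped LSTM
print(output.shape)               # torch.Size([35, 8, 16])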
Example 1: __init__
# Required import: import weight_drop [as alias]
# Or: from weight_drop import WeightDrop [as alias]
def __init__(self, module, weights, dropout=0, variational=False):
    super(WeightDrop, self).__init__()
    self.module = module              # the wrapped module (typically an RNN)
    self.weights = weights            # names of the weight tensors to drop
    self.dropout = dropout            # DropConnect probability
    self.variational = variational    # if True, share one mask per weight row
    self._setup()
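The constructor delegates the real work to _setup, which this page does not show. In the canonical AWD-LSTM WeightDrop (this repo's weight_drop.py may differ in details), _setup re-registers each listed weight under a '*_raw' name, and forward re-derives the dropped weight from the raw parameter on every call. A sketch of those methods, under that assumption:

import torch
import torch.nn.functional as F
from torch.nn import Parameter

def _setup(self):
    # Move each named weight out of the wrapped module, keeping the
    # trainable copy as '<name>_raw'.
    for name_w in self.weights:
        w = getattr(self.module, name_w)
        del self.module._parameters[name_w]
        self.module.register_parameter(name_w + '_raw', Parameter(w.data))

def _setweights(self):
    # Recompute the effective weight from the raw parameter, with dropout.
    for name_w in self.weights:
        raw_w = getattr(self.module, name_w + '_raw')
        if getattr(self, 'variational', False):
            # One mask entry per output unit, broadcast across each row.
            mask = raw_w.new_ones(raw_w.size(0), 1)
            mask = F.dropout(mask, p=self.dropout, training=self.training)
            w = mask.expand_as(raw_w) * raw_w
        else:
            w = F.dropout(raw_w, p=self.dropout, training=self.training)
        setattr(self.module, name_w, w)

def forward(self, *args):
    self._setweights()
    return self.module.forward(*args)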
Example 2: __init__
# Required import: import weight_drop [as alias]
# Or: from weight_drop import WeightDrop [as alias]
def __init__(self, ntoken, ninp, dropout=0.5, dropouti=0.5, dropoute=0.1, wdrop=0, tie_weights=False):
    super(RNNModel, self).__init__()
    self.idrop = fixMaskDropout(dropouti)
    self.drop = fixMaskDropout(dropout)
    self.encoder = nn.Embedding(ntoken, ninp, padding_idx=0)
    self.embedded_dropout = fixMaskEmbeddedDropout(self.encoder, dropoute)
    self.lstm = WeightDrop(torch.nn.LSTM(ninp, ninp), ['weight_hh_l0'], dropout=wdrop)
    self.decoder = nn.Linear(ninp, ntoken)
    # Tie the decoder to the raw embedding weight; the encoder's weight is
    # re-registered as 'weight_raw' (by fixMaskEmbeddedDropout), so this
    # shares one parameter between input and output embeddings.
    self.decoder.weight = self.encoder.weight_raw
    self.init_weights()
    self.ninp = ninp
    self.dropoute = dropoute
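A hypothetical instantiation of this single-layer, weight-tied model (the values are illustrative; fixMaskDropout and fixMaskEmbeddedDropout are defined elsewhere in the same repo and are not shown on this page):

model = RNNModel(ntoken=10000, ninp=650, dropout=0.5, dropouti=0.5,
                 dropoute=0.1, wdrop=0.5)
# decoder.weight and encoder.weight_raw now reference the same Parameter,
# so one matrix serves as both the input embedding and the softmax weights.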
Example 3: __init__
# Required import: import weight_drop [as alias]
# Or: from weight_drop import WeightDrop [as alias]
def __init__(self, module, weights, dropout=0):
    super(WeightDrop, self).__init__()
    self.module = module      # the wrapped module (typically an RNN)
    self.weights = weights    # names of the weight tensors to drop
    self.dropout = dropout    # DropConnect probability
    self._setup()
Example 4: __init__
# Required import: import weight_drop [as alias]
# Or: from weight_drop import WeightDrop [as alias]
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0, tie_weights=False):
    super(RNNModel, self).__init__()
    self.lockdrop = LockedDropout()
    self.idrop = nn.Dropout(dropouti)
    self.hdrop = nn.Dropout(dropouth)
    self.drop = nn.Dropout(dropout)
    self.encoder = nn.Embedding(ntoken, ninp)
    assert rnn_type in ['LSTM', 'QRNN', 'GRU'], 'RNN type is not supported'
    if rnn_type == 'LSTM':
        # One single-layer LSTM per depth level; the last layer shrinks back
        # to ninp when weights are tied so the decoder dimensions match.
        self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else (ninp if tie_weights else nhid), 1, dropout=0) for l in range(nlayers)]
        if wdrop:
            self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop) for rnn in self.rnns]
    if rnn_type == 'GRU':
        self.rnns = [torch.nn.GRU(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else ninp, 1, dropout=0) for l in range(nlayers)]
        if wdrop:
            self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop) for rnn in self.rnns]
    elif rnn_type == 'QRNN':
        from torchqrnn import QRNNLayer
        self.rnns = [QRNNLayer(input_size=ninp if l == 0 else nhid, hidden_size=nhid if l != nlayers - 1 else (ninp if tie_weights else nhid), save_prev_x=True, zoneout=0, window=2 if l == 0 else 1, output_gate=True) for l in range(nlayers)]
        for rnn in self.rnns:
            # A QRNN layer has no 'weight_hh_l0'; apply DropConnect to its
            # internal linear layer's weight instead.
            rnn.linear = WeightDrop(rnn.linear, ['weight'], dropout=wdrop)
    print(self.rnns)
    self.rnns = torch.nn.ModuleList(self.rnns)
    self.decoder = nn.Linear(nhid, ntoken)

    # Optionally tie weights as in:
    # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
    # https://arxiv.org/abs/1608.05859
    # and
    # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
    # https://arxiv.org/abs/1611.01462
    if tie_weights:
        #if nhid != ninp:
        #    raise ValueError('When using the tied flag, nhid must be equal to emsize')
        self.decoder.weight = self.encoder.weight
    self.init_weights()
    self.rnn_type = rnn_type
    self.ninp = ninp
    self.nhid = nhid
    self.nlayers = nlayers
    self.dropout = dropout
    self.dropouti = dropouti
    self.dropouth = dropouth
    self.dropoute = dropoute
    self.tie_weights = tie_weights
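A hypothetical construction of this multi-layer model (the hyperparameter values are illustrative; LockedDropout, init_weights, and the forward pass live elsewhere in the repo):

model = RNNModel('LSTM', ntoken=10000, ninp=400, nhid=1150, nlayers=3,
                 dropout=0.4, dropouth=0.25, dropouti=0.4, dropoute=0.1,
                 wdrop=0.5, tie_weights=True)
# With wdrop > 0, the print(self.rnns) call shows each layer wrapped, e.g.:
# [WeightDrop(LSTM(400, 1150)), WeightDrop(LSTM(1150, 1150)),
#  WeightDrop(LSTM(1150, 400))]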
Example 5: __init__
# Required import: import weight_drop [as alias]
# Or: from weight_drop import WeightDrop [as alias]
def __init__(self, rnn_type, ntoken, ninp, nhid, nhidlast, nlayers,
             dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0,
             tie_weights=False, ldropout=0.5, n_experts=10):
    super(RNNModel, self).__init__()
    self.use_dropout = True
    self.lockdrop = LockedDropout()
    self.encoder = nn.Embedding(ntoken, ninp)
    self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else nhidlast, 1, dropout=0) for l in range(nlayers)]
    if wdrop:
        self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop if self.use_dropout else 0) for rnn in self.rnns]
    self.rnns = torch.nn.ModuleList(self.rnns)
    # Mixture-of-softmaxes head: a prior over experts plus a latent projection.
    self.prior = nn.Linear(nhidlast, n_experts, bias=False)
    self.latent = nn.Sequential(nn.Linear(nhidlast, n_experts*ninp), nn.Tanh())
    self.decoder = nn.Linear(ninp, ntoken)

    # Optionally tie weights as in:
    # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
    # https://arxiv.org/abs/1608.05859
    # and
    # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
    # https://arxiv.org/abs/1611.01462
    if tie_weights:
        #if nhid != ninp:
        #    raise ValueError('When using the tied flag, nhid must be equal to emsize')
        self.decoder.weight = self.encoder.weight
    self.init_weights()
    self.rnn_type = rnn_type
    self.ninp = ninp
    self.nhid = nhid
    self.nhidlast = nhidlast
    self.nlayers = nlayers
    self.dropout = dropout
    self.dropouti = dropouti
    self.dropouth = dropouth
    self.dropoute = dropoute
    self.ldropout = ldropout
    self.dropoutl = ldropout
    self.n_experts = n_experts
    self.ntoken = ntoken

    # Report the total number of trainable parameters.
    size = 0
    for p in self.parameters():
        size += p.nelement()
    print('param size: {}'.format(size))
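This constructor appears to come from a mixture-of-softmaxes (MoS) language model (Yang et al. 2017, "Breaking the Softmax Bottleneck", https://arxiv.org/abs/1711.03953); the prior and latent heads are combined in the model's forward pass, which this page does not show. A sketch of how such a head is typically applied, written as a standalone function (names and shapes are assumptions inferred from this constructor):

import torch
import torch.nn.functional as F

def mos_probs(model, output):
    # output: (N, nhidlast) hidden states from the top LSTM layer,
    # flattened over time and batch.
    latent = model.latent(output)                        # (N, n_experts*ninp)
    logit = model.decoder(latent.view(-1, model.ninp))   # (N*n_experts, ntoken)
    prior = F.softmax(model.prior(output), dim=-1)       # (N, n_experts)
    prob = F.softmax(logit, dim=-1).view(-1, model.n_experts, model.ntoken)
    # Weight each expert's word distribution by its prior and sum them.
    return (prob * prior.unsqueeze(2)).sum(1)            # (N, ntoken)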