This article collects typical usage examples of the Python function torch.nn.functional.dropout. If you are unsure what dropout does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 15 code examples of the dropout function, sorted by popularity by default; examples that readers have found most useful appear first.
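Before the examples, here is a minimal, self-contained sketch of the call signature they all build on, F.dropout(input, p=0.5, training=True, inplace=False); the tensor shape below is illustrative only:

import torch
import torch.nn.functional as F

x = torch.randn(4, 10)

# In training mode each element is zeroed with probability p and the
# survivors are rescaled by 1/(1 - p); with training=False the input
# passes through unchanged.
y_train = F.dropout(x, p=0.5, training=True)
y_eval = F.dropout(x, p=0.5, training=False)

# Inside an nn.Module, the usual pattern (seen throughout the examples
# below) is to forward the module's own mode flag, so that model.eval()
# disables dropout automatically:
#     y = F.dropout(x, p=0.5, training=self.training)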
Example 1: forward
def forward(self, input, format='index'):
    if format == 'onehot':
        out = F.dropout(self.Linear(input), self.d, training=self.training)
    elif format == 'index':
        out = F.dropout(self.word_embed(input), self.d, training=self.training)
    return out
Example 2: forward
def forward(self, inp, hidden=None, schedule=None, **kwargs):
    """
    Parameters:
    -----------
    inp: torch.Tensor (seq_len x batch_size)

    Returns:
    --------
    outs: torch.Tensor (seq_len * batch_size x vocab)
    hidden: see output of RNN, GRU, LSTM in torch.nn
    weights: None, or list of weights (batch_size x seq_len);
        only not None if attention is provided.
    """
    inp = word_dropout(
        inp, self.target_code, p=self.word_dropout,
        reserved_codes=self.reserved_codes, training=self.training)
    emb = self.embeddings(inp)
    if self.has_dropout:
        emb = F.dropout(emb, p=self.dropout, training=self.training)
    outs, hidden = self.rnn(emb, hidden or self.init_hidden_for(emb))
    if self.has_dropout:
        outs = F.dropout(outs, p=self.dropout, training=self.training)
    weights = None
    if self.add_attn:
        outs, weights = self.attn(outs, emb)
    seq_len, batch, hid_dim = outs.size()
    outs = outs.view(seq_len * batch, hid_dim)
    if self.add_deepout:
        outs = self.deepout(outs)
    outs = F.log_softmax(self.project(outs))
    return outs, hidden, weights
Example 3: forward
def forward(self, x):
    x = F.relu(self.linear1(x))
    x = F.dropout(x, 0.8)
    x = F.relu(self.linear2(x))
    x = F.dropout(x, 0.8)
    x = F.log_softmax(self.linear3(x))
    return x
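Note that this example passes p=0.8 positionally and omits the training flag, so dropout stays active even after model.eval(). A minimal variant with the flag threaded through (layer names kept from the example, an explicit dim added to log_softmax) would be:

def forward(self, x):
    x = F.relu(self.linear1(x))
    x = F.dropout(x, p=0.8, training=self.training)   # disabled under model.eval()
    x = F.relu(self.linear2(x))
    x = F.dropout(x, p=0.8, training=self.training)
    return F.log_softmax(self.linear3(x), dim=1)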
Example 4: forward
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed):
    # View into three dimensions
    att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
    conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
    conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
    # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
    fake_region = self.fr_linear(fake_region)
    fake_region_embed = self.fr_embed(fake_region)
    h_out_linear = self.ho_linear(h_out)
    h_out_embed = self.ho_embed(h_out_linear)
    txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
    img_all = torch.cat([fake_region.view(-1, 1, self.input_encoding_size), conv_feat], 1)
    img_all_embed = torch.cat([fake_region_embed.view(-1, 1, self.input_encoding_size), conv_feat_embed], 1)
    hA = F.tanh(img_all_embed + txt_replicate)
    hA = F.dropout(hA, self.drop_prob_lm, self.training)
    hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
    PI = F.softmax(hAflat.view(-1, att_size + 1))
    visAtt = torch.bmm(PI.unsqueeze(1), img_all)
    visAttdim = visAtt.squeeze(1)
    atten_out = visAttdim + h_out_linear
    h = F.tanh(self.att2h(atten_out))
    h = F.dropout(h, self.drop_prob_lm, self.training)
    return h
Example 5: forward
def forward(self, x):
    y = F.dropout(F.relu(self.linears[0](x)), self.training)
    for layer in self.linears[1:-1]:
        y = F.relu(layer(y))
        y = F.dropout(y, self.training)
    y = F.log_softmax(self.linears[-1](y))
    return y
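A caveat on the positional arguments in this example: in F.dropout(y, self.training) the boolean lands in the p slot (so p is effectively 1.0 in training mode and 0.0 in eval mode), not in the training slot. If the intent is the default p=0.5 gated by the module's mode, the keyword form would be:

y = F.dropout(y, p=0.5, training=self.training)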
Example 6: _forward_unpadded
def _forward_unpadded(self, x, x_mask):
    """Faster encoding that ignores any padding."""
    # Transpose batch and sequence dims
    x = x.transpose(0, 1)
    # Encode all layers
    outputs = [x]
    for i in range(self.num_layers):
        rnn_input = outputs[-1]
        # Apply dropout to hidden input
        if self.dropout_rate > 0:
            rnn_input = F.dropout(rnn_input,
                                  p=self.dropout_rate,
                                  training=self.training)
        # Forward
        rnn_output = self.rnns[i](rnn_input)[0]
        outputs.append(rnn_output)
    # Concat hidden layers
    if self.concat_layers:
        output = torch.cat(outputs[1:], 2)
    else:
        output = outputs[-1]
    # Transpose back
    output = output.transpose(0, 1)
    # Dropout on output layer
    if self.dropout_output and self.dropout_rate > 0:
        output = F.dropout(output,
                           p=self.dropout_rate,
                           training=self.training)
    return output
Example 7: forward
def forward(self, x, encoder_padding_mask):
    """
    Args:
        x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
        encoder_padding_mask (ByteTensor): binary ByteTensor of shape
            `(batch, src_len)` where padding elements are indicated by ``1``.

    Returns:
        encoded output of shape `(batch, src_len, embed_dim)`
    """
    residual = x
    x = self.maybe_layer_norm(0, x, before=True)
    x, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
    x = F.dropout(x, p=self.dropout, training=self.training)
    x = residual + x
    x = self.maybe_layer_norm(0, x, after=True)
    residual = x
    x = self.maybe_layer_norm(1, x, before=True)
    x = F.relu(self.fc1(x))
    x = F.dropout(x, p=self.relu_dropout, training=self.training)
    x = self.fc2(x)
    x = F.dropout(x, p=self.dropout, training=self.training)
    x = residual + x
    x = self.maybe_layer_norm(1, x, after=True)
    return x
Example 8: forward
def forward(self, src_tokens):
    bsz, seqlen = src_tokens.size()
    num_layers = len(self.layers)
    # embed tokens
    x = self.embed_tokens(src_tokens)
    x = F.dropout(x, p=self.dropout_in, training=self.training)
    embed_dim = x.size(2)
    # B x T x C -> T x B x C
    x = x.transpose(0, 1)
    final_hiddens, final_cells = [], []
    outs = [x[j] for j in range(seqlen)]
    for i, rnn in enumerate(self.layers):
        hidden = Variable(x.data.new(bsz, embed_dim).zero_())
        cell = Variable(x.data.new(bsz, embed_dim).zero_())
        for j in range(seqlen):
            # recurrent cell
            hidden, cell = rnn(outs[j], (hidden, cell))
            # store the most recent hidden state in outs, either to be used
            # as the input for the next layer, or as the final output
            outs[j] = F.dropout(hidden, p=self.dropout_out, training=self.training)
        # save the final hidden and cell states for every layer
        final_hiddens.append(hidden)
        final_cells.append(cell)
    # collect outputs across time steps
    x = torch.cat(outs, dim=0).view(seqlen, bsz, embed_dim)
    final_hiddens = torch.cat(final_hiddens, dim=0).view(num_layers, bsz, embed_dim)
    final_cells = torch.cat(final_cells, dim=0).view(num_layers, bsz, embed_dim)
    return x, final_hiddens, final_cells
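Variable here is the pre-0.4 torch.autograd wrapper; on current PyTorch the same zero-initialized states can be created directly with Tensor.new_zeros, which matches the input's dtype and device:

# equivalent initialization on PyTorch >= 0.4, no Variable wrapper needed
hidden = x.new_zeros(bsz, embed_dim)
cell = x.new_zeros(bsz, embed_dim)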
Example 9: forward
def forward(self, x):
    x = x.view(-1, 28 * 28)
    x = F.relu(self.fc1(x))
    x = F.dropout(x, p=0.8, training=self.training)
    x = F.relu(self.fc2(x))
    x = F.dropout(x, p=0.8, training=self.training)
    x = self.fc3(x)
    return x
Example 10: hidden_to_idx
def hidden_to_idx(self, hidden, is_training=False):
    """Convert hidden state vectors into indices into the dictionary."""
    # dropout at each step
    e = F.dropout(self.h2e(hidden), p=self.dropout, training=is_training)
    scores = F.dropout(self.e2o(e), p=self.dropout, training=is_training)
    # skip zero (null_idx) when selecting a score
    _max_score, idx = scores.narrow(2, 1, scores.size(2) - 1).max(2)
    # add one back to index since we removed first option
    return idx.add_(1), scores
Example 11: forward
def forward(self, inputs):  # inputs (batch size, "sentence" length) bs,n
    embeds = self.embeddings(inputs)              # bs,n,300
    embeds = embeds.view(-1, n * 300)             # bs,n*300
    out = F.tanh(self.h(embeds))                  # bs,hidden_size
    out = self.u(F.dropout(out, p=dropout_rate))  # bs,|V|
    embeds = F.dropout(embeds, p=dropout_rate)
    out += self.w(embeds)                         # bs,|V|
    # out = F.softmax(out, dim=1)
    return out
Example 12: forward
def forward(self, input):
    x = F.leaky_relu(self.fc1(input), 0.2)
    x = F.dropout(x, 0.3)
    x = F.leaky_relu(self.fc2(x), 0.2)
    x = F.dropout(x, 0.3)
    x = F.leaky_relu(self.fc3(x), 0.2)
    x = F.dropout(x, 0.3)
    x = F.sigmoid(self.fc4(x))
    return x
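Recent PyTorch releases deprecate F.sigmoid (and F.tanh, used in Examples 4 and 11) in favor of the torch-namespace equivalents; a drop-in replacement for the final activation would be:

x = torch.sigmoid(self.fc4(x))  # same result, non-deprecated call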
Example 13: forward
def forward(self, x):
    x = F.relu(self.conv1(x))       # 28x28x32 -> 26x26x32
    x = F.relu(self.conv2(x))       # 26x26x32 -> 24x24x64
    x = F.max_pool2d(x, 2)          # 24x24x64 -> 12x12x64
    x = F.dropout(x, p=0.25, training=self.training)
    x = x.view(-1, 12 * 12 * 64)    # flatten: 12x12x64 = 9216
    x = F.relu(self.fc1(x))         # fc 9216 -> 128
    x = F.dropout(x, p=0.5, training=self.training)
    x = self.fc2(x)                 # fc 128 -> 10
    return F.log_softmax(x, dim=1)  # log-probabilities over the 10 classes
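The layer definitions this forward relies on are not shown; assuming a single-channel 28x28 MNIST-style input (the usual setup behind these shape comments), a plausible __init__ sketch is as follows, with layer names taken from the forward and everything else assumed:

def __init__(self):
    super().__init__()
    self.conv1 = nn.Conv2d(1, 32, kernel_size=3)   # 28x28 input -> 26x26x32
    self.conv2 = nn.Conv2d(32, 64, kernel_size=3)  # 26x26x32 -> 24x24x64
    self.fc1 = nn.Linear(12 * 12 * 64, 128)        # 9216 -> 128
    self.fc2 = nn.Linear(128, 10)                  # 128 -> 10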
Example 14: _forward_padded
def _forward_padded(self, x, x_mask):
    """Slower (significantly), but more precise, encoding that handles padding."""
    # Compute sorted sequence lengths
    lengths = x_mask.data.eq(0).long().sum(1).squeeze()
    _, idx_sort = torch.sort(lengths, dim=0, descending=True)
    _, idx_unsort = torch.sort(idx_sort, dim=0)
    lengths = list(lengths[idx_sort])
    idx_sort = Variable(idx_sort)
    idx_unsort = Variable(idx_unsort)
    # Sort x
    x = x.index_select(0, idx_sort)
    # Transpose batch and sequence dims
    x = x.transpose(0, 1)
    # Pack it up
    rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths)
    # Encode all layers
    outputs = [rnn_input]
    for i in range(self.num_layers):
        rnn_input = outputs[-1]
        # Apply dropout to input
        if self.dropout_rate > 0:
            dropout_input = F.dropout(rnn_input.data,
                                      p=self.dropout_rate,
                                      training=self.training)
            rnn_input = nn.utils.rnn.PackedSequence(dropout_input,
                                                    rnn_input.batch_sizes)
        outputs.append(self.rnns[i](rnn_input)[0])
    # Unpack everything
    for i, o in enumerate(outputs[1:], 1):
        outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0]
    # Concat hidden layers or take final
    if self.concat_layers:
        output = torch.cat(outputs[1:], 2)
    else:
        output = outputs[-1]
    # Transpose and unsort
    output = output.transpose(0, 1)
    output = output.index_select(0, idx_unsort)
    # Dropout on output layer
    if self.dropout_output and self.dropout_rate > 0:
        output = F.dropout(output,
                           p=self.dropout_rate,
                           training=self.training)
    return output
Example 15: forward
def forward(self, input, hidden):
    # input is (sentence length, batch size) n,bs
    # hidden is ((n_layers,bs,hidden_size), (n_layers,bs,hidden_size))
    embeds = self.embedding(input)  # n,bs,300
    # batch goes along the second dimension
    out = F.dropout(embeds, p=dropout_rate)
    out, hidden = self.lstm(out, hidden)
    out = F.dropout(out, p=dropout_rate)
    # apply the linear and the softmax
    out = self.linear(out)  # n,bs,|V|
    # out = self.softmax(out)  # This was originally the output. (SG: I see this is LogSoftmax)
    return out, hidden
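For completeness, the hidden tuple this forward expects (per the shape comment above) can be zero-initialized before the first call; a minimal sketch in which n_layers, bs, hidden_size and the model name are assumed values:

hidden = (torch.zeros(n_layers, bs, hidden_size),
          torch.zeros(n_layers, bs, hidden_size))
out, hidden = model(input_batch, hidden)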