This article collects typical usage examples of the Python method torch.nn.functional.dropout2d. If you are wondering what functional.dropout2d does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore further usage examples from the containing module torch.nn.functional.
Below are 15 code examples of functional.dropout2d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
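Before the collected examples, here is a minimal sketch of the call itself (the tensor shape and probability are illustrative only and are not taken from any example below): during training, F.dropout2d zeroes entire feature maps (channels) with probability p and rescales the surviving channels by 1/(1-p); at evaluation time it returns the input unchanged.

import torch
import torch.nn.functional as F

x = torch.randn(4, 8, 16, 16)              # (batch, channels, height, width)

# Training mode: whole channels are zeroed with probability p,
# and the remaining channels are scaled by 1 / (1 - p).
y_train = F.dropout2d(x, p=0.5, training=True)

# Evaluation mode: the input is returned unchanged.
y_eval = F.dropout2d(x, p=0.5, training=False)
assert torch.equal(y_eval, x)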
Example 1: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x):
    encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
    encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
    center = self.center(encoder5)
    dec5 = self.dec5(center, encoder5)
    dec4 = self.dec4(dec5, encoder4)
    dec3 = self.dec3(dec4, encoder3)
    dec2 = self.dec2(dec3, encoder2)
    dec1 = self.dec1(dec2)
    if self.use_hypercolumn:
        dec1 = torch.cat([dec1,
                          F.upsample(dec2, scale_factor=2, mode='bilinear'),
                          F.upsample(dec3, scale_factor=4, mode='bilinear'),
                          F.upsample(dec4, scale_factor=8, mode='bilinear'),
                          F.upsample(dec5, scale_factor=16, mode='bilinear'),
                          ], 1)
    return self.final(dec1)
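Several of the examples on this page call F.upsample, which is deprecated in recent PyTorch releases in favor of F.interpolate. As a minimal sketch (not taken from any of the original repositories, with made-up shapes), the hypercolumn concatenation above can be written with the non-deprecated API like this:

import torch
import torch.nn.functional as F

# Hypothetical decoder outputs; the shapes are illustrative only.
dec1 = torch.randn(1, 16, 64, 64)
dec2 = torch.randn(1, 16, 32, 32)

# F.interpolate is the modern replacement for the deprecated F.upsample.
hypercolumn = torch.cat([
    dec1,
    F.interpolate(dec2, scale_factor=2, mode='bilinear', align_corners=False),
], 1)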
Example 2: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x, d=None):
    encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
    encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
    center = self.center(encoder5)
    dec5 = self.dec5(center, encoder5)
    dec4 = self.dec4(dec5, encoder4)
    dec3 = self.dec3(dec4, encoder3)
    dec2 = self.dec2(dec3, encoder2)
    dec1 = self.dec1(dec2)
    if self.use_hypercolumn:
        dec1 = torch.cat([dec1,
                          F.upsample(dec2, scale_factor=2, mode='bilinear'),
                          F.upsample(dec3, scale_factor=4, mode='bilinear'),
                          F.upsample(dec4, scale_factor=8, mode='bilinear'),
                          F.upsample(dec5, scale_factor=16, mode='bilinear'),
                          ], 1)
    depth_channel_excitation = self.depth_channel_excitation(dec1, d)
    return self.final(depth_channel_excitation)
Example 3: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x):
    encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
    encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
    psp = self.psp(encoder5)
    up4 = self.up4(psp)
    up3 = self.up3(up4)
    up2 = self.up2(up3)
    up1 = self.up1(up2)
    if self.use_hypercolumn:
        hypercolumn = torch.cat([up1,
                                 F.upsample(up2, scale_factor=2, mode='bilinear'),
                                 F.upsample(up3, scale_factor=4, mode='bilinear'),
                                 F.upsample(up4, scale_factor=8, mode='bilinear'),
                                 ], 1)
        drop = F.dropout2d(hypercolumn, p=self.dropout_2d)
    else:
        drop = F.dropout2d(up4, p=self.dropout_2d)
    return self.final(drop)
Example 4: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, X, mask=None, training=False):
    # Simple pooling layers
    max_masked = self.replace_masked_values(X, mask.unsqueeze(2), -1e7)
    max_pool = torch.max(max_masked, 1)[0]
    min_masked = self.replace_masked_values(X, mask.unsqueeze(2), +1e7)
    min_pool = torch.min(min_masked, 1)[0]
    mean_pool = torch.sum(X, 1) / torch.sum((1 - mask).float(), 1, keepdim=True)
    # Self-attentive pooling layer.
    # Run through a linear projection. Shape: (batch_size, sequence_length, 1).
    # Then remove the last dimension to get the proper attention shape: (batch_size, sequence_length).
    # X = X.permute(0, 2, 1)  # convert to [batch, channels, time]
    # X = F.dropout2d(X, 0.5, training=training)
    # X = X.permute(0, 2, 1)  # back to [batch, time, channels]
    self_attentive_logits = self._self_attentive_pooling_projection(X).squeeze(2)
    self_weights = self.masked_softmax(self_attentive_logits, 1 - mask)
    self_attentive_pool = self.weighted_sum(X, self_weights)
    pooled_representations = torch.cat([max_pool, min_pool, self_attentive_pool], 1)
    pooled_representations_dropped = self._integrator_dropout(self_attentive_pool)
    outputs = self._output_layer(pooled_representations_dropped)
    return outputs, self_weights
Example 5: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x):
    task = config_task.task
    y = self.conv(x)
    if self.second == 0:
        if config_task.isdropout1:
            x = F.dropout2d(x, p=0.5, training=self.training)
    else:
        if config_task.isdropout2:
            x = F.dropout2d(x, p=0.5, training=self.training)
    if config_task.mode == 'parallel_adapters' and self.is_proj:
        y = y + self.parallel_conv[task](x)
    y = self.bns[task](y)
    return y

# No projection: identity shortcut
Example 6: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x, gts=None):
    feats = self.feature_extractor(x)
    x_in = self.stage_0(feats[-1])
    # up conv
    n_feats = self.feature_extractor.n_feats[1:]
    for i in range(1, len(n_feats)):
        x_depth_out = getattr(self, 'upconv_{}'.format(i))(x_in)
        x_project = getattr(self, 'proj_{}'.format(i))(feats[-1 - i])
        x_in = torch.cat((x_depth_out, x_project), 1)
    # cls features
    x_cls_in = x_in
    # x_cls_in = F.dropout2d(x_cls_in, training=self.training, inplace=True)
    cls_feat = self.cls_conv(x_cls_in)
    return cls_feat
Example 7: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x):
    encoder2, encoder3, encoder4, encoder5 = self.encoder(x)
    encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
    gcn2 = self.enc_br2(self.gcn2(encoder2))
    gcn3 = self.enc_br3(self.gcn3(encoder3))
    gcn4 = self.enc_br4(self.gcn4(encoder4))
    gcn5 = self.enc_br5(self.gcn5(encoder5))
    decoder5 = self.deconv5(gcn5)
    decoder4 = self.deconv4(self.dec_br4(decoder5 + gcn4))
    decoder3 = self.deconv3(self.dec_br3(decoder4 + gcn3))
    decoder2 = self.dec_br1(self.deconv2(self.dec_br2(decoder3 + gcn2)))
    if self.pool0:
        decoder2 = self.dec_br0_2(self.deconv1(self.dec_br0_1(decoder2)))
    return self.final(decoder2)
Example 8: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x):
    encoder2, encoder3, encoder4, encoder5 = self.encoder(x)
    encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
    center = self.center(encoder5)
    dec5 = self.dec5(center, encoder5)
    dec4 = self.dec4(dec5, encoder4)
    dec3 = self.dec3(dec4, encoder3)
    dec2 = self.dec2(dec3, encoder2)
    dec1 = self.dec1(dec2)
    if self.use_hypercolumn:
        dec1 = torch.cat([dec1,
                          F.upsample(dec2, scale_factor=2, mode='bilinear'),
                          F.upsample(dec3, scale_factor=4, mode='bilinear'),
                          F.upsample(dec4, scale_factor=8, mode='bilinear'),
                          F.upsample(dec5, scale_factor=16, mode='bilinear'),
                          ], 1)
    if self.pool0:
        dec1 = self.dec0(dec1)
    return self.final(dec1)
Example 9: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x):
    encoder2, encoder3, encoder4, encoder5 = self.encoder(x)
    encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
    psp = self.psp(encoder5)
    up4 = self.up4(psp)
    up3 = self.up3(up4)
    up2 = self.up2(up3)
    up1 = self.up1(up2)
    if self.use_hypercolumn:
        hypercolumn = torch.cat([up1,
                                 F.upsample(up2, scale_factor=2, mode='bilinear'),
                                 F.upsample(up3, scale_factor=4, mode='bilinear'),
                                 F.upsample(up4, scale_factor=8, mode='bilinear'),
                                 ], 1)
        drop = F.dropout2d(hypercolumn, p=self.dropout_2d)
    else:
        drop = F.dropout2d(up1, p=self.dropout_2d)
    if self.pool0:
        drop = self.up0(drop)
    return self.final(drop)
Example 10: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x):
    conv1 = self.conv1(x)
    conv2 = self.conv2(self.pool(conv1))
    conv3 = self.conv3(self.pool(conv2))
    conv4 = self.conv4(self.pool(conv3))
    conv5 = self.conv5(self.pool(conv4))
    center = self.center(self.pool(conv5))
    dec5 = self.dec5(torch.cat([center, conv5], 1))
    dec4 = self.dec4(torch.cat([dec5, conv4], 1))
    dec3 = self.dec3(torch.cat([dec4, conv3], 1))
    dec2 = self.dec2(torch.cat([dec3, conv2], 1))
    dec1 = self.dec1(torch.cat([dec2, conv1], 1))
    return self.final(F.dropout2d(dec1, p=self.dropout_2d))
Example 11: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, inputs_mean, inputs_variance):
    if self.training:
        binary_mask = torch.ones_like(inputs_mean)
        binary_mask = F.dropout2d(binary_mask, self.p, self.training, self.inplace)
        outputs_mean = inputs_mean * binary_mask
        outputs_variance = inputs_variance * binary_mask ** 2
        if self._keep_variance_fn is not None:
            outputs_variance = self._keep_variance_fn(outputs_variance)
        return outputs_mean, outputs_variance

    outputs_variance = inputs_variance
    if self._keep_variance_fn is not None:
        outputs_variance = self._keep_variance_fn(outputs_variance)
    return inputs_mean, outputs_variance
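This example propagates both a mean and a variance through dropout by sampling one shared mask. Multiplying a random variable by a constant c scales its variance by c squared (Var(cX) = c^2 * Var(X)), which is why the variance is multiplied by the squared mask. A minimal numeric sketch with made-up tensors (not part of the original module):

import torch
import torch.nn.functional as F

inputs_mean = torch.randn(2, 4, 8, 8)
inputs_variance = torch.rand(2, 4, 8, 8)   # variances must be non-negative

# Sample one (rescaled) binary channel mask and apply it to mean and variance.
binary_mask = F.dropout2d(torch.ones_like(inputs_mean), p=0.3, training=True)
outputs_mean = inputs_mean * binary_mask
outputs_variance = inputs_variance * binary_mask ** 2   # Var(c * X) = c**2 * Var(X)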
Example 12: init
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True):
    with torch.no_grad():
        x = self.embed_scale * self.tgt_embed(tgt_sents)
        x = F.dropout2d(x, p=self.dropword, training=self.training)
        x += self.pos_enc(tgt_sents)
        x = F.dropout(x, p=0.2, training=self.training)
        mask = tgt_masks.eq(0)
        key_mask = src_masks.eq(0)
        for layer in self.layers:
            x = layer.init(x, mask, src_enc, key_mask, init_scale=init_scale)
        x = x * tgt_masks.unsqueeze(2)
        mu = self.mu.init(x, init_scale=0.05 * init_scale) if init_mu else self.mu(x)
        logvar = self.logvar.init(x, init_scale=0.05 * init_scale) if init_var else self.logvar(x)
        mu = mu * tgt_masks.unsqueeze(2)
        logvar = logvar * tgt_masks.unsqueeze(2)
        return mu, logvar
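In this example F.dropout2d is applied to a 3-D tensor of token embeddings, shaped (batch, length, dim). In older PyTorch versions a 3-D input is treated as (N, C, L), so whole "channels" along the length dimension, i.e. entire token embeddings, are zeroed together, which matches the dropword name; newer versions warn about 3-D inputs and the exact behavior is version-dependent. A hand-rolled, version-independent sketch of the same idea (all names and shapes below are illustrative, not from the original model):

import torch

def word_dropout(x, p, training=True):
    # Zero whole token embeddings with probability p and rescale survivors.
    if not training or p == 0.0:
        return x
    # x: (batch, length, dim); keep/drop decisions are made per token.
    keep = (torch.rand(x.shape[0], x.shape[1], 1, device=x.device) >= p).to(x.dtype)
    return x * keep / (1.0 - p)

x = torch.randn(2, 5, 8)
y = word_dropout(x, p=0.2)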
Example 13: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, input, att, word):
    ## FC q
    word_W = F.dropout(word, self.dropout, training=self.training)
    weight = F.tanh(self.fcq_w(word_W)).view(-1, self.num_features, 1, 1)
    ## FC v
    v = F.dropout2d(input, self.dropout, training=self.training)
    v = v * F.relu(1 - att).unsqueeze(1).expand_as(input)
    v = F.tanh(self.conv1(v))
    ## attMap
    inputAttShift = F.tanh(self.fcShift1(torch.cat((att.view(-1, self.num_outputs * 14 * 14), word), 1)))
    inputAttShift = F.tanh(self.fcShift2(inputAttShift)).view(-1, self.num_features, 1, 1)
    ## v * q_tile
    v = v * weight.expand_as(v) * inputAttShift.expand_as(v)  # v = self.cbn1(F.tanh(v), word)  # applying the non-linearity before CBN is equivalent to MLB
    # no tanh should be here
    v = self.conv2(v)
    # Normalize to a single area
    return F.softmax(v.view(-1, 14 * 14), dim=1).view(-1, self.num_outputs, 14, 14)
Example 14: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x, time=0, **kargs):
    if self.training:
        with torch.no_grad():
            p = self.p.getVal(time=time)
            mask = (F.dropout2d if self.use_2d else F.dropout)(h.ones(x.size()), p=p, training=True)
        if self.alpha_dropout:
            with torch.no_grad():
                keep_prob = 1 - p
                alpha = -1.7580993408473766
                a = math.pow(keep_prob + alpha * alpha * keep_prob * (1 - keep_prob), -0.5)
                b = -a * alpha * (1 - keep_prob)
                mask = mask * a
            return x * mask + b
        else:
            return x * mask
    else:
        return x
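The constant -1.7580993408473766 in this example appears to be the negative saturation value of SELU, -lambda * alpha with lambda ≈ 1.0507 and alpha ≈ 1.6733, which is the value alpha dropout pushes dropped activations towards; the affine correction (a, b) then restores approximately zero mean and unit variance, as in the self-normalizing networks formulation. A small numeric check of the constants (an illustration, not part of the original module):

import math

selu_lambda = 1.0507009873554805
selu_alpha = 1.6732632423543772

alpha_prime = -selu_lambda * selu_alpha          # value dropped units are pushed towards
print(alpha_prime)                               # ~ -1.7580993408473766

keep_prob = 0.9
a = math.pow(keep_prob + alpha_prime ** 2 * keep_prob * (1 - keep_prob), -0.5)
b = -a * alpha_prime * (1 - keep_prob)           # affine correction restoring mean/variance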
Example 15: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout2d [as alias]
def forward(self, x):
    res = x
    # step 1. Expansion phase / point-wise convolution
    if self.expand_ratio != 1:
        x = self.expansion(x)
    # step 2. Depth-wise convolution phase
    x = self.depth_wise(x)
    # step 3. Squeeze and excitation
    if self.use_se:
        x = self.se_block(x)
    # step 4. Point-wise convolution phase
    x = self.point_wise(x)
    # step 5. Skip connection and drop connect
    if self.use_residual:
        if self.training and (self.dropout_rate is not None):
            x = F.dropout2d(input=x, p=self.dropout_rate,
                            training=self.training, inplace=True)
        x = x + res
    return x