This page collects typical usage examples of the Python method torch.nn.AlphaDropout. If you are wondering what nn.AlphaDropout does, how to use it, or what it looks like in practice, the curated example code below may help. You can also explore further usage examples from torch.nn, the module in which this method lives.
Eleven code examples of nn.AlphaDropout are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
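Before the examples, here is a minimal, self-contained sketch of the basic pattern (not taken from any example below; all names are illustrative). nn.AlphaDropout is designed to follow SELU activations: unlike ordinary nn.Dropout, it preserves the zero mean and unit variance that self-normalizing networks rely on.

import torch
from torch import nn

# Minimal sketch: AlphaDropout paired with SELU in a small MLP
net = nn.Sequential(
    nn.Linear(16, 32),
    nn.SELU(),
    nn.AlphaDropout(p=0.2),  # active in train(), identity in eval()
    nn.Linear(32, 1),
)

x = torch.randn(8, 16)
net.train()
print(net(x).shape)  # torch.Size([8, 1])
net.eval()           # dropout is disabled at inference time
print(net(x).shape)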
Example 1: _get_layer
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
# (the type hints below also require: from typing import Optional)
def _get_layer(self, idx:int, fan_in:Optional[int]=None, fan_out:Optional[int]=None) -> nn.Module:
    fan_in = self.width if fan_in is None else fan_in
    fan_out = self.width if fan_out is None else fan_out
    if fan_in < 1: fan_in = 1
    if fan_out < 1: fan_out = 1

    layers = []
    for i in range(2 if self.res and idx > 0 else 1):
        layers.append(nn.Linear(fan_in, fan_out))
        self.lookup_init(self.act, fan_in, fan_out)(layers[-1].weight)
        nn.init.zeros_(layers[-1].bias)
        if self.act != 'linear': layers.append(self.lookup_act(self.act))
        if self.bn and i == 0: layers.append(nn.BatchNorm1d(fan_out))  # In case of residual, BN will be added after addition
        if self.do:
            if self.act == 'selu': layers.append(nn.AlphaDropout(self.do))
            else: layers.append(nn.Dropout(self.do))
    return nn.Sequential(*layers)
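The branch at the end is the pattern worth noting: when the activation is 'selu', this layer factory swaps in nn.AlphaDropout instead of nn.Dropout, because standard dropout would disturb the zero-mean, unit-variance statistics that SELU-based self-normalizing networks depend on.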
Example 2: fix_batchnorm
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
def fix_batchnorm(model):
    if isinstance(model, list):
        for m in model:
            fix_batchnorm(m)
    else:
        for m in model.modules():
            if isinstance(m, nn.BatchNorm1d):
                # print('Fix BatchNorm1d')
                m.eval()
            elif isinstance(m, nn.BatchNorm2d):
                # print('Fix BatchNorm2d')
                m.eval()
            elif isinstance(m, nn.BatchNorm3d):
                # print('Fix BatchNorm3d')
                m.eval()
            elif isinstance(m, nn.Dropout):
                # print('Fix Dropout')
                m.eval()
            elif isinstance(m, nn.AlphaDropout):
                # print('Fix AlphaDropout')
                m.eval()
Example 3: __init__
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
def __init__(self, input_dim=5, hidden_dim=1024):
    """
    Averaged embeddings of ending -> label
    :param input_dim: input feature dimension
    :param hidden_dim: hidden dimension to use
    """
    super(LMFeatsModel, self).__init__()
    self.mapping = nn.Sequential(
        nn.Linear(input_dim, hidden_dim, bias=True),
        nn.SELU(),
        nn.AlphaDropout(p=0.2),
    )
    self.prediction = nn.Sequential(
        nn.Linear(hidden_dim, hidden_dim, bias=True),
        nn.SELU(),
        nn.AlphaDropout(p=0.2),
        nn.Linear(hidden_dim, 1, bias=False),
    )
Example 4: __init__
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
def __init__(self, input_dim):
    super(FeedForward, self).__init__()
    self.classifier = nn.Sequential(
        nn.Linear(input_dim, 256),
        nn.BatchNorm1d(256),
        nn.ReLU(),
        nn.Linear(256, 256),
        nn.BatchNorm1d(256),
        nn.AlphaDropout(p=0.5),
        nn.ReLU(),
        nn.Linear(256, 256),
        nn.BatchNorm1d(256),
        nn.ReLU(),
        nn.Linear(256, 256),
        nn.BatchNorm1d(256),
        nn.ReLU(),
        nn.Linear(256, 256),
        nn.BatchNorm1d(256),
        nn.ReLU(),
        nn.Linear(256, 1),
        nn.Sigmoid()
    )
Example 5: __init__
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
def __init__(self, p=0.5):
    super(AlphaDropout, self).__init__()
    if p < 0 or p > 1:
        raise ValueError("dropout probability has to be between 0 and 1, "
                         "but got {}".format(p))
    self.p = p
Example 6: alpha_dropout
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
# (this legacy snippet also requires: from torch.autograd import Variable)
def alpha_dropout(input, p=0.5, training=False):
    r"""Applies alpha dropout to the input.

    See :class:`~torch.nn.AlphaDropout` for details.

    Args:
        p (float, optional): the drop probability
        training (bool, optional): switch between training and evaluation mode
    """
    if p < 0 or p > 1:
        raise ValueError("dropout probability has to be between 0 and 1, "
                         "but got {}".format(p))

    if p == 0 or not training:
        return input

    alpha = -1.7580993408473766
    keep_prob = 1 - p
    # Build a Bernoulli mask that is 1 where units are dropped
    # (legacy Variable-era tensor API)
    noise = input.data.new().resize_(input.size())
    noise.bernoulli_(p)
    noise = Variable(noise.byte())

    # Dropped units are set to the SELU saturation value alpha' = -lambda * alpha,
    # then the output is rescaled affinely to restore zero mean and unit variance
    output = input.masked_fill(noise, alpha)

    a = (keep_prob + alpha ** 2 * keep_prob * (1 - keep_prob)) ** (-0.5)
    b = -a * alpha * (1 - keep_prob)

    return output.mul_(a).add_(b)
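The snippet above reproduces a legacy implementation from an early PyTorch release; it depends on the long-removed Variable wrapper and the old .data/.resize_ tensor API. In current PyTorch the same operation is exposed directly as torch.nn.functional.alpha_dropout, as in this minimal sketch:

import torch
import torch.nn.functional as F

x = torch.randn(4, 8)
y = F.alpha_dropout(x, p=0.2, training=True)   # stochastic: applied during training
z = F.alpha_dropout(x, p=0.2, training=False)  # identity: z equals x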
Example 7: _get_layer
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
def _get_layer(self, fan_in:int, fan_out:int) -> nn.Module:
    layers = []
    layers.append(nn.Linear(fan_in, fan_out))
    self.lookup_init(self.act, fan_in, fan_out)(layers[-1].weight)
    nn.init.zeros_(layers[-1].bias)
    if self.act != 'linear': layers.append(self.lookup_act(self.act))
    if self.bn: layers.append(nn.BatchNorm1d(fan_out))
    if self.do:
        if self.act == 'selu': layers.append(nn.AlphaDropout(self.do))
        else: layers.append(nn.Dropout(self.do))
    return nn.Sequential(*layers)
Example 8: __init__
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
def __init__(self):
    super(MLPModel, self).__init__()
    # self.mapping = nn.Linear(train_data.feats.shape[2], 1, bias=False)
    self.mapping = nn.Sequential(
        nn.Linear(all_data.shape[-1], 2048, bias=True),  # all_data is defined elsewhere in the source project
        nn.SELU(),
        nn.AlphaDropout(p=0.2),
        nn.Linear(2048, 2048, bias=True),
        nn.SELU(),
        nn.AlphaDropout(p=0.2),
        nn.Linear(2048, 1, bias=False),
    )
Example 9: __init__
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
def __init__(self, drop_type):
    super(Drop, self).__init__()
    # keep_origin and DropBlock3D are defined elsewhere in the source project
    if drop_type is None:
        self.drop = keep_origin
    elif drop_type == 'alpha':
        self.drop = nn.AlphaDropout(p=0.5)
    elif drop_type == 'dropout':
        self.drop = nn.Dropout3d(p=0.5)
    elif drop_type == 'drop_block':
        self.drop = DropBlock3D(drop_prob=0.2, block_size=2)
    else:
        raise NotImplementedError('{} not implemented'.format(drop_type))
Example 10: __init__
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
def __init__(self, classes, mode='rpntrain', num_gpus=1, nms_filter_duplicates=True,
             max_per_img=64, use_resnet=False, thresh=0.05):
    """
    :param classes: Object classes
    :param rel_classes: Relationship classes. None if we're not using rel mode
    :param num_gpus: how many GPUs to use
    """
    super(ObjectDetector, self).__init__()

    if mode not in self.MODES:
        raise ValueError("invalid mode")
    self.mode = mode

    self.classes = classes
    self.num_gpus = num_gpus
    self.pooling_size = 7
    self.nms_filter_duplicates = nms_filter_duplicates
    self.max_per_img = max_per_img
    self.use_resnet = use_resnet
    self.thresh = thresh

    if not self.use_resnet:
        vgg_model = load_vgg()
        self.features = vgg_model.features
        self.roi_fmap = vgg_model.classifier
        rpn_input_dim = 512
        output_dim = 4096
    else:  # Deprecated
        self.features = load_resnet()
        self.compress = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(256),
        )
        self.roi_fmap = nn.Sequential(
            nn.Linear(256 * 7 * 7, 2048),
            nn.SELU(inplace=True),
            nn.AlphaDropout(p=0.05),
            nn.Linear(2048, 2048),
            nn.SELU(inplace=True),
            nn.AlphaDropout(p=0.05),
        )
        rpn_input_dim = 1024
        output_dim = 2048

    self.score_fc = nn.Linear(output_dim, self.num_classes)
    self.bbox_fc = nn.Linear(output_dim, self.num_classes * 4)
    self.rpn_head = RPNHead(dim=512, input_dim=rpn_input_dim)
Example 11: __init__
# Required module: from torch import nn [as alias]
# Or: from torch.nn import AlphaDropout [as alias]
def __init__(self, classes, mode='rpntrain', num_gpus=1, nms_filter_duplicates=True,
             max_per_img=64, use_resnet=False, thresh=0.05, use_rl_tree=False):
    """
    :param classes: Object classes
    :param rel_classes: Relationship classes. None if we're not using rel mode
    :param num_gpus: how many GPUs to use
    """
    super(ObjectDetector, self).__init__()

    if mode not in self.MODES:
        raise ValueError("invalid mode")
    self.mode = mode

    self.classes = classes
    self.num_gpus = num_gpus
    self.pooling_size = 7
    self.nms_filter_duplicates = nms_filter_duplicates
    self.max_per_img = max_per_img
    self.use_resnet = use_resnet
    self.thresh = thresh
    self.use_rl_tree = use_rl_tree

    if not self.use_resnet:
        vgg_model = load_vgg()
        self.features = vgg_model.features
        self.roi_fmap = vgg_model.classifier
        rpn_input_dim = 512
        output_dim = 4096
    else:  # Deprecated
        self.features = load_resnet()
        self.compress = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(256),
        )
        self.roi_fmap = nn.Sequential(
            nn.Linear(256 * 7 * 7, 2048),
            nn.SELU(inplace=True),
            # nn.AlphaDropout(p=0.05),
            nn.Linear(2048, 2048),
            nn.SELU(inplace=True),
            # nn.AlphaDropout(p=0.05),
        )
        rpn_input_dim = 1024
        output_dim = 2048

    self.score_fc = nn.Linear(output_dim, self.num_classes)
    self.bbox_fc = nn.Linear(output_dim, self.num_classes * 4)
    self.rpn_head = RPNHead(dim=512, input_dim=rpn_input_dim)