This article collects typical usage examples of torch.nn.__dict__ in Python. If you are wondering what nn.__dict__ is for or how to call it, the curated code samples below may help; you can also explore the torch.nn module itself for more context.
Six code examples of nn.__dict__ are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
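Before the examples, a minimal sketch of the underlying pattern: torch.nn.__dict__ is the attribute dictionary of the torch.nn module, so indexing it with a class-name string is equivalent to ordinary attribute access. (The snippet below is illustrative and not taken from any of the examples.)

import torch.nn as nn

# Looking a layer class up by string name through the module dictionary...
conv_cls = nn.__dict__["Conv2d"]
assert conv_cls is nn.Conv2d            # ...yields the very same class object
layer = conv_cls(3, 16, kernel_size=3)  # build a 3-in/16-out 3x3 convolution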
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import __dict__ [as alias]
# This snippet presumably also needs: import torch.nn.functional as func
def __init__(self, N, branches, in_channels, preprocess=None, activation=func.tanh):
    """Pixel-wise branch selection layer using attention.

    Args:
        N (int): dimensionality of the convolutions (1, 2 or 3).
        branches (iterable of nn.Module): neural network branches to choose from.
        in_channels (int): number of input channels.
        preprocess (nn.Module): module performing feature preprocessing for attention.
        activation (callable): activation function for the attention computation.
    """
    super(AttentionBranch, self).__init__()
    self.is_module = False
    if isinstance(branches, nn.Module):
        self.branches = branches
        self.is_module = True
    else:
        self.branches = nn.ModuleList(branches)
    branch_size = len(self.branches)
    self.attention_preprocess = preprocess
    if self.attention_preprocess is None:
        # Default preprocessing: a size-3 convolution of the matching dimension.
        self.attention_preprocess = nn.__dict__[f"Conv{N}d"](in_channels, in_channels, 3)
    self.attention_activation = activation
    # One attention map per branch, computed with a 1x1 convolution.
    self.attention_calculation = nn.__dict__[f"Conv{N}d"](in_channels, branch_size, 1)
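A hypothetical way to construct this layer (the branch modules and channel counts below are made up for illustration; the class's forward pass is not shown in this excerpt):

import torch.nn as nn

# Two candidate 2D branches with matching input/output channels (illustrative).
branches = [
    nn.Conv2d(16, 16, 3, padding=1),
    nn.Sequential(nn.Conv2d(16, 16, 3, padding=1), nn.ReLU()),
]
layer = AttentionBranch(N=2, branches=branches, in_channels=16)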
Example 2: generic_load
# Required import: from torch import nn [as alias]
# Or: from torch.nn import __dict__ [as alias]
# This snippet also needs: import importlib, import torch,
# import torchvision.models as tmodels, and a local load_partial_state helper.
def generic_load(arch, pretrained, weights, args):
    if arch in tmodels.__dict__:  # torchvision models
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
            model = tmodels.__dict__[arch](pretrained=True)
            model = model.cuda()
        else:
            print("=> creating model '{}'".format(arch))
            model = tmodels.__dict__[arch]()
    else:  # defined as a script in this directory
        model = importlib.import_module('.' + arch, package='models')
        model = model.__dict__[arch](args)
    if weights != '':
        print('loading pretrained weights from {}'.format(weights))
        chkpoint = torch.load(weights)
        # Unwrap checkpoints saved as {'state_dict': ...} dictionaries.
        if isinstance(chkpoint, dict) and 'state_dict' in chkpoint:
            chkpoint = chkpoint['state_dict']
        load_partial_state(model, chkpoint)
    return model
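A possible call, assuming a CUDA-capable machine (the function moves torchvision models to the GPU) and an argparse-style args object; 'resnet18' is one of the names resolvable through tmodels.__dict__:

from argparse import Namespace

args = Namespace()  # hypothetical; only consulted for locally defined models
model = generic_load('resnet18', pretrained=True, weights='', args=args)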
Example 3: get_nn_name
# Required import: from torch import nn [as alias]
# Or: from torch.nn import __dict__ [as alias]
def get_nn_name(uncased_name):
    '''Helper to get the proper name in PyTorch nn given a case-insensitive name'''
    for nn_name in nn.__dict__:
        if uncased_name.lower() == nn_name.lower():
            return nn_name
    raise ValueError(f'Name {uncased_name} not found in {nn.__dict__}')
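For example, based on the lookup logic above (torch.nn exposes classes such as ReLU and BatchNorm2d):

get_nn_name('relu')         # -> 'ReLU'
get_nn_name('batchnorm2d')  # -> 'BatchNorm2d'
layer_cls = getattr(nn, get_nn_name('leakyrelu'))  # nn.LeakyReLU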
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import __dict__ [as alias]
# This snippet presumably also needs: import torch.nn.functional as func
def __init__(self, width, stride, input, kernels, kernels11,
             activation=func.leaky_relu,
             activation_1x1=func.leaky_relu,
             dim=2):
    super(Conv1x1, self).__init__()
    assert dim in (1, 2, 3)
    # Pick the convolution and batch-norm classes matching the dimensionality.
    self.conv_op = nn.__dict__[f"Conv{dim}d"]
    self.bn_op = nn.__dict__[f"BatchNorm{dim}d"]
    self.conv = self.conv_op(input, kernels, width, stride, 1)
    self.bn = self.bn_op(kernels)
    self.x11 = self.conv_op(kernels, kernels11, 1, 1)
    self.bn11 = self.bn_op(kernels11)
    self.activation = activation
    self.activation_1x1 = activation_1x1
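The forward pass is not part of this excerpt, but the constructor can be exercised as follows (argument values are illustrative):

block = Conv1x1(width=3, stride=1, input=16, kernels=32, kernels11=8, dim=2)
print(block.conv)  # Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
print(block.x11)   # Conv2d(32, 8, kernel_size=(1, 1), stride=(1, 1))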
Example 5: load_criterion
# Required import: from torch import nn [as alias]
# Or: from torch.nn import __dict__ [as alias]
# This snippet also needs: import importlib
def load_criterion(args):
    if hasattr(nn, args.loss):
        # Loss defined in torch.nn, e.g. args.loss == 'CrossEntropyLoss'.
        criterion = nn.__dict__[args.loss]().cuda()
    else:
        # Otherwise resolve a custom loss class from models/layers/<name>.py.
        criterion = importlib.import_module('models.layers.' + args.loss)
        criterion = criterion.__dict__[args.loss](args).cuda()
    return criterion
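A minimal sketch of how this might be called, assuming a CUDA device and that args carries the loss name (here a standard torch.nn loss):

from argparse import Namespace

args = Namespace(loss='CrossEntropyLoss')
criterion = load_criterion(args)  # equivalent to nn.CrossEntropyLoss().cuda()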
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import __dict__ [as alias]
# This snippet also needs: from typing import Dict
def __init__(
    self,
    num_features: int,
    activation: str = "LeakyReLU",
    batchnorm_params: Dict = None,
    activation_params: Dict = None,
    use_batchnorm: bool = True,
):
    """
    Args:
        num_features (int): number of feature channels
            in the input and output
        activation (str): name of the activation class in ``torch.nn``
            (e.g. ``'LeakyReLU'`` or ``'ELU'``), or ``'none'`` to skip it
        batchnorm_params (dict): additional ``nn.BatchNorm2d`` params
        activation_params (dict): additional params for the activation function
        use_batchnorm (bool): whether to prepend an ``nn.BatchNorm2d`` layer
    """
    super().__init__()
    batchnorm_params = batchnorm_params or {}
    activation_params = activation_params or {}

    layers = []
    if use_batchnorm:
        layers.append(
            nn.BatchNorm2d(num_features=num_features, **batchnorm_params)
        )
    if activation is not None and activation.lower() != "none":
        # Look the activation class up by its torch.nn name, e.g. nn.LeakyReLU.
        layers.append(
            nn.__dict__[activation](inplace=True, **activation_params)
        )
    self.net = nn.Sequential(*layers)
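Assuming this __init__ belongs to an nn.Module subclass (called EncoderBlock here purely for illustration), construction might look like:

# Hypothetical class name; activation must be a torch.nn class name.
block = EncoderBlock(
    num_features=64,
    activation="LeakyReLU",
    activation_params={"negative_slope": 0.1},
)
# block.net is nn.Sequential(nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))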