

Python nn.Module Code Examples

This article collects typical usage examples of the torch.nn.Module class in Python. If you are wondering how to use nn.Module, what it is for, or what working code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the torch.nn package that nn.Module belongs to.


A total of 15 nn.Module code examples are shown below, sorted by popularity by default.
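
Before the examples, here is a minimal, self-contained sketch of the basic nn.Module pattern they all build on (the class name and layer sizes below are arbitrary illustrations, not taken from any of the projects): register submodules in __init__, compose them in forward.

import torch
from torch import nn


class TinyNet(nn.Module):
    """A minimal nn.Module: register submodules in __init__, compose them in forward."""

    def __init__(self, in_features=16, hidden=32, out_features=4):
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden)
        self.fc2 = nn.Linear(hidden, out_features)

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))


net = TinyNet()
print(net(torch.randn(8, 16)).shape)   # torch.Size([8, 4])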

Example 1: build

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def build(cfg, registry, default_args=None):
    """Build a module.

    Args:
        cfg (dict, list[dict]): The config of modules; it is either a dict
            or a list of configs.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the module.
            Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return nn.Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args) 
Developer: open-mmlab, Project: mmdetection, Lines: 22, Source: builder.py
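
build_from_cfg and the Registry type come from mmcv and are not shown in this snippet. As a hedged illustration of the call pattern only, the sketch below substitutes a hypothetical plain-dict registry and a toy build_from_cfg, then calls the build() defined above:

import torch.nn as nn

# Hypothetical stand-ins, only to illustrate how build() is called;
# mmcv's real Registry/build_from_cfg are richer than this.
TOY_REGISTRY = {'Linear': nn.Linear, 'ReLU': nn.ReLU}

def build_from_cfg(cfg, registry, default_args=None):
    cfg = dict(cfg)
    module_cls = registry[cfg.pop('type')]
    cfg.update(default_args or {})
    return module_cls(**cfg)

# A list of configs yields an nn.Sequential; a single dict yields one module.
head = build([dict(type='Linear', in_features=128, out_features=10),
              dict(type='ReLU')], TOY_REGISTRY)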

Example 2: train

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def train(self, mode=True):
    # Override train so that the training mode is set as we want
    nn.Module.train(self, mode)
    if mode:
      # Set fixed blocks to be in eval mode (not really doing anything)
      self.resnet.eval()
      if cfg.RESNET.FIXED_BLOCKS <= 3:
        self.resnet.layer4.train()
      if cfg.RESNET.FIXED_BLOCKS <= 2:
        self.resnet.layer3.train()
      if cfg.RESNET.FIXED_BLOCKS <= 1:
        self.resnet.layer2.train()
      if cfg.RESNET.FIXED_BLOCKS == 0:
        self.resnet.layer1.train()

      # Set batchnorm always in eval mode during training
      def set_bn_eval(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
          m.eval()

      self.resnet.apply(set_bn_eval) 
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 24, Source: resnet_v1.py
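
The BatchNorm-freezing idiom used above can be reproduced on its own; a minimal sketch, assuming a plain torchvision ResNet-18 instead of the detector's cfg-driven backbone:

import torch.nn as nn
import torchvision

backbone = torchvision.models.resnet18()   # randomly initialized; enough to show the pattern

def set_bn_eval(m):
    # Freeze BatchNorm running statistics by keeping the layers in eval mode.
    if isinstance(m, nn.modules.batchnorm._BatchNorm):
        m.eval()

backbone.train()             # put everything in training mode...
backbone.apply(set_bn_eval)  # ...then switch only the BatchNorm layers back to eval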

Example 3: wrap_fp16_model

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def wrap_fp16_model(model):
    """Wrap the FP32 model to FP16.

    1. Convert FP32 model to FP16.
    2. Keep some necessary layers in FP32, e.g., normalization layers.

    Args:
        model (nn.Module): Model in FP32.
    """
    # convert model to fp16
    model.half()
    # patch the normalization layers so they run in fp32 mode
    patch_norm_fp32(model)
    # set `fp16_enabled` flag
    for m in model.modules():
        if hasattr(m, 'fp16_enabled'):
            m.fp16_enabled = True 
Developer: open-mmlab, Project: mmdetection, Lines: 19, Source: hooks.py
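
patch_norm_fp32 is referenced but not shown. A simplified sketch of the idea (not mmdetection's actual implementation, which additionally wraps each norm layer's forward to cast fp16 inputs to fp32 and the outputs back) converts normalization layers back to fp32 after model.half():

import torch.nn as nn

def patch_norm_fp32_sketch(module):
    # Convert normalization layers back to fp32 inside an otherwise fp16 model;
    # their running statistics and affine parameters stay in full precision.
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()
    for child in module.children():
        patch_norm_fp32_sketch(child)
    return module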

Example 4: num_flat_features

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

# class NNetM(nn.Module):
#
#     def __init__(self, n_in, n_out):
#         super(NNetM, self).__init__()
#
#         self.fc1 = nn.Linear(n_in, 120)
#         self.fc2 = nn.Linear(120, 84)
#         self.fc3 = nn.Linear(84, n_out[0]*n_out[1])
#
#     def forward(self, x):
#
#         x = F.relu(self.fc1(x))
#         x = F.relu(self.fc2(x))
#         x = self.fc3(x)
#         return x 
Developer: priba, Project: nmp_qc, Lines: 24, Source: nnet.py
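
num_flat_features is typically called inside forward as x.view(-1, self.num_flat_features(x)) to flatten a convolutional feature map before a fully connected layer. A standalone sketch of the same computation (the tensor shape is illustrative):

import torch

x = torch.randn(8, 16, 5, 5)        # e.g. a batch of 8 conv feature maps
num_features = 1
for s in x.size()[1:]:              # all dimensions except the batch dimension
    num_features *= s               # 16 * 5 * 5 = 400
flat = x.view(-1, num_features)     # shape [8, 400], ready for an nn.Linear(400, ...)
print(flat.shape)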

Example 5: evaluate_accuracy

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def evaluate_accuracy(data_iter, net,
                      device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval() # evaluation mode: turns off dropout
                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                net.train() # switch back to training mode
            else:
                # custom model implemented without nn.Module
                if 'is_training' in net.__code__.co_varnames:
                    acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
                else:
                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n 
Developer: wdxtub, Project: deep-learning-note, Lines: 19, Source: utils.py
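
A hedged usage sketch with random stand-in data (the 784-feature inputs and linear classifier below are illustrative, not from the original utils.py), assuming evaluate_accuracy above is in scope:

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Random stand-in data: 256 flat "images" with 10 class labels.
X = torch.randn(256, 784)
y = torch.randint(0, 10, (256,))
test_iter = DataLoader(TensorDataset(X, y), batch_size=64)

net = nn.Linear(784, 10).to(device)
print(evaluate_accuracy(test_iter, net, device))   # around 0.1 for an untrained 10-class model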

Example 6: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def __init__(self, base_model: torch.nn.Module, num_classes: int, weights_url: str = None):
        super().__init__()
        if not hasattr(self, 'decoder_block'):
            self.decoder_block = UnetDecoderBlock
        if not hasattr(self, 'bottleneck_type'):
            self.bottleneck_type = ConvBottleneck

        if weights_url is not None:
            print("Model weights inited by url")

            pretrained_weights = model_zoo.load_url(weights_url)
            model_state_dict = base_model.state_dict()
            pretrained_weights = {k: v for k, v in pretrained_weights.items() if k in model_state_dict}
            base_model.load_state_dict(pretrained_weights)

        filters = [64, 64, 128, 256, 512]

        self.bottlenecks = nn.ModuleList([self.bottleneck_type(f * 2, f) for f in reversed(filters[:-1])])
        self.decoder_stages = nn.ModuleList([self.get_decoder(filters, idx) for idx in range(1, len(filters))])

        self.encoder_stages = nn.ModuleList([self.get_encoder(base_model, idx) for idx in range(len(filters))])

        self.last_upsample = self.decoder_block(filters[0], filters[0])
        self.final = self.make_final_classifier(filters[0], num_classes) 
Developer: toodef, Project: neural-pipeline, Lines: 26, Source: albunet.py

Example 7: _reconstruct_inception

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def _reconstruct_inception(self, basemodel):
        model = nn.Module()
        model.features = nn.Sequential(basemodel.Conv2d_1a_3x3,
                                       basemodel.Conv2d_2a_3x3,
                                       basemodel.Conv2d_2b_3x3,
                                       nn.MaxPool2d(kernel_size=3, stride=2),
                                       basemodel.Conv2d_3b_1x1,
                                       basemodel.Conv2d_4a_3x3,
                                       nn.MaxPool2d(kernel_size=3, stride=2),
                                       basemodel.Mixed_5b,
                                       basemodel.Mixed_5c,
                                       basemodel.Mixed_5d,
                                       basemodel.Mixed_6a,
                                       basemodel.Mixed_6b,
                                       basemodel.Mixed_6c,
                                       basemodel.Mixed_6d,
                                       basemodel.Mixed_6e,
                                       basemodel.Mixed_7a,
                                       basemodel.Mixed_7b,
                                       basemodel.Mixed_7c)
        model.representation = nn.AdaptiveAvgPool2d((1, 1))
        model.classifier = basemodel.fc
        model.representation_dim = basemodel.fc.weight.size(1)
        return model 
Developer: jiangtaoxie, Project: fast-MPN-COV, Lines: 26, Source: base.py
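
This works because assigning a submodule as an attribute of a bare nn.Module registers it as a child. A minimal standalone sketch of the same container pattern (the layers below are arbitrary):

import torch
from torch import nn

container = nn.Module()                       # bare module used purely as a container
container.features = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
container.representation = nn.AdaptiveAvgPool2d((1, 1))
container.classifier = nn.Linear(8, 10)

x = torch.randn(2, 3, 32, 32)
h = container.representation(container.features(x)).flatten(1)   # shape [2, 8]
print(container.classifier(h).shape)                             # torch.Size([2, 10])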

Example 8: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def forward(self, conv_in):
        """ Module forward pass

        Args:
            conv_in (Variable): convolutional input, shaped [N x 4 x 84 x 84]

        Returns:
            pi (Variable): action probability logits, shaped [N x self.num_actions]
            v (Variable): value predictions, shaped [N x 1]
        """
        N = conv_in.size()[0]

        conv_out = self.conv(conv_in).view(N, 64 * 7 * 7)

        fc_out = self.fc(conv_out)

        pi_out = self.pi(fc_out)
        v_out = self.v(fc_out)

        return pi_out, v_out 
Developer: lnpalmer, Project: A2C, Lines: 22, Source: models.py

Example 9: features

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
        resnet101 = torchvision.models.resnet101(pretrained=self._pretrained)

        # list(resnet101.children()) consists of the following modules:
        #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU,
        #   [3] = MaxPool2d, [4] = Sequential(Bottleneck...),
        #   [5] = Sequential(Bottleneck...),
        #   [6] = Sequential(Bottleneck...),
        #   [7] = Sequential(Bottleneck...),
        #   [8] = AvgPool2d, [9] = Linear
        children = list(resnet101.children())
        features = children[:-3]
        num_features_out = 1024

        hidden = children[-3]
        num_hidden_out = 2048

        for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
            for parameter in parameters:
                parameter.requires_grad = False

        features = nn.Sequential(*features)

        return features, hidden, num_features_out, num_hidden_out 
Developer: potterhsu, Project: easy-faster-rcnn.pytorch, Lines: 26, Source: resnet101.py

Example 10: features

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
        resnet18 = torchvision.models.resnet18(pretrained=self._pretrained)

        # list(resnet18.children()) consists of the following modules:
        #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU,
        #   [3] = MaxPool2d, [4] = Sequential(BasicBlock...),
        #   [5] = Sequential(BasicBlock...),
        #   [6] = Sequential(BasicBlock...),
        #   [7] = Sequential(BasicBlock...),
        #   [8] = AvgPool2d, [9] = Linear
        children = list(resnet18.children())
        features = children[:-3]
        num_features_out = 256

        hidden = children[-3]
        num_hidden_out = 512

        for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
            for parameter in parameters:
                parameter.requires_grad = False

        features = nn.Sequential(*features)

        return features, hidden, num_features_out, num_hidden_out 
Developer: potterhsu, Project: easy-faster-rcnn.pytorch, Lines: 26, Source: resnet18.py

Example 11: features

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
        resnet50 = torchvision.models.resnet50(pretrained=self._pretrained)

        # list(resnet50.children()) consists of the following modules:
        #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU,
        #   [3] = MaxPool2d, [4] = Sequential(Bottleneck...),
        #   [5] = Sequential(Bottleneck...),
        #   [6] = Sequential(Bottleneck...),
        #   [7] = Sequential(Bottleneck...),
        #   [8] = AvgPool2d, [9] = Linear
        children = list(resnet50.children())
        features = children[:-3]
        num_features_out = 1024

        hidden = children[-3]
        num_hidden_out = 2048

        for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
            for parameter in parameters:
                parameter.requires_grad = False

        features = nn.Sequential(*features)

        return features, hidden, num_features_out, num_hidden_out 
Developer: potterhsu, Project: easy-faster-rcnn.pytorch, Lines: 26, Source: resnet50.py
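
The three variants above differ only in the backbone and the channel counts. A hedged standalone sketch using an untrained resnet18 (chosen only to keep the example light) shows what the returned pieces produce on a dummy image:

import torch
from torch import nn
import torchvision

resnet18 = torchvision.models.resnet18()    # randomly initialized, just for shapes
children = list(resnet18.children())
features = nn.Sequential(*children[:-3])    # conv1 ... layer3
hidden = children[-3]                       # layer4

x = torch.randn(1, 3, 224, 224)
fmap = features(x)
print(fmap.shape)                           # torch.Size([1, 256, 14, 14])
print(hidden(fmap).shape)                   # torch.Size([1, 512, 7, 7])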

Example 12: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def __init__(self, num_features, activation=nn.ReLU(inplace=True), **kwargs):
        """Creates an Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        activation : nn.Module
            Module used as an activation function.
        kwargs
            All other arguments are forwarded to the `BatchNorm2d` constructor.
        """
        super(ABN, self).__init__(OrderedDict([
            ("bn", nn.BatchNorm2d(num_features, **kwargs)),
            ("act", activation)
        ])) 
Developer: speedinghzl, Project: pytorch-segmentation-toolbox, Lines: 18, Source: bn.py
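
The super().__init__(OrderedDict(...)) call indicates a container base class that accepts named children, as nn.Sequential does. A hedged equivalent composed directly from plain PyTorch pieces (the channel count is illustrative):

from collections import OrderedDict
import torch
from torch import nn

# Equivalent composition: a BatchNorm2d followed by the activation, addressable
# by the same "bn"/"act" names used in the ABN constructor above.
abn = nn.Sequential(OrderedDict([
    ("bn", nn.BatchNorm2d(64)),
    ("act", nn.ReLU(inplace=True)),
]))
print(abn(torch.randn(2, 64, 32, 32)).shape)   # torch.Size([2, 64, 32, 32])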

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def __init__(
            self, model, bn_lambda, last_epoch=-1,
            setter=set_bn_momentum_default
    ):
        if not isinstance(model, nn.Module):
            raise RuntimeError(
                "Class '{}' is not a PyTorch nn Module".format(
                    type(model).__name__
                )
            )

        self.model = model
        self.setter = setter
        self.lmbd = bn_lambda

        self.step(last_epoch + 1)
        self.last_epoch = last_epoch 
Developer: zaiweizhang, Project: H3DNet, Lines: 19, Source: pytorch_utils.py
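
set_bn_momentum_default is referenced but not shown here. In PointNet++-style utilities it is commonly a closure that rewrites BatchNorm momentum, roughly as in this hedged sketch:

from torch import nn

def set_bn_momentum_default(bn_momentum):
    # Returns a callable suitable for model.apply(): it sets the given momentum
    # on every BatchNorm layer it visits.
    def fn(m):
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            m.momentum = bn_momentum
    return fn

The scheduler's step(epoch), which is not shown in the snippet above, would then typically call self.model.apply(self.setter(self.lmbd(epoch))).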

Example 14: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def __init__(self, input_size, query_size, value_size, head_num, dropout=0.0, concatenate=True, configurable=False,
                 use_dot=True):
        nn.Module.__init__(self)
        self.use_dot = use_dot
        if use_dot is True:
            self.query_heads = nn.Linear(input_size, head_num * query_size, bias=True)
        else:
            self.query_heads = nn.Linear(query_size + input_size, head_num, bias=False)
        self.head_num = head_num
        self.concatenate = concatenate
        self.input_size = input_size
        self.value_size = value_size
        if concatenate:
            self.value_proj = nn.Linear(value_size, input_size)
        else:
            self.value_proj = nn.Linear(value_size, input_size * head_num)
        if configurable:
            self.param_divide(self.query_heads, with_query=True)
            self.param_divide(self.value_proj, with_query=True)
        if dropout > 0.0:
            self.attn_dropout = nn.Dropout(dropout)
        else:
            self.attn_dropout = None
        self.attn = None 
Developer: THUDM, Project: ScenarioMeta, Lines: 26, Source: modules.py

Example 15: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Module [as alias]
def __init__(self, hidden_size, layer_norm=False, input_gate=True, forget_gate=True):
            nn.Module.__init__(self)
            self.hidden_size = hidden_size
            # gradient(2), param(2), loss
            self.lstm = nn.LSTMCell(input_size=5, hidden_size=hidden_size)
            if layer_norm:
                self.layer_norm = nn.LayerNorm(hidden_size)
            else:
                self.layer_norm = None
            self.input_gate = input_gate
            self.forget_gate = forget_gate
            if self.input_gate:
                self.lr_layer = nn.Linear(hidden_size, 1)
                self.lrs = []
            else:
                self.output_layer = nn.Linear(hidden_size, 1)
                self.dets = []
            if forget_gate:
                self.fg_layer = nn.Linear(hidden_size, 1)
                self.fgs = []
            self.h_0 = nn.Parameter(torch.randn((hidden_size,), requires_grad=True))
            self.c_0 = nn.Parameter(torch.randn((hidden_size,), requires_grad=True)) 
Developer: THUDM, Project: ScenarioMeta, Lines: 24, Source: meta.py


Note: The torch.nn.Module examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; please refer to each project's license before distributing or using the code. Do not reproduce this article without permission.