

Python nn.Identity Method Code Examples

This article collects typical usage examples of the torch.nn.Identity method in Python. If you are unsure what nn.Identity is for or how to use it, the curated code examples below should help. You can also explore further usage examples from the torch.nn module.


The following presents 15 code examples of the nn.Identity method, sorted by popularity by default.
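
As a quick orientation before the examples: nn.Identity is a placeholder module that returns its input unchanged and ignores any constructor arguments, which makes it handy for swapping out layers without touching the surrounding code. The tiny model below is purely illustrative, a minimal sketch rather than code from any of the projects listed here.

import torch
import torch.nn as nn

# nn.Identity ignores any constructor arguments and returns its input unchanged.
identity = nn.Identity()
x = torch.randn(2, 8)
assert torch.equal(identity(x), x)

# Common pattern: replace a layer with Identity to turn a classifier into a feature extractor.
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
model[2] = nn.Identity()   # drop the final projection, keep the 16-dim features
features = model(x)        # shape: (2, 16)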

Example 1: fuse_module

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def fuse_module(m):
    last_conv = None
    last_conv_name = None

    for name, child in m.named_children():
        if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
            if last_conv is None:  # only fuse BN that is after Conv
                continue
            fused_conv = fuse_conv_bn(last_conv, child)
            m._modules[last_conv_name] = fused_conv
            # To reduce changes, set BN as Identity instead of deleting it.
            m._modules[name] = nn.Identity()
            last_conv = None
        elif isinstance(child, nn.Conv2d):
            last_conv = child
            last_conv_name = name
        else:
            fuse_module(child)
    return m 
Developer: open-mmlab, Project: mmdetection, Lines of code: 21, Source file: fuse_conv_bn.py
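
Example 1 calls fuse_conv_bn, which is not shown in the snippet. For context only, a typical Conv+BN fusion folds the BatchNorm affine transform and running statistics into the convolution's weight and bias; the helper below is a generic sketch of that technique and may differ from mmdetection's actual implementation.

import torch
import torch.nn as nn

@torch.no_grad()
def fuse_conv_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    """Fold an affine BN's transform into the preceding conv (inference only)."""
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                      stride=conv.stride, padding=conv.padding,
                      dilation=conv.dilation, groups=conv.groups, bias=True)
    # BN computes y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta.
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused.weight.copy_(conv.weight * scale.reshape(-1, 1, 1, 1))
    conv_bias = conv.bias if conv.bias is not None else torch.zeros_like(bn.running_mean)
    fused.bias.copy_((conv_bias - bn.running_mean) * scale + bn.bias)
    # padding_mode and device/dtype handling omitted for brevity.
    return fused

Since the fused convolution reproduces the BN output in eval mode, replacing the original BN with nn.Identity (as Example 1 does) preserves the module structure while skipping the redundant normalization pass at inference time.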

Example 2: mlp

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def mlp(sizes, activation, output_activation=nn.Identity):
    """
    Build a multi-layer perceptron in PyTorch.

    Args:
        sizes: Tuple, list, or other iterable giving the number of units
            for each layer of the MLP. 

        activation: Activation function for all layers except last.

        output_activation: Activation function for last layer.

    Returns:
        A PyTorch module that can be called to give the output of the MLP.
        (Use an nn.Sequential module.)

    """
    #######################
    #                     #
    #   YOUR CODE HERE    #
    #                     #
    #######################
    pass 
Developer: openai, Project: spinningup, Lines of code: 25, Source file: exercise1_2.py
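
Example 2 is an exercise stub from Spinning Up, so its body is intentionally left blank. A possible solution, mirroring the mlp helpers shown later in Examples 14 and 15, might look like the sketch below (not the official answer):

import torch.nn as nn

def mlp(sizes, activation, output_activation=nn.Identity):
    # Chain Linear layers; use `activation` between hidden layers and
    # `output_activation` (Identity by default) after the last one.
    layers = []
    for j in range(len(sizes) - 1):
        act = activation if j < len(sizes) - 2 else output_activation
        layers += [nn.Linear(sizes[j], sizes[j + 1]), act()]
    return nn.Sequential(*layers)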

Example 3: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def __init__(self, in_size, out_size, hidden_size=128, depth=2,
               input_kwargs=None, internal_kwargs=None):
    super().__init__()
    # Default the kwargs dicts so the **-unpacking below does not fail when they are None.
    input_kwargs = input_kwargs or {}
    internal_kwargs = internal_kwargs or {}
    self.depth = depth
    self.input_blocks = nn.ModuleList([
      self.make_block(in_size, hidden_size, **input_kwargs)
      for idx in range(depth)
    ])
    self.internal_blocks = nn.ModuleList([
      nn.Identity()
    ] + [
      self.make_block(hidden_size, hidden_size, **internal_kwargs)
      for idx in range(depth - 1)
    ])
    self.internal_constants = nn.ParameterList([
      self.make_constant(hidden_size)
      for idx in range(depth)
    ])

    self.output_block = self.make_block(hidden_size, out_size, **internal_kwargs)
    self.output_constant = self.make_constant(out_size) 
Developer: mjendrusch, Project: torchsupport, Lines of code: 23, Source file: polynomial.py

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def __init__(
        self,
        name: str = "resnet50",
        visual_feature_size: int = 2048,
        pretrained: bool = False,
        frozen: bool = False,
    ):
        super().__init__(visual_feature_size)

        self.cnn = getattr(torchvision.models, name)(
            pretrained, zero_init_residual=True
        )
        # Do nothing after the final residual stage.
        self.cnn.fc = nn.Identity()

        # Freeze all weights if specified.
        if frozen:
            for param in self.cnn.parameters():
                param.requires_grad = False
            self.cnn.eval()

        # Keep a list of intermediate layer names.
        self._stage_names = [f"layer{i}" for i in range(1, 5)] 
Developer: kdexd, Project: virtex, Lines of code: 25, Source file: visual_backbones.py
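
The pattern in Example 4, assigning nn.Identity to the torchvision classifier head, turns the network into a pooled feature extractor. A stand-alone sketch of the same idea, independent of the virtex wrapper class and using made-up input shapes, could look like this:

import torch
import torch.nn as nn
import torchvision

# Load a ResNet-50 and drop its classification head.
cnn = torchvision.models.resnet50(pretrained=False)
cnn.fc = nn.Identity()   # forward now returns the 2048-dim pooled features
cnn.eval()

with torch.no_grad():
    images = torch.randn(2, 3, 224, 224)
    features = cnn(images)   # shape: (2, 2048)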

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def __init__(self, name, **params):

        super().__init__()

        if name is None or name == 'identity':
            self.activation = nn.Identity(**params)
        elif name == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif name == 'softmax2d':
            self.activation = nn.Softmax(dim=1, **params)
        elif name == 'softmax':
            self.activation = nn.Softmax(**params)
        elif name == 'logsoftmax':
            self.activation = nn.LogSoftmax(**params)
        elif name == 'argmax':
            self.activation = ArgMax(**params)
        elif name == 'argmax2d':
            self.activation = ArgMax(dim=1, **params)
        elif callable(name):
            self.activation = name(**params)
        else:
            raise ValueError('Activation should be callable/sigmoid/softmax/logsoftmax/None; got {}'.format(name)) 
Developer: qubvel, Project: segmentation_models.pytorch, Lines of code: 24, Source file: modules.py

Example 6: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def forward(self, x):
        stages = self.get_stages()

        block_number = 0.
        drop_connect_rate = self._global_params.drop_connect_rate

        features = []
        for i in range(self._depth + 1):

            # Identity and Sequential stages
            if i < 2:
                x = stages[i](x)

            # Block stages need drop_connect rate
            else:
                for module in stages[i]:
                    drop_connect = drop_connect_rate * block_number / len(self._blocks)
                    block_number += 1.
                    x = module(x, drop_connect)

            features.append(x)

        return features 
Developer: qubvel, Project: segmentation_models.pytorch, Lines of code: 25, Source file: efficientnet.py

Example 7: body

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def body(cls, inputs, **kwargs):
        kwargs = cls.get_defaults('body', kwargs)
        encoder = kwargs.pop('encoder')
        embedding = kwargs.pop('embedding')
        decoder = kwargs.pop('decoder')

        layers = []
        encoder = cls.encoder(inputs=inputs, **{**kwargs, **encoder})
        encoder_outputs = encoder(inputs)
        layers.append(('encoder', encoder))

        if embedding is not None:
            embedding = cls.embedding(inputs=encoder_outputs, **{**kwargs, **embedding})
        else:
            embedding = nn.Identity()
        encoder_outputs = embedding(encoder_outputs)
        layers.append(('embedding', embedding))

        decoder = cls.decoder(inputs=encoder_outputs, **{**kwargs, **decoder})
        layers.append(('decoder', decoder))

        return nn.Sequential(OrderedDict(layers)) 
Developer: analysiscenter, Project: batchflow, Lines of code: 24, Source file: encoder_decoder.py

Example 8: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def __init__(self, in_channels, out_channels, expansion=1, kernel_size=3,
                 stride=1, padding=1, se_ratio=0.25, hard_act=False):
        expanded = in_channels * expansion
        super(MBConv, self).__init__()
        self.add_res = stride == 1 and in_channels == out_channels
        self.block = nn.Sequential(
            ConvBNAct(in_channels, expanded, 1,
                      hard_act=hard_act) if expanded != in_channels else nn.Identity(),
            ConvBNAct(expanded, expanded, kernel_size,
                      stride=stride, padding=padding, groups=expanded, hard_act=hard_act),
            SESwishBlock(expanded, expanded, int(in_channels*se_ratio),
                         hard_act=hard_act) if se_ratio > 0 else nn.Identity(),
            nn.Conv2d(expanded, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels)
        )
        self.drop_prob = 0 
Developer: eladhoffer, Project: convNet.pytorch, Lines of code: 18, Source file: efficientnet.py

Example 9: replace_logits

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def replace_logits(self, num_classes):
        self.cnn.fc = nn.Identity() # delete the fully connected layer
        self.logits = Unit3D(in_channels=self.logits_in_channels, output_channels=num_classes,
                             kernel_shape=[1, 1, 1],
                             padding=0,
                             activation_fn=None,
                             use_batch_norm=False,
                             use_bias=True,
                             name='logits') 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 11, Source file: pytorch_cnn_tc.py

Example 10: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def __init__(self, embedding_size, activation_function='relu'):
    super().__init__()
    self.act_fn = getattr(F, activation_function)
    self.embedding_size = embedding_size
    self.conv1 = nn.Conv2d(3, 32, 4, stride=2)
    self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
    self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
    self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
    self.fc = nn.Identity() if embedding_size == 1024 else nn.Linear(1024, embedding_size) 
Developer: Kaixhin, Project: PlaNet, Lines of code: 11, Source file: models.py

Example 11: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def forward(self, observation):
    hidden = self.act_fn(self.conv1(observation))
    hidden = self.act_fn(self.conv2(hidden))
    hidden = self.act_fn(self.conv3(hidden))
    hidden = self.act_fn(self.conv4(hidden))
    hidden = hidden.view(-1, 1024)
    hidden = self.fc(hidden)  # Identity if embedding size is 1024 else linear projection
    return hidden 
Developer: Kaixhin, Project: PlaNet, Lines of code: 10, Source file: models.py

Example 12: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def __init__(self, *args, **kwargs):
            super(Identity, self).__init__() 
Developer: plkmo, Project: BERT-Relation-Extraction, Lines of code: 4, Source file: modeling_utils.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def __init__(self, config):
        super().__init__()

        self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.summary = Identity()
        if hasattr(config, "summary_use_proj") and config.summary_use_proj:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, num_classes)

        self.activation = Identity()
        if hasattr(config, "summary_activation") and config.summary_activation == "tanh":
            self.activation = nn.Tanh()

        self.first_dropout = Identity()
        if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
            self.first_dropout = nn.Dropout(config.summary_first_dropout)

        self.last_dropout = Identity()
        if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
            self.last_dropout = nn.Dropout(config.summary_last_dropout) 
Developer: plkmo, Project: BERT-Relation-Extraction, Lines of code: 31, Source file: modeling_utils.py

Example 14: mlp

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def mlp(sizes, activation=nn.Tanh, output_activation=nn.Identity):
    # Build a feedforward neural network.
    layers = []
    for j in range(len(sizes)-1):
        act = activation if j < len(sizes)-2 else output_activation
        layers += [nn.Linear(sizes[j], sizes[j+1]), act()]
    return nn.Sequential(*layers) 
Developer: openai, Project: spinningup, Lines of code: 9, Source file: 1_simple_pg.py

Example 15: mlp

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Identity [as alias]
def mlp(sizes, activation, output_activation=nn.Identity):
    layers = []
    for j in range(len(sizes)-1):
        act = activation if j < len(sizes)-2 else output_activation
        layers += [nn.Linear(sizes[j], sizes[j+1]), act()]
    return nn.Sequential(*layers) 
Developer: openai, Project: spinningup, Lines of code: 8, Source file: core.py
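
To show how helpers like the mlp in Example 15 are typically called, here is a small usage sketch; the layer sizes are arbitrary and only for illustration:

import torch
import torch.nn as nn

# 4-dimensional observations, two hidden layers of 64 units, 2 output logits.
pi_net = mlp([4, 64, 64, 2], activation=nn.Tanh)

obs = torch.randn(1, 4)
logits = pi_net(obs)  # shape: (1, 2); the output layer uses nn.Identity, so no squashing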


Note: The torch.nn.Identity examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.