

Python nn.html Method Code Examples

This article collects and summarizes typical usage examples of the torch.nn.html method in Python. If you are wondering what the nn.html method does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore the other usage examples available for torch.nn.


The following presents 14 code examples of the nn.html method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.

Example 1: test

# Required import: from torch import nn [as alias]
# (this snippet also uses: import torch)
def test(net, test_loader):
    """Test the DNN"""
    net.eval()
    criterion = nn.BCELoss()  # https://pytorch.org/docs/stable/nn.html#bceloss
    test_loss = 0
    correct = 0

    with torch.no_grad():
        for data in test_loader:
            features = data['features']
            target = data['target']
            output = net(features)
            test_loss += criterion(output, target).item()  # sum up batch loss

            # Binarize the output at a 0.5 threshold. A comparison is used
            # rather than the in-place apply_(), which would overwrite `output`
            # before the loss was computed (and only works on CPU tensors).
            pred = (output >= 0.5).float()

            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    # len(dataset) counts samples correctly even when the last batch is partial
    total = len(test_loader.dataset)
    print('\nTest set:\n\tAverage loss: {:.4f}'.format(test_loss))
    print('\tAccuracy: {}/{} ({:.0f}%)\n'.format(
            correct, total, 100. * correct / total))
Developer ID: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 27, Source file: task.py
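
Because nn.BCELoss expects probabilities in [0, 1], the net passed to test() must end in a sigmoid, and each batch must be a dict with 'features' and 'target' keys. A minimal self-contained sketch of compatible inputs (ToyDataset and the network here are illustrative, not from the original repo):

import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset

class ToyDataset(Dataset):
    def __init__(self, n=64, d=10):
        self.x = torch.randn(n, d)
        # A separable toy label so the accuracy is meaningful
        self.y = (self.x.sum(dim=1, keepdim=True) > 0).float()

    def __len__(self):
        return len(self.x)

    def __getitem__(self, i):
        return {'features': self.x[i], 'target': self.y[i]}

net = nn.Sequential(nn.Linear(10, 1), nn.Sigmoid())  # sigmoid keeps outputs in [0, 1]
test(net, DataLoader(ToyDataset(), batch_size=8))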

Example 2: test

# Required import: from torch import nn [as alias]
# (this snippet also uses: import torch)
def test(net, test_loader):
    """Test the DNN and return its accuracy on the test set"""
    net.eval()
    criterion = nn.BCELoss()  # https://pytorch.org/docs/stable/nn.html#bceloss
    test_loss = 0
    correct = 0

    with torch.no_grad():
        for data in test_loader:
            features = data['features']
            target = data['target']
            output = net(features)
            test_loss += criterion(output, target).item()  # sum up batch loss
            # Binarize at a 0.5 threshold (see Example 1 for why apply_() is avoided)
            pred = (output >= 0.5).float()
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    total = len(test_loader.dataset)  # robust to a partial final batch
    accuracy = 100. * correct / total
    return accuracy
Developer ID: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 23, Source file: task.py

Example 3: to

# Required import: from torch import nn [as alias]
# (this snippet also uses: import torch; import torchbearer)
def to(self, *args, **kwargs):
        """ Moves and/or casts the parameters and buffers.

        Example: ::
            >>> from torchbearer import Trial
            >>> t = Trial(None).to('cuda:1')

        Args:
            args: See: `torch.nn.Module.to <https://pytorch.org/docs/stable/nn.html?highlight=#torch.nn.Module.to>`_
            kwargs: See: `torch.nn.Module.to <https://pytorch.org/docs/stable/nn.html?highlight=#torch.nn.Module.to>`_

        Returns:
            Trial: self
        """
        self.state[torchbearer.MODEL].to(*args, **kwargs)

        for state in self.state[torchbearer.OPTIMIZER].state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.to(*args, **kwargs)

        self.state = update_device_and_dtype(self.state, *args, **kwargs)

        return self 
Developer ID: pytorchbearer, Project: torchbearer, Lines of code: 26, Source file: trial.py

Example 4: state_dict

# Required import: from torch import nn [as alias]
# (this snippet also uses: import torchbearer)
def state_dict(self, **kwargs):
        """Get a dict containing the model and optimizer states, as well as the model history.

        Example: ::
            >>> from torchbearer import Trial
            >>> t = Trial(None)
            >>> state = t.state_dict() # State dict that can now be saved with torch.save

        Args:
            kwargs: See: `torch.nn.Module.state_dict <https://pytorch.org/docs/stable/nn.html?highlight=#torch.nn.Module.state_dict>`_

        Returns:
            dict: A dict containing parameters and persistent buffers.
        """
        state_dict = {
            torchbearer.VERSION: torchbearer.__version__.replace('.dev', ''),
            torchbearer.MODEL: self.state[torchbearer.MODEL].state_dict(**kwargs),
            torchbearer.OPTIMIZER: self.state[torchbearer.OPTIMIZER].state_dict(),
            torchbearer.HISTORY: self.state[torchbearer.HISTORY],
            torchbearer.CALLBACK_LIST: self.state[torchbearer.CALLBACK_LIST].state_dict()
        }
        return state_dict 
Developer ID: pytorchbearer, Project: torchbearer, Lines of code: 24, Source file: trial.py
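
Following the docstring's hint, the returned dict can be written straight to disk with torch.save (a sketch; the model, optimizer, and file name are illustrative):

import torch
from torch import nn
from torchbearer import Trial

model = nn.Linear(10, 1)
t = Trial(model, torch.optim.SGD(model.parameters(), lr=0.1), nn.MSELoss())
torch.save(t.state_dict(), 'trial.pt')  # model, optimizer, history, callbacks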

Example 5: loss_function

# Required import: from torch import nn [as alias]
# (this snippet also uses: import torch; import torch.nn.functional as F)
def loss_function(recon_x, x, mu, logvar):
    # Reconstruction term: the negated binary cross-entropy is the
    # log-likelihood of the data under the decoder; the commented-out line
    # is the deprecated pre-`reduction` spelling of the same call.
    # For binary_cross_entropy, see https://pytorch.org/docs/stable/nn.html
    BCE = -F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    # BCE = -F.binary_cross_entropy(recon_x, x.view(-1, 784), size_average=False)  # deprecated

    # KLD below is the *negative* Kullback-Leibler divergence between the
    # learned latent distribution N(mu, sigma^2) and the unit Gaussian --
    # how much one learned distribution deviates from the other:
    # -KL = 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    # See Appendix B of the VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014.
    # https://arxiv.org/abs/1312.6114
    KLD = 0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())

    # JVS: Kingma's repo = https://github.com/dpkingma/examples/blob/master/vae/main.py
    # BCE pushes the reconstruction to be as accurate as possible;
    # the KL term pushes the latent distribution toward the unit Gaussian.

    ELBO = BCE + KLD
    loss = -ELBO
    return loss
Developer ID: jgvfwstone, Project: ArtificialIntelligenceEngines, Lines of code: 25, Source file: main.py
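
For reference, the closed-form KL term used above is the KL divergence between a diagonal Gaussian and the unit Gaussian; in the code's notation (mu for \mu, logvar for \log\sigma^2):

    D_{KL}\left(\mathcal{N}(\mu, \sigma^2) \,\|\, \mathcal{N}(0, 1)\right)
        = \frac{1}{2} \sum_j \left( \mu_j^2 + \sigma_j^2 - 1 - \log\sigma_j^2 \right)

so the code's KLD variable equals -D_KL, and loss = -(BCE + KLD) is the negative ELBO, which gradient descent can minimize directly.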

Example 6: main

# Required import: from torch import nn [as alias]
# (this snippet also uses: from argparse import Namespace; from pytorch_lightning import Trainer; the GAN LightningModule defined in the same script)
def main(args: Namespace) -> None:
    # ------------------------
    # 1 INIT LIGHTNING MODEL
    # ------------------------
    model = GAN(**vars(args))

    # ------------------------
    # 2 INIT TRAINER
    # ------------------------
    # If using distributed training, PyTorch recommends DistributedDataParallel
    # over DataParallel. See: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel
    trainer = Trainer()

    # ------------------------
    # 3 START TRAINING
    # ------------------------
    trainer.fit(model) 
Developer ID: PyTorchLightning, Project: pytorch-lightning, Lines of code: 19, Source file: generative_adversarial_net.py
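
The comment above refers to plain PyTorch's DistributedDataParallel; outside Lightning, wrapping a model looks roughly like this (a sketch only; it assumes RANK / WORLD_SIZE / MASTER_ADDR / MASTER_PORT were set by a launcher such as python -m torch.distributed.launch, and uses a stand-in module rather than the GAN above):

import torch
import torch.distributed as dist
from torch import nn
from torch.nn.parallel import DistributedDataParallel as DDP

dist.init_process_group(backend='nccl')          # reads the env variables
local_rank = dist.get_rank() % torch.cuda.device_count()
torch.cuda.set_device(local_rank)

model = nn.Linear(10, 1).cuda(local_rank)        # stand-in for the GAN
model = DDP(model, device_ids=[local_rank])      # syncs gradients across processes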

Example 7: get_pooling_layer_hparams

# Required import: from torch import nn [as alias]
# (this snippet also uses: import copy; typing's Any/Dict/Union; texar.torch's HParams)
def get_pooling_layer_hparams(hparams: Union[HParams, Dict[str, Any]]) \
        -> Dict[str, Any]:
    r"""Creates a pooling-layer hyperparameters `dict` for :func:`get_layer`.

    If the :attr:`hparams` sets `'kernel_size'` to `None`, the layer is
    changed to the respective reduce-pooling layer. For example,
    :torch_docs:`torch.nn.MaxPool1d <nn.html#torch.nn.MaxPool1d>` is replaced
    with :class:`~texar.torch.core.MaxReducePool1d`.
    """
    if isinstance(hparams, HParams):
        hparams = hparams.todict()

    new_hparams = copy.copy(hparams)
    kwargs = new_hparams.get('kwargs', None)

    if kwargs and kwargs.get('kernel_size', None) is None:
        pool_type = hparams['type']
        new_hparams['type'] = _POOLING_TO_REDUCE.get(pool_type, pool_type)
        kwargs.pop('kernel_size', None)
        kwargs.pop('stride', None)
        kwargs.pop('padding', None)

    return new_hparams 
Developer ID: asyml, Project: texar-pytorch, Lines of code: 25, Source file: layers.py
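
A hedged usage sketch, inferred from the code above (it assumes the module-level _POOLING_TO_REDUCE mapping sends 'MaxPool1d' to 'MaxReducePool1d'):

hparams = {
    'type': 'MaxPool1d',
    'kwargs': {'kernel_size': None, 'stride': 1, 'padding': 0},
}
new_hparams = get_pooling_layer_hparams(hparams)
# kernel_size is None, so the type is swapped to the reduce-pooling layer
# and the now-meaningless kernel_size/stride/padding kwargs are dropped:
# {'type': 'MaxReducePool1d', 'kwargs': {}}
print(new_hparams)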

Example 8: __init__

# Required import: from torch import nn [as alias]
# (this snippet also uses: import torch)
def __init__(self, in_features, out_features, std_init=0.5):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init

        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        # register_buffer is typically used for state that should not be
        # considered a model parameter. For example, BatchNorm's running_mean
        # is not a parameter, but it is part of the persistent state.
        # Source: https://pytorch.org/docs/stable/nn.html#torch.nn.Module.register_buffer
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))

        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))

        self.reset_parameters()
        self.reset_noise()
Developer ID: TheMTank, Project: cups-rl, Lines of code: 22, Source file: model.py
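
A quick sketch of the parameter/buffer distinction described in the comment (illustrative, and assumes the full NoisyLinear class with its reset_parameters() and reset_noise() methods is defined): buffers are saved and moved with the module, but the optimizer never sees them.

layer = NoisyLinear(4, 2)
param_names = {name for name, _ in layer.named_parameters()}
buffer_names = {name for name, _ in layer.named_buffers()}

assert 'weight_mu' in param_names               # trained by the optimizer
assert 'weight_epsilon' in buffer_names         # persistent state, not trained
assert 'weight_epsilon' in layer.state_dict()   # still saved and restored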

Example 9: cross_entropy2d

# Required import: from torch import nn [as alias]
# (this snippet also uses: import numpy as np; import torch)
def cross_entropy2d(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True):
    """
    logit is the network output, shape (batchsize, 21, 512, 512); values are raw scores (not normalized).
    target is the ground truth, shape (batchsize, 1, 512, 512); background is 0, the other classes are 1-20, and 255 marks ignored pixels.
    Returns the loss averaged over h * w * batchsize.
    This loss is a per-pixel classification cross-entropy.
    ignore_index marks pixels in target to skip (objects outside the dataset's classes, neither background nor a labelled class; they appear white and contribute no loss).
    Note: CrossEntropyLoss applies softmax internally, so the network's last layer must not apply it again.
    https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss
    """
    n, c, h, w = logit.size()
    # logit = logit.permute(0, 2, 3, 1)
    target = target.squeeze(1)  # (batchsize, 1, 512, 512) -> (batchsize, 512, 512)
    # reduction='sum' replaces the deprecated size_average=False argument
    if weight is None:
        criterion = nn.CrossEntropyLoss(ignore_index=ignore_index, reduction='sum')
    else:
        criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(),
                                        ignore_index=ignore_index, reduction='sum')
    loss = criterion(logit, target.long())

    if size_average:
        loss /= (h * w)

    if batch_average:
        loss /= n

    return loss
Developer ID: songdejia, Project: DeepLab_v3_plus, Lines of code: 28, Source file: util.py
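
A small CPU smoke test of the function on random data (weight=None, so the .cuda() branch is not taken; shapes follow the docstring, shrunk to 64x64):

import torch

logit = torch.randn(2, 21, 64, 64)                      # raw, unnormalized scores
target = torch.randint(0, 21, (2, 1, 64, 64)).float()   # classes 0-20
target[0, 0, :8, :8] = 255                              # a patch of ignored pixels
loss = cross_entropy2d(logit, target)
print(loss.item())  # per-pixel, per-sample averaged cross-entropy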

Example 10: load_state_dict

# Required import: from torch import nn [as alias]
# (this snippet also uses: import warnings; import torch; import torchbearer)
def load_state_dict(self, state_dict, resume=True, **kwargs):
        """Resume this trial from the given state. Expects that this trial was constructed in the same way. Optionally,
        just load the model state when resume=False.

        Example: ::
            >>> from torchbearer import Trial
            >>> t = Trial(None)
            >>> state = torch.load('some_state.pt')
            >>> t.load_state_dict(state)

        Args:
            state_dict (dict): The state dict to reload
            resume (bool): If True, resume from the given state. Else, just load in the model weights.
            kwargs: See: `torch.nn.Module.load_state_dict <https://pytorch.org/docs/stable/nn.html?highlight=#torch.nn.Module.load_state_dict>`_

        Returns:
            Trial: self
        """
        if resume and torchbearer.MODEL in state_dict:  # torchbearer dict
            if torchbearer.VERSION in state_dict and state_dict[torchbearer.VERSION] != torchbearer.__version__.replace('.dev', ''):
                warnings.warn('This state dict was saved with a different torchbearer version, loading available keys. Consider setting resume=False')

            if torchbearer.MODEL in state_dict:
                self.state[torchbearer.MODEL].load_state_dict(state_dict[torchbearer.MODEL], **kwargs)

            if torchbearer.OPTIMIZER in state_dict:
                self.state[torchbearer.OPTIMIZER].load_state_dict(state_dict[torchbearer.OPTIMIZER])

            if torchbearer.HISTORY in state_dict:
                self.state[torchbearer.HISTORY] = state_dict[torchbearer.HISTORY]

            if torchbearer.CALLBACK_LIST in state_dict:
                self.state[torchbearer.CALLBACK_LIST].load_state_dict(state_dict[torchbearer.CALLBACK_LIST])
        elif torchbearer.MODEL in state_dict:
            self.state[torchbearer.MODEL].load_state_dict(state_dict[torchbearer.MODEL], **kwargs)
        else:  # something else
            warnings.warn('Not a torchbearer state dict, passing to model')
            self.state[torchbearer.MODEL].load_state_dict(state_dict, **kwargs)

        return self 
Developer ID: pytorchbearer, Project: torchbearer, Lines of code: 42, Source file: trial.py
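
Putting state_dict (Example 4) and load_state_dict together gives the usual save/resume round trip (a sketch; the file name is illustrative and the second Trial must be constructed the same way as the first):

import torch
from torch import nn
from torchbearer import Trial

model = nn.Linear(10, 1)
t = Trial(model, torch.optim.SGD(model.parameters(), lr=0.1), nn.MSELoss())
torch.save(t.state_dict(), 'trial.pt')

# ... later, resume from the saved state:
t2 = Trial(model, torch.optim.SGD(model.parameters(), lr=0.1), nn.MSELoss())
t2.load_state_dict(torch.load('trial.pt'))  # resume=True restores optimizer and history too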

Example 11: set_dataloader_mp_context

# Required import: from torch import nn [as alias]
def set_dataloader_mp_context(self, dataloader_mp_context: str):
        """Set the multiprocessing context used by the dataloader.

        The context can be either 'spawn', 'fork' or 'forkserver'. See
        https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
        for more details."""

        self.dataloader_mp_context = dataloader_mp_context
        return self 
Developer ID: facebookresearch, Project: ClassyVision, Lines of code: 11, Source file: classification_task.py
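
For comparison, torch's own DataLoader accepts the same setting directly through its multiprocessing_context argument (available since PyTorch 1.2; `dataset` is assumed to be any torch Dataset):

from torch.utils.data import DataLoader

# 'spawn' starts fresh worker processes instead of fork()ing the parent;
# start-up is slower but no parent state (e.g. a CUDA context) is inherited.
loader = DataLoader(dataset, batch_size=32, num_workers=4,
                    multiprocessing_context='spawn')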

Example 12: set_amp_args

# Required import: from torch import nn [as alias]
# (this snippet also uses: import logging; plus apex_available defined in the same module)
def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
        """Disable / enable apex.amp and set the automatic mixed precision parameters.

        apex.amp can be utilized for mixed / half precision training.

        Args:
            amp_args: Dictionary containing arguments to be passed to
            amp.initialize. Set to None to disable amp.  To enable mixed
            precision training, pass amp_args={"opt_level": "O1"} here.
            See https://nvidia.github.io/apex/amp.html for more info.

        Raises:
            RuntimeError: If opt_level is not None and apex is not installed.

        Warning: apex needs to be installed to utilize this feature.
        """
        self.amp_args = amp_args

        if amp_args is None:
            logging.info("AMP disabled")  # plain string; no f-string placeholders needed
        else:
            if not apex_available:
                raise RuntimeError("apex is not installed, cannot enable amp")

            logging.info(f"AMP enabled with args {amp_args}")
        return self 
Developer ID: facebookresearch, Project: ClassyVision, Lines of code: 28, Source file: classification_task.py
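
Downstream, the stored amp_args would typically reach apex through the standard amp.initialize pattern (a sketch of apex's documented usage, not ClassyVision's exact call site; model, optimizer and loss are assumed to exist):

from apex import amp

# amp_args={"opt_level": "O1"} enables mixed precision ("O0" is pure FP32)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()  # loss scaling avoids FP16 gradient underflow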

Example 13: init_distributed_data_parallel_model

# Required import: from torch import nn [as alias]
def init_distributed_data_parallel_model(self):
        """
        Initialize
        `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/
        docs/stable/nn.html#distributeddataparallel>`_.

        Needed for distributed training. This is where a model should be wrapped by DDP.
        """
        if not is_distributed_training_run():
            return
        assert (
            self.distributed_model is None
        ), "init_ddp_non_elastic must only be called once"

        broadcast_buffers = (
            self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS
        )
        self.distributed_model = init_distributed_data_parallel_model(
            self.base_model,
            broadcast_buffers=broadcast_buffers,
            find_unused_parameters=self.find_unused_parameters,
        )
        if isinstance(self.loss, ClassyLoss) and self.loss.has_learned_parameters():
            logging.info("Initializing distributed loss")
            self.loss = init_distributed_data_parallel_model(
                self.loss,
                broadcast_buffers=broadcast_buffers,
                find_unused_parameters=self.find_unused_parameters,
            ) 
Developer ID: facebookresearch, Project: ClassyVision, Lines of code: 31, Source file: classification_task.py

Example 14: get_heads

# Required import: from torch import nn [as alias]
def get_heads(self):
        """Returns the heads on the model.

        Returns the heads as a dictionary mapping block names to the
        `nn.Modules <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_
        attached to each block.
        """
        return {
            block_name: list(heads.values())
            for block_name, heads in self._heads.items()
        } 
Developer ID: facebookresearch, Project: ClassyVision, Lines of code: 14, Source file: classy_model.py


Note: The torch.nn.html method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. Consult each project's license before using or redistributing the code, and do not republish this article without permission.