

Python nn.NLLLoss Method Code Examples

This article collects typical usage examples of the Python method torch.nn.NLLLoss. If you are wondering what nn.NLLLoss does or how to use it, the curated code examples below should help. You can also explore further usage examples from its containing module, torch.nn.


The following presents 15 code examples of the nn.NLLLoss method, sorted by popularity by default.
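
Before diving into the examples, a minimal self-contained sketch of the basic nn.NLLLoss contract may help (all tensors here are made-up illustration data): the input must be log-probabilities, usually produced by log_softmax, and the target holds class indices. Combining log_softmax with NLLLoss is equivalent to applying CrossEntropyLoss to raw logits.

import torch
import torch.nn as nn
import torch.nn.functional as F

criterion = nn.NLLLoss()
logits = torch.randn(4, 10)               # a batch of 4 samples, 10 classes
log_probs = F.log_softmax(logits, dim=1)  # NLLLoss expects log-probabilities, not raw logits
targets = torch.tensor([1, 0, 4, 9])      # one class index per sample
loss = criterion(log_probs, targets)      # scalar: mean negative log-likelihood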

Example 1: trainIters

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def trainIters(encoder, decoder, epochs, dataset, init_epochs, learning_rate=0.01):
    plot_losses = []

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    criterion = nn.NLLLoss()

    for epoch in range(init_epochs, epochs + init_epochs):
        for i, (input_tensor, target_tensor) in enumerate(dataset.gen()):
            loss = train(input_tensor, target_tensor, encoder,
                         decoder, encoder_optimizer, decoder_optimizer, criterion)
            if loss:
                plot_losses.append(loss)
                if i % 1000 == 0:
                    print("epoch {}, step: {}, loss: {}".format(
                        epoch, i, loss
                    ))
            else:
                print(input_tensor, target_tensor)
        print("save model")
        torch.save(encoder.state_dict(), "epoch_{}_step_{}_encoder_loss_{}.pkl".format(epoch, i, loss))
        torch.save(decoder.state_dict(), "epoch_{}_step_{}_decoder_loss_{}.pkl".format(epoch, i, loss)) 
Author: EvilPsyCHo, Project: TaskBot, Lines: 24, Source: tutorial.py

Example 2: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def __init__(self, generator, tgt_vocab, normalization="sents",
                 label_smoothing=0.0):
        super(NMTLossCompute, self).__init__(generator, tgt_vocab)
        assert (label_smoothing >= 0.0 and label_smoothing <= 1.0)
        if label_smoothing > 0:
            # When label smoothing is turned on,
            # KL-divergence between q_{smoothed ground truth prob.}(w)
            # and p_{prob. computed by model}(w) is minimized.
            # If label smoothing value is set to zero, the loss
            # is equivalent to NLLLoss or CrossEntropyLoss.
            # All non-true labels are uniformly set to low-confidence.
            self.criterion = nn.KLDivLoss(size_average=False)
            one_hot = torch.randn(1, len(tgt_vocab))
            one_hot.fill_(label_smoothing / (len(tgt_vocab) - 2))
            one_hot[0][self.padding_idx] = 0
            self.register_buffer('one_hot', one_hot)
        else:
            weight = torch.ones(len(tgt_vocab))
            weight[self.padding_idx] = 0
            self.criterion = nn.NLLLoss(weight, size_average=False)
        self.confidence = 1.0 - label_smoothing 
Author: xiadingZ, Project: video-caption-openNMT.pytorch, Lines: 23, Source: Loss.py
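
To make the label-smoothing branch above concrete, here is a hedged sketch of how such a smoothed target distribution is typically expanded per token and fed to KLDivLoss; the vocabulary size, padding index, and smoothing value below are made-up, and size_average=False corresponds to reduction='sum' in current PyTorch. The divisor len(tgt_vocab) - 2 excludes the gold token and the padding token from the smoothing mass.

import torch
import torch.nn as nn
import torch.nn.functional as F

V, padding_idx, smoothing = 10, 0, 0.1                # hypothetical values
one_hot = torch.full((1, V), smoothing / (V - 2))     # uniform low confidence on non-true labels
one_hot[0][padding_idx] = 0                           # never assign mass to padding

log_probs = F.log_softmax(torch.randn(3, V), dim=1)   # model log-probabilities for 3 tokens
target = torch.tensor([2, 5, 7])                      # gold token indices
smoothed = one_hot.repeat(target.size(0), 1)          # one smoothed row per token
smoothed.scatter_(1, target.unsqueeze(1), 1.0 - smoothing)  # high confidence on the gold token
loss = nn.KLDivLoss(reduction='sum')(log_probs, smoothed)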

Example 3: train

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def train(config, argv, name, ROOT_DIR, model, dataset):
    _batchop = partial(batchop, VOCAB=dataset.input_vocab, LABELS=dataset.output_vocab)
    predictor_feed = DataFeed(name, dataset.testset, batchop=_batchop, batch_size=1)
    train_feed     = DataFeed(name, portion(dataset.trainset, config.HPCONFIG.trainset_size),
                              batchop=_batchop, batch_size=config.CONFIG.batch_size)
    
    predictor = Predictor(name,
                          model=model,
                          directory=ROOT_DIR,
                          feed=predictor_feed,
                          repr_function=partial(repr_function
                                                , VOCAB=dataset.input_vocab
                                                , LABELS=dataset.output_vocab
                                                , dataset=dataset.testset_dict))

    loss_ = partial(loss, loss_function=nn.NLLLoss()) 
Author: vanangamudi, Project: tamil-lm2, Lines: 18, Source: utilz.py
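
The last line curries the loss function with functools.partial. The loss helper itself is not shown on this page; a hypothetical signature consistent with that call (not taken from the source repo) might be:

from functools import partial
import torch.nn as nn

def loss(output, target, loss_function):  # hypothetical signature for illustration
    return loss_function(output, target)

loss_ = partial(loss, loss_function=nn.NLLLoss())  # loss_(output, target) now applies NLLLoss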

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def __init__(self, vocabulary, tokenizer, network_params=None, max_sequence_length=256, no_cuda=False,
                 mode="train"):
        """
        Implements an RNN.
        :param vocabulary: Vocabulary to use.
        :param tokenizer: Tokenizer to use.
        :param network_params: Network params to initialize the RNN.
        :param max_sequence_length: Sequences longer than this value will not be processed.
        :param no_cuda: The model is explicitly initialized as not using cuda, even if cuda is available.
        :param mode: Training or eval mode.
        """
        self.vocabulary = vocabulary
        self.tokenizer = tokenizer
        self.max_sequence_length = max_sequence_length

        if not isinstance(network_params, dict):
            network_params = {}

        self.network = RNN(**network_params)
        if torch.cuda.is_available() and not no_cuda:
            self.network.cuda()

        self.nll_loss = tnn.NLLLoss(reduction="none", ignore_index=0)

        self.set_mode(mode) 
Author: undeadpixel, Project: reinvent-randomized, Lines: 27, Source: model.py
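
Here reduction="none" keeps one loss value per token instead of averaging, and ignore_index=0 zeroes out padding positions; generative models like this one typically sum the per-token values into a per-sequence negative log-likelihood. A minimal sketch with made-up shapes:

import torch
import torch.nn as nn

nll_loss = nn.NLLLoss(reduction="none", ignore_index=0)
log_probs = torch.randn(2, 5, 30).log_softmax(dim=2)      # (batch, seq_len, vocab)
targets = torch.randint(0, 30, (2, 5))                    # token indices; index 0 is padding
per_token = nll_loss(log_probs.transpose(1, 2), targets)  # (batch, seq_len); pads contribute 0
per_sequence = per_token.sum(dim=1)                       # one NLL per sequence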

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def __init__(self, generator, tgt_vocab, label_smoothing=0.0):
        super(NMTLossCompute, self).__init__(generator, tgt_vocab)
        assert (label_smoothing >= 0.0 and label_smoothing <= 1.0)

        self.tgt_vocab_len = len(tgt_vocab)

        if label_smoothing > 0:
            # When label smoothing is turned on,
            # KL-divergence between q_{smoothed ground truth prob.}(w)
            # and p_{prob. computed by model}(w) is minimized.
            # If label smoothing value is set to zero, the loss
            # is equivalent to NLLLoss or CrossEntropyLoss.
            # All non-true labels are uniformly set to low-confidence.
            self.criterion = nn.KLDivLoss(size_average=False)
            one_hot = torch.randn(1, len(tgt_vocab))
            one_hot.fill_(label_smoothing / (len(tgt_vocab) - 2))
            one_hot[0][self.padding_idx] = 0
            self.register_buffer('one_hot', one_hot)
        else:
            weight = torch.ones(len(tgt_vocab))
            weight[self.padding_idx] = 0
            self.criterion = nn.NLLLoss(weight, size_average=False)  # IMPORTANT: NLLLoss is what we use. Interesting that size_average=False
            # ipdb.set_trace()
        self.confidence = 1.0 - label_smoothing 
Author: matthewmackay, Project: reversible-rnn, Lines: 26, Source: Loss.py

Example 6: test_criterion_training_set_correctly

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def test_criterion_training_set_correctly(self, net_cls, module_cls, data):
        # check that criterion's training attribute is set correctly

        X, y = data[0][:50], data[1][:50]  # don't need all the data
        side_effect = []

        class MyCriterion(nn.NLLLoss):
            """Criterion that records its training attribute"""
            def forward(self, *args, **kwargs):
                side_effect.append(self.training)
                return super().forward(*args, **kwargs)

        net = net_cls(module_cls, criterion=MyCriterion, max_epochs=1)
        net.fit(X, y)

        # called once with training=True for train step, once with
        # training=False for validation step
        assert side_effect == [True, False]

        net.partial_fit(X, y)
        # same logic as before
        assert side_effect == [True, False, True, False] 
Author: skorch-dev, Project: skorch, Lines: 24, Source: test_net.py

Example 7: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def __init__(self, parameter):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = parameter['hidden_dim']

        self.word_embeddings = nn.Embedding(parameter['vocab_size'],
                                            parameter['embedding_dim'])

        self.embedding_dim = parameter['embedding_dim']

        # The LSTM takes word embeddings and capitalization embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(self.embedding_dim, parameter['hidden_dim'])

        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(parameter['hidden_dim'], parameter['tagset_size'])
        self.hidden = self.init_hidden()
        self.loss_function = nn.NLLLoss() 
Author: fangwater, Project: Medical-named-entity-recognition-for-ccks2017, Lines: 19, Source: LstmModel.py
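
Only __init__ is shown above; for nn.NLLLoss to apply, the forward pass must end in log-probabilities. A hypothetical forward method for such a tagger (a sketch in the style of the classic PyTorch LSTM tagger, not taken from the source repo) could look like:

import torch.nn.functional as F

def forward(self, sentence):
    # sentence: 1-D tensor of word indices (hypothetical shape convention)
    embeds = self.word_embeddings(sentence)
    lstm_out, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1), self.hidden)
    tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
    return F.log_softmax(tag_space, dim=1)  # log-probabilities, ready for nn.NLLLoss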

Example 8: train

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def train(output, input, ann, learning_rate=.005):
    # function for training the neural net
    criterion = nn.NLLLoss()
    ann.zero_grad()  # clear gradients accumulated from the previous step
    # predicting the output
    output_p = ann(input)  # input --> hidden_layer --> output
    loss = criterion(output_p, output)
    # comparing the predicted output with the actual output
    loss.backward()  # backpropagating to compute gradients with respect to the loss

    for p in ann.parameters():
        # manual SGD step: move each parameter along the negative gradient
        p.data.add_(p.grad.data, alpha=-learning_rate)
    return output, loss.item()  # returning the target tensor and the loss as a Python float

#n_iters=100000 
Author: RITct, Project: Rita, Lines: 18, Source: action_models.py

Example 9: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def __init__(self, generator, tgt_vocab, normalization="sents",
                 label_smoothing=0.0):
        super(NMTLossCompute, self).__init__(generator, tgt_vocab)
        assert (label_smoothing >= 0.0 and label_smoothing <= 1.0)

        if label_smoothing > 0:
            # When label smoothing is turned on,
            # KL-divergence between q_{smoothed ground truth prob.}(w)
            # and p_{prob. computed by model}(w) is minimized.
            # If label smoothing value is set to zero, the loss
            # is equivalent to NLLLoss or CrossEntropyLoss.
            # All non-true labels are uniformly set to low-confidence.
            self.criterion = nn.KLDivLoss(size_average=False)
            one_hot = torch.randn(1, len(tgt_vocab))
            one_hot.fill_(label_smoothing / (len(tgt_vocab) - 2))
            one_hot[0][self.padding_idx] = 0
            self.register_buffer('one_hot', one_hot)
        else:
            weight = torch.ones(len(tgt_vocab))
            weight[self.padding_idx] = 0
            self.criterion = nn.NLLLoss(weight, size_average=False)
        self.confidence = 1.0 - label_smoothing 
Author: abaheti95, Project: DC-NeuralConversation, Lines: 24, Source: Loss.py

Example 10: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def __init__(self, net_enc, net_dec, labeldata, loss_scale=None):
        super(SegmentationModule, self).__init__()
        self.encoder = net_enc
        self.decoder = net_dec
        self.crit_dict = nn.ModuleDict()
        if loss_scale is None:
            self.loss_scale = {"object": 1, "part": 0.5, "scene": 0.25, "material": 1}
        else:
            self.loss_scale = loss_scale

        # criterion
        self.crit_dict["object"] = nn.NLLLoss(ignore_index=0)  # ignore background 0
        self.crit_dict["material"] = nn.NLLLoss(ignore_index=0)  # ignore background 0
        self.crit_dict["scene"] = nn.NLLLoss(ignore_index=-1)  # ignore unlabelled -1

        # Label data - read from json
        self.labeldata = labeldata
        object_to_num = {k: v for v, k in enumerate(labeldata['object'])}
        part_to_num = {k: v for v, k in enumerate(labeldata['part'])}
        self.object_part = {object_to_num[k]:
                [part_to_num[p] for p in v]
                for k, v in labeldata['object_part'].items()}
        self.object_with_part = sorted(self.object_part.keys())
        self.decoder.object_part = self.object_part
        self.decoder.object_with_part = self.object_with_part 
Author: CSAILVision, Project: gandissect, Lines: 27, Source: models.py
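
The three criteria above differ only in ignore_index, which drops the given target value from the loss: background (0) for object and material, unlabelled (-1) for scene. A small self-contained illustration with made-up numbers:

import torch
import torch.nn as nn

crit = nn.NLLLoss(ignore_index=0)
log_probs = torch.randn(4, 6).log_softmax(dim=1)  # 4 samples, 6 classes
target = torch.tensor([3, 0, 2, 0])               # 0 = background, excluded from the loss
loss = crit(log_probs, target)                    # mean over the two non-background entries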

Example 11: fit_batch

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def fit_batch(self, premise_batch, hypothesis_batch, y_batch):
        if not hasattr(self, 'criterion'):
            self.criterion = nn.NLLLoss()
        if not hasattr(self, 'optimizer'):
            self.optimizer = optim.Adam(self.parameters(), lr=self.options['LR'], betas=(0.9, 0.999), eps=1e-08, weight_decay=self.options['L2'])

        self.optimizer.zero_grad()
        preds = self.__call__(premise_batch, hypothesis_batch, training=True)
        loss = self.criterion(preds, y_batch)
        loss.backward()
        self.optimizer.step()

        _, pred_labels = torch.max(preds, dim=-1, keepdim=True)
        y_true = self._get_numpy_array_from_variable(y_batch)
        y_pred = self._get_numpy_array_from_variable(pred_labels)
        acc = accuracy_score(y_true, y_pred)

        ret_loss = self._get_numpy_array_from_variable(loss)[0]
        return ret_loss, acc 
Author: codedecde, Project: Recognizing-Textual-Entailment, Lines: 21, Source: mgru_rte_model.py

Example 12: fit_batch

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def fit_batch(self, premise_batch, hypothesis_batch, y_batch):
        if not hasattr(self, 'criterion'):
            self.criterion = nn.NLLLoss()
        if not hasattr(self, 'optimizer'):
            self.optimizer = optim.Adam(self.parameters(), lr=self.options['LR'], betas=(0.9, 0.999), eps=1e-08, weight_decay=self.options['L2'])

        self.optimizer.zero_grad()
        preds = self.__call__(premise_batch, hypothesis_batch, training=True)
        loss = self.criterion(preds, y_batch)
        loss.backward()
        self.optimizer.step()

        _, pred_labels = torch.max(preds, dim=-1, keepdim=True)
        y_true = self._get_numpy_array_from_variable(y_batch)
        y_pred = self._get_numpy_array_from_variable(pred_labels)
        acc = accuracy_score(y_true, y_pred)

        ret_loss = self._get_numpy_array_from_variable(loss)[0]
        return ret_loss, acc 
Author: codedecde, Project: Recognizing-Textual-Entailment, Lines: 21, Source: rte_model.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def __init__(self, embed_size, hidden_size, q_hidden_size, vocab_size, layer_num, dropout_rate,
                 z_length, alpha,
                 max_ts, beam_search=False, teacher_force=100, **kwargs):
        super().__init__()
        self.u_encoder = DynamicEncoder(vocab_size, embed_size, hidden_size, layer_num, dropout_rate)
        self.p_encoder = DynamicEncoder(vocab_size, embed_size, q_hidden_size, layer_num, dropout_rate)

        self.qz_decoder = TextSpanDecoder(embed_size, q_hidden_size, vocab_size, dropout_rate)  # posterior
        self.pz_decoder = TextSpanDecoder(embed_size, hidden_size, vocab_size, dropout_rate)  # prior

        self.m_decoder = ResponseDecoder(embed_size, hidden_size, vocab_size, dropout_rate)
        self.p_decoder = ResponseDecoder(embed_size, q_hidden_size, vocab_size, dropout_rate)

        self.embed_size = embed_size
        self.vocab = kwargs['vocab']

        self.pr_loss = nn.NLLLoss(ignore_index=0)
        self.q_loss = nn.NLLLoss(ignore_index=0)
        self.dec_loss = nn.NLLLoss(ignore_index=0)
        self.kl_loss = MultinomialKLDivergenceLoss()

        self.z_length = z_length
        self.alpha = alpha
        self.max_ts = max_ts
        self.beam_search = beam_search
        self.teacher_force = teacher_force

        if self.beam_search:
            self.beam_size = kwargs['beam_size']
            self.eos_token_idx = kwargs['eos_token_idx'] 
Author: AuCson, Project: SEDST, Lines: 32, Source: unsup_net.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def __init__(self, embed_size, hidden_size, vocab_size, degree_size, layer_num, dropout_rate, z_length, alpha,
                 max_ts, beam_search=False, teacher_force=100, **kwargs):
        super().__init__()
        self.u_encoder = DynamicEncoder(vocab_size, embed_size, hidden_size, layer_num, dropout_rate)
        self.m_encoder = DynamicEncoder(vocab_size, embed_size, hidden_size, layer_num, dropout_rate)
        self.m_decoder = ResponseDecoder(embed_size, hidden_size, vocab_size, degree_size, dropout_rate)
        self.qz_decoder = MultiTurnInferenceDecoder_Z(embed_size, hidden_size, vocab_size, dropout_rate)  # posterior
        self.pz_decoder = MultiTurnPriorDecoder_Z(embed_size, hidden_size, vocab_size, dropout_rate)  # prior

        self.embed_size = embed_size
        self.vocab = kwargs['vocab']

        self.pr_loss = nn.NLLLoss(ignore_index=0)
        self.q_loss = nn.NLLLoss(ignore_index=0)
        self.dec_loss = nn.NLLLoss(ignore_index=0)
        self.kl_loss = MultinomialKLDivergenceLoss(special_tokens=[self.vocab.encode(x) for x in ['EOS_Z1','EOS_Z2',
                                                                                                  '</s>', '<pad>']])

        self.z_length = z_length
        self.alpha = alpha
        self.max_ts = max_ts
        self.beam_search = beam_search
        self.teacher_force = teacher_force

        if self.beam_search:
            self.beam_size = kwargs['beam_size']
            self.eos_token_idx = kwargs['eos_token_idx'] 
Author: AuCson, Project: SEDST, Lines: 29, Source: semi_sup_net.py

Example 15: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss [as alias]
def __init__(self, embed_size, hidden_size, vocab_size, degree_size, layer_num, dropout_rate, z_length,
                 max_ts, beam_search=False, teacher_force=100, **kwargs):
        super().__init__()
        self.vocab = kwargs['vocab']

        self.emb = nn.Embedding(vocab_size, embed_size)
        self.dec_gru = nn.GRU(degree_size + embed_size + hidden_size * 2, hidden_size, dropout=dropout_rate)
        self.proj = nn.Linear(hidden_size * 3, vocab_size)
        self.u_encoder = SimpleDynamicEncoder(vocab_size, embed_size, hidden_size, layer_num, dropout_rate)
        self.z_decoder = BSpanDecoder(embed_size, hidden_size, vocab_size, dropout_rate)
        self.m_decoder = ResponseDecoder(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,
                                               self.dec_gru, self.proj, self.emb, self.vocab)
        self.embed_size = embed_size

        self.z_length = z_length
        self.max_ts = max_ts
        self.beam_search = beam_search
        self.teacher_force = teacher_force

        self.pr_loss = nn.NLLLoss(ignore_index=0)
        self.dec_loss = nn.NLLLoss(ignore_index=0)

        self.saved_log_policy = []

        if self.beam_search:
            self.beam_size = kwargs['beam_size']
            self.eos_token_idx = kwargs['eos_token_idx'] 
Author: ConvLab, Project: ConvLab, Lines: 29, Source: tsd_net.py


Note: The torch.nn.NLLLoss examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before using or redistributing the code. Do not reproduce without permission.