

Python nn.MultiLabelMarginLoss Code Examples

This article collects typical usage examples of the torch.nn.MultiLabelMarginLoss method in Python. If you are wondering what nn.MultiLabelMarginLoss does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore other usage examples from the torch.nn module.


The following presents 5 code examples of the nn.MultiLabelMarginLoss method, ordered by popularity.
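Before diving into the examples, here is a minimal sketch (mine, not from any of the projects below) of the input/target format this loss expects: the input is an (N, C) tensor of raw scores, and the target is an (N, C) long tensor of class indices padded with -1.

import torch
from torch import nn

loss_fn = nn.MultiLabelMarginLoss()

# Raw scores for C=4 classes, batch of N=2.
x = torch.tensor([[0.1, 0.2, 0.4, 0.8],
                  [0.5, 0.1, 0.3, 0.2]])
# Targets are class *indices* (dtype long), padded with -1; only the
# entries before the first -1 count as positive labels.
y = torch.tensor([[3, 0, -1, -1],    # sample 0: classes 3 and 0 are positive
                  [1, -1, -1, -1]])  # sample 1: only class 1 is positive
print(loss_fn(x, y))  # scalar; averaged over the batch by default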

Example 1: test_grad

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelMarginLoss [as alias]
import torch
import torch.nn.functional as f
from torch import nn, tensor

def test_grad():
    input = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    weight = torch.rand(3, 4, requires_grad=True)
    print(input, weight)
    pre = torch.mm(input, weight)  # (3, 4) raw class scores
    loss2 = nn.MultiLabelMarginLoss()
    # multilabel_soft_margin_loss takes a multi-hot float target
    # (broadcast here across the batch of 3)...
    label1 = tensor([[0, 1, 1, 0]], dtype=torch.float)
    # ...while MultiLabelMarginLoss takes long targets: per-sample lists of
    # positive class indices, padded with -1 (note that label2 below still
    # looks multi-hot; see the converter after this example).
    label2 = tensor([[0, 1, 1, 0], [1, 0, 0, 0], [1, 0, 1, 1]], dtype=torch.long)
    print(pre, label1)
    loss1 = f.multilabel_soft_margin_loss(pre, label1, reduction='sum')
    loss1.backward()
    print('weight.grad.data1:', weight.grad.data)

    # loss2 = loss2(pre, label2)
    # loss2.backward()
    # print('weight.grad.data2:', weight.grad.data)
Developer: MagicChuyi, Project: SlowFast-Network-pytorch, Lines of code: 21, Source: test_con.py
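The label2 tensor above is written as multi-hot {0, 1} rows, but MultiLabelMarginLoss interprets each row as a list of class indices terminated by -1. A hypothetical converter (multihot_to_indices is my name, not from the project) bridges the two formats:

import torch

def multihot_to_indices(multihot: torch.Tensor) -> torch.Tensor:
    # Convert (N, C) multi-hot {0,1} targets into the (N, C) -1-padded
    # index-list format that MultiLabelMarginLoss expects.
    n, c = multihot.shape
    out = torch.full((n, c), -1, dtype=torch.long)
    for row in range(n):
        idx = multihot[row].nonzero(as_tuple=True)[0]
        out[row, :idx.numel()] = idx
    return out

# e.g. [[0, 1, 1, 0]] (classes 1 and 2 positive) -> [[1, 2, -1, -1]]
print(multihot_to_indices(torch.tensor([[0, 1, 1, 0]])))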

Example 2: set_loss_margin

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelMarginLoss [as alias]
def set_loss_margin(self, scores, gold_mask, margin):
        """The PyTorch built-in MultiLabelMarginLoss fixes the margin at 1,
        so we work around this limitation by *shifting* the gold scores.
        E.g., to get a margin of 3, we decrease each gold score by 3 - 1 = 2
        before feeding the scores to the built-in loss.
        """
        new_scores = scores - (margin - 1) * gold_mask
        return new_scores 
Developer: hugochan, Project: BAMnet, Lines of code: 10, Source: bamnet.py
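A quick numeric check of this workaround, with made-up tensors (not from BAMnet): shifting the gold score down by margin - 1 makes the built-in margin-1 hinge behave like a margin-3 hinge.

import torch
from torch import nn

scores = torch.tensor([[2.0, 0.5, 1.0, 0.0]])
target = torch.tensor([[0, -1, -1, -1]])          # class 0 is the gold label
gold_mask = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # 1.0 at gold positions

margin = 3.0
shifted = scores - (margin - 1.0) * gold_mask     # the set_loss_margin trick
loss = nn.MultiLabelMarginLoss()(shifted, target)

# Hand-computed margin-3 hinge over the non-gold classes, normalized by C=4:
manual = sum(max(0.0, margin - (2.0 - xi)) for xi in [0.5, 1.0, 0.0]) / 4
print(loss.item(), manual)  # both print 1.125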

Example 3: eval_batch

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelMarginLoss [as alias]
def eval_batch(data_all, logit_all, in_train=False):
        """Run `net` over `data_all`/`logit_all` in chunks of size `bs` and
        concatenate the outputs; `grouper`, `bs`, and `net` come from the
        enclosing scope (a sketch of `grouper` follows this example)."""
        out_list = []
        for batch, logit in zip(grouper(data_all, bs), grouper(logit_all, bs)):
            batch = [b if isinstance(b, torch.Tensor) else torch.from_numpy(b) for b in batch if b is not None]
            logit = [b if isinstance(b, torch.Tensor) else torch.from_numpy(b) for b in logit if b is not None]
            out_batch = net(torch.stack(batch, dim=0).cuda(), torch.stack(logit, dim=0).cuda(), in_train)
            out_list.append(out_batch)
        out = torch.cat(out_list, dim=0)
        return out

    # loss_fn = MultiLabelMarginLoss()
    # loss_fn = FocalLoss()
    # loss_fn = BCELoss()
    # loss_fn = BCEWithLogitsLoss() 
Developer: ildoonet, Project: kaggle-human-protein-atlas-image-classification, Lines of code: 16, Source: ensemble_nn4.py
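eval_batch leans on a grouper helper that is not shown here. The `if b is not None` filters suggest chunks padded with None, which matches the classic itertools recipe; a sketch under that assumption (the project's actual helper may differ):

from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    # Collect data into fixed-length chunks, padding the last one:
    # grouper('ABCDEFG', 3) -> ABC DEF G(None)(None)
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)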

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelMarginLoss [as alias]
def __init__(self, opt):
        opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
        if opt['cuda']:
            print('[ Using CUDA ]')
            torch.cuda.set_device(opt['gpu'])
            # It enables benchmark mode in cudnn, which
            # leads to faster runtime when the input sizes do not vary.
            cudnn.benchmark = True

        self.opt = opt
        if self.opt['pre_word2vec']:
            pre_w2v = load_ndarray(self.opt['pre_word2vec'])
        else:
            pre_w2v = None

        self.ent_model = Entnet(opt['vocab_size'], opt['vocab_embed_size'], \
                opt['o_embed_size'], opt['hidden_size'], \
                opt['num_ent_types'], opt['num_relations'], \
                seq_enc_type=opt['seq_enc_type'], \
                word_emb_dropout=opt['word_emb_dropout'], \
                que_enc_dropout=opt['que_enc_dropout'], \
                ent_enc_dropout=opt['ent_enc_dropout'], \
                pre_w2v=pre_w2v, \
                num_hops=opt['num_ent_hops'], \
                att=opt['attention'], \
                use_cuda=opt['cuda'])
        if opt['cuda']:
            self.ent_model.cuda()

        self.loss_fn = MultiLabelMarginLoss()

        optim_params = [p for p in self.ent_model.parameters() if p.requires_grad]
        self.optimizers = {'entnet': optim.Adam(optim_params, lr=opt['learning_rate'])}
        self.scheduler = ReduceLROnPlateau(self.optimizers['entnet'], mode='min', \
                    patience=self.opt['valid_patience'] // 3, verbose=True)

        if opt.get('model_file') and os.path.isfile(opt['model_file']):
            print('Loading existing ent_model parameters from ' + opt['model_file'])
            self.load(opt['model_file'])
        else:
            self.save()
            self.load(opt['model_file'])
        super(EntnetAgent, self).__init__() 
Developer: hugochan, Project: BAMnet, Lines of code: 45, Source: entnet.py
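Examples 4 and 5 both wire MultiLabelMarginLoss to an Adam optimizer and a ReduceLROnPlateau scheduler. A self-contained toy version of that wiring (a plain nn.Linear stands in for Entnet/BAMnet; all tensors are made up):

import torch
from torch import nn, optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = nn.Linear(8, 4)                      # toy stand-in for the real network
loss_fn = nn.MultiLabelMarginLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=2)

x = torch.randn(16, 8)
y = torch.full((16, 4), -1, dtype=torch.long)
y[:, 0] = 2                                  # every sample: class 2 is positive

for epoch in range(5):
    optimizer.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    optimizer.step()
    scheduler.step(loss.item())              # ReduceLROnPlateau steps on a metric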

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelMarginLoss [as alias]
def __init__(self, opt, ctx_stops, vocab2id):
        self.ctx_stops = ctx_stops
        self.vocab2id = vocab2id
        opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
        if opt['cuda']:
            print('[ Using CUDA ]')
            torch.cuda.set_device(opt['gpu'])
            # It enables benchmark mode in cudnn, which
            # leads to faster runtime when the input sizes do not vary.
            cudnn.benchmark = True

        self.opt = opt
        if self.opt['pre_word2vec']:
            pre_w2v = load_ndarray(self.opt['pre_word2vec'])
        else:
            pre_w2v = None

        self.model = BAMnet(opt['vocab_size'], opt['vocab_embed_size'], \
                opt['o_embed_size'], opt['hidden_size'], \
                opt['num_ent_types'], opt['num_relations'], \
                opt['num_query_words'], \
                word_emb_dropout=opt['word_emb_dropout'], \
                que_enc_dropout=opt['que_enc_dropout'], \
                ans_enc_dropout=opt['ans_enc_dropout'], \
                pre_w2v=pre_w2v, \
                num_hops=opt['num_hops'], \
                att=opt['attention'], \
                use_cuda=opt['cuda'])
        if opt['cuda']:
            self.model.cuda()

        # MultiLabelMarginLoss
        # For each sample in the mini-batch:
        # loss(x, y) = sum_ij(max(0, 1 - (x[y[j]] - x[i]))) / x.size(0)
        self.loss_fn = MultiLabelMarginLoss()

        optim_params = [p for p in self.model.parameters() if p.requires_grad]
        self.optimizers = {'bamnet': optim.Adam(optim_params, lr=opt['learning_rate'])}
        self.scheduler = ReduceLROnPlateau(self.optimizers['bamnet'], mode='min', \
                    patience=self.opt['valid_patience'] // 3, verbose=True)

        if opt.get('model_file') and os.path.isfile(opt['model_file']):
            print('Loading existing model parameters from ' + opt['model_file'])
            self.load(opt['model_file'])
        super(BAMnetAgent, self).__init__() 
Developer: hugochan, Project: BAMnet, Lines of code: 47, Source: bamnet.py
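The per-sample formula quoted in the comment above can be checked directly against the module; a small verification with made-up scores:

import torch
from torch import nn

x = torch.tensor([[0.1, 0.2, 0.4, 0.8]])
y = torch.tensor([[3, 0, -1, -1]])  # gold classes: 3 and 0

builtin = nn.MultiLabelMarginLoss()(x, y)

# loss(x, y) = sum_ij(max(0, 1 - (x[y[j]] - x[i]))) / x.size(0),
# where j runs over the gold classes and i over the non-gold ones.
gold = {3, 0}
manual = sum(max(0.0, 1.0 - (x[0, j] - x[0, i]).item())
             for j in gold for i in range(4) if i not in gold) / 4
print(builtin.item(), manual)  # both print approximately 0.85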


Note: The torch.nn.MultiLabelMarginLoss examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. For distribution and use, please follow each project's license; do not reproduce without permission.