

Python functional.kl_div Method Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.kl_div. If you are wondering what functional.kl_div does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from its containing module, torch.nn.functional.


The sections below present 15 code examples of the functional.kl_div method, sorted by popularity by default.
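Before diving into the examples, it helps to recall the calling convention of F.kl_div. The minimal sketch below is illustrative only and is not taken from any project on this page: the input must be log-probabilities, the target must be probabilities (unless log_target=True is passed), and reduction='batchmean' is the option that matches the textbook definition of KL divergence.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 5)
target = torch.softmax(torch.randn(4, 5), dim=1)  # rows are probability distributions

log_probs = F.log_softmax(logits, dim=1)          # input must be log-probabilities
kl = F.kl_div(log_probs, target, reduction='batchmean')
print(kl.item())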

Example 1: cross_entropy

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  label_smooth=None):
    # element-wise losses
    if label_smooth is None:
        loss = F.cross_entropy(pred, label, reduction='none')
    else:
        num_classes = pred.size(1)
        target = F.one_hot(label, num_classes).type_as(pred)
        target = target.sub_(label_smooth).clamp_(0).add_(label_smooth / num_classes)
        loss = F.kl_div(pred.log_softmax(1), target, reduction='none').sum(1)

    # apply weights and do the reduction
    # (weight_reduce_loss is a helper from the surrounding project)
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)

    return loss 
Author: tascj, Project: kaggle-kuzushiji-recognition, Lines: 24, Source: cross_entropy_loss.py
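As a quick sanity check of the label-smoothing branch above, the standalone snippet below (a hedged sketch with toy tensors; the project's weight_reduce_loss helper is omitted) builds the smoothed target and evaluates the per-sample KL loss:

import torch
import torch.nn.functional as F

pred = torch.randn(4, 10, requires_grad=True)  # logits: 4 samples, 10 classes
label = torch.randint(0, 10, (4,))
label_smooth = 0.1

num_classes = pred.size(1)
target = F.one_hot(label, num_classes).type_as(pred)
target = target.sub_(label_smooth).clamp_(0).add_(label_smooth / num_classes)

loss = F.kl_div(pred.log_softmax(1), target, reduction='none').sum(1)
print(loss)             # one loss value per sample
loss.mean().backward()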

Example 2: forward

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch

    out = F.relu(self.conv1(x, edge_index))

    out, edge_index, _, batch, perm, score = self.pool1(
        out, edge_index, None, batch, attn=x)
    ratio = out.size(0) / x.size(0)  # fraction of nodes kept by the pooling layer

    out = F.relu(self.conv2(out, edge_index))
    out = global_add_pool(out, batch)
    out = self.lin(out).view(-1)

    # KL divergence between the predicted pooling scores and the
    # ground-truth attention of the kept nodes; epsilon avoids log(0)
    attn_loss = F.kl_div(torch.log(score + 1e-14), data.attn[perm],
                         reduction='none')
    attn_loss = scatter_mean(attn_loss, batch)  # average per graph

    return out, attn_loss, ratio
Author: rusty1s, Project: pytorch_geometric, Lines: 20, Source: colors_topk_pool.py
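The pattern above (per-node KL divergence between predicted pooling scores and reference attention, averaged per graph with scatter_mean) can be exercised in isolation. The sketch below uses toy tensors in place of the dataset's real fields and assumes torch_scatter is installed:

import torch
import torch.nn.functional as F
from torch_scatter import scatter_mean

score = torch.rand(6)                     # predicted node scores in [0, 1)
target_attn = torch.rand(6)               # reference attention per node
batch = torch.tensor([0, 0, 0, 1, 1, 1])  # which graph each node belongs to

attn_loss = F.kl_div(torch.log(score + 1e-14), target_attn, reduction='none')
attn_loss = scatter_mean(attn_loss, batch)  # one value per graph
print(attn_loss)                            # shape: [2]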

Example 3: forward

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch

    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, _, batch, perm, score = self.pool1(
        x, edge_index, None, batch)
    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, _, batch, perm, score = self.pool2(
        x, edge_index, None, batch)
    ratio = x.size(0) / data.x.size(0)  # fraction of nodes kept after both poolings

    x = F.relu(self.conv3(x, edge_index))
    x = global_max_pool(x, batch)
    x = self.lin(x).view(-1)

    # KL divergence between the scores of the second pooling layer and the
    # ground-truth attention of the surviving nodes; epsilon avoids log(0)
    attn_loss = F.kl_div(
        torch.log(score + 1e-14), data.attn[perm], reduction='none')
    attn_loss = scatter_mean(attn_loss, batch)

    return x, attn_loss, ratio
Author: rusty1s, Project: pytorch_geometric, Lines: 22, Source: triangles_sag_pool.py

Example 4: forward

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def forward(self, input, target):
    y_true = target.view(-1, 1)
    y_pred = torch.sigmoid(input).view(-1, 1)

    # clamp so that both log() calls below stay finite
    y_pred_clamped = torch.clamp(y_pred, 1e-7, 1.0)
    y_true_clamped = torch.clamp(y_true, 1e-4, 1.0)

    # symmetric loss: a forward KL term (cross-entropy-like) plus a
    # reverse KL term with prediction and target swapped
    loss_ce = F.kl_div(torch.log(y_pred_clamped), y_true)
    loss_rce = F.kl_div(torch.log(y_true_clamped), y_pred)
    return self.alpha * loss_ce + self.beta * loss_rce
Author: pudae, Project: kaggle-understanding-clouds, Lines: 21, Source: sce.py
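Both KL terms above can be reproduced in isolation. The toy snippet below is a hedged sketch (alpha and beta stand in for the module's attributes); note that F.kl_div defaults to reduction='mean', which averages over all elements and may emit a warning on recent PyTorch versions:

import torch
import torch.nn.functional as F

alpha, beta = 1.0, 1.0                     # placeholders for self.alpha / self.beta
logits = torch.randn(5, 1)
target = torch.randint(0, 2, (5, 1)).float()

y_pred = torch.sigmoid(logits)
loss_ce = F.kl_div(torch.log(y_pred.clamp(1e-7, 1.0)), target)   # forward term
loss_rce = F.kl_div(torch.log(target.clamp(1e-4, 1.0)), y_pred)  # reverse term
print((alpha * loss_ce + beta * loss_rce).item())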

Example 5: softmax_kl_loss

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def softmax_kl_loss(input_logits, target_logits):
    """Takes softmax on both sides and returns the element-wise KL divergence.

    Note:
    - With reduction='none' the result has the same shape as the inputs;
      reduce it (sum or mean) afterwards as needed.
    - Sends gradients to the inputs but not the targets.
    """
    assert input_logits.size() == target_logits.size()
    input_log_softmax = F.log_softmax(input_logits, dim=1)
    target_softmax = F.softmax(target_logits, dim=1)

    # return F.kl_div(input_log_softmax, target_softmax)
    kl_div = F.kl_div(input_log_softmax, target_softmax, reduction='none')
    # mean_kl_div = torch.mean(0.2*kl_div[:,0,...]+0.8*kl_div[:,1,...])
    return kl_div
Author: JunMa11, Project: SegWithDistMap, Lines: 18, Source: losses.py
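A small usage sketch for softmax_kl_loss as defined above, with random logits as stand-ins: because reduction='none' keeps the input shape, a scalar consistency loss still needs an explicit reduction afterwards.

import torch

student_logits = torch.randn(2, 3, 8, 8)
teacher_logits = torch.randn(2, 3, 8, 8)

kl = softmax_kl_loss(student_logits, teacher_logits)  # defined above
print(kl.shape)          # torch.Size([2, 3, 8, 8]), same shape as the inputs
print(kl.mean().item())  # reduce explicitly, e.g. mean or sum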

Example 6: warmup_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def warmup_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.warmup_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    for epoch in range(args.warmup_epochs):
        loss_record = AverageMeter()
        model.train()
        for batch_idx, (x, label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Warmup Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args)
    args.p_targets = target_distribution(probs) 
Author: k-han, Project: DTC, Lines: 19, Source: imagenet2cifar_DTC.py
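feat2prob and target_distribution are helpers from the DTC repository and are not shown on this page. A DEC-style stand-in that is consistent with how they are used here (soft cluster assignments from distances to the centers, followed by a sharpened target distribution) might look as follows; this is an assumption, not the repository's actual code:

import torch

def feat2prob(feat, center, alpha=1.0):
    # Student's t-kernel over squared distances to the cluster centers,
    # normalized per row to give soft assignments (as in DEC).
    dist = torch.cdist(feat, center) ** 2
    q = (1.0 + dist / alpha) ** (-(alpha + 1.0) / 2.0)
    return q / q.sum(dim=1, keepdim=True)

def target_distribution(q):
    # Sharpened target: square the assignments, normalize per cluster,
    # then renormalize each row to sum to 1.
    weight = q ** 2 / q.sum(dim=0)
    return (weight.t() / weight.sum(dim=1)).t()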

Example 7: Baseline_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def Baseline_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        exp_lr_scheduler.step()  # note: since PyTorch 1.1, scheduler.step() belongs after the optimizer updates; kept as in the source
        for batch_idx, (x, label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args)
        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(probs) 
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir)) 
Author: k-han, Project: DTC, Lines: 25, Source: imagenet2cifar_DTC.py

Example 8: Baseline_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def Baseline_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        acc_record = AverageMeter()
        model.train()
        for batch_idx, (x, _, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            output = model(x)
            prob = feat2prob(output, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args, epoch)
        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(probs) 
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir)) 
Author: k-han, Project: DTC, Lines: 24, Source: imagenet_DTC.py

Example 9: warmup_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def warmup_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.warmup_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    for epoch in range(args.warmup_epochs):
        loss_record = AverageMeter()
        model.train()
        for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Warmup_train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args)
    args.p_targets = target_distribution(probs) 
Author: k-han, Project: DTC, Lines: 19, Source: svhn_DTC.py

Example 10: warmup_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def warmup_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.warmup_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    for epoch in range(args.warmup_epochs):
        loss_record = AverageMeter()
        model.train()
        for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Warmup_train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args)
    args.p_targets = target_distribution(probs) 
Author: k-han, Project: DTC, Lines: 19, Source: cifar10_DTC.py

Example 11: Baseline_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def Baseline_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args)
        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(probs) 
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir)) 
Author: k-han, Project: DTC, Lines: 23, Source: cifar10_DTC.py

Example 12: warmup_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def warmup_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.warmup_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    for epoch in range(args.warmup_epochs):
        loss_record = AverageMeter()
        model.train()
        for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            _, feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Warmup_train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args, epoch)
    args.p_targets = target_distribution(probs) 
Author: k-han, Project: DTC, Lines: 19, Source: cifar100_DTC.py

Example 13: Baseline_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def Baseline_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        exp_lr_scheduler.step()  # note: since PyTorch 1.1, scheduler.step() belongs after the optimizer updates; kept as in the source
        for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            _, feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args, epoch)

        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(probs) 
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir)) 
Author: k-han, Project: DTC, Lines: 26, Source: cifar100_DTC.py

Example 14: Baseline_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def Baseline_train(model, alphabetStr, train_loader, eval_loader, args):
    optimizer = Adam(model.parameters(), lr=args.lr)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        for batch_idx, (x, g_x, _, idx) in enumerate(train_loader):
            _, feat = model(x.to(device))
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_record.update(loss.item(), x.size(0))
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eval_loader, args)

        if epoch % args.update_interval == 0:
            args.p_targets = target_distribution(probs)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir)) 
Author: k-han, Project: DTC, Lines: 22, Source: omniglot_DTC.py

Example 15: PI_train

# Required import: from torch.nn import functional as F
# Alternatively: from torch.nn.functional import kl_div
def PI_train(model, alphabetStr, train_loader, eval_loader, args):
    optimizer = Adam(model.parameters(), lr=args.lr)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length)  # ramp up the consistency weight
        for batch_idx, (x, g_x, _, idx) in enumerate(train_loader):
            _, feat = model(x.to(device))
            _, feat_g = model(g_x.to(device))  # second, transformed view
            prob = feat2prob(feat, model.center)
            prob_g = feat2prob(feat_g, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            mse_loss = F.mse_loss(prob, prob_g)  # consistency between the two views
            loss = loss + w * mse_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_record.update(loss.item(), x.size(0))
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eval_loader, args)

        if epoch % args.update_interval == 0:
            args.p_targets = target_distribution(probs)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir)) 
Author: k-han, Project: DTC, Lines: 27, Source: omniglot_DTC.py
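ramps.sigmoid_rampup is another repository helper, used above to schedule the consistency weight w. The widely used mean-teacher-style ramp-up shown below is an assumption, not verified against this repository; it rises smoothly from 0 to 1 over rampup_length epochs:

import numpy as np

def sigmoid_rampup(current, rampup_length):
    # Exponential ramp-up from Laine & Aila (2017): exp(-5 * (1 - t)^2), t in [0, 1].
    if rampup_length == 0:
        return 1.0
    t = np.clip(float(current) / rampup_length, 0.0, 1.0)
    return float(np.exp(-5.0 * (1.0 - t) ** 2))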


Note: the torch.nn.functional.kl_div examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs, with snippets drawn from community-contributed open-source projects. Copyright of the source code remains with the original authors; consult the corresponding project's license before distributing or reusing it, and do not reproduce this article without permission.