

Python nn.MultiLabelSoftMarginLoss Method Code Examples

This article collects typical usage examples of the Python method torch.nn.MultiLabelSoftMarginLoss. If you are wondering how nn.MultiLabelSoftMarginLoss is used in practice, or what it looks like in real code, the selected examples below may help. You can also explore further usage examples from the torch.nn module, where this method is defined.


The following presents 11 code examples of nn.MultiLabelSoftMarginLoss, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
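Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the shapes and data are purely illustrative) of what nn.MultiLabelSoftMarginLoss expects: raw logits of shape (batch, num_classes) and a multi-hot float target of the same shape. The criterion applies a sigmoid per class internally, so the model output should not be passed through an activation first.

import torch
from torch import nn

# Hypothetical data for illustration only.
criterion = nn.MultiLabelSoftMarginLoss()
batch_size, num_classes = 4, 5
logits = torch.randn(batch_size, num_classes)                      # raw model scores, no sigmoid
targets = torch.randint(0, 2, (batch_size, num_classes)).float()   # multi-hot labels, e.g. [1., 0., 1., 0., 0.]

loss = criterion(logits, targets)
print(loss.item())

# With default settings the result matches per-class binary cross-entropy on the logits:
manual = nn.functional.binary_cross_entropy_with_logits(logits, targets)
print(manual.item())  # equal to loss.item() up to floating-point error

This is why every example below feeds the raw network output and a float multi-hot label tensor straight into the criterion.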

Example 1: train

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def train(model_name='model.pkl'):
    cnn = CNN()
    cnn.train()
    print('init net')
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(
        cnn.parameters(), lr=setting.TRAIN_LEARNING_RATE)

    # Train the Model
    train_dataloader = dataset.get_train_data_loader()
    for epoch in range(setting.TRAIN_NUM_EPOCHS):
        for i, (images, labels) in enumerate(train_dataloader):
            images = Variable(images)
            labels = Variable(labels.float())
            predict_labels = cnn(images)
            loss = criterion(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('epoch: %-3s loss: %s' % (epoch, loss.item()))
    torch.save(cnn.state_dict(), setting.MODEL_PATH /
               model_name)  # current is model.pkl
    print('save last model') 
Developer ID: tonglei100, Project: look, Lines: 25, Source: captcha.py
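Example 1 above (and Examples 9 and 11 below) wraps its batches in torch.autograd.Variable, which has been a no-op since PyTorch 0.4: autograd tracks plain tensors directly. A minimal self-contained sketch of the same loop structure, with an nn.Linear standing in for the project's CNN and random data standing in for its dataloader (both hypothetical), would be:

import torch
from torch import nn

# Since PyTorch 0.4, plain tensors carry gradients; no Variable wrapper is needed.
criterion = nn.MultiLabelSoftMarginLoss()
model = nn.Linear(10, 3)                        # stand-in for the CNN in Example 1
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

images = torch.randn(8, 10)                     # a fake batch, passed to the model directly
labels = torch.randint(0, 2, (8, 3)).float()    # multi-hot targets as float

loss = criterion(model(images), labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()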

Example 2: train

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def train(data, model, optimizer, epoch, args):
    model.train()
    loss_funct = nn.MultiLabelSoftMarginLoss()

    avg_loss = 0.0
    n_batches = 0
    progress_bar = tqdm(data)
    for batch_idx, sample_batched in enumerate(progress_bar):
        img, target = load_tensor_data(sample_batched, args.cuda, volatile=False)

        # forward and backward pass
        optimizer.zero_grad()
        output = model(img)
        loss = loss_funct(output, target)
        loss.backward()

        optimizer.step()

        # Show progress
        progress_bar.set_postfix(dict(loss=loss.item()))  # .item() replaces the pre-0.4 loss.data[0]
        avg_loss += loss.item()
        n_batches += 1

        if batch_idx % args.log_interval == 0:
            avg_loss /= n_batches
            processed = batch_idx * args.batch_size
            n_samples = len(data) * args.batch_size
            progress = float(processed) / n_samples
            print('Train Epoch: {} [{}/{} ({:.0%})] Train loss: {}'.format(
                epoch, processed, n_samples, progress, avg_loss))
            avg_loss = 0.0
            n_batches = 0 
Developer ID: mesnico, Project: RelationNetworks-CLEVR, Lines: 34, Source: cnn_train.py

Example 3: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def __init__(self):
    logger.info('Set Data Loader')
    self.dataset = AnimeFaceDataset(avatar_tag_dat_path=avatar_tag_dat_path,
                                    transform=transforms.Compose([ToTensor()]))
    self.data_loader = torch.utils.data.DataLoader(self.dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=num_workers, drop_last=True)
    checkpoint, checkpoint_name = self.load_checkpoint(model_dump_path)
    if checkpoint is None:
      logger.info('Don\'t have pre-trained model. Ignore loading model process.')
      logger.info('Set Generator and Discriminator')
      self.G = Generator().to(device)
      self.D = Discriminator().to(device)
      logger.info('Initialize Weights')
      self.G.apply(initital_network_weights).to(device)
      self.D.apply(initital_network_weights).to(device)
      logger.info('Set Optimizers')
      self.optimizer_G = torch.optim.Adam(self.G.parameters(), lr=learning_rate, betas=(beta_1, 0.999))
      self.optimizer_D = torch.optim.Adam(self.D.parameters(), lr=learning_rate, betas=(beta_1, 0.999))
      self.epoch = 0
    else:
      logger.info('Load Generator and Discriminator')
      self.G = Generator().to(device)
      self.D = Discriminator().to(device)
      logger.info('Load Pre-Trained Weights From Checkpoint {}'.format(checkpoint_name))
      self.G.load_state_dict(checkpoint['G'])
      self.D.load_state_dict(checkpoint['D'])
      logger.info('Load Optimizers')
      self.optimizer_G = torch.optim.Adam(self.G.parameters(), lr=learning_rate, betas=(beta_1, 0.999))
      self.optimizer_D = torch.optim.Adam(self.D.parameters(), lr=learning_rate, betas=(beta_1, 0.999))
      self.optimizer_G.load_state_dict(checkpoint['optimizer_G'])
      self.optimizer_D.load_state_dict(checkpoint['optimizer_D'])
      self.epoch = checkpoint['epoch']
    logger.info('Set Criterion')
    self.label_criterion = nn.BCEWithLogitsLoss().to(device)
    self.tag_criterion = nn.MultiLabelSoftMarginLoss().to(device) 
Developer ID: VincentXWD, Project: create-girls-moe-pytorch, Lines: 39, Source: gan.py

Example 4: main_voc2007

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def main_voc2007():
    global args, best_prec1, use_gpu 
    args = parser.parse_args()

    use_gpu = torch.cuda.is_available()

    # define dataset
    train_dataset = Voc2007Classification(args.data, 'trainval')
    val_dataset = Voc2007Classification(args.data, 'test')
    num_classes = 20

    # load model
    model = vgg16_sp(num_classes, pretrained=True)
    
    print(model)

    criterion = nn.MultiLabelSoftMarginLoss()

    state = {'batch_size': args.batch_size, 'max_epochs': args.epochs, 
            'image_size': args.image_size, 'evaluate': args.evaluate, 'resume': args.resume,
             'lr':args.lr, 'momentum':args.momentum, 'weight_decay':args.weight_decay}
    state['difficult_examples'] = True
    state['save_model_path'] = 'logs/voc2007/'

    engine = MultiLabelMAPEngine(state)
    engine.multi_learning(model, criterion, train_dataset, val_dataset) 
Developer ID: yeezhu, Project: SPN.pytorch, Lines: 28, Source: demo_voc2007.py

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def __init__(self, vocab_size, emb_mat):
        kernel_size = 3
        filters = 128
        super(Cnn, self).__init__()
        self.loss_fn = nn.MultiLabelSoftMarginLoss()
        self.encoder = nn.Embedding(vocab_size, conf.emb_size)
        self.title_conv = nn.Sequential(
            nn.Conv1d(conf.emb_size, filters, kernel_size=kernel_size),
            nn.BatchNorm1d(filters),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=(conf.title_seq_len - 2 * kernel_size + 2))
        )
        self.content_conv = nn.Sequential(
            nn.Conv1d(conf.emb_size, filters, kernel_size=3),
            nn.BatchNorm1d(filters),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=(conf.content_seq_len - 2 * kernel_size + 2))
        )
        self.fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear((filters + filters), 1024),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
            nn.Linear(1024, conf.n_classes),
        )

        self.encoder.weight.data.copy_(emb_mat) 
Developer ID: moxiu2012, Project: PJ_NLP, Lines: 29, Source: model.py

Example 6: _init_data

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def _init_data(self, manager, cfg):
        self.data_train = manager.create_dataset('train', cfg['batchsz'])
        self.data_valid = manager.create_dataset('val', cfg['batchsz'])
        self.data_test = manager.create_dataset('test', cfg['batchsz'])
        self.save_dir = cfg['save_dir']
        self.print_per_batch = cfg['print_per_batch']
        self.save_per_epoch = cfg['save_per_epoch']
        self.multi_entropy_loss = nn.MultiLabelSoftMarginLoss() 
Developer ID: thu-coai, Project: tatk, Lines: 10, Source: train.py

Example 7: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def __init__(self, manager, cfg):
        self.data_train = manager.create_dataset('train', cfg['batchsz'])
        self.data_valid = manager.create_dataset('val', cfg['batchsz'])
        self.data_test = manager.create_dataset('test', cfg['batchsz'])
        self.save_dir = cfg['save_dir']
        self.print_per_batch = cfg['print_per_batch']
        self.save_per_epoch = cfg['save_per_epoch']

        voc_file = os.path.join(root_dir, 'data/crosswoz/sys_da_voc.json')
        voc_opp_file = os.path.join(root_dir, 'data/crosswoz/usr_da_voc.json')
        vector = CrossWozVector(voc_file, voc_opp_file)
        self.policy = MultiDiscretePolicy(vector.state_dim, cfg['h_dim'], vector.sys_da_dim).to(device=DEVICE)
        self.policy.eval()
        self.policy_optim = torch.optim.Adam(self.policy.parameters(), lr=cfg['lr'])
        self.multi_entropy_loss = nn.MultiLabelSoftMarginLoss() 
Developer ID: thu-coai, Project: tatk, Lines: 17, Source: train.py

Example 8: main_voc2007

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def main_voc2007():
    global args, best_prec1, use_gpu
    args = parser.parse_args()

    use_gpu = torch.cuda.is_available()

    # define dataset
    train_dataset = Voc2007Classification(args.data, 'trainval')
    val_dataset = Voc2007Classification(args.data, 'test')
    num_classes = 20

    # load model
    model = resnet101_wildcat(num_classes, pretrained=True, kmax=args.k, alpha=args.alpha, num_maps=args.maps)
    print('classifier', model.classifier)
    print('spatial pooling', model.spatial_pooling)

    # define loss function (criterion)
    criterion = nn.MultiLabelSoftMarginLoss()

    # define optimizer
    optimizer = torch.optim.SGD(model.get_config_optim(args.lr, args.lrp),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    state = {'batch_size': args.batch_size, 'image_size': args.image_size, 'max_epochs': args.epochs,
             'evaluate': args.evaluate, 'resume': args.resume}
    state['difficult_examples'] = True
    state['save_model_path'] = '../expes/models/voc2007/'

    engine = MultiLabelMAPEngine(state)
    engine.learning(model, criterion, train_dataset, val_dataset, optimizer) 
Developer ID: durandtibo, Project: wildcat.pytorch, Lines: 34, Source: demo_voc2007.py

Example 9: main

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def main():
    cnn = CNN()
    cnn.train()
    print('init net')
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

    # Train the Model
    train_dataloader = my_dataset.get_train_data_loader()
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_dataloader):
            images = Variable(images)
            labels = Variable(labels.float())
            predict_labels = cnn(images)
            # print(predict_labels.type)
            # print(labels.type)
            loss = criterion(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i+1) % 10 == 0:
                print("epoch:", epoch, "step:", i, "loss:", loss.item())
            if (i+1) % 100 == 0:
                torch.save(cnn.state_dict(), "./model.pkl")   #current is model.pkl
                print("save model")
        print("epoch:", epoch, "step:", i, "loss:", loss.item())
    torch.save(cnn.state_dict(), "./model.pkl")   #current is model.pkl
    print("save last model") 
Developer ID: dee1024, Project: pytorch-captcha-recognition, Lines: 30, Source: captcha_train.py

Example 10: main

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def main(args):
    global C, H, W
    coco_labels = json.load(open(args.coco_labels))
    num_classes = coco_labels['num_classes']
    if args.model == 'inception_v3':
        C, H, W = 3, 299, 299
        model = pretrainedmodels.inceptionv3(pretrained='imagenet')

    elif args.model == 'resnet152':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet152(pretrained='imagenet')

    elif args.model == 'inception_v4':
        C, H, W = 3, 299, 299
        model = pretrainedmodels.inceptionv4(
            num_classes=1000, pretrained='imagenet')

    else:
        print("doesn't support %s" % (args['model']))

    load_image_fn = utils.LoadTransformImage(model)
    dim_feats = model.last_linear.in_features
    model = MILModel(model, dim_feats, num_classes)
    model = model.cuda()
    dataset = CocoDataset(coco_labels)
    dataloader = DataLoader(
        dataset, batch_size=args.batch_size, shuffle=True)
    optimizer = optim.Adam(
        model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.learning_rate_decay_every,
                                                 gamma=args.learning_rate_decay_rate)

    crit = nn.MultiLabelSoftMarginLoss()
    if not os.path.isdir(args.checkpoint_path):
        os.mkdir(args.checkpoint_path)
    train(dataloader, model, crit, optimizer,
          exp_lr_scheduler, load_image_fn, args) 
Developer ID: Sundrops, Project: video-caption.pytorch, Lines: 39, Source: finetune_cnn.py

Example 11: main

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MultiLabelSoftMarginLoss [as alias]
def main(args):
    cnn = CNN().to(device)
    
    cnn.train()
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

    if args.resume:
        cnn.load_state_dict(torch.load(args.model_path, map_location=device))

    max_acc = 0
    # Train the Model
    train_dataloader = datasets.get_train_data_loader()
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_dataloader):
            images = Variable(images)
            labels = Variable(labels.float())
            predict_labels = cnn(images)
            loss = criterion(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i+1) % 2 == 0:
                print("epoch: %03g \t step: %03g \t loss: %.5f \t\r" % (epoch, i+1, loss.item()))
                torch.save(cnn.state_dict(), "./weights/cnn_%03g.pt" % epoch)
        print("epoch: %03g \t step: %03g \t loss: %.5f \t" % (epoch, i, loss.item()))
        torch.save(cnn.state_dict(), "./weights/cnn_%03g.pt" % epoch)
        acc = test.test_data("./weights/cnn_%03g.pt" % epoch)
        if max_acc < acc:
            print("update accuracy %.5f." % acc)
            max_acc = acc
            shutil.copy("./weights/cnn_%03g.pt" % epoch, "./weights/cnn_best.pt")
        else:
            print("do not update %.5f." % acc)
        
    torch.save(cnn.state_dict(), "./weights/cnn_last.pt")
    print("save last model") 
Developer ID: pprp, Project: captcha_identify.pytorch, Lines: 39, Source: train.py


Note: The torch.nn.MultiLabelSoftMarginLoss examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code, and do not republish without permission.