

Python torch.utils Code Examples

This article collects typical usage examples of torch.utils in Python. If you are wondering what torch.utils does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the parent torch package.


The following shows 15 code examples of torch.utils, sorted by popularity by default.
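
In most of the projects below, torch.utils appears through its torch.utils.data submodule: datasets are wrapped in a DataLoader that batches and shuffles samples for the training and validation loops. As a minimal, self-contained sketch (the toy tensors and the batch size here are purely illustrative, not taken from any example on this page):

import torch
from torch.utils.data import TensorDataset, DataLoader

# Toy dataset: 100 random "images" with integer class labels (illustrative only).
inputs = torch.randn(100, 3, 32, 32)
targets = torch.randint(0, 10, (100,))
dataset = TensorDataset(inputs, targets)

# torch.utils.data.DataLoader handles batching, shuffling and parallel loading.
loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=2)

for batch_inputs, batch_targets in loader:
    print(batch_inputs.shape, batch_targets.shape)  # torch.Size([16, 3, 32, 32]) torch.Size([16])
    break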

Example 1: build

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def build(self):
        """
        Build Retina Net architecture.
        """

        # Image size must be divisible by 2 multiple times.
        h, w = self.cf.patch_size[:2]
        if h / 2 ** 5 != int(h / 2 ** 5) or w / 2 ** 5 != int(w / 2 ** 5):
            raise Exception("Image size must be dividable by 2 at least 5 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")

        # instantiate abstract multi-dimensional conv class and backbone model.
        conv = mutils.NDConvGenerator(self.cf.dim)
        backbone = utils.import_module('bbone', self.cf.backbone_path)

        # build Anchors, FPN, Classifier / Bbox-Regressor -head
        self.np_anchors = mutils.generate_pyramid_anchors(self.logger, self.cf)
        self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
        self.Fpn = backbone.FPN(self.cf, conv, operate_stride1=self.cf.operate_stride1)
        self.Classifier = Classifier(self.cf, conv)
        self.BBRegressor = BBRegressor(self.cf, conv)
        self.final_conv = conv(self.cf.end_filts, self.cf.num_seg_classes, ks=1, pad=0, norm=None, relu=None) 
Developer: MIC-DKFZ, Project: medicaldetectiontoolkit, Lines of code: 25, Source: retina_unet.py

Example 2: build

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def build(self):
        """
        Build Retina Net architecture.
        """

        # Image size must be divisible by 2 multiple times.
        h, w = self.cf.patch_size[:2]
        if h / 2 ** 5 != int(h / 2 ** 5) or w / 2 ** 5 != int(w / 2 ** 5):
            raise Exception("Image size must be dividable by 2 at least 5 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")

        # instantiate abstract multi-dimensional conv class and backbone model.
        conv = mutils.NDConvGenerator(self.cf.dim)
        backbone = utils.import_module('bbone', self.cf.backbone_path)

        # build Anchors, FPN, Classifier / Bbox-Regressor -head
        self.np_anchors = mutils.generate_pyramid_anchors(self.logger, self.cf)
        self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
        self.Fpn = backbone.FPN(self.cf, conv, operate_stride1=self.cf.operate_stride1)
        self.Classifier = Classifier(self.cf, conv)
        self.BBRegressor = BBRegressor(self.cf, conv) 
Developer: MIC-DKFZ, Project: medicaldetectiontoolkit, Lines of code: 24, Source: retina_net.py

Example 3: build

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def build(self):
        """Build Mask R-CNN architecture."""

        # Image size must be divisible by 2 multiple times.
        h, w = self.cf.patch_size[:2]
        if h / 2**5 != int(h / 2**5) or w / 2**5 != int(w / 2**5):
            raise Exception("Image size must be dividable by 2 at least 5 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")

        # instantiate abstract multi-dimensional conv class and backbone class.
        conv = mutils.NDConvGenerator(self.cf.dim)
        backbone = utils.import_module('bbone', self.cf.backbone_path)

        # build Anchors, FPN, RPN, Classifier / Bbox-Regressor -head, Mask-head
        self.np_anchors = mutils.generate_pyramid_anchors(self.logger, self.cf)
        self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
        self.fpn = backbone.FPN(self.cf, conv, operate_stride1=True)
        self.rpn = RPN(self.cf, conv)
        self.classifier = Classifier(self.cf, conv)
        self.mask = Mask(self.cf, conv)
        self.final_conv = conv(self.cf.end_filts, self.cf.num_seg_classes, ks=1, pad=0, norm=self.cf.norm, relu=None) 
Developer: MIC-DKFZ, Project: medicaldetectiontoolkit, Lines of code: 24, Source: ufrcnn.py

Example 4: training

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def training(self, epoch):
        train_loss = 0.0
        self.model.train()
        tbar = tqdm(self.trainloader)
        for i, (image, target) in enumerate(tbar):
            self.scheduler(self.optimizer, i, epoch, self.best_pred)
            self.optimizer.zero_grad()
            outputs = self.model(image)
            loss = self.criterion(outputs, target)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))

        if self.args.no_val:
            # save checkpoint every epoch
            is_best = False
            utils.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': self.model.module.state_dict(),
                'optimizer': self.optimizer.state_dict(),
                'best_pred': self.best_pred,
            }, self.args, is_best) 
Developer: zhanghang1989, Project: PyTorch-Encoding, Lines of code: 25, Source: train.py

Example 5: child_valid

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def child_valid(valid_queue, model, arch_pool, criterion):
    valid_acc_list = []
    with torch.no_grad():
        model.eval()
        for i, arch in enumerate(arch_pool):
            # for step, (input, target) in enumerate(valid_queue):
            inputs, targets = next(iter(valid_queue))
            inputs = inputs.cuda()
            targets = targets.cuda()
                
            logits, _ = model(inputs, arch, bn_train=True)
            loss = criterion(logits, targets)
                
            prec1, prec5 = utils.accuracy(logits, targets, topk=(1, 5))
            valid_acc_list.append(prec1.data/100)
            
            if (i+1) % 100 == 0:
                logging.info('Valid arch %s\n loss %.2f top1 %f top5 %f', ' '.join(map(str, arch[0] + arch[1])), loss, prec1, prec5)
        
    return valid_acc_list 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 22, Source: train_search.py

Example 6: valid

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def valid(valid_queue, model, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    with torch.no_grad():
        model.eval()
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda()
        
            logits, _ = model(input)
            loss = criterion(logits, target)
        
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data, n)
            top1.update(prec1.data, n)
            top5.update(prec5.data, n)
        
            if (step+1) % 100 == 0:
                logging.info('valid %03d %e %f %f', step+1, objs.avg, top1.avg, top5.avg)

    return top1.avg, top5.avg, objs.avg 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 25, Source: train_imagenet.py

Example 7: valid

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def valid(valid_queue, model, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    with torch.no_grad():
        model.eval()
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda()
        
            logits, _ = model(input)
            loss = criterion(logits, target)
        
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data, n)
            top1.update(prec1.data, n)
            top5.update(prec5.data, n)
        
            if (step+1) % 100 == 0:
                logging.info('valid %03d %e %f %f', step+1, objs.avg, top1.avg, top5.avg)

    return top1.avg, objs.avg 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 25, Source: test_cifar.py

Example 8: nao_valid

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def nao_valid(queue, model):
    pa = utils.AvgrageMeter()
    hs = utils.AvgrageMeter()
    with torch.no_grad():
        model.eval()
        for step, sample in enumerate(queue):
            encoder_input = sample['encoder_input']
            encoder_target = sample['encoder_target']
            decoder_target = sample['decoder_target']
            
            encoder_input = encoder_input.cuda()
            encoder_target = encoder_target.cuda()
            decoder_target = decoder_target.cuda()
            
            predict_value, logits, arch = model(encoder_input)
            n = encoder_input.size(0)
            pairwise_acc = utils.pairwise_accuracy(encoder_target.data.squeeze().tolist(),
                                                predict_value.data.squeeze().tolist())
            hamming_dis = utils.hamming_distance(decoder_target.data.squeeze().tolist(), arch.data.squeeze().tolist())
            pa.update(pairwise_acc, n)
            hs.update(hamming_dis, n)
    return pa.avg, hs.avg 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 24, Source: test_controller.py

Example 9: nao_valid

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def nao_valid(queue, model):
    pa = utils.AvgrageMeter()
    hs = utils.AvgrageMeter()
    mse = utils.AvgrageMeter()
    with torch.no_grad():
        model.eval()
        for step, sample in enumerate(queue):
            encoder_input = sample['encoder_input']
            encoder_target = sample['encoder_target']
            decoder_target = sample['decoder_target']
            
            encoder_input = encoder_input.cuda()
            encoder_target = encoder_target.cuda()
            decoder_target = decoder_target.cuda()
            
            predict_value, logits, arch = model(encoder_input)
            n = encoder_input.size(0)
            pairwise_acc = utils.pairwise_accuracy(encoder_target.data.squeeze().tolist(),
                                                predict_value.data.squeeze().tolist())
            hamming_dis = utils.hamming_distance(decoder_target.data.squeeze().tolist(), arch.data.squeeze().tolist())
            mse.update(F.mse_loss(predict_value.data.squeeze(), encoder_target.data.squeeze()), n)
            pa.update(pairwise_acc, n)
            hs.update(hamming_dis, n)
    return mse.avg, pa.avg, hs.avg 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 26, Source: train_controller.py

Example 10: child_valid

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def child_valid(valid_queue, model, arch_pool, criterion):
    valid_acc_list = []
    with torch.no_grad():
        model.eval()
        for i, arch in enumerate(arch_pool):
            #for step, (inputs, targets) in enumerate(valid_queue):
            inputs, targets = next(iter(valid_queue))
            inputs = inputs.cuda()
            targets = targets.cuda()
                
            logits, _ = model(inputs, arch, bn_train=True)
            loss = criterion(logits, targets)
                
            prec1, prec5 = utils.accuracy(logits, targets, topk=(1, 5))
            valid_acc_list.append(prec1.data/100)
            
            if (i+1) % 100 == 0:
                logging.info('Valid arch %s\n loss %.2f top1 %f top5 %f', ' '.join(map(str, arch[0] + arch[1])), loss, prec1, prec5)
        
    return valid_acc_list 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 22, Source: train_search.py

Example 11: nao_valid

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def nao_valid(queue, model):
    inputs = []
    targets = []
    predictions = []
    archs = []
    with torch.no_grad():
        model.eval()
        for step, sample in enumerate(queue):
            encoder_input = sample['encoder_input']
            encoder_target = sample['encoder_target']
            decoder_target = sample['decoder_target']
            
            encoder_input = encoder_input.cuda()
            encoder_target = encoder_target.cuda()
            decoder_target = decoder_target.cuda()
            
            predict_value, logits, arch = model(encoder_input)
            n = encoder_input.size(0)
            inputs += encoder_input.data.squeeze().tolist()
            targets += encoder_target.data.squeeze().tolist()
            predictions += predict_value.data.squeeze().tolist()
            archs += arch.data.squeeze().tolist()
    pa = utils.pairwise_accuracy(targets, predictions)
    hd = utils.hamming_distance(inputs, archs)
    return pa, hd 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 27, Source: train_search.py

Example 12: load_data

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def load_data(path='../data/', data_name='celebA', img_size=64):
    print('Loading ' + data_name + ' data...')
    train_transform, test_transform = utils.data_transforms(img_size=img_size)

    if data_name != 'svhn':
        # The image data should be contained in sub folders (e.g., ../data/celebA/train/image/aaa.png)
        train_data = torchvision.datasets.ImageFolder('{}{}/train'.format(path, data_name), transform=train_transform)
        test_data = torchvision.datasets.ImageFolder('{}{}/test'.format(path, data_name), transform=test_transform)
    else:
        train_data = torchvision.datasets.SVHN(path, split='train', transform=train_transform, download=True)
        test_data = torchvision.datasets.SVHN(path, split='test', transform=test_transform, download=True)
        # extra_data = torchvision.datasets.SVHN(path, split='extra', transform=train_transform, download=True)
        # train_data = torch.utils.data.ConcatDataset([train_data, extra_data])

    print('train_data_size: %d, test_data_size: %d' % (len(train_data), len(test_data)))
    return train_data, test_data


# Save result data 
Developer: shirakawas, Project: ASNG-NAS, Lines of code: 21, Source: main_inpainting_int.py
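
The torch.utils call in this example only appears in the commented-out ConcatDataset line; wiring the returned datasets into loaders is the usual next step. A minimal sketch, assuming the load_data function above (the batch size and worker count are illustrative assumptions, not values from the original project):

train_data, test_data = load_data(path='../data/', data_name='celebA', img_size=64)

# torch.utils.data.DataLoader batches and shuffles the ImageFolder / SVHN datasets.
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=64, shuffle=False, num_workers=2)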

Example 13: build

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def build(self):
        """Build Mask R-CNN architecture."""

        # Image size must be dividable by 2 multiple times.
        h, w = self.cf.patch_size[:2]
        if h / 2**5 != int(h / 2**5) or w / 2**5 != int(w / 2**5):
            raise Exception("Image size must be divisible by 2 at least 5 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 288, 320, 384, 448, 512, ... etc.,i.e.,"
                            "any number x*32 will do!")

        # instantiate abstract multi-dimensional conv generator and load backbone module.
        backbone = utils.import_module('bbone', self.cf.backbone_path)
        self.logger.info("loaded backbone from {}".format(self.cf.backbone_path))
        conv = backbone.ConvGenerator(self.cf.dim)

        # build Anchors, FPN, RPN, Classifier / Bbox-Regressor -head, Mask-head
        self.np_anchors = mutils.generate_pyramid_anchors(self.logger, self.cf)
        self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
        self.fpn = backbone.FPN(self.cf, conv, relu_enc=self.cf.relu, operate_stride1=False).cuda()
        self.rpn = RPN(self.cf, conv)
        self.classifier = Classifier(self.cf, conv)
        self.mask = Mask(self.cf, conv) 
Developer: MIC-DKFZ, Project: RegRCNN, Lines of code: 25, Source: mrcnn.py

Example 14: mnist_loaders

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def mnist_loaders(dataset, batch_size, shuffle_train = True, shuffle_test = False, normalize_input = False, num_examples = None, test_batch_size=None): 
    mnist_train = dataset("./data", train=True, download=True, transform=transforms.ToTensor())
    mnist_test = dataset("./data", train=False, download=True, transform=transforms.ToTensor())
    if num_examples:
        indices = list(range(num_examples))
        mnist_train = data.Subset(mnist_train, indices)
        mnist_test = data.Subset(mnist_test, indices)
    train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=shuffle_train, pin_memory=True, num_workers=min(multiprocessing.cpu_count(),2))
    if test_batch_size:
        batch_size = test_batch_size
    test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=shuffle_test, pin_memory=True, num_workers=min(multiprocessing.cpu_count(),2))
    std = [1.0]
    mean = [0.0]
    train_loader.std = std
    test_loader.std = std
    train_loader.mean = mean
    test_loader.mean = mean
    return train_loader, test_loader 
Developer: huanzhang12, Project: CROWN-IBP, Lines of code: 20, Source: datasets.py
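
A short usage sketch for the helper above. Passing torchvision.datasets.MNIST as the dataset argument is an assumption for illustration (the original call site is not shown here); any dataset class with a (root, train, download, transform) constructor would work the same way:

import torchvision

# The returned objects are torch.utils.data.DataLoader instances with .mean/.std attached.
train_loader, test_loader = mnist_loaders(torchvision.datasets.MNIST, batch_size=128,
                                          num_examples=1000, test_batch_size=256)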

Example 15: infer

# Required import: import torch [as alias]
# Or: from torch import utils [as alias]
def infer(valid_queue, model, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()

    for step, (input, target) in enumerate(valid_queue):
        input = input.cuda()
        target = target.cuda(non_blocking=True)

        logits = model(input, discrete=True)
        loss = criterion(logits, target)

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)

        if step % args.report_freq == 0:
            logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
            if args.debug:
                break

    return top1.avg, objs.avg 
Developer: automl, Project: nasbench-1shot1, Lines of code: 27, Source: train.py


Note: The torch.utils examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please refer to each project's license before redistributing or using the code; do not reproduce without permission.