

Python transforms.Pad Method Code Examples

This article collects typical usage examples of Python's torchvision.transforms.Pad method. If you have been wondering what exactly transforms.Pad does, how to use it, or where to find usage examples, the hand-picked code samples below should help. You can also explore further usage examples of torchvision.transforms.


A total of 15 code examples of the transforms.Pad method are shown below, sorted by popularity by default.
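Before going through the examples, it may help to recall the Pad signature itself: transforms.Pad(padding, fill=0, padding_mode='constant'), where padding is an int (all four sides), a 2-tuple (left/right, top/bottom) or a 4-tuple (left, top, right, bottom), fill is the constant pixel value, and padding_mode is one of 'constant', 'edge', 'reflect' or 'symmetric'. A minimal sketch with illustrative values (not taken from any project below):

from PIL import Image
from torchvision import transforms

img = Image.new('RGB', (100, 60))  # dummy 100x60 image

print(transforms.Pad(4)(img).size)                       # int: all sides +4 -> (108, 68)
print(transforms.Pad((2, 8))(img).size)                  # 2-tuple: left/right +2, top/bottom +8 -> (104, 76)
print(transforms.Pad((1, 2, 3, 4), fill=128)(img).size)  # 4-tuple: left, top, right, bottom -> (104, 66)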

Example 1: detect_image

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def detect_image(img, model, img_size=416, conf_threshold=0.8, nms_threshold=0.4):
    # resize and pad image
    ratio = min(img_size/img.size[0], img_size/img.size[1])
    imw = round(img.size[0] * ratio)
    imh = round(img.size[1] * ratio)
    img_transforms = transforms.Compose([
        transforms.Resize((imh, imw)),
        transforms.Pad((
            max(int((imh-imw)/2),0),
            max(int((imw-imh)/2),0)), fill=(128,128,128)),
        transforms.ToTensor(),
    ])

    # convert image to Tensor
    Tensor = torch.cuda.FloatTensor
    tensor = img_transforms(img).float()
    tensor = tensor.unsqueeze_(0)
    input_image = Variable(tensor.type(Tensor))

    # run inference on the model and get detections
    with torch.no_grad():
        detections = model(input_image)
        detections = non_max_suppression(detections, 80, conf_threshold, nms_threshold)
    return detections[0] 
Author: afunTW, Project: pytorch-mot-tracking, Lines of code: 26, Source: inference_detection.py
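The pipeline in detect_image letterboxes an arbitrary image onto a square img_size x img_size canvas: the shorter side is padded with grey (128, 128, 128), and the 2-tuple given to Pad pads left/right by its first value and top/bottom by its second. A standalone check of just that resize-and-pad logic, with a hypothetical 640x480 input and no model required (off by at most one pixel for odd size differences):

from PIL import Image
from torchvision import transforms

img = Image.new('RGB', (640, 480))   # dummy input, not from the original repo
img_size = 416
ratio = min(img_size / img.size[0], img_size / img.size[1])
imw, imh = round(img.size[0] * ratio), round(img.size[1] * ratio)
pipeline = transforms.Compose([
    transforms.Resize((imh, imw)),
    transforms.Pad((max(int((imh - imw) / 2), 0),
                    max(int((imw - imh) / 2), 0)), fill=(128, 128, 128)),
    transforms.ToTensor(),
])
print(pipeline(img).shape)  # torch.Size([3, 416, 416]) for this input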

Example 2: initialize_dataset

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def initialize_dataset(clevr_dir, dictionaries, state_description=True):
    if not state_description:
        train_transforms = transforms.Compose([transforms.Resize((128, 128)),
                                           transforms.Pad(8),
                                           transforms.RandomCrop((128, 128)),
                                           transforms.RandomRotation(2.8),  # .05 rad
                                           transforms.ToTensor()])
        test_transforms = transforms.Compose([transforms.Resize((128, 128)),
                                          transforms.ToTensor()])
                                          
        clevr_dataset_train = ClevrDataset(clevr_dir, True, dictionaries, train_transforms)
        clevr_dataset_test = ClevrDataset(clevr_dir, False, dictionaries, test_transforms)
        
    else:
        clevr_dataset_train = ClevrDatasetStateDescription(clevr_dir, True, dictionaries)
        clevr_dataset_test = ClevrDatasetStateDescription(clevr_dir, False, dictionaries)
    
    return clevr_dataset_train, clevr_dataset_test 
Author: mesnico, Project: RelationNetworks-CLEVR, Lines of code: 20, Source: train.py

Example 3: get_trm

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def get_trm(cfg, is_train=True):
    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
    if is_train:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN),
            T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
            T.ToTensor(),
            normalize_transform,
            RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
        ])
    else:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TEST),
            T.ToTensor(),
            normalize_transform
        ])
    return transform 
Author: DTennant, Project: reid_baseline_with_syncbn, Lines of code: 21, Source: __init__.py

Example 4: check_dataset

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def check_dataset(opt):
    normalize_transform = transforms.Compose([transforms.ToTensor(),
                                              transforms.Normalize((0.485, 0.456, 0.406),
                                                                   (0.229, 0.224, 0.225))])
    train_large_transform = transforms.Compose([transforms.RandomResizedCrop(224),
                                                transforms.RandomHorizontalFlip()])
    val_large_transform = transforms.Compose([transforms.Resize(256),
                                              transforms.CenterCrop(224)])
    train_small_transform = transforms.Compose([transforms.Pad(4),
                                                transforms.RandomCrop(32),
                                                transforms.RandomHorizontalFlip()])

    splits = check_split(opt)

    if opt.dataset in ['cub200', 'indoor', 'stanford40', 'dog']:
        train, val = 'train', 'test'
        train_transform = transforms.Compose([train_large_transform, normalize_transform])
        val_transform = transforms.Compose([val_large_transform, normalize_transform])
        sets = [dset.ImageFolder(root=os.path.join(opt.dataroot, train), transform=train_transform),
                dset.ImageFolder(root=os.path.join(opt.dataroot, train), transform=val_transform),
                dset.ImageFolder(root=os.path.join(opt.dataroot, val), transform=val_transform)]
        sets = [FolderSubset(dataset, *split) for dataset, split in zip(sets, splits)]

        opt.num_classes = len(splits[0][0])

    else:
        raise Exception('Unknown dataset')

    loaders = [torch.utils.data.DataLoader(dataset,
                                           batch_size=opt.batchSize,
                                           shuffle=True,
                                           num_workers=0) for dataset in sets]
    return loaders 
Author: alinlab, Project: L2T-ww, Lines of code: 35, Source: check_dataset.py

Example 5: __call__

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def __call__(self, rgb_img, label_img):
        w, h = rgb_img.size
        pad_along_w = max(0, int((1 + self.crop_size[0] - w) / 2))
        pad_along_h = max(0, int((1 + self.crop_size[1] - h) / 2))
        # pad the images
        rgb_img = Pad(padding=(pad_along_w, pad_along_h), fill=0, padding_mode='constant')(rgb_img)
        label_img = Pad(padding=(pad_along_w, pad_along_h), fill=self.ignore_idx, padding_mode='constant')(label_img)

        i, j, h, w = self.get_params(rgb_img, self.crop_size)
        rgb_img = F.crop(rgb_img, i, j, h, w)
        label_img = F.crop(label_img, i, j, h, w)
        return rgb_img, label_img 
Author: clovaai, Project: ext_portrait_segmentation, Lines of code: 14, Source: PILTransform.py
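The point of Example 5 is that an image and its label map must be padded and cropped identically but with different fill values: 0 for the RGB image and ignore_idx for the labels, so the padded border does not contribute to the loss. A self-contained sketch of the same pairing, with dummy sizes and a hypothetical ignore index of 255:

from PIL import Image
from torchvision.transforms import Pad, RandomCrop
import torchvision.transforms.functional as F

rgb = Image.new('RGB', (90, 70))
label = Image.new('L', (90, 70))
crop_size = (128, 128)

pad_w = max(0, (1 + crop_size[0] - rgb.size[0]) // 2)
pad_h = max(0, (1 + crop_size[1] - rgb.size[1]) // 2)
rgb = Pad((pad_w, pad_h), fill=0)(rgb)
label = Pad((pad_w, pad_h), fill=255)(label)        # 255 = hypothetical ignore index

i, j, h, w = RandomCrop.get_params(rgb, crop_size)  # one set of crop params for both
print(F.crop(rgb, i, j, h, w).size, F.crop(label, i, j, h, w).size)  # (128, 128) (128, 128)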

Example 6: cifar10

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def cifar10(n_labels, data_root='./data-local/cifar10/'):
    channel_stats = dict(mean = [0.4914, 0.4822, 0.4465],
                         std = [0.2023, 0.1994, 0.2010])
    train_transform = transforms.Compose([
        transforms.Pad(2, padding_mode='reflect'),
        transforms.ColorJitter(brightness=0.4, contrast=0.4,
                               saturation=0.4, hue=0.1),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    trainset = tv.datasets.CIFAR10(data_root, train=True, download=True,
                                   transform=train_transform)
    evalset = tv.datasets.CIFAR10(data_root, train=False, download=True,
                                   transform=eval_transform)
    num_classes = 10
    label_per_class = n_labels // num_classes
    labeled_idxs, unlabed_idxs = split_relabel_data(
                                    np.array(trainset.train_labels),
                                    trainset.train_labels,
                                    label_per_class,
                                    num_classes)
    return {
        'trainset': trainset,
        'evalset': evalset,
        'label_idxs': labeled_idxs,
        'unlab_idxs': unlabed_idxs,
        'num_classes': num_classes
    } 
Author: iBelieveCJM, Project: Tricks-of-Semi-supervisedDeepLeanring-Pytorch, Lines of code: 36, Source: datasets.py

Example 7: wscifar10

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def wscifar10(n_labels, data_root='./data-local/cifar10/'):
    channel_stats = dict(mean = [0.4914, 0.4822, 0.4465],
                         std = [0.2023, 0.1994, 0.2010])
    weak = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Pad(2, padding_mode='reflect'),
        transforms.RandomCrop(32),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    strong = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Pad(2, padding_mode='reflect'),
        transforms.RandomCrop(32),
        RandAugmentMC(n=2, m=10),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    train_transform = wstwice(weak, strong)
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    trainset = tv.datasets.CIFAR10(data_root, train=True, download=True,
                                   transform=train_transform)
    evalset = tv.datasets.CIFAR10(data_root, train=False, download=True,
                                   transform=eval_transform)
    num_classes = 10
    label_per_class = n_labels // num_classes
    labeled_idxs, unlabed_idxs = split_relabel_data(
                                    np.array(trainset.train_labels),
                                    trainset.train_labels,
                                    label_per_class,
                                    num_classes)
    return {
        'trainset': trainset,
        'evalset': evalset,
        'label_idxs': labeled_idxs,
        'unlab_idxs': unlabed_idxs,
        'num_classes': num_classes
    } 
Author: iBelieveCJM, Project: Tricks-of-Semi-supervisedDeepLeanring-Pytorch, Lines of code: 43, Source: datasets.py

Example 8: cifar100

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def cifar100(n_labels, data_root='./data-local/cifar100/'):
    channel_stats = dict(mean = [0.5071, 0.4867, 0.4408],
                         std = [0.2675, 0.2565, 0.2761])
    train_transform = transforms.Compose([
        transforms.Pad(2, padding_mode='reflect'),
        transforms.ColorJitter(brightness=0.4, contrast=0.4,
                               saturation=0.4, hue=0.1),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    trainset = tv.datasets.CIFAR100(data_root, train=True, download=True,
                                   transform=train_transform)
    evalset = tv.datasets.CIFAR100(data_root, train=False, download=True,
                                   transform=eval_transform)
    num_classes = 100
    label_per_class = n_labels // num_classes
    labeled_idxs, unlabed_idxs = split_relabel_data(
                                    np.array(trainset.train_labels),
                                    trainset.train_labels,
                                    label_per_class,
                                    num_classes)
    return {
        'trainset': trainset,
        'evalset': evalset,
        'labeled_idxs': labeled_idxs,
        'unlabeled_idxs': unlabed_idxs,
        'num_classes': num_classes
    } 
Author: iBelieveCJM, Project: Tricks-of-Semi-supervisedDeepLeanring-Pytorch, Lines of code: 36, Source: datasets.py

Example 9: __index__

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def __index__(self, index):
        data, gt_density, gt_count = self.blob_list[index]
        fname = self.dataloader.query_fname(index)
        W, H = data.size
        fixed_size = self.fixed_size
        transform_img = []

        if fixed_size != -1 and not (H % fixed_size == 0 and W % fixed_size == 0):
            # pad symmetrically up to the next multiple of fixed_size (integer division)
            pad_h = ((H // fixed_size + 1) * fixed_size - H) % fixed_size
            pad_w = ((W // fixed_size + 1) * fixed_size - W) % fixed_size
            image_pads = (pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2)

            transform_img.append(transforms.Pad(image_pads, fill=0))
            H = H + pad_h
            W = W + pad_w
            mask = torch.zeros((H, W),dtype=torch.uint8).byte()
            mask[pad_h // 2:H - (pad_h - pad_h // 2), pad_w // 2:W - (pad_w - pad_w // 2)] = 1
        elif H % fixed_size == 0 and W % fixed_size == 0:
            mask = torch.ones((H, W),dtype=torch.uint8).byte()
        else:
            mask = None 

        normalizor = transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
        if fixed_size != -1:
            crop_indexs = [(x * fixed_size, y * fixed_size) for x, y in itertools.product(range(W // fixed_size), range(H // fixed_size))]
            transform_img.append(transforms.Lambda(lambda img: multi_crop(img, crop_indexs, fixed_size, fixed_size)))
            transform_img.append(transforms.Lambda(lambda crops: [transforms.ToTensor()(crop) for crop in crops]))
            transform_img.append(transforms.Lambda(lambda crops: torch.stack([normalizor(crop) for crop in crops])))
        else:
            transform_img.append(transforms.ToTensor())
            transform_img.append(normalizor)

        if self.dataloader.test:
            return index, fname, transforms.Compose(transform_img)(data.copy()), mask, gt_count
        else:
            return index, fname, transforms.Compose(transform_img)(data.copy()), mask, gt_density, gt_count 
Author: Legion56, Project: Counting-ICCV-DSSINet, Lines of code: 38, Source: sampler.py
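The pad amounts in __index__ are simply "how much is missing to reach the next multiple of fixed_size", split as evenly as possible between the two sides (integer division, so the snippet behaves the same under Python 3). The arithmetic in isolation, with hypothetical sizes:

def pads_to_multiple(h, w, fixed_size=224):
    # returns (left, top, right, bottom) as expected by transforms.Pad
    pad_h = (fixed_size - h % fixed_size) % fixed_size
    pad_w = (fixed_size - w % fixed_size) % fixed_size
    return (pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2)

print(pads_to_multiple(500, 700))  # (98, 86, 98, 86): 500 -> 672, 700 -> 896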

Example 10: pad_random_crop

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats, fill=0):
    padding = int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.Pad(padding, fill=fill),
        transforms.RandomCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]) 
Author: eladhoffer, Project: bigBatch, Lines of code: 11, Source: preprocess.py

Example 11: cifar_transform

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def cifar_transform(is_training=True):
  if is_training:
    transform_list = [transforms.RandomHorizontalFlip(),
                      transforms.Pad(padding=4, padding_mode='reflect'),
                      transforms.RandomCrop(32, padding=0),
                      transforms.ToTensor(),
                      transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]
  else:
    transform_list = [transforms.ToTensor(),
                      transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]

  transform_list = transforms.Compose(transform_list)
  return transform_list 
Author: zzzxxxttt, Project: pytorch_DoReFaNet, Lines of code: 15, Source: preprocessing.py

Example 12: build_transforms

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def build_transforms():
    tfms = TF.Compose([
        TF.Resize(32),
        TF.ToTensor(),
        TF.Normalize([0.5] * 3, [0.5] * 3, True),
    ])
    train_tfms = TF.Compose([
        TF.Pad(4),
        TF.RandomCrop(32),
        TF.ColorJitter(0.5, 0.5, 0.4, 0.05),
        TF.RandomHorizontalFlip(),
        TF.ToTensor(),
        TF.Normalize([0.5] * 3, [0.5] * 3, True),
    ])
    return tfms, train_tfms 
Author: Vermeille, Project: Torchelie, Lines of code: 17, Source: mnist.py

Example 13: mnist_dataloader

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def mnist_dataloader():
    train_dataset = dsets.MNIST(
        root="./mnist",
        train=True,
        transform=transforms.Compose(
            [
                transforms.Pad((2, 2)),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.5,), std=(0.5,)),
            ]
        ),
        download=True,
    )
    train_loader = data.DataLoader(train_dataset, batch_size=128, shuffle=True)
    return train_loader 
Author: torchgan, Project: torchgan, Lines of code: 17, Source: test_trainer.py

Example 14: get_dataset

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def get_dataset(self, dataset_idx, task_num, num_samples_per_class=False, normalize=True):
        dataset_name = list(mean_datasets.keys())[dataset_idx]
        nspc = num_samples_per_class
        if normalize:
            transformation = transforms.Compose([transforms.ToTensor(),
                                                 transforms.Normalize(mean_datasets[dataset_name],std_datasets[dataset_name])])
            mnist_transformation = transforms.Compose([
                transforms.Pad(padding=2, fill=0),
                transforms.ToTensor(),
                transforms.Normalize(mean_datasets[dataset_name], std_datasets[dataset_name])])
        else:
            transformation = transforms.Compose([transforms.ToTensor()])
            mnist_transformation = transforms.Compose([
                transforms.Pad(padding=2, fill=0),
                transforms.ToTensor(),
                ])

        # target_transormation = transforms.Compose([transforms.ToTensor()])
        target_transormation = None

        if dataset_idx == 0:
            trainset = CIFAR10_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform = target_transormation, transform=transformation)
            testset = CIFAR10_(root=self.root,  task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform = target_transormation, transform=transformation)

        if dataset_idx == 1:
            trainset = notMNIST_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
            testset = notMNIST_(root=self.root,  task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform = target_transormation, transform=mnist_transformation)

        if dataset_idx == 2:
            trainset = MNIST_RGB(root=self.root, train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
            testset = MNIST_RGB(root=self.root,  train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=mnist_transformation)

        if dataset_idx == 3:
            trainset = SVHN_(root=self.root,  train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=transformation)
            testset = SVHN_(root=self.root,  train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=transformation)

        if dataset_idx == 4:
            trainset = FashionMNIST_(root=self.root, num_samples_per_class=nspc, task_num=task_num, train=True, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
            testset = FashionMNIST_(root=self.root,  num_samples_per_class=nspc, task_num=task_num, train=False, download=self.download, target_transform = target_transormation, transform=mnist_transformation)

        return trainset, testset 
Author: facebookresearch, Project: Adversarial-Continual-Learning, Lines of code: 43, Source: mulitidatasets.py

Example 15: get_test_loader

# Required imports: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Pad [as alias]
def get_test_loader(batch_size=16, index=0, dev_mode=False, pad_mode='edge'):
    test_meta = get_test_meta()
    if dev_mode:
        test_meta = test_meta.iloc[:10]
    test_set = ImageDataset(False, test_meta,
                            image_augment=None if pad_mode == 'resize' else transforms.Pad((13,13,14,14), padding_mode=pad_mode),
                            image_transform=get_tta_transforms(index, pad_mode))
    test_loader = data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=test_set.collate_fn, drop_last=False)
    test_loader.num = len(test_set)
    test_loader.meta = test_set.meta

    return test_loader 
Author: microsoft, Project: nni, Lines of code: 14, Source: loader.py
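The 4-tuple (13, 13, 14, 14) pads left/top by 13 and right/bottom by 14 pixels, which takes a 101x101 tile (the input size this padding suggests the loader expects) to 128x128; with padding_mode='edge' the border pixels are replicated rather than filled with a constant. A quick check under that assumption:

from PIL import Image
from torchvision import transforms

img = Image.new('RGB', (101, 101))   # hypothetical 101x101 input
print(transforms.Pad((13, 13, 14, 14), padding_mode='edge')(img).size)  # (128, 128)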


Note: The torchvision.transforms.Pad examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow the license of the corresponding project. Please do not reproduce this article without permission.