

Python transforms.ToTensor Method Code Examples

This article collects typical usage examples of the Python method torchvision.transforms.transforms.ToTensor. If you are unsure what transforms.ToTensor does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples from its containing module, torchvision.transforms.transforms.


The following presents 15 code examples of the transforms.ToTensor method, sorted by popularity by default.
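
Before diving into the project snippets, here is a minimal, self-contained sketch of what ToTensor actually does: it converts a PIL.Image (or a uint8 HWC numpy array) into a torch.FloatTensor in CHW layout, scaling pixel values from [0, 255] to [0.0, 1.0]. The random 64x64 image below is purely illustrative.

import numpy as np
from PIL import Image
from torchvision.transforms import transforms

# A dummy 64x64 RGB image; any PIL image or uint8 HWC ndarray works the same way.
img = Image.fromarray(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))

tensor = transforms.ToTensor()(img)
print(tensor.shape)   # torch.Size([3, 64, 64]), channels-first layout
print(tensor.dtype)   # torch.float32, values in [0.0, 1.0]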

Example 1: preprocess

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def preprocess(image: PIL.Image.Image, image_min_side: float, image_max_side: float) -> Tuple[Tensor, float]:
        # resize according to the rules:
        #   1. scale shorter side to IMAGE_MIN_SIDE
        #   2. after scaling, if longer side > IMAGE_MAX_SIDE, scale longer side to IMAGE_MAX_SIDE
        scale_for_shorter_side = image_min_side / min(image.width, image.height)
        longer_side_after_scaling = max(image.width, image.height) * scale_for_shorter_side
        scale_for_longer_side = (image_max_side / longer_side_after_scaling) if longer_side_after_scaling > image_max_side else 1
        scale = scale_for_shorter_side * scale_for_longer_side

        transform = transforms.Compose([
            transforms.Resize((round(image.height * scale), round(image.width * scale))),  # interpolation `BILINEAR` is applied by default
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        image = transform(image)

        return image, scale 
Developer ID: potterhsu, Project: easy-faster-rcnn.pytorch, Lines of code: 19, Source file: base.py
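
A usage sketch for the preprocess function above. The file name demo.jpg is a placeholder, and the 600/1000 side limits are common Faster R-CNN defaults rather than values taken from this project:

from PIL import Image

img = Image.open('demo.jpg').convert('RGB')   # placeholder path
image_tensor, scale = preprocess(img, image_min_side=600.0, image_max_side=1000.0)
print(image_tensor.shape)   # (3, round(H * scale), round(W * scale)), ImageNet-normalized
print(scale)                # the single scale factor applied to both sides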

Example 2: preprocess

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def preprocess(self, image: PIL.Image.Image, image_min_side: float, image_max_side: float) -> Tuple[Tensor, float]:
        # resize according to the rules:
        #   1. scale shorter side to IMAGE_MIN_SIDE
        #   2. after scaling, if longer side > IMAGE_MAX_SIDE, scale longer side to IMAGE_MAX_SIDE
        scale_for_shorter_side = image_min_side / min(image.width, image.height)
        longer_side_after_scaling = max(image.width, image.height) * scale_for_shorter_side
        scale_for_longer_side = (image_max_side / longer_side_after_scaling) if longer_side_after_scaling > image_max_side else 1
        scale = scale_for_shorter_side * scale_for_longer_side

        transform = transforms.Compose([
            transforms.Resize((round(image.height * scale), round(image.width * scale))),  # interpolation `BILINEAR` is applied by default
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        image = transform(image)

        return image, scale 
Developer ID: MagicChuyi, Project: SlowFast-Network-pytorch, Lines of code: 19, Source file: AVA.py

Example 3: pil_to_tensor

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def pil_to_tensor(img, shape=(64, 64, 3), transform=None):
    """
    Convert PIL image to float tensor

    :param img: PIL image
    :type img: Image.Image
    :param shape: image shape in (H, W, C)
    :type shape: tuple or list
    :param transform: image transform
    :return: tensor
    :rtype: torch.Tensor
    """
    if transform is None:
        transform = transforms.Compose((
            transforms.Resize(shape[0]),
            transforms.ToTensor()
        ))
    return transform(img) 
Developer ID: corenel, Project: pytorch-glow, Lines of code: 20, Source file: util.py
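
A short usage sketch. Because Resize receives a single integer (shape[0]), only the shorter side is scaled to 64 and the aspect ratio is preserved, so non-square inputs do not come out exactly 64x64. The synthetic image below is just for illustration:

import numpy as np
from PIL import Image

img = Image.fromarray(np.random.randint(0, 256, (96, 128, 3), dtype=np.uint8))  # 128x96 RGB

t = pil_to_tensor(img)
print(t.shape)   # roughly torch.Size([3, 64, 85]): shorter side scaled to 64, aspect ratio kept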

Example 4: get_datasets

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def get_datasets(initial_pool):
    transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.RandomHorizontalFlip(),
         transforms.RandomRotation(30),
         transforms.ToTensor(),
         transforms.Normalize(3 * [0.5], 3 * [0.5]), ])
    test_transform = transforms.Compose(
        [
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(3 * [0.5], 3 * [0.5]),
        ]
    )
    # Note: We use the test set here as an example. You should make your own validation set.
    train_ds = datasets.CIFAR10('.', train=True,
                                transform=transform, target_transform=None, download=True)
    test_set = datasets.CIFAR10('.', train=False,
                                transform=test_transform, target_transform=None, download=True)

    active_set = ActiveLearningDataset(train_ds, pool_specifics={'transform': test_transform})

    # We start labeling randomly.
    active_set.label_randomly(initial_pool)
    return active_set, test_set 
Developer ID: ElementAI, Project: baal, Lines of code: 27, Source file: vgg_mcdropout_cifar10.py

Example 5: prepare_data

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def prepare_data(images, color_mode='BGR', new_shape=416, color=(127.5, 127.5, 127.5), mode='square'):
    images_ok = np.zeros((images.shape[0], new_shape, new_shape, 3), dtype=images[0].dtype)
    images_tensor = torch.zeros((images.shape[0], 3, new_shape, new_shape), dtype=torch.float32)
    for i in range(len(images)):
        if color_mode == 'BGR':
            images[i] = cv2.cvtColor(images[i], cv2.COLOR_BGR2RGB)
        elif color_mode == 'RGB':
            pass
        else:
            raise NotImplementedError
        images_ok[i], _, _, _ = letterbox(images[i], new_shape, color, mode)

        images_tensor[i] = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
        ])(images_ok[i])

    return images_tensor 
Developer ID: stefanopini, Project: simple-HRNet, Lines of code: 20, Source file: YOLOv3.py

Example 6: __getitem__

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def __getitem__(self, index):
        im, xpatch, ypatch, rotation, flip, enhance = np.unravel_index(index, self.shape)

        with Image.open(self.names[im]) as img:
            extractor = PatchExtractor(img=img, patch_size=PATCH_SIZE, stride=self.stride)
            patch = extractor.extract_patch((xpatch, ypatch))

            if rotation != 0:
                patch = patch.rotate(rotation * 90)

            if flip != 0:
                patch = patch.transpose(Image.FLIP_LEFT_RIGHT)

            if enhance != 0:
                factors = np.random.uniform(.5, 1.5, 3)
                patch = ImageEnhance.Color(patch).enhance(factors[0])
                patch = ImageEnhance.Contrast(patch).enhance(factors[1])
                patch = ImageEnhance.Brightness(patch).enhance(factors[2])

            label = self.labels[self.names[im]]
            return transforms.ToTensor()(patch), label 
Developer ID: ImagingLab, Project: ICIAR2018, Lines of code: 23, Source file: datasets.py
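
The indexing scheme above packs every (image, x-patch, y-patch, rotation, flip, enhance) combination into a single flat dataset index via np.unravel_index. A small standalone sketch of that mapping, using a made-up shape rather than the project's actual dataset dimensions:

import numpy as np

# Hypothetical shape: 10 images, an 8x8 patch grid, 4 rotations, 2 flip states, 2 enhance states.
shape = (10, 8, 8, 4, 2, 2)
total = int(np.prod(shape))   # 10240 distinct augmented patches

# Flat index 4242 maps back to one specific (image, patch, augmentation) combination.
im, xpatch, ypatch, rotation, flip, enhance = np.unravel_index(4242, shape)
print(im, xpatch, ypatch, rotation, flip, enhance)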

Example 7: load_images

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def load_images(img_path):
    # imread from img_path
    img = cv2.imread(img_path)
    img = cv2.resize(img, (224, 224))

    # torchvision's ImageNet-pretrained models expect inputs normalized with
    # mean = [0.485, 0.456, 0.406]
    # std = [0.229, 0.224, 0.225]
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
        ])
    
    img = transform(img)
    img.unsqueeze_(0)
    #img_s = img.numpy()
    #img_s = np.transpose(img_s, (1, 2, 0))
    #cv2.imshow("test img", img_s)
    #cv2.waitKey()
    return img 
Developer ID: huybery, Project: VisualizingCNN, Lines of code: 22, Source file: main.py
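
One caveat when reusing this snippet: cv2.imread returns images in BGR channel order, while the mean/std values above are the ImageNet statistics in RGB order. If the downstream model was trained on RGB input, a conversion step is usually wanted. A minimal variant, with an illustrative file name:

import cv2
from torchvision.transforms import transforms

img = cv2.imread('cat.jpg')                   # BGR, uint8, HWC
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # convert to RGB before normalizing
img = cv2.resize(img, (224, 224))

transform = transforms.Compose([
    transforms.ToTensor(),                    # HWC uint8 -> CHW float32 in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
batch = transform(img).unsqueeze(0)           # add batch dimension: (1, 3, 224, 224)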

Example 8: get_transforms

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def get_transforms(eval=False, aug=None):
    trans = []

    if aug["randcrop"] and not eval:
        trans.append(transforms.RandomCrop(aug["randcrop"]))

    if aug["randcrop"] and eval:
        trans.append(transforms.CenterCrop(aug["randcrop"]))

    if aug["flip"] and not eval:
        trans.append(transforms.RandomHorizontalFlip())

    if aug["grayscale"]:
        trans.append(transforms.Grayscale())
        trans.append(transforms.ToTensor())
        trans.append(transforms.Normalize(mean=aug["bw_mean"], std=aug["bw_std"]))
    elif aug["mean"]:
        trans.append(transforms.ToTensor())
        trans.append(transforms.Normalize(mean=aug["mean"], std=aug["std"]))
    else:
        trans.append(transforms.ToTensor())

    trans = transforms.Compose(trans)
    return trans 
Developer ID: loeweX, Project: Greedy_InfoMax, Lines of code: 26, Source file: get_dataloader.py
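
A usage sketch for get_transforms. The aug dictionary below is a guess at the expected keys based on the lookups in the function body, not the configuration shipped with the original project:

# Hypothetical augmentation config covering every key the function reads.
aug = {
    "randcrop": 64,                    # crop size; a falsy value disables cropping
    "flip": True,                      # random horizontal flip at training time
    "grayscale": False,
    "mean": [0.485, 0.456, 0.406],
    "std": [0.229, 0.224, 0.225],
    "bw_mean": [0.5],                  # only used when grayscale is enabled
    "bw_std": [0.5],
}

train_transform = get_transforms(eval=False, aug=aug)   # RandomCrop + flip + ToTensor + Normalize
eval_transform = get_transforms(eval=True, aug=aug)     # CenterCrop + ToTensor + Normalize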

Example 9: transforms

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def transforms(self) -> Compose:
        return Compose([transforms.ToTensor()]) 
Developer ID: georgia-tech-db, Project: eva, Lines of code: 4, Source file: pytorch_abstract_udf.py

Example 10: __init__

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def __init__(self, base_path, txt_file, im_size=96, frames=5):
        super(Video_Provider, self).__init__()
        self.base_path = base_path
        self.txt_file = open(txt_file, 'r').readlines()
        self.im_size = im_size
        self.trans = transforms.ToTensor()
        self.frames = frames 
Developer ID: z-bingo, Project: FastDVDNet, Lines of code: 9, Source file: data_provider.py

Example 11: classification_task

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def classification_task(tmpdir):
    model = nn.Sequential(nn.Conv2d(3, 32, 3),
                          nn.ReLU(),
                          nn.Conv2d(32, 64, 3),
                          nn.MaxPool2d(2),
                          nn.AdaptiveAvgPool2d((7, 7)),
                          Flatten(),
                          nn.Linear(7 * 7 * 64, 128),
                          Dropout(),
                          nn.Linear(128, 10)
                          )
    model = ModelWrapper(model, nn.CrossEntropyLoss())
    test = datasets.CIFAR10(tmpdir, train=False, download=True, transform=transforms.ToTensor())
    return model, test 
Developer ID: ElementAI, Project: baal, Lines of code: 16, Source file: heuristics_gpu_test.py

Example 12: segmentation_task

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def segmentation_task(tmpdir):
    model = nn.Sequential(nn.Conv2d(3, 32, 3),
                          nn.ReLU(),
                          nn.Conv2d(32, 64, 3),
                          nn.MaxPool2d(2),
                          nn.Conv2d(64, 64, 3),
                          Dropout2d(),
                          nn.ConvTranspose2d(64, 10, 3, 1)
                          )
    model = ModelWrapper(model, nn.CrossEntropyLoss())
    test = datasets.CIFAR10(tmpdir, train=False, download=True, transform=transforms.ToTensor())
    return model, test 
Developer ID: ElementAI, Project: baal, Lines of code: 14, Source file: heuristics_gpu_test.py

Example 13: cifar100_loader

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def cifar100_loader(size=None,root="./cifar100",train=True,batch_size=32,mean=0.5,std=0.5,transform="default",download=True,target_transform=None,**loader_args):
    """

    :param size:
    :param root:
    :param train:
    :param batch_size:
    :param mean:
    :param std:
    :param transform:
    :param download:
    :param target_transform:
    :param loader_args:
    :return:
    """
    if size is not None:
        if not isinstance(size,tuple):
            size = (size,size)

    if transform == "default":
        t = []
        if size is not None:
            t.append(transformations.Resize(size))

        t.append(transformations.ToTensor())
        if mean is not None and std is not None:
            if not isinstance(mean, tuple):
                mean = (mean,)
            if not isinstance(std, tuple):
                std = (std,)
            t.append(transformations.Normalize(mean=mean, std=std))

        trans = transformations.Compose(t)
    else:
        trans = transform

    data = CIFAR100(root,train=train,transform=trans,download=download,target_transform=target_transform)  # CIFAR-100 dataset, matching the loader's name

    return DataLoader(data,batch_size=batch_size,shuffle=train,**loader_args) 
Developer ID: johnolafenwa, Project: TorchFusion, Lines of code: 41, Source file: datasets.py
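
A usage sketch for the loader above, assuming the CIFAR-100 dataset class noted in the code comment; the size and batch_size values are arbitrary example choices:

# Example values; adjust root, size, and batch_size to your setup.
train_loader = cifar100_loader(size=32, root="./cifar100", train=True, batch_size=64)
images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([64, 3, 32, 32]), values normalized to roughly [-1, 1]
print(labels.shape)   # torch.Size([64])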

Example 14: fashionmnist_loader

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def fashionmnist_loader(size=None,root="./fashionmnist",train=True,batch_size=32,mean=0.5,std=0.5,transform="default",download=True,target_transform=None,**loader_args):
    """

    :param size:
    :param root:
    :param train:
    :param batch_size:
    :param mean:
    :param std:
    :param transform:
    :param download:
    :param target_transform:
    :param loader_args:
    :return:
    """

    if size is not None:
        if not isinstance(size,tuple):
            size = (size,size)

    if transform == "default":
        t = []
        if size is not None:
            t.append(transformations.Resize(size))

        t.append(transformations.ToTensor())
        if mean is not None and std is not None:
            if not isinstance(mean, tuple):
                mean = (mean,)
            if not isinstance(std, tuple):
                std = (std,)
            t.append(transformations.Normalize(mean=mean, std=std))

        trans = transformations.Compose(t)
    else:
        trans = transform

    data = FashionMNIST(root,train=train,transform=trans,download=download,target_transform=target_transform)

    return DataLoader(data,batch_size=batch_size,shuffle=train,**loader_args) 
Developer ID: johnolafenwa, Project: TorchFusion, Lines of code: 42, Source file: datasets.py

Example 15: pathimages_loader

# Required import: from torchvision.transforms import transforms [as alias]
# Or: from torchvision.transforms.transforms import ToTensor [as alias]
def pathimages_loader(image_paths,size=None,recursive=True,allowed_exts=['jpg', 'jpeg', 'png', 'ppm', 'bmp', 'pgm', 'tif'],shuffle=False,batch_size=32,mean=0.5,std=0.5,transform="default",**loader_args):
    """

    :param image_paths:
    :param size:
    :param recursive:
    :param allowed_exts:
    :param shuffle:
    :param batch_size:
    :param mean:
    :param std:
    :param transform:
    :param loader_args:
    :return:
    """
    if size is not None:
        if not isinstance(size,tuple):
            size = (size,size)

    if transform == "default":
        t = []
        if size is not None:
            t.append(transformations.Resize(size))

        t.append(transformations.ToTensor())

        if mean is not None and std is not None:
            if not isinstance(mean, tuple):
                mean = (mean,)
            if not isinstance(std, tuple):
                std = (std,)
            t.append(transformations.Normalize(mean=mean, std=std))

        trans = transformations.Compose(t)
    else:
        trans = transform

    data = ImagesFromPaths(image_paths,trans,recursive=recursive,allowed_exts=allowed_exts)

    return DataLoader(data,batch_size=batch_size,shuffle=shuffle,**loader_args) 
Developer ID: johnolafenwa, Project: TorchFusion, Lines of code: 42, Source file: datasets.py


Note: The torchvision.transforms.transforms.ToTensor examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not repost without permission.