This article collects typical usage examples of the torchvision.transforms.Compose method in Python. If you have been wondering what transforms.Compose does, how it is used in practice, or simply want to see concrete examples of it, the curated code samples below may help. You can also read further about the module in which this method lives, torchvision.transforms.
The following shows 15 code examples of the transforms.Compose method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
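As a quick primer before the examples, here is a minimal sketch of how transforms.Compose chains preprocessing steps; the transform choices and sizes are illustrative only and are not taken from any example on this page.

from torchvision import transforms

# Compose applies each transform in order: resize the PIL image first,
# then convert it to a tensor.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.ToTensor(),
])
# tensor = preprocess(pil_image)  # hypothetical usage on a PIL image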
Example 1: load_data
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def load_data(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.RandomResizedCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return data_loader
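The function above builds an augmented pipeline for the source domain ('src') and a deterministic one for the target domain ('tar'), and returns a single DataLoader. A hypothetical call, with the dataset root and folder names purely illustrative:

    source_loader = load_data('/data/office31/', 'amazon', batch_size=32, phase='src')
    target_loader = load_data('/data/office31/', 'webcam', batch_size=32, phase='tar')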
Example 2: transform_for_train
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def transform_for_train(fixed_scale=512, rotate_prob=15):
    """
    Options:
    1. RandomCrop
    2. CenterCrop
    3. RandomHorizontalFlip
    4. Normalize
    5. ToTensor
    6. FixedResize
    7. RandomRotate
    """
    transform_list = []
    # transform_list.append(FixedResize(size=(fixed_scale, fixed_scale)))
    transform_list.append(RandomSized(fixed_scale))
    transform_list.append(RandomRotate(rotate_prob))
    transform_list.append(RandomHorizontalFlip())
    # transform_list.append(Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
    transform_list.append(Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)))
    transform_list.append(ToTensor())
    return transforms.Compose(transform_list)
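Example 2 composes project-specific transform classes (RandomSized, RandomRotate, and so on) rather than the torchvision built-ins, which works because Compose simply calls each element in turn on its input. A minimal sketch of mixing a hypothetical custom callable with a built-in transform (AddNoise is illustrative and not part of torchvision):

import torch
from torchvision import transforms

class AddNoise:
    """Hypothetical custom transform: add Gaussian noise to a tensor."""
    def __init__(self, std=0.05):
        self.std = std

    def __call__(self, x):
        return x + self.std * torch.randn_like(x)

pipeline = transforms.Compose([
    transforms.ToTensor(),   # built-in transform
    AddNoise(std=0.05),      # any callable works as a Compose element
])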
Example 3: load_imageclef_train
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def load_imageclef_train(root_path, domain, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=False,
                                               num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True, drop_last=False,
                                             num_workers=4)
    return train_loader, val_loader
Example 4: get_data_loader
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def get_data_loader(opt):
    if opt.dset_name == 'moving_mnist':
        transform = transforms.Compose([vtransforms.ToTensor()])
        dset = MovingMNIST(opt.dset_path, opt.is_train, opt.n_frames_input,
                           opt.n_frames_output, opt.num_objects, transform)
    elif opt.dset_name == 'bouncing_balls':
        transform = transforms.Compose([vtransforms.Scale(opt.image_size),
                                        vtransforms.ToTensor()])
        dset = BouncingBalls(opt.dset_path, opt.is_train, opt.n_frames_input,
                             opt.n_frames_output, opt.image_size[0], transform)
    else:
        raise NotImplementedError
    dloader = data.DataLoader(dset, batch_size=opt.batch_size, shuffle=opt.is_train,
                              num_workers=opt.n_workers, pin_memory=True)
    return dloader
Example 5: __init__
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def __init__(self, config):
    self.config = config
    if config.data_mode == "imgs":
        transform = v_transforms.Compose(
            [v_transforms.ToTensor(),
             v_transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
        dataset = v_datasets.ImageFolder(self.config.data_folder, transform=transform)
        self.dataset_len = len(dataset)
        self.num_iterations = (self.dataset_len + config.batch_size - 1) // config.batch_size
        self.loader = DataLoader(dataset,
                                 batch_size=config.batch_size,
                                 shuffle=True,
                                 num_workers=config.data_loader_workers,
                                 pin_memory=config.pin_memory)
    elif config.data_mode == "numpy":
        raise NotImplementedError("This mode is not implemented YET")
    else:
        raise Exception("Please specify in the json a specified mode in data_mode")
Example 6: __init__
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def __init__(self, args, train=True):
    self.root_dir = args.data
    if train:
        self.data_set_list = train_set_list
    elif args.use_test_for_val:
        self.data_set_list = test_set_list
    else:
        self.data_set_list = val_set_list
    self.data_set_list = ['%06d.png' % (x) for x in self.data_set_list]
    self.args = args
    self.read_features = args.read_features
    self.features_dir = args.features_dir
    self.transform = transforms.Compose([
        transforms.Scale((args.image_size, args.image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    self.transform_segmentation = transforms.Compose([
        transforms.Scale((args.segmentation_size, args.segmentation_size)),
        transforms.ToTensor(),
    ])
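A note on this example: if transforms here refers to torchvision.transforms, the Scale transform has since been deprecated in favor of Resize, so on a recent torchvision install the equivalent image pipeline would presumably look like the following sketch (keeping the example's argument names):

self.transform = transforms.Compose([
    transforms.Resize((args.image_size, args.image_size)),  # Resize replaces the deprecated Scale
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])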
Example 7: load_data
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def load_data(data_folder, batch_size, phase='train', train_val_split=True, train_ratio=.8):
    transform_dict = {
        'train': transforms.Compose(
            [transforms.Resize(256),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'test': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = datasets.ImageFolder(root=data_folder, transform=transform_dict[phase])
    if phase == 'train':
        if train_val_split:
            train_size = int(train_ratio * len(data))
            test_size = len(data) - train_size
            data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
            train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=True,
                                                       num_workers=4)
            val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=False, drop_last=False,
                                                     num_workers=4)
            return [train_loader, val_loader]
        else:
            train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True,
                                                       num_workers=4)
            return train_loader
    else:
        test_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False, drop_last=False,
                                                  num_workers=4)
        return test_loader
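Note that this version's return type depends on the branch taken: with phase='train' and train_val_split=True it returns a list of two loaders, otherwise a single loader. A hypothetical call, with the data path purely illustrative:

    train_loader, val_loader = load_data('/data/office_home/Art', batch_size=32, phase='train')
    test_loader = load_data('/data/office_home/Art', batch_size=32, phase='test')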
## Below are for ImageCLEF datasets
Example 8: load_imageclef_test
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def load_imageclef_test(root_path, domain, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return data_loader
Example 9: load_training
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def load_training(root_path, dir, batch_size, kwargs):
    transform = transforms.Compose(
        [transforms.Resize([256, 256]),
         transforms.RandomCrop(224),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor()])
    data = datasets.ImageFolder(root=root_path + dir, transform=transform)
    train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True, **kwargs)
    return train_loader
Example 10: load_data
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def load_data(data_folder, batch_size, train, kwargs):
    transform = {
        'train': transforms.Compose(
            [transforms.Resize([256, 256]),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])]),
        'test': transforms.Compose(
            [transforms.Resize([224, 224]),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])])
    }
    data = datasets.ImageFolder(root=data_folder, transform=transform['train' if train else 'test'])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, **kwargs,
                                              drop_last=True if train else False)
    return data_loader
Example 11: load_train
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def load_train(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.RandomResizedCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return train_loader, val_loader
Example 12: _get_ds_val
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def _get_ds_val(self, images_spec, crop=False, truncate=False):
    img_to_tensor_t = [images_loader.IndexImagesDataset.to_tensor_uint8_transform()]
    if crop:
        img_to_tensor_t.insert(0, transforms.CenterCrop(crop))
    img_to_tensor_t = transforms.Compose(img_to_tensor_t)
    fixed_first = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixedimg.jpg')
    if not os.path.isfile(fixed_first):
        print(f'INFO: No file found at {fixed_first}')
        fixed_first = None
    ds = images_loader.IndexImagesDataset(
        images=images_loader.ImagesCached(
            images_spec, self.config_dl.image_cache_pkl,
            min_size=self.config_dl.val_glob_min_size),
        to_tensor_transform=img_to_tensor_t,
        fixed_first=fixed_first)  # fix a first image to have consistency in tensor board
    if truncate:
        ds = pe.TruncatedDataset(ds, num_elemens=truncate)
    return ds
Example 13: __init__
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)
    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])
    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])
    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])
    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    else:
        raise Exception('files must be png or json')
Example 14: main
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def main():
    best_acc = 0
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    dataset_train = CIFAR10(root='../data', train=True, download=True,
                            transform=transforms_train)
    train_loader = DataLoader(dataset_train, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_worker)
    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')
    print('==> Making model..')
    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr,
    #                       momentum=0.9, weight_decay=1e-4)
    train(net, criterion, optimizer, train_loader, device)
Example 15: __init__
# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Compose [as alias]
def __init__(self):
    self.batch_size = 64
    self.test_batch_size = 100
    self.learning_rate = 0.01
    self.sgd_momentum = 0.9
    self.log_interval = 100
    # Fetch MNIST data set.
    self.train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('/tmp/mnist/data', train=True, download=True, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=self.batch_size,
        shuffle=True)
    self.test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('/tmp/mnist/data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=self.test_batch_size,
        shuffle=True)
    self.network = Net()
    # Train the network for several epochs, validating after each epoch.