本文整理汇总了Python中torchvision.datasets.CIFAR100属性的典型用法代码示例。如果您正苦于以下问题:Python datasets.CIFAR100属性的具体用法?Python datasets.CIFAR100怎么用?Python datasets.CIFAR100使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类torchvision.datasets
的用法示例。
在下文中一共展示了datasets.CIFAR100属性的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_dataset
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def get_dataset(self):
    """
    Uses torchvision.datasets.SVHN to load the dataset (note: despite the
    surrounding CIFAR100 context, this method actually loads SVHN).
    Downloads dataset if doesn't exist already.

    The 'train' and 'extra' splits are concatenated to form the training set;
    the 'test' split is used for validation.

    Returns:
        tuple: (torch.utils.data.ConcatDataset trainset,
                torchvision.datasets.SVHN valset)
    """
    # assumes self.train_transforms / self.val_transforms are set by the
    # enclosing class — TODO confirm against __init__
    trainset = datasets.SVHN('datasets/SVHN/train/', split='train', transform=self.train_transforms,
                             target_transform=None, download=True)
    valset = datasets.SVHN('datasets/SVHN/test/', split='test', transform=self.val_transforms,
                           target_transform=None, download=True)
    extraset = datasets.SVHN('datasets/SVHN/extra', split='extra', transform=self.train_transforms,
                             target_transform=None, download=True)
    # Merge the (much larger) 'extra' split into the training set.
    trainset = torch.utils.data.ConcatDataset([trainset, extraset])
    return trainset, valset
示例2: dataLoader
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def dataLoader(is_train=True, cuda=True, batch_size=64, shuffle=True):
    """Build a CIFAR-100 DataLoader for training or evaluation.

    Args:
        is_train (bool): if True, load the training split with flip/crop
            augmentation; otherwise load the test split.
        cuda (bool): unused here; kept for interface compatibility.
        batch_size (int): samples per batch.
        shuffle (bool): whether to reshuffle the data every epoch.

    Returns:
        tuple: (torch.utils.data.DataLoader, int) -- the loader and the
        number of samples in the selected split.
    """
    # CIFAR-100 per-channel statistics, rescaled from [0, 255] to [0, 1].
    normalize = transforms.Normalize(
        mean=[n / 255. for n in [129.3, 124.1, 112.4]],
        std=[n / 255. for n in [68.2, 65.4, 70.4]])
    if is_train:
        trans = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            normalize])
        dataset = td.CIFAR100('data', train=True, transform=trans)
    else:
        trans = transforms.Compose([transforms.ToTensor(), normalize])
        dataset = td.CIFAR100('data', train=False, transform=trans)
    # Use len(dataset): the .train_labels / .test_labels attributes the
    # original relied on were removed from torchvision (replaced by .targets),
    # so they raise AttributeError on current versions.
    size = len(dataset)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle)
    return loader, size
示例3: test
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def test():
    """Evaluate the module-level `model` on the test split of `args.dataset`.

    Relies on module-level globals: `args` (parsed command-line flags) and
    `model` (the network under evaluation). Supports 'cifar10' and
    'cifar100'; raises ValueError for anything else.

    Returns:
        float: top-1 accuracy on the test set, in [0, 1].
    """
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    if args.dataset == 'cifar10':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=True, **kwargs)
    elif args.dataset == 'cifar100':
        # NOTE(review): these normalization stats are the CIFAR-10 ones reused
        # for CIFAR-100 — possibly intentional, but worth confirming.
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=True, **kwargs)
    else:
        raise ValueError("No valid dataset is given.")
    model.eval()
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # volatile=True disables autograd history (legacy pre-0.4 PyTorch API;
        # the modern equivalent is a torch.no_grad() context).
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
        correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
    return correct / float(len(test_loader.dataset))
示例4: test
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def test(model):
    """Evaluate `model` on the test split of `args.dataset`.

    Uses the module-level `args` global for configuration. Supports
    'cifar10' and 'cifar100'; raises ValueError for anything else.

    Returns:
        float: top-1 accuracy on the test set, in [0, 1].
    """
    loader_kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    if args.dataset == 'cifar10':
        eval_set = datasets.CIFAR10('./data.cifar10', train=False, transform=eval_transform)
    elif args.dataset == 'cifar100':
        eval_set = datasets.CIFAR100('./data.cifar100', train=False, transform=eval_transform)
    else:
        raise ValueError("No valid dataset is given.")
    test_loader = torch.utils.data.DataLoader(
        eval_set, batch_size=args.test_batch_size, shuffle=True, **loader_kwargs)
    model.eval()
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # Legacy pre-0.4 autograd wrapper; volatile=True disables history.
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        # Index of the max log-probability is the predicted class.
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    total = len(test_loader.dataset)
    print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
        correct, total, 100. * correct / total))
    return correct / float(total)
示例5: test
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def test(model):
    """Evaluate `model` on the test split of `args.dataset`.

    Same as the sibling evaluation helpers but with shuffle=False, so the
    test set is traversed in a deterministic order. Relies on the
    module-level `args` global. Supports 'cifar10' and 'cifar100';
    raises ValueError otherwise.

    Returns:
        float: top-1 accuracy on the test set, in [0, 1].
    """
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    if args.dataset == 'cifar10':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=False, **kwargs)
    elif args.dataset == 'cifar100':
        # NOTE(review): CIFAR-10 normalization stats reused for CIFAR-100 —
        # confirm this is intentional.
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=False, **kwargs)
    else:
        raise ValueError("No valid dataset is given.")
    model.eval()
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # volatile=True disables autograd history (legacy pre-0.4 PyTorch API).
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
        correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
    return correct / float(len(test_loader.dataset))
示例6: __init__
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def __init__(self):
    """Populate the CIFAR-100 metadata fields on top of the base meta info."""
    super(CIFAR100MetaInfo, self).__init__()
    # Human-readable dataset label.
    self.label = "CIFAR100"
    # Subdirectory name under the data root.
    self.root_dir_name = "cifar100"
    # Dataset class used to load samples (fine-grained 100-class labels).
    self.dataset_class = CIFAR100Fine
    # CIFAR-100 has 100 fine classes.
    self.num_classes = 100
示例7: __init__
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def __init__(self, opt):
    """Initialize this dataset class.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a
            subclass of BaseOptions

    Saves the options, builds the default transform, and instantiates the
    requested torchvision dataset (CIFAR10 or CIFAR100), downloading it if
    necessary.
    """
    # Save the option and dataset root (done in BaseDataset).
    BaseDataset.__init__(self, opt)
    # Default transform function; a custom one could be substituted here.
    self.transform = get_transform(opt)
    # Resolve the requested torchvision dataset class by name.
    if opt.dataset_name == 'CIFAR10':
        from torchvision.datasets import CIFAR10 as dataset_cls
    elif opt.dataset_name == 'CIFAR100':
        from torchvision.datasets import CIFAR100 as dataset_cls
    else:
        raise ValueError('torchvision_dataset import fault.')
    self.dataload = dataset_cls(root=opt.download_root,
                                transform=self.transform,
                                download=True)
示例8: build_cifar100
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def build_cifar100(model_state_dict=None, optimizer_state_dict=None, **kwargs):
    """Build the CIFAR-100 search-phase pipeline: train/valid queues,
    weight-sharing model, loss criteria, SGD optimizer and cosine LR scheduler.

    Relies on module-level globals: `args`, `dset`, `utils`, `np`, `torch`,
    `nn`, `logging`, `NASWSNetworkCIFAR`.

    Args:
        model_state_dict: optional state dict to restore the model from.
        optimizer_state_dict: optional state dict to restore the optimizer from.
        **kwargs: must contain 'epoch' (last-epoch index for the scheduler)
            and 'ratio' (fraction of the training split used for training;
            the rest is used for validation).

    Returns:
        tuple: (train_queue, valid_queue, model, train_criterion,
        eval_criterion, optimizer, scheduler)
    """
    epoch = kwargs.pop('epoch')
    ratio = kwargs.pop('ratio')
    # NOTE(review): CIFAR-10 transform statistics are reused for CIFAR-100 —
    # confirm this is intentional.
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    # Both datasets wrap the same CIFAR-100 *training* split; they differ only
    # in the transform applied, and are partitioned below by index samplers.
    train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=valid_transform)
    num_train = len(train_data)
    assert num_train == len(valid_data)
    indices = list(range(num_train))
    # First `ratio` of the shuffled indices go to training, the rest to validation.
    split = int(np.floor(ratio * num_train))
    np.random.shuffle(indices)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.child_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.child_eval_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=16)
    # 100 = number of CIFAR-100 classes.
    model = NASWSNetworkCIFAR(args, 100, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob,
                              args.child_use_aux_head, args.steps)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.child_lr_max,
        momentum=0.9,
        weight_decay=args.child_l2_reg,
    )
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    # `epoch` is passed as last_epoch so training can resume mid-schedule.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.child_epochs, args.child_lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
示例9: build_cifar100
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def build_cifar100(model_state_dict, optimizer_state_dict, **kwargs):
    """Build the CIFAR-100 training pipeline: train/valid queues, network,
    loss criteria, SGD optimizer and cosine LR scheduler.

    Relies on module-level globals: `args`, `dset`, `utils`, `torch`, `nn`,
    `logging`, `NASNetworkCIFAR`.

    Args:
        model_state_dict: optional state dict to restore the model from.
        optimizer_state_dict: optional state dict to restore the optimizer from.
        **kwargs: must contain 'epoch' (last-epoch index for the scheduler).

    Returns:
        tuple: (train_queue, valid_queue, model, train_criterion,
        eval_criterion, optimizer, scheduler)
    """
    epoch = kwargs.pop('epoch')
    # NOTE(review): CIFAR-10 transform statistics are reused for CIFAR-100 —
    # confirm this is intentional.
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    # Bug fix: the validation set previously loaded dset.CIFAR10, whose
    # 10-class labels do not match the 100-class model built below (the
    # sibling build_cifar100 correctly uses CIFAR100 here).
    valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)
    # 100 = number of CIFAR-100 classes.
    model = NASNetworkCIFAR(args, 100, args.layers, args.nodes, args.channels, args.keep_prob, args.drop_path_keep_prob,
                            args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    logging.info("multi adds = %fM", model.multi_adds / 1000000)
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if torch.cuda.device_count() > 1:
        logging.info("Use %d %s", torch.cuda.device_count(), "GPUs !")
        model = nn.DataParallel(model)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    # `epoch` is passed as last_epoch so training can resume mid-schedule.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
示例10: build_cifar100
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def build_cifar100(model_state_dict, optimizer_state_dict, **kwargs):
    """Assemble the CIFAR-100 training setup.

    Returns the train/valid data queues, the network, train/eval loss
    criteria, an SGD optimizer and a cosine-annealing LR scheduler.
    Relies on module-level globals: `args`, `dset`, `utils`, `torch`,
    `nn`, `logging`, `NASNetworkCIFAR`.
    """
    last_epoch = kwargs.pop('epoch')
    train_tf, valid_tf = utils._data_transforms_cifar10(args.cutout_size)
    train_set = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_tf)
    valid_set = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_tf)
    train_queue = torch.utils.data.DataLoader(
        train_set, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_set, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)
    # 100 = number of CIFAR-100 classes.
    model = NASNetworkCIFAR(args, 100, args.layers, args.nodes, args.channels,
                            args.keep_prob, args.drop_path_keep_prob,
                            args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    logging.info("multi adds = %fM", model.multi_adds / 1000000)
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if torch.cuda.device_count() > 1:
        logging.info("Use %d %s", torch.cuda.device_count(), "GPUs !")
        model = nn.DataParallel(model)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    # last_epoch lets training resume mid-schedule.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), args.lr_min, last_epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
示例11: build_cifar100
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def build_cifar100(model_state_dict=None, optimizer_state_dict=None, **kwargs):
    """Build the CIFAR-100 search-phase pipeline: train/valid queues,
    weight-sharing model, loss criteria, SGD optimizer and cosine LR scheduler.

    Relies on module-level globals: `args`, `dset`, `utils`, `np`, `torch`,
    `nn`, `logging`, `NASWSNetworkCIFAR`.

    Args:
        model_state_dict: optional state dict to restore the model from.
        optimizer_state_dict: optional state dict to restore the optimizer from.
        **kwargs: must contain 'epoch' (last-epoch index for the scheduler)
            and 'ratio' (fraction of the training split used for training;
            the rest is used for validation).

    Returns:
        tuple: (train_queue, valid_queue, model, train_criterion,
        eval_criterion, optimizer, scheduler)
    """
    epoch = kwargs.pop('epoch')
    ratio = kwargs.pop('ratio')
    # NOTE(review): CIFAR-10 transform statistics are reused for CIFAR-100 —
    # confirm this is intentional.
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    # Both datasets wrap the same CIFAR-100 *training* split; they differ only
    # in the transform applied, and are partitioned below by index samplers.
    train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=valid_transform)
    num_train = len(train_data)
    assert num_train == len(valid_data)
    indices = list(range(num_train))
    # First `ratio` of the shuffled indices go to training, the rest to validation.
    split = int(np.floor(ratio * num_train))
    np.random.shuffle(indices)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.child_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.child_eval_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=16)
    # 100 = number of CIFAR-100 classes.
    model = NASWSNetworkCIFAR(100, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob,
                              args.child_use_aux_head, args.steps)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.child_lr_max,
        momentum=0.9,
        weight_decay=args.child_l2_reg,
    )
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    # `epoch` is passed as last_epoch so training can resume mid-schedule.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.child_epochs, args.child_lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
示例12: __init__
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def __init__(self, options):
    """Build a shuffled DataLoader for the dataset named in `options` and
    prime an iterator over it.

    Recognized datasets: mnist, emnist, fashion-mnist, lsun, cifar10,
    cifar100; any other value falls back to an ImageFolder at
    `options.data_dir`.
    """
    # Assemble the image transform pipeline from the options.
    pipeline = []
    if options.image_size is not None:
        pipeline.append(transforms.Resize((options.image_size, options.image_size)))
        # transform_list.append(transforms.CenterCrop(options.image_size))
    pipeline.append(transforms.ToTensor())
    if options.image_colors == 1:
        pipeline.append(transforms.Normalize(mean=[0.5], std=[0.5]))
    elif options.image_colors == 3:
        pipeline.append(transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
    transform = transforms.Compose(pipeline)
    # Instantiate the requested dataset.
    name = options.dataset
    if name == 'mnist':
        dataset = datasets.MNIST(options.data_dir, train=True, download=True, transform=transform)
    elif name == 'emnist':
        # Updated URL from https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist
        datasets.EMNIST.url = 'https://cloudstor.aarnet.edu.au/plus/s/ZNmuFiuQTqZlu9W/download'
        dataset = datasets.EMNIST(options.data_dir, split=options.image_class, train=True, download=True, transform=transform)
    elif name == 'fashion-mnist':
        dataset = datasets.FashionMNIST(options.data_dir, train=True, download=True, transform=transform)
    elif name == 'lsun':
        dataset = datasets.LSUN(options.data_dir, classes=[options.image_class + '_train'], transform=transform)
    elif name == 'cifar10':
        dataset = datasets.CIFAR10(options.data_dir, train=True, download=True, transform=transform)
    elif name == 'cifar100':
        dataset = datasets.CIFAR100(options.data_dir, train=True, download=True, transform=transform)
    else:
        dataset = datasets.ImageFolder(root=options.data_dir, transform=transform)
    self.dataloader = DataLoader(
        dataset,
        batch_size=options.batch_size,
        num_workers=options.loader_workers,
        shuffle=True,
        drop_last=True,
        pin_memory=options.pin_memory
    )
    self.iterator = iter(self.dataloader)
示例13: get_dataset
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def get_dataset(name, split='train', transform=None,
                target_transform=None, download=True, datasets_path=__DATASETS_DEFAULT_PATH):
    """Return a torchvision dataset instance for `name`.

    Supported names: cifar10, cifar100, mnist, stl10, imagenet. The dataset
    root is `datasets_path/name`; for imagenet, the split subfolder
    ('train' or 'val') is appended and no download is attempted.
    Returns None for unrecognized names.
    """
    train = (split == 'train')
    root = os.path.join(datasets_path, name)
    # These datasets all share the standard (root/train/transform/...) constructor.
    standard = {
        'cifar10': datasets.CIFAR10,
        'cifar100': datasets.CIFAR100,
        'mnist': datasets.MNIST,
    }
    if name in standard:
        return standard[name](root=root,
                              train=train,
                              transform=transform,
                              target_transform=target_transform,
                              download=download)
    if name == 'stl10':
        # STL10 takes the split string directly (it also has an 'unlabeled' split).
        return datasets.STL10(root=root,
                              split=split,
                              transform=transform,
                              target_transform=target_transform,
                              download=download)
    if name == 'imagenet':
        # ImageNet is expected on disk as pre-split train/ and val/ folders.
        subdir = 'train' if train else 'val'
        return datasets.ImageFolder(root=os.path.join(root, subdir),
                                    transform=transform,
                                    target_transform=target_transform)
示例14: fetch_bylabel
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def fetch_bylabel(label):
    """Return (normalizer, dataset class) for the given class count.

    label == 10 selects CIFAR-10; any other value selects CIFAR-100.
    The normalizer uses each dataset's per-channel mean/std statistics.
    """
    if label == 10:
        stats = {'mean': [0.4914, 0.4824, 0.4467],
                 'std': [0.2471, 0.2435, 0.2616]}
        data_cls = datasets.CIFAR10
    else:
        stats = {'mean': [0.5071, 0.4867, 0.4408],
                 'std': [0.2675, 0.2565, 0.2761]}
        data_cls = datasets.CIFAR100
    return transforms.Normalize(**stats), data_cls
示例15: __init__
# 需要导入模块: from torchvision import datasets [as 别名]
# 或者: from torchvision.datasets import CIFAR100 [as 别名]
def __init__(self, opt):
    """Create CIFAR-100 train/val DataLoaders from the experiment options.

    Training uses crop/flip augmentation; both loaders share the same
    loader settings (workers, batch size, shuffling, pinned memory).
    """
    loader_kwargs = {
        'num_workers': opt.workers,
        'batch_size': opt.batch_size,
        'shuffle': True,
        'pin_memory': True}
    # CIFAR-100 channel statistics rescaled from [0, 255] to [0, 1].
    normalize = transforms.Normalize(mean=[x/255.0 for x in [129.3, 124.1, 112.4]],
                                     std=[x/255.0 for x in [68.2, 65.4, 70.4]])
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize])
    val_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize])
    self.train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR100(opt.data_dir, train=True, download=True,
                          transform=train_transform),
        **loader_kwargs)
    self.val_loader = torch.utils.data.DataLoader(
        datasets.CIFAR100(opt.data_dir, train=False,
                          transform=val_transform),
        **loader_kwargs)