This page collects typical usage examples of the Python method torch.utils._data_transforms_cifar10. If you are unsure what utils._data_transforms_cifar10 does or how to call it, the curated examples below may help. Note that despite the qualified name, this helper is not part of PyTorch itself: in the projects these examples are drawn from (DARTS/NAO-style neural architecture search code), _data_transforms_cifar10 is defined in each repository's own utils module, not in torch.utils.
The following shows 14 code examples of utils._data_transforms_cifar10, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
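For orientation, here is a minimal sketch of what such a helper typically returns, modeled on the widely used DARTS-style implementation. The exact signature varies across the examples below (some take an args namespace, others a cutout_size), so treat the names here as assumptions rather than the canonical API:

import numpy as np
import torch
import torchvision.transforms as transforms

CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]

class Cutout(object):
    # minimal Cutout: zero out one random square patch of the normalized image
    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        h, w = img.size(1), img.size(2)
        y, x = np.random.randint(h), np.random.randint(w)
        y1, y2 = np.clip(y - self.length // 2, 0, h), np.clip(y + self.length // 2, 0, h)
        x1, x2 = np.clip(x - self.length // 2, 0, w), np.clip(x + self.length // 2, 0, w)
        mask = np.ones((h, w), np.float32)
        mask[y1:y2, x1:x2] = 0.
        return img * torch.from_numpy(mask)

def _data_transforms_cifar10(cutout_size=None):
    # standard CIFAR-10 augmentation: pad-and-crop, horizontal flip, then normalize
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    if cutout_size is not None:
        train_transform.transforms.append(Cutout(cutout_size))
    # validation images are only normalized, never augmented
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform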
Example 1: build_cifar10
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def build_cifar10(model_state_dict=None, optimizer_state_dict=None, **kwargs):
    epoch = kwargs.pop('epoch')
    ratio = kwargs.pop('ratio')
    train_transform, valid_transform = utils._data_transforms_cifar10(args.child_cutout_size)
    # validation examples come from the training split (see the samplers below)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=valid_transform)
    num_train = len(train_data)
    assert num_train == len(valid_data)
    indices = list(range(num_train))
    split = int(np.floor(ratio * num_train))
    np.random.shuffle(indices)
    # carve the shuffled indices into disjoint train/validation ranges
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.child_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.child_eval_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=16)
    model = NASWSNetworkCIFAR(args, 10, args.child_layers, args.child_nodes, args.child_channels,
                              args.child_keep_prob, args.child_drop_path_keep_prob,
                              args.child_use_aux_head, args.steps)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.child_lr_max,
        momentum=0.9,
        weight_decay=args.child_l2_reg,
    )
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    # positional args: T_max, eta_min, last_epoch (allows resuming mid-run)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.child_epochs, args.child_lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
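A hypothetical call site for this builder (the keyword names match the kwargs it pops; epoch=-1 starts the cosine schedule from scratch), training on 90% of the CIFAR-10 training set and validating on the remaining 10%:

train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = \
    build_cifar10(ratio=0.9, epoch=-1)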
Example 2: build_cifar100
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def build_cifar100(model_state_dict=None, optimizer_state_dict=None, **kwargs):
    epoch = kwargs.pop('epoch')
    ratio = kwargs.pop('ratio')
    # the CIFAR-10 transforms are reused for CIFAR-100 here
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=valid_transform)
    num_train = len(train_data)
    assert num_train == len(valid_data)
    indices = list(range(num_train))
    split = int(np.floor(ratio * num_train))
    np.random.shuffle(indices)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.child_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.child_eval_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=16)
    model = NASWSNetworkCIFAR(args, 100, args.child_layers, args.child_nodes, args.child_channels,
                              args.child_keep_prob, args.child_drop_path_keep_prob,
                              args.child_use_aux_head, args.steps)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.child_lr_max,
        momentum=0.9,
        weight_decay=args.child_l2_reg,
    )
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.child_epochs, args.child_lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 3: build_cifar10
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def build_cifar10(model_state_dict, optimizer_state_dict, **kwargs):
    epoch = kwargs.pop('epoch')
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)
    model = NASNetworkCIFAR(args, 10, args.layers, args.nodes, args.channels, args.keep_prob,
                            args.drop_path_keep_prob, args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    logging.info("multi adds = %fM", model.multi_adds / 1000000)
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if torch.cuda.device_count() > 1:
        logging.info("Using %d GPUs", torch.cuda.device_count())
        model = nn.DataParallel(model)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
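Because epoch is forwarded to CosineAnnealingLR as last_epoch, these builders can resume a previous run; a hypothetical round trip, with the checkpoint keys assumed rather than taken from the original repository:

state = torch.load('checkpoint.pt')
train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = \
    build_cifar10(state['model'], state['optimizer'], epoch=state['epoch'])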
Example 4: build_cifar100
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def build_cifar100(model_state_dict, optimizer_state_dict, **kwargs):
    epoch = kwargs.pop('epoch')
    # the CIFAR-10 transforms are reused for CIFAR-100 here
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    # the validation set must also be CIFAR-100 to match the 100-class model below
    valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)
    model = NASNetworkCIFAR(args, 100, args.layers, args.nodes, args.channels, args.keep_prob,
                            args.drop_path_keep_prob, args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    logging.info("multi adds = %fM", model.multi_adds / 1000000)
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if torch.cuda.device_count() > 1:
        logging.info("Using %d GPUs", torch.cuda.device_count())
        model = nn.DataParallel(model)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 5: build_cifar10
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def build_cifar10(model_state_dict, optimizer_state_dict, **kwargs):
    epoch = kwargs.pop('epoch')
    # this variant's helper takes a second flag enabling AutoAugment
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size, args.autoaugment)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)
    model = NASNetworkCIFAR(args, 10, args.layers, args.nodes, args.channels, args.keep_prob,
                            args.drop_path_keep_prob, args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if torch.cuda.device_count() > 1:
        logging.info("Using %d GPUs", torch.cuda.device_count())
        model = nn.DataParallel(model)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
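A sketch of how the two-argument signature used above might be honored, reusing the constants and Cutout from the sketch near the top of this page; torchvision's built-in AutoAugment (available from torchvision 0.11) stands in for whatever policy implementation the original repository ships, so treat this as an assumption:

def _data_transforms_cifar10(cutout_size=None, autoaugment=False):
    ops = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()]
    if autoaugment:
        # AutoAugment's learned CIFAR-10 policy operates on the PIL image,
        # so it must precede ToTensor
        ops.append(transforms.AutoAugment(transforms.AutoAugmentPolicy.CIFAR10))
    ops += [transforms.ToTensor(), transforms.Normalize(CIFAR_MEAN, CIFAR_STD)]
    train_transform = transforms.Compose(ops)
    if cutout_size is not None:
        train_transform.transforms.append(Cutout(cutout_size))
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform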
Example 6: build_cifar10
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def build_cifar10(model_state_dict=None, optimizer_state_dict=None, **kwargs):
    epoch = kwargs.pop('epoch')
    ratio = kwargs.pop('ratio')
    train_transform, valid_transform = utils._data_transforms_cifar10(args.child_cutout_size)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=valid_transform)
    num_train = len(train_data)
    assert num_train == len(valid_data)
    indices = list(range(num_train))
    split = int(np.floor(ratio * num_train))
    np.random.shuffle(indices)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.child_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.child_eval_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=16)
    # unlike Example 1, this NASWSNetworkCIFAR variant does not take args as its first argument
    model = NASWSNetworkCIFAR(10, args.child_layers, args.child_nodes, args.child_channels,
                              args.child_keep_prob, args.child_drop_path_keep_prob,
                              args.child_use_aux_head, args.steps)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.child_lr_max,
        momentum=0.9,
        weight_decay=args.child_l2_reg,
    )
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.child_epochs, args.child_lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 7: build_cifar100
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def build_cifar100(model_state_dict=None, optimizer_state_dict=None, **kwargs):
    epoch = kwargs.pop('epoch')
    ratio = kwargs.pop('ratio')
    # the CIFAR-10 transforms are reused for CIFAR-100 here
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=valid_transform)
    num_train = len(train_data)
    assert num_train == len(valid_data)
    indices = list(range(num_train))
    split = int(np.floor(ratio * num_train))
    np.random.shuffle(indices)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.child_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.child_eval_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=16)
    model = NASWSNetworkCIFAR(100, args.child_layers, args.child_nodes, args.child_channels,
                              args.child_keep_prob, args.child_drop_path_keep_prob,
                              args.child_use_aux_head, args.steps)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.child_lr_max,
        momentum=0.9,
        weight_decay=args.child_l2_reg,
    )
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.child_epochs, args.child_lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 8: build_cifar10
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def build_cifar10(model_state_dict, optimizer_state_dict, **kwargs):
    epoch = kwargs.pop('epoch')
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)
    model = NASNetworkCIFAR(args, 10, args.layers, args.nodes, args.channels, args.keep_prob,
                            args.drop_path_keep_prob, args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if torch.cuda.device_count() > 1:
        logging.info("Using %d GPUs", torch.cuda.device_count())
        model = nn.DataParallel(model)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 9: main
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # only the validation transform is needed for test-time evaluation
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    # unlike Example 13, this variant skips torch.no_grad(); wrapping infer()
    # in it would avoid building the autograd graph during evaluation
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
Example 10: get_train_val_loaders
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def get_train_val_loaders(self):
    if self.args.dataset == 'cifar10':
        train_transform, valid_transform = utils._data_transforms_cifar10(self.args)
        train_data = dset.CIFAR10(
            root=self.args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(
            root=self.args.data, train=False, download=True, transform=valid_transform)
    elif self.args.dataset == 'cifar100':
        train_transform, valid_transform = utils._data_transforms_cifar100(self.args)
        train_data = dset.CIFAR100(
            root=self.args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(
            root=self.args.data, train=False, download=True, transform=valid_transform)
    elif self.args.dataset == 'svhn':
        train_transform, valid_transform = utils._data_transforms_svhn(self.args)
        train_data = dset.SVHN(
            root=self.args.data, split='train', download=True, transform=train_transform)
        valid_data = dset.SVHN(
            root=self.args.data, split='test', download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=self.args.batch_size,
        shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=self.args.batch_size,
        shuffle=False, pin_memory=True, num_workers=2)
    return train_queue, valid_queue, train_transform, valid_transform
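One caveat shared by this builder and the next: an unrecognized self.args.dataset falls through every branch and leaves train_data unbound, so the DataLoader call fails with a NameError. A defensive tail branch (an addition, not present in the original) makes the failure explicit:

    else:
        raise ValueError('unsupported dataset: %s' % self.args.dataset)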
Example 11: get_train_val_loaders
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def get_train_val_loaders(self):
    if self.args.dataset == 'cifar10':
        train_transform, valid_transform = utils._data_transforms_cifar10(self.args)
        train_data = dset.CIFAR10(root=self.args.data, train=True, download=True, transform=train_transform)
    elif self.args.dataset == 'cifar100':
        train_transform, valid_transform = utils._data_transforms_cifar100(self.args)
        train_data = dset.CIFAR100(root=self.args.data, train=True, download=True, transform=train_transform)
    elif self.args.dataset == 'svhn':
        train_transform, valid_transform = utils._data_transforms_svhn(self.args)
        train_data = dset.SVHN(root=self.args.data, split='train', download=True, transform=train_transform)
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(self.args.train_portion * num_train))
    # both queues sample from the same training set; the disjoint index
    # ranges keep the two splits non-overlapping
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=self.args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=self.args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=2)
    return train_queue, valid_queue, train_transform, valid_transform
Example 12: build_cifar100
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def build_cifar100(model_state_dict, optimizer_state_dict, **kwargs):
    epoch = kwargs.pop('epoch')
    # the CIFAR-10 transforms are reused for CIFAR-100 here
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)
    model = NASNetworkCIFAR(args, 100, args.layers, args.nodes, args.channels, args.keep_prob,
                            args.drop_path_keep_prob, args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    logging.info("multi adds = %fM", model.multi_adds / 1000000)
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if torch.cuda.device_count() > 1:
        logging.info("Using %d GPUs", torch.cuda.device_count())
        model = nn.DataParallel(model)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 13: main
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    # evaluating under no_grad skips autograd bookkeeping
    with torch.no_grad():
        test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
Example 14: main
# Required imports: from torch import utils [as alias]
# Or: from torch.utils import _data_transforms_cifar10 [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    for epoch in range(args.epochs):
        # pre-1.1 PyTorch convention: step the scheduler at the top of the epoch
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # linearly ramp up the drop-path probability over training
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
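A note on the loop above: calling scheduler.step() before the optimizer has stepped follows the pre-1.1 PyTorch convention and triggers a warning on newer releases. An equivalent loop for recent PyTorch versions (get_last_lr() requires >= 1.4), assuming the same train and infer helpers, logs the rate up front and steps the scheduler at the end of each epoch:

for epoch in range(args.epochs):
    logging.info('epoch %d lr %e', epoch, scheduler.get_last_lr()[0])
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
    train_acc, train_obj = train(train_queue, model, criterion, optimizer)
    logging.info('train_acc %f', train_acc)
    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc %f', valid_acc)
    scheduler.step()
    utils.save(model, os.path.join(args.save, 'weights.pt'))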