This article collects typical usage examples of the Python method datasets.load_dataset. If you have been wondering what datasets.load_dataset does or how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the datasets module to which the method belongs.
Five code examples of datasets.load_dataset are shown below, sorted by popularity by default. Note that the examples come from different projects, so the exact signature of load_dataset differs between them. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: import_data_loaders
# Required import: import datasets [as alias]
# Or: from datasets import load_dataset [as alias]
def import_data_loaders(config, n_workers, verbose=1):
    """Import datasets and wrap them into DataLoaders from configuration."""
    train_loaders, test_loaders = dict(), dict()
    for dataset_config in config['datasets']:
        train_data, test_data = datasets.load_dataset(
            dataset_config['name'], dataset_config['kwargs'])
        train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=config['batch_size'],
            shuffle=True,
            num_workers=n_workers)
        test_loader = torch.utils.data.DataLoader(
            test_data,
            batch_size=config['batch_size'],
            shuffle=False,
            num_workers=n_workers)
        train_loaders[dataset_config['task_id']] = train_loader
        test_loaders[dataset_config['task_id']] = test_loader
    log_utils.print_datasets_info(train_loaders, test_loaders, verbose)
    return train_loaders, test_loaders
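
For reference, here is a minimal sketch of a configuration dictionary that import_data_loaders could consume, assembled only from the keys the function actually reads ('datasets', 'batch_size', 'name', 'kwargs', 'task_id'); the dataset name, kwargs, and task id are hypothetical placeholders, not values from the source project.

# Hypothetical configuration sketch: the keys mirror what import_data_loaders reads,
# but the concrete values are placeholders.
config = {
    'batch_size': 64,
    'datasets': [
        {
            'name': 'mnist',                # placeholder dataset name
            'kwargs': {'root': './data'},   # forwarded to datasets.load_dataset
            'task_id': 'task_0',            # key used to index the returned loader dicts
        },
    ],
}
train_loaders, test_loaders = import_data_loaders(config, n_workers=4)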
Example 2: main
# Required import: import datasets [as alias]
# Or: from datasets import load_dataset [as alias]
def main(args):
    if args.cuda and not torch.cuda.is_available():
        raise ValueError("GPUs are not available, please run at cpu mode")
    # initialize datasets
    train_set, val_set = load_dataset(args.root, 'IM')
    print("Dataset : {} ==> Train : {} ; Val : {} .".format(args.dataset, len(train_set), len(val_set)))
    # initialize network
    args.src_ch = train_set.src_ch
    args.tar_ch = train_set.tar_ch
    net = load_model(args)
    print("Model : {} ==> (Src_ch : {} ; Tar_ch : {} ; Base_Kernel : {})".format(net.symbol, args.src_ch, args.tar_ch, args.base_kernel))
    # initialize runner
    method = "{}-{}".format(net.symbol, args.dataset)
    run = set_trainer(args, method)
    print("Start training ...")
    run.training(net, [train_set, val_set])
    run.save_log()
    run.learning_curve()
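
As a usage note, the entry points in Examples 2-4 all expect an argparse-style namespace. Below is a minimal sketch covering only the attributes this snippet reads (cuda, root, dataset, base_kernel); the flag names and defaults are assumptions, and the real scripts presumably define further options consumed by load_model and set_trainer.

import argparse

# Hypothetical parser: only the attributes read in Example 2 are covered,
# and the defaults are placeholders rather than the project's real values.
parser = argparse.ArgumentParser(description="Training entry point (sketch)")
parser.add_argument('--root', type=str, default='./dataset', help='dataset root directory (placeholder)')
parser.add_argument('--dataset', type=str, default='my-dataset', help='dataset name used for logging (placeholder)')
parser.add_argument('--base_kernel', type=int, default=24, help='base kernel count of the model (placeholder)')
parser.add_argument('--cuda', action='store_true', help='expect a GPU; the script errors out if none is available')
args = parser.parse_args()
main(args)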
Example 3: main
# Required import: import datasets [as alias]
# Or: from datasets import load_dataset [as alias]
def main(args):
    if args.cuda and not torch.cuda.is_available():
        raise ValueError("GPUs are not available, please run at cpu mode")
    # initialize datasets
    if "MCFCN" in args.net:
        mode = 'IMS'
    elif "BRNet" in args.net:
        mode = 'IME'
    else:
        mode = 'IE'
    train_set, val_set = load_dataset(args.root, mode)
    print("Dataset : {} ==> Train : {} ; Val : {}".format(args.root, len(train_set), len(val_set)))
    # initialize network
    args.src_ch = train_set.src_ch
    args.tar_ch = train_set.tar_ch
    net = load_model(args)
    print("Model : {} ==> (Src_ch : {} ; Tar_ch : {} ; Base_Kernel : {})".format(args.net, args.src_ch, args.tar_ch, args.base_kernel))
    # initialize runner
    method = "{}-{}*{}*{}-{}{}-{}".format(args.net, args.src_ch, args.tar_ch, args.base_kernel, args.root, mode, args.loss)
    run = set_trainer(args, method)
    print("Start training ...")
    run.training(net, [train_set, val_set])
    run.save_log()
    run.learning_curve()
Example 4: main
# Required import: import datasets [as alias]
# Or: from datasets import load_dataset [as alias]
def main(args):
    if args.cuda and not torch.cuda.is_available():
        raise ValueError("GPUs are not available, please run at cpu mode")
    # initialize datasets
    if "MCFCN" in args.net:
        mode = 'IMS'
    elif "BRNet" in args.net:
        mode = 'IME'
    else:
        mode = 'IM'
    train_set, val_set = load_dataset(args.root, mode)
    print("Dataset : {} ==> Train : {} ; Val : {}".format(args.root, len(train_set), len(val_set)))
    # initialize network
    args.src_ch = train_set.src_ch
    args.tar_ch = train_set.tar_ch
    net = load_model(args)
    print("Model : {} ==> (Src_ch : {} ; Tar_ch : {} ; Base_Kernel : {})".format(args.net, args.src_ch, args.tar_ch, args.base_kernel))
    # initialize runner
    method = "{}-{}*{}*{}-{}".format(args.net, args.src_ch, args.tar_ch, args.base_kernel, args.root)
    run = set_trainer(args, method)
    print("Start training ...")
    run.training(net, [train_set, val_set])
    run.save_log()
    run.learning_curve()
Example 5: main
# Required import: import datasets [as alias]
# Or: from datasets import load_dataset [as alias]
def main(args):
    train_loader, test_loader = load_dataset(args.label, args.batch_size)
    model = ShakePyramidNet(depth=args.depth, alpha=args.alpha, label=args.label)
    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True  # enable the cuDNN autotuner for fixed-size inputs
    opt = optim.SGD(model.parameters(),
                    lr=args.lr,
                    momentum=0.9,
                    weight_decay=args.weight_decay,
                    nesterov=args.nesterov)
    scheduler = optim.lr_scheduler.MultiStepLR(opt, [args.epochs // 2, args.epochs * 3 // 4])
    loss_func = nn.CrossEntropyLoss().cuda()
    headers = ["Epoch", "LearningRate", "TrainLoss", "TestLoss", "TrainAcc.", "TestAcc."]
    logger = utils.Logger(args.checkpoint, headers)
    for e in range(args.epochs):
        scheduler.step()  # advance the LR schedule at the start of each epoch (pre-1.1 PyTorch convention)
        model.train()
        train_loss, train_acc, train_n = 0, 0, 0
        bar = tqdm(total=len(train_loader), leave=False)
        for x, t in train_loader:
            x, t = Variable(x.cuda()), Variable(t.cuda())
            y = model(x)
            loss = loss_func(y, t)
            opt.zero_grad()
            loss.backward()
            opt.step()
            train_acc += utils.accuracy(y, t).item()
            train_loss += loss.item() * t.size(0)
            train_n += t.size(0)
            bar.set_description("Loss: {:.4f}, Accuracy: {:.2f}".format(
                train_loss / train_n, train_acc / train_n * 100), refresh=True)
            bar.update()
        bar.close()
        model.eval()
        test_loss, test_acc, test_n = 0, 0, 0
        for x, t in tqdm(test_loader, total=len(test_loader), leave=False):
            with torch.no_grad():
                x, t = Variable(x.cuda()), Variable(t.cuda())
                y = model(x)
                loss = loss_func(y, t)
                test_loss += loss.item() * t.size(0)
                test_acc += utils.accuracy(y, t).item()
                test_n += t.size(0)
        if (e + 1) % args.snapshot_interval == 0:
            torch.save({
                "state_dict": model.state_dict(),
                "optimizer": opt.state_dict()
            }, os.path.join(args.checkpoint, "{}.tar".format(e + 1)))
        lr = opt.param_groups[0]["lr"]
        logger.write(e + 1, lr, train_loss / train_n, test_loss / test_n,
                     train_acc / train_n * 100, test_acc / test_n * 100)
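
Similarly, here is a minimal sketch of the arguments Example 5 reads (label, batch_size, depth, alpha, lr, weight_decay, nesterov, epochs, snapshot_interval, checkpoint); the flag names follow the attribute accesses above, while the defaults are placeholders rather than values taken from the source project.

import argparse

# Hypothetical parser: flag names mirror the attributes read in Example 5,
# defaults are placeholders and not the source project's settings.
parser = argparse.ArgumentParser(description="Train ShakePyramidNet (sketch)")
parser.add_argument('--label', type=int, default=10, help='number of classes, e.g. 10 or 100')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--depth', type=int, default=110)
parser.add_argument('--alpha', type=int, default=270)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--nesterov', action='store_true')
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--snapshot_interval', type=int, default=10)
parser.add_argument('--checkpoint', type=str, default='./checkpoint')
args = parser.parse_args()
main(args)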