This article collects typical usage examples of the dataset.DatasetFromHdf5 method in Python. If you are wondering how to use dataset.DatasetFromHdf5, what it is for, or what calling it looks like in practice, the selected code examples here may help. You can also explore further usage examples from the dataset module.
The sections below show 4 code examples of dataset.DatasetFromHdf5, sorted by popularity by default.
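Before the examples, it helps to see what a DatasetFromHdf5 implementation typically looks like in these super-resolution training scripts. The following is a minimal sketch, assuming the HDF5 file stores low-resolution patches under a "data" key and high-resolution targets under a "label" key; the key names and patch layout are assumptions, not guaranteed by the examples below.

# Minimal sketch of an h5py-backed Dataset (assumed layout, not the canonical code).
import h5py
import torch
import torch.utils.data as data

class DatasetFromHdf5(data.Dataset):
    def __init__(self, file_path):
        super(DatasetFromHdf5, self).__init__()
        hf = h5py.File(file_path, "r")
        self.data = hf.get("data")      # low-resolution input patches (N, C, H, W)
        self.target = hf.get("label")   # high-resolution target patches

    def __getitem__(self, index):
        return (torch.from_numpy(self.data[index, :, :, :]).float(),
                torch.from_numpy(self.target[index, :, :, :]).float())

    def __len__(self):
        return self.data.shape[0]

Each example below then wraps such a dataset in a DataLoader and feeds it to a training loop.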
Example 1: main
# Required import: import dataset [as alias]
# Or: from dataset import DatasetFromHdf5 [as alias]
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("path_to_dataset.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = nn.L1Loss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=opt.lr, weight_decay=opt.weight_decay, betas=(0.9, 0.999), eps=1e-08)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
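All four examples call train(...) and save_checkpoint(...) helpers that are defined elsewhere in their scripts. A plausible save_checkpoint sketch, kept consistent with the resume branch above (which reads checkpoint["epoch"] and checkpoint["model"].state_dict()), could look like this; the checkpoint/ output directory is a hypothetical choice.

# Hypothetical save_checkpoint helper; it stores the epoch and the whole model object
# so that the resume code above can read checkpoint["epoch"] and checkpoint["model"].
def save_checkpoint(model, epoch):
    model_out_path = "checkpoint/model_epoch_{}.pth".format(epoch)  # assumed output path
    if not os.path.exists("checkpoint/"):
        os.makedirs("checkpoint/")
    torch.save({"epoch": epoch, "model": model}, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))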
Example 2: main
# Required import: import dataset [as alias]
# Or: from dataset import DatasetFromHdf5 [as alias]
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/train_291_32_x234.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = DRRN()
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("===> loading checkpoint: {}".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("===> no checkpoint found at {}".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("===> load model {}".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("===> no model found at {}".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
        # os.system("python eval.py --cuda --model=model/model_epoch_{}.pth".format(epoch))
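Each script also assumes a module-level argparse parser that supplies the opt fields used above: cuda, threads, batchSize, nEpochs, start_epoch, lr, momentum, weight_decay, resume, and pretrained. A hedged sketch with illustrative defaults follows; the default values are hypothetical, not taken from any of these repositories.

# Hypothetical argparse setup covering the opt fields the examples rely on.
import argparse

parser = argparse.ArgumentParser(description="PyTorch super-resolution training")
parser.add_argument("--batchSize", type=int, default=128, help="training batch size")
parser.add_argument("--nEpochs", type=int, default=50, help="number of epochs to train")
parser.add_argument("--lr", type=float, default=0.1, help="initial learning rate")
parser.add_argument("--momentum", type=float, default=0.9, help="SGD momentum (Example 2)")
parser.add_argument("--weight_decay", type=float, default=1e-4, help="weight decay")
parser.add_argument("--start_epoch", type=int, default=1, help="epoch to start training from")
parser.add_argument("--threads", type=int, default=4, help="number of DataLoader workers")
parser.add_argument("--cuda", action="store_true", help="use CUDA")
parser.add_argument("--resume", type=str, default="", help="path to a checkpoint to resume from")
parser.add_argument("--pretrained", type=str, default="", help="path to a pretrained model")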
Example 3: main
# Required import: import dataset [as alias]
# Or: from dataset import DatasetFromHdf5 [as alias]
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/lap_pry_x4_small.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = L1_Charbonnier_loss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
    else:
        model = model.cpu()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
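Examples 3 and 4 train with an L1_Charbonnier_loss criterion rather than nn.MSELoss. The Charbonnier loss is a smooth, differentiable variant of the L1 loss, sqrt(diff^2 + eps). A minimal sketch is shown below; the eps value and the sum reduction are assumptions, chosen to match how the criterion is constructed and used above (no arguments, summed like nn.MSELoss(size_average=False)).

# Minimal Charbonnier loss sketch (eps and reduction are assumptions).
class L1_Charbonnier_loss(nn.Module):
    def __init__(self, eps=1e-6):
        super(L1_Charbonnier_loss, self).__init__()
        self.eps = eps

    def forward(self, X, Y):
        diff = X - Y
        error = torch.sqrt(diff * diff + self.eps)  # element-wise sqrt(diff^2 + eps)
        return torch.sum(error)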
Example 4: main
# Required import: import dataset [as alias]
# Or: from dataset import DatasetFromHdf5 [as alias]
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("/path/to/your/dataset/like/imagenet_50K.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = L1_Charbonnier_loss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
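Finally, every example delegates the per-epoch work to a train(...) helper. A minimal sketch of such a loop is shown below, assuming the DataLoader yields (input, target) pairs as in the DatasetFromHdf5 sketch at the top; the logging interval and the reliance on the global opt are illustrative choices.

# Hypothetical per-epoch training loop matching the call
# train(training_data_loader, optimizer, model, criterion, epoch).
def train(training_data_loader, optimizer, model, criterion, epoch):
    model.train()
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = batch[0], batch[1]
        if opt.cuda:
            input, target = input.cuda(), target.cuda()
        loss = criterion(model(input), target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if iteration % 100 == 0:
            print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(
                epoch, iteration, len(training_data_loader), loss.item()))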