This article collects typical usage examples of the Python method torch.utils.data.dataloader.DataLoader. If you are wondering what dataloader.DataLoader does, how to use it, or want concrete examples, the curated code samples below may help. You can also explore the containing module, torch.utils.data.dataloader, for further context.
The following presents 15 code examples of dataloader.DataLoader, sorted by popularity by default.
Example 1: init_center_c
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def init_center_c(self, train_loader: DataLoader, net: BaseNet, eps=0.1):
    """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
    n_samples = 0
    c = torch.zeros(net.rep_dim, device=self.device)

    net.eval()
    with torch.no_grad():
        for data in train_loader:
            # get the inputs of the batch
            inputs, _, _, _ = data
            inputs = inputs.to(self.device)
            outputs = net(inputs)
            n_samples += outputs.shape[0]
            c += torch.sum(outputs, dim=0)

    c /= n_samples

    # If c_i is too close to 0, set to +-eps. Reason: a zero unit can be trivially matched with zero weights.
    c[(abs(c) < eps) & (c < 0)] = -eps
    c[(abs(c) < eps) & (c > 0)] = eps

    return c
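This is a hypersphere-center initialization of the kind used in Deep SVDD-style one-class models. A minimal standalone sketch of the same idea, with hypothetical names (init_center, rep_dim) since only the method body appears above:

import torch
from torch.utils.data.dataloader import DataLoader

def init_center(loader: DataLoader, net: torch.nn.Module, rep_dim: int,
                device='cpu', eps=0.1):
    # Average the network outputs over one full pass of the loader.
    c = torch.zeros(rep_dim, device=device)
    n = 0
    net.eval()
    with torch.no_grad():
        for inputs, *_ in loader:  # ignore any labels/extras in the batch tuple
            out = net(inputs.to(device))
            n += out.shape[0]
            c += out.sum(dim=0)
    c /= n
    # Push near-zero coordinates away from 0 so no unit is trivially matched.
    c[(c.abs() < eps) & (c < 0)] = -eps
    c[(c.abs() < eps) & (c > 0)] = eps
    return c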
Example 2: init_center_c
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def init_center_c(self, train_loader: DataLoader, net: BaseNet, eps=0.1):
    """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
    n_samples = 0
    c = torch.zeros(net.rep_dim, device=self.device)

    net.eval()
    with torch.no_grad():
        for data in train_loader:
            # get the inputs of the batch
            inputs, _, _ = data
            inputs = inputs.to(self.device)
            outputs = net(inputs)
            n_samples += outputs.shape[0]
            c += torch.sum(outputs, dim=0)

    c /= n_samples

    # If c_i is too close to 0, set to +-eps. Reason: a zero unit can be trivially matched with zero weights.
    c[(abs(c) < eps) & (c < 0)] = -eps
    c[(abs(c) < eps) & (c > 0)] = eps

    return c
Example 3: copy
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def copy(loader):
    """
    Initialize an sDataLoader from an existing DataLoader.
    :param loader: an instance of DataLoader
    :type loader: DataLoader
    :return: a new instance of sDataLoader
    :rtype: sDataLoader
    """
    if not isinstance(loader, DataLoader):
        logger.warning('loader should be an instance of DataLoader, but got {}'.format(type(loader)))
        return loader

    new_loader = sDataLoader(loader.dataset)
    for k, v in loader.__dict__.items():
        setattr(new_loader, k, v)
    return new_loader
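A hedged usage sketch, assuming sDataLoader is a DataLoader subclass exposed by the same project as copy (neither name is part of PyTorch itself):

import torch
from torch.utils.data import TensorDataset, DataLoader

base = DataLoader(TensorDataset(torch.randn(8, 3)), batch_size=4, num_workers=2)
s_loader = copy(base)  # shares the dataset and copies batch_size, workers, etc.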
Example 4: predict
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def predict(self, skip_folds=None):
    for fold, (train_index, val_index) in enumerate(self.folds):
        prefix = ('fold' + str(fold) + "_") if self.test else ""
        if skip_folds is not None:
            if fold in skip_folds:
                continue

        self.prev_name = None
        ds_cls = ValDataset if not self.test else SequentialDataset
        val_dataset = ds_cls(self.ds, val_index, stage='test', config=self.config)
        val_dl = PytorchDataLoader(val_dataset, batch_size=self.config.predict_batch_size,
                                   num_workers=self.num_workers, drop_last=False)
        weights_path = os.path.join(self.config.models_dir, 'albu')
        model = read_model(weights_path, self.folder, fold)
        pbar = val_dl if self.config.dbg else tqdm.tqdm(val_dl, total=len(val_dl))
        for data in pbar:
            self.show_mask = 'mask' in data and self.show_mask
            if 'mask' not in data:
                self.need_dice = False
            predicted = self.predict_samples(model, data)
            self.process_data(predicted, model, data, prefix=prefix)
            if not self.config.dbg and self.need_dice:
                pbar.set_postfix(dice="{:.5f}".format(np.mean(self.dice)))
        if self.config.use_crop:
            self.on_image_constructed(prefix=prefix)
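PytorchDataLoader here is presumably the stock class under a local alias, and drop_last=False matters at prediction time so that every sample is scored exactly once. A sketch of the assumed import and the typical prediction-loader settings:

from torch.utils.data.dataloader import DataLoader as PytorchDataLoader

# Prediction loaders usually keep a fixed order and every sample:
val_dl = PytorchDataLoader(val_dataset, batch_size=32, shuffle=False,
                           num_workers=4, drop_last=False)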
Example 5: get_dataloader
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def get_dataloader(self, filename, bs=1):
    full_path = os.path.join(self.base_path, filename)
    if self.concat:
        dataset = ContLMDataset(full_path, vocab=self.vocab, bptt=self.bptt)
    else:
        dataset = LMDataset(full_path, vocab=self.vocab, bptt=self.bptt)
    return DataLoader(
        dataset=dataset,
        batch_size=bs,
        shuffle=self.shuffle,
        pin_memory=self.pin_memory,
        collate_fn=pad_collate_fn,
        # num_workers=1,
        # waiting for a new torch version to support
        # drop_last=True,
    )
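pad_collate_fn is project-specific and not shown above. A minimal sketch of what such a collate function typically does for a language-model dataset, under the assumption that each item is a 1-D tensor of token ids:

import torch
from torch.nn.utils.rnn import pad_sequence

def pad_collate_fn(batch):
    # batch: list of 1-D LongTensors with varying lengths
    lengths = torch.tensor([len(seq) for seq in batch])
    padded = pad_sequence(batch, batch_first=True, padding_value=0)
    return padded, lengths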
Example 6: test_warning_with_iterable_dataset_and_len
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def test_warning_with_iterable_dataset_and_len(tmpdir):
    """ Tests that a warning message is shown when an IterableDataset defines `__len__`. """
    model = EvalModelTemplate()
    original_dataset = model.train_dataloader().dataset

    class IterableWithLen(IterableDataset):

        def __iter__(self):
            return iter(original_dataset)

        def __len__(self):
            return len(original_dataset)

    dataloader = DataLoader(IterableWithLen(), batch_size=16)
    assert _has_len(dataloader)
    assert _has_iterable_dataset(dataloader)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=3,
    )
    with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):
        trainer.fit(model, train_dataloader=dataloader, val_dataloaders=[dataloader])
    with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):
        trainer.test(model, test_dataloaders=[dataloader])
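For contrast, a plain IterableDataset without __len__ is the usual case; the DataLoader then has no notion of dataset size, which is why the test above checks both _has_len and _has_iterable_dataset. A minimal sketch:

from torch.utils.data import DataLoader, IterableDataset

class Stream(IterableDataset):
    def __iter__(self):
        return iter(range(100))

loader = DataLoader(Stream(), batch_size=16)
# len(loader) raises TypeError here, since the dataset defines no __len__.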
Example 7: ImageNetLoader882
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def ImageNetLoader882(batch_size, num_workers, split='train', shuffle=False, path='data_shallow14/datasets/ImageNet/'):
    img_split = 'images/' + split
    classes_118, class_to_idx_118 = find_classes_from_file(path + 'imagenet_rand118/imagenet_118.txt')
    samples_118 = make_dataset(path + img_split, classes_118, class_to_idx_118)
    classes_1000, _ = find_classes_from_folder(path + img_split)
    classes_882 = list(set(classes_1000) - set(classes_118))
    class_to_idx_882 = {classes_882[i]: i for i in range(len(classes_882))}
    samples_882 = make_dataset(path + img_split, classes_882, class_to_idx_882)
    if split == 'train':
        transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    else:
        transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    dataset = ImageFolder(transform=transform, samples=samples_882)
    dataloader_882 = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=True)
    return dataloader_882
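A hedged usage sketch, assuming the expected ImageNet directory layout exists under the default path and that the project's custom ImageFolder yields (image, label) pairs like torchvision's:

train_loader = ImageNetLoader882(batch_size=64, num_workers=8, split='train', shuffle=True)
for images, labels in train_loader:
    pass  # images: (64, 3, 224, 224) after the train transform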
Example 8: ImageNetLoader82from882
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def ImageNetLoader82from882(batch_size, num_workers, num_val_cls=30, path='data_shallow14/datasets/ImageNet/'):
    classes_118, class_to_idx_118 = find_classes_from_file(path + 'imagenet_rand118/imagenet_118.txt')
    samples_118 = make_dataset(path + 'images/train', classes_118, class_to_idx_118)
    classes_1000, _ = find_classes_from_folder(path + 'images/train')
    classes_882 = list(set(classes_1000) - set(classes_118))
    classes_val = classes_882[800:800 + num_val_cls]
    class_to_idx_val = {classes_val[i]: i for i in range(len(classes_val))}
    samples_val = make_dataset(path + 'images/train', classes_val, class_to_idx_val)
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    dataset_val = ImageFolder(transform=transform, samples=samples_val)
    dataloader_val = DataLoader(dataset_val, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True)
    return dataloader_val
Example 9: __rank_slates
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def __rank_slates(dataloader: DataLoader, model: LTRModel) -> Tuple[torch.Tensor, torch.Tensor]:
    reranked_X = []
    reranked_y = []
    model.eval()
    device = get_torch_device()
    with torch.no_grad():
        for xb, yb, _ in dataloader:
            X = xb.type(torch.float32).to(device=device)
            y_true = yb.to(device=device)

            input_indices = torch.ones_like(y_true).type(torch.long)
            mask = (y_true == losses.PADDED_Y_VALUE)
            scores = model.score(X, mask, input_indices)

            scores[mask] = float('-inf')

            _, indices = scores.sort(descending=True, dim=-1)
            indices_X = torch.unsqueeze(indices, -1).repeat_interleave(X.shape[-1], -1)
            reranked_X.append(torch.gather(X, dim=1, index=indices_X).cpu())
            reranked_y.append(torch.gather(y_true, dim=1, index=indices).cpu())

    combined_X = torch.cat(reranked_X)
    combined_y = torch.cat(reranked_y)
    return combined_X, combined_y
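The reordering trick above is worth isolating: sorting the scores yields per-slate indices, and the same indices reorder both features and labels via torch.gather. A self-contained toy version:

import torch

X = torch.randn(2, 5, 3)        # (slates, docs, features)
scores = torch.randn(2, 5)      # one score per doc
_, idx = scores.sort(descending=True, dim=-1)
idx_X = idx.unsqueeze(-1).repeat_interleave(X.shape[-1], -1)
X_sorted = torch.gather(X, dim=1, index=idx_X)  # docs reordered within each slate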
Example 10: __init__
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def __init__(
    self,
    dataset,
    batch_size=1,
    shuffle=False,
    sampler=None,
    batch_sampler=None,
    num_workers=8,
    drop_last=False,
    timeout=0,
    worker_init_fn=None,
):
    """ This is an extension of the PyTorch DataLoader.
    The collate function is always `list`; the remaining parameters
    are passed through from the caller.

    Parameters
    ----------
    dataset : Dataset
    batch_size : int
    shuffle : bool
    sampler : torch.utils.data.Sampler
    batch_sampler
    num_workers : int
    drop_last : bool
    timeout : int
    worker_init_fn
    """
    super(SciwingDataLoader, self).__init__(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        batch_sampler=batch_sampler,
        num_workers=num_workers,
        drop_last=drop_last,
        timeout=timeout,
        worker_init_fn=worker_init_fn,
        collate_fn=list,
        sampler=sampler,
    )
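With collate_fn=list, each batch is simply a Python list of raw dataset items with no tensor stacking, which suits datasets whose items are strings or other non-tensor objects. A sketch (the dataset name is hypothetical):

loader = SciwingDataLoader(dataset=my_text_dataset, batch_size=4)
for batch in loader:
    assert isinstance(batch, list) and len(batch) <= 4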
Example 11: val
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def val(model):
    """ Compute the model's score on the validation set. """
    top_k = 3
    # switch to evaluation mode
    model.eval()

    # prepare the data
    dataset = ZhiHuData(conf.dev_data)
    data_loader = DataLoader(dataset, batch_size=conf.batch_size)

    # predict
    predict_label_and_marked_label_list = []
    for i, batch in enumerate(data_loader):
        title, content, label = batch
        with t.no_grad():
            title, content = Variable(title.cuda()), Variable(content.cuda())
            score = model(title, content)

        pred_value = score.data.topk(top_k, dim=1)[0].cpu()
        pred_index = score.data.topk(top_k, dim=1)[1].cpu()

        # compute the score
        true_value = label.data.float().topk(top_k, dim=1)[0]
        true_index = label.data.float().topk(top_k, dim=1)[1]
        tmp = []
        for jj in range(label.size(0)):
            true = true_index[jj][true_value[jj] > 0]
            pred = pred_index[jj][pred_value[jj] > 0]
            tmp.append((pred.tolist(), true.tolist()))
        predict_label_and_marked_label_list.extend(tmp)

    scores, prec_, recall_ = calc_score(predict_label_and_marked_label_list, topk=top_k)
    print('calc_score score: {} - prec: {} - recall: {}'.format(scores, prec_, recall_))
    scores, prec_, recall_ = calc_f1(predict_label_and_marked_label_list)
    print('calc_f1 score: {} - prec: {} - recall: {}'.format(scores, prec_, recall_))

    # switch back to training mode
    model.train()
    return scores, prec_, recall_
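The paired topk calls above each discard half of the result; a single call already returns both values and indices, as this toy example shows:

import torch

score = torch.tensor([[0.1, 0.7, 0.2, 0.9]])
values, indices = score.topk(3, dim=1)
# values:  tensor([[0.9000, 0.7000, 0.2000]])
# indices: tensor([[3, 1, 0]])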
Example 12: train
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def train():
    data = np.load(conf.emb_dict_path)
    emb_mat = t.from_numpy(data['vec'])
    word2id = data['word2id'].item()
    del data
    vocab_size = len(word2id)
    print('vocab size : {}'.format(vocab_size))

    dataset = ZhiHuData(conf.train_data)
    data_loader = DataLoader(dataset=dataset, batch_size=conf.batch_size)

    Model = name_model[model_name]
    model = Model(vocab_size, emb_mat).cuda()
    # print the number of parameters
    get_params_num(model)
    optimizer = model.get_optimizer(conf.lr1, conf.lr2)

    best_score = 0
    step = 0
    for epoch in range(conf.epochs):
        print('epoch:===>', epoch)
        for i, batch in tqdm.tqdm(enumerate(data_loader)):
            title, content, label = batch
            title, content, label = Variable(title.cuda()), Variable(content.cuda()), Variable(label.cuda())
            optimizer.zero_grad()
            output = model(title, content)
            loss = model.loss_fn(output, label.float())
            loss.backward()
            optimizer.step()

            step += 1
            writer.add_scalar('train loss', loss, step)

        scores, prec_, recall_ = val(model)
        if best_score < scores:
            best_score = scores
            t.save(model, conf.model_all_path.format(model_name))
            # t.save(model.state_dict(), conf.model_dict_path.format(model_name))

    # visualization
    writer.add_graph(model, (title, content))
    writer.close()
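Note that torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4, so on modern versions the wrapping above can simply be dropped:

title, content, label = title.cuda(), content.cuda(), label.cuda()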
Example 13: make_dataloader
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def make_dataloader(dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
                    num_workers=0, pin_memory=False, drop_last=False):
    return DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle, sampler=sampler,
        batch_sampler=batch_sampler, collate_fn=collate, num_workers=num_workers,
        pin_memory=pin_memory, drop_last=drop_last, worker_init_fn=worker_init
    )
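collate and worker_init come from the surrounding module and are not shown. A worker_init_fn commonly reseeds other RNGs in each worker process, since PyTorch only seeds its own generator per worker; a typical sketch:

import random
import numpy as np
import torch

def worker_init(worker_id):
    seed = torch.initial_seed() % 2**32  # per-worker seed derived by PyTorch
    np.random.seed(seed)
    random.seed(seed)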
Example 14: make_dataloader
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def make_dataloader(cfg, num_gpus=1):
    train_trm = get_trm(cfg, is_train=True)
    val_trm = get_trm(cfg, is_train=False)
    num_workers = cfg.DATALOADER.NUM_WORKERS * num_gpus
    dataset = init_dataset(cfg)
    num_classes = dataset.num_train_pids
    train_set = ImageDataset(dataset.train, cfg, train_trm)
    if cfg.DATALOADER.SAMPLER == 'softmax':
        train_loader = DataLoader(
            train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH * num_gpus, shuffle=True,
            num_workers=num_workers,
            collate_fn=train_collate_fn
        )
    else:
        train_loader = DataLoader(
            train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH * num_gpus,
            sampler=RandomIdentitySampler(dataset.train,
                                          cfg.SOLVER.IMS_PER_BATCH * num_gpus,
                                          cfg.DATALOADER.NUM_INSTANCE * num_gpus),
            num_workers=num_workers, collate_fn=train_collate_fn
        )
    val_set = ImageDataset(dataset.query + dataset.gallery, cfg, val_trm)
    val_loader = DataLoader(
        val_set, batch_size=cfg.TEST.IMS_PER_BATCH * num_gpus, shuffle=False,
        num_workers=num_workers,
        collate_fn=val_collate_fn
    )
    return train_loader, val_loader, len(dataset.query), num_classes
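The two branches differ because DataLoader raises a ValueError when shuffle=True is combined with a custom sampler; a sampler fully determines the iteration order. A minimal illustration (my_sampler is illustrative):

# OK: sampler alone; shuffle stays at its default of False
loader = DataLoader(train_set, batch_size=64, sampler=my_sampler)

# ValueError: sampler option is mutually exclusive with shuffle
# loader = DataLoader(train_set, batch_size=64, sampler=my_sampler, shuffle=True)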
Example 15: train
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import DataLoader [as alias]
def train(ds, folds, config, num_workers=0, transforms=None, skip_folds=None):
    os.makedirs(os.path.join('..', 'weights'), exist_ok=True)
    os.makedirs(os.path.join('..', 'logs'), exist_ok=True)

    for fold, (train_idx, val_idx) in enumerate(folds):
        if skip_folds and fold in skip_folds:
            continue

        tr = TrainDataset(ds, train_idx, config, transform=transforms)
        val = ValDataset(ds, val_idx, config, transform=None)
        train_loader = PytorchDataLoader(tr,
                                         batch_size=config.batch_size,
                                         shuffle=True,
                                         drop_last=True,
                                         num_workers=num_workers,
                                         pin_memory=True)
        val_loader = PytorchDataLoader(val,
                                       batch_size=config.batch_size,
                                       shuffle=False,
                                       drop_last=False,
                                       num_workers=num_workers,
                                       pin_memory=True)

        trainer = PytorchTrain(fold=fold,
                               config=config,
                               metrics=[('soft dice', dice_loss),
                                        ('hard dice', dice_clamp),
                                        ('bce', nn.modules.loss.BCELoss())])
        trainer.fit(train_loader, val_loader)
        trainer.writer.close()
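Both loaders set pin_memory=True, which stages batches in page-locked host memory so host-to-GPU copies can run asynchronously; it pays off when paired with non_blocking transfers in the training loop (the batch structure below is illustrative):

for images, masks in train_loader:
    images = images.to('cuda', non_blocking=True)  # async copy from pinned memory
    masks = masks.to('cuda', non_blocking=True)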