Python data.DataLoader Method Code Examples

This article collects typical code examples of Python's torch.utils.data.DataLoader. If you are wondering what data.DataLoader does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the containing module, torch.utils.data.


The sections below present 15 code examples of the data.DataLoader method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
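All of the examples share the same basic pattern: wrap your data in a Dataset, hand it to DataLoader, and iterate over batches. As a warm-up, here is a minimal, self-contained sketch of that pattern; the TensorDataset, tensor shapes, and loader options used here are illustrative assumptions and are not taken from any of the projects below.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy data (assumed shapes, for illustration only): 100 samples with 8 features and a binary label.
features = torch.randn(100, 8)
labels = torch.randint(0, 2, (100,))
dataset = TensorDataset(features, labels)

# DataLoader handles batching, shuffling, and (optionally) parallel loading via num_workers.
loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=0)

for X_batch, y_batch in loader:
    # Each iteration yields one batch: X_batch has shape (16, 8) and y_batch has shape (16,),
    # except possibly for the smaller final batch.
    pass

The 15 examples that follow vary mainly in the Dataset implementation, the collate_fn, and loader options such as num_workers, pin_memory, sampler, and drop_last.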

Example 1: get_datagen

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def get_datagen(self, X, y, train_mode, loader_params):
        if train_mode:
            dataset = self.dataset(X, y,
                                   train_mode=True,
                                   image_augment=self.image_augment_train,
                                   image_augment_with_target=self.image_augment_with_target_train,
                                   mask_transform=self.mask_transform,
                                   image_transform=self.image_transform,
                                   image_source=self.dataset_params.image_source)
        else:
            dataset = self.dataset(X, y,
                                   train_mode=False,
                                   image_augment=self.image_augment_inference,
                                   image_augment_with_target=self.image_augment_with_target_inference,
                                   mask_transform=self.mask_transform,
                                   image_transform=self.image_transform,
                                   image_source=self.dataset_params.image_source)

        datagen = DataLoader(dataset, **loader_params)
        steps = len(datagen)
        return datagen, steps 
Developer: minerva-ml, Project: steppy-toolkit, Lines: 23, Source: segmentation.py

Example 2: __init__

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def __init__(self, opt):
        self.trainset = GRIDDataset(opt, dset='train')
        self.trainset.load_data()
        self.testset = GRIDDataset(opt, dset='test')
        self.testset.load_data()
        self.trainloader = DataLoader(self.trainset, batch_size=opt.batch_size,
            shuffle=True, num_workers=opt.num_workers, collate_fn=ctc_collate, pin_memory=True)
        self.testloader = DataLoader(self.testset, batch_size=opt.batch_size,
            shuffle=False, num_workers=opt.num_workers, collate_fn=ctc_collate, pin_memory=True)

        # define network
        self.input_img_size = [3, 50, 100]
        self.chan, self.height, self.width = self.input_img_size
        self.vocab_size = len(self.trainset.vocab)
        assert self.testset.vocab <= self.trainset.vocab, 'possible OOV characters in test set'
        self.maxT = self.trainset.opt.max_timesteps

        self.model = LipNet(opt, self.vocab_size)
        self.opt = opt
        
        self.optimfunc = torch.optim.Adam(self.model.parameters(), lr=self.opt.lr)

    # learning rate scheduler: fixed LR 
Developer: sailordiary, Project: LipNet-PyTorch, Lines: 25, Source: model.py

Example 3: get_data_loader

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def get_data_loader(opt):
  if opt.dset_name == 'moving_mnist':
    transform = transforms.Compose([vtransforms.ToTensor()])
    dset = MovingMNIST(opt.dset_path, opt.is_train, opt.n_frames_input,
                       opt.n_frames_output, opt.num_objects, transform)

  elif opt.dset_name == 'bouncing_balls':
    transform = transforms.Compose([vtransforms.Scale(opt.image_size),
                                    vtransforms.ToTensor()])
    dset = BouncingBalls(opt.dset_path, opt.is_train, opt.n_frames_input,
                         opt.n_frames_output, opt.image_size[0], transform)

  else:
    raise NotImplementedError

  dloader = data.DataLoader(dset, batch_size=opt.batch_size, shuffle=opt.is_train,
                            num_workers=opt.n_workers, pin_memory=True)
  return dloader 
Developer: jthsieh, Project: DDPAE-video-prediction, Lines: 20, Source: get_data_loader.py

Example 4: train_fine_tuning

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def train_fine_tuning(net, optimizer, batch_size=128, num_epochs=4):
    train_iter = DataLoader(ImageFolder(os.path.join(data_dir, 'train'), transform=train_augs), batch_size, shuffle=True)
    test_iter = DataLoader(ImageFolder(os.path.join(data_dir, 'test'), transform=test_augs), batch_size)
    loss = torch.nn.CrossEntropyLoss()
    utils.train(train_iter, test_iter, net, loss, optimizer, device, num_epochs) 
Developer: wdxtub, Project: deep-learning-note, Lines: 7, Source: 48_fine_tune_hotdog.py

Example 5: train

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def train(encoder, decoder, dataset, lr, batch_size, num_epochs):
    enc_optimizer = torch.optim.Adam(encoder.parameters(), lr=lr)
    dec_optimizer = torch.optim.Adam(decoder.parameters(), lr=lr)

    loss = nn.CrossEntropyLoss(reduction='none')
    data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
    for epoch in range(num_epochs):
        l_sum = 0.0
        start = time.time()
        for X, Y in data_iter:
            enc_optimizer.zero_grad()
            dec_optimizer.zero_grad()
            l = batch_loss(encoder, decoder, X, Y, loss)
            l.backward()
            enc_optimizer.step()
            dec_optimizer.step()
            l_sum += l.item()
        if (epoch + 1) % 10 == 0:
            print("epoch %d, loss %.3f, time: %.1f sec" % (epoch + 1, l_sum / len(data_iter), time.time() - start)) 
Developer: wdxtub, Project: deep-learning-note, Lines: 21, Source: 53_machine_translation.py

Example 6: __init__

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def __init__(self, config):
        self.config = config

        if config.data_mode == "imgs":
            transform = v_transforms.Compose(
                [v_transforms.ToTensor(),
                 v_transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])

            dataset = v_datasets.ImageFolder(self.config.data_folder, transform=transform)

            self.dataset_len = len(dataset)

            self.num_iterations = (self.dataset_len + config.batch_size - 1) // config.batch_size

            self.loader = DataLoader(dataset,
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=config.data_loader_workers,
                                     pin_memory=config.pin_memory)
        elif config.data_mode == "numpy":
            raise NotImplementedError("This mode is not implemented YET")
        else:
            raise Exception("Please specify in the json a specified mode in data_mode") 
Developer: moemen95, Project: Pytorch-Project-Template, Lines: 25, Source: celebA.py

Example 7: load_textset

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def load_textset(n_jobs, use_gpu, pin_memory, corpus, text):

    # Text tokenizer
    tokenizer = load_text_encoder(**text)
    # Dataset
    tr_set, dv_set, tr_loader_bs, dv_loader_bs, data_msg = create_textset(
        tokenizer, **corpus)
    collect_tr = partial(collect_text_batch, mode='train')
    collect_dv = partial(collect_text_batch, mode='dev')
    # Dataloader (Text data stored in RAM, no need num_workers)
    tr_set = DataLoader(tr_set, batch_size=tr_loader_bs, shuffle=True, drop_last=True, collate_fn=collect_tr,
                        num_workers=0, pin_memory=use_gpu)
    dv_set = DataLoader(dv_set, batch_size=dv_loader_bs, shuffle=False, drop_last=False, collate_fn=collect_dv,
                        num_workers=0, pin_memory=pin_memory)

    # Messages to show
    data_msg.append('I/O spec.  | Token type = {}\t| Vocab size = {}'
                    .format(tokenizer.token_type, tokenizer.vocab_size))

    return tr_set, dv_set, tokenizer.vocab_size, tokenizer, data_msg 
Developer: Alexander-H-Liu, Project: End-to-end-ASR-Pytorch, Lines: 22, Source: data.py

Example 8: predict

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def predict(train_data_path, test_data_path, slot_names_path, mode, bidirectional, saved_model_path, result_path, cuda):
    train_data = load_data(train_data_path)
    label2idx, idx2label = build_label_vocab(slot_names_path)
    word2idx, idx2word = build_vocab(train_data)

    test_data = load_data(test_data_path)
    test_X, test_y = build_dataset(test_data, word2idx, label2idx)
    test_set = ATISData(test_X, test_y)
    test_loader = DataLoader(dataset=test_set,
                            batch_size=1,
                            shuffle=False)
    
    vocab_size = len(word2idx)
    label_size = len(label2idx)

    model = SlotFilling(vocab_size, label_size, mode=mode, bidirectional=bidirectional)
    model.load_state_dict(torch.load(saved_model_path))    
    if cuda:
        model = model.cuda()
    predicted = do_eval(model, test_loader, cuda)
    predicted_labels = [idx2label[idx] for idx in predicted]
    gen_result_file(test_data, predicted_labels, result_path) 
Developer: llhthinker, Project: slot-filling, Lines: 24, Source: main.py

Example 9: tile_prediction

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def tile_prediction(model, image, transforms,
                    tile_size, tile_step, batch_size):
    tiler = ImageSlicer(image.shape,
                        tile_size=tile_size,
                        tile_step=tile_step)

    tiles = tiler.split(image, value=float(image.min()))
    tiles = [transforms(tile) for tile in tiles]

    loader = DataLoader(tiles, batch_size=batch_size)

    preds_lst = []

    for tiles_batch in loader:
        pred_batch = model.predict(tiles_batch)
        preds_lst.append(pred_batch)

    pred = torch.cat(preds_lst, dim=0)

    return pred.cpu().numpy() 
Developer: lRomul, Project: argus-freesound, Lines: 22, Source: predictor.py

Example 10: main

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr, 
    #                       momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
Developer: dnddnjs, Project: pytorch-multigpu, Lines: 38, Source: train.py

Example 11: prepare_data_loader

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def prepare_data_loader(self, dataset, batch_size, rand_flag=True):
        # prepare data loader
        if rand_flag:
            data_sampler = RandomSampler(dataset)
        else:
            data_sampler = SequentialSampler(dataset)

        if self.custom_collate_fn is None:
            dataloader = DataLoader(dataset,
                                    batch_size=batch_size,
                                    sampler=data_sampler)
        else:
            dataloader = DataLoader(dataset,
                                    batch_size=batch_size,
                                    sampler=data_sampler,
                                    collate_fn=self.custom_collate_fn)

        return dataloader 
Developer: dolphin-zs, Project: Doc2EDAG, Lines: 20, Source: base_task.py

Example 12: getTestingData

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def getTestingData(batch_size=64):

    __imagenet_stats = {'mean': [0.485, 0.456, 0.406],
                        'std': [0.229, 0.224, 0.225]}
    # scale = random.uniform(1, 1.5)
    transformed_testing = depthDataset(csv_file='./data/nyu2_test.csv',
                                       transform=transforms.Compose([
                                           Scale(240),
                                           CenterCrop([304, 228], [152, 114]),
                                           ToTensor(is_test=True),
                                           Normalize(__imagenet_stats['mean'],
                                                     __imagenet_stats['std'])
                                       ]))

    dataloader_testing = DataLoader(transformed_testing, batch_size,
                                    shuffle=False, num_workers=4, pin_memory=False)

    return dataloader_testing 
Developer: JunjH, Project: Visualizing-CNNs-for-monocular-depth-estimation, Lines: 20, Source: loaddata.py

Example 13: get_data_loaders

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def get_data_loaders(filelist, imsz, viewnum, mode, bs, numworkers, classes=None, data_folder=None):
    print('Building dataloaders')
    dataset_train = DataProvider(filelist, imsz, viewnum,
                                 mode=mode, datadebug=False, classes=classes, data_folder=data_folder)

    if mode == 'test':
        shuffle = False
    else:
        shuffle = True
    
    train_loader = DataLoader(dataset_train, batch_size=bs,
                              shuffle=shuffle, num_workers=numworkers, collate_fn=collate_fn)
    
    print('train num {}'.format(len(dataset_train)))
    print('train iter {}'.format(len(train_loader)))
    
    return train_loader 
Developer: nv-tlabs, Project: DIB-R, Lines: 19, Source: dataloader_multiview_blender.py

Example 14: __init__

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def __init__(self, model, dataset, test_data_path, test_key_file, embed_path, args):
        """ Evaluate the pytorch model that is currently being build
            We take the embedding vocabulary currently being trained
        """
        self.test_key_file = test_key_file
        self.cuda = args.cuda
        self.model = model
        batch_sampler = NCBatchSampler(
            dataset.mentions_pair_length, batchsize=args.batchsize, shuffle=False
        )
        self.dataloader = DataLoader(
            dataset,
            collate_fn=padder_collate,
            batch_sampler=batch_sampler,
            num_workers=args.numworkers,
            pin_memory=args.cuda,
        )
        self.mentions_idx, self.n_pairs = batch_sampler.get_batch_info()
        self.load_meta(test_data_path) 
Developer: huggingface, Project: neuralcoref, Lines: 21, Source: evaluator.py

Example 15: predict_dataset

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import DataLoader [as alias]
def predict_dataset(self, dataset, export_path):
        """
        Predicts the images in the given dataset and saves it to disk.

        Args:
            dataset: the dataset of images to be exported, instance of unet.dataset.Image2D
            export_path: path to folder where results to be saved
        """
        self.net.train(False)
        chk_mkdir(export_path)

        for batch_idx, (X_batch, *rest) in enumerate(DataLoader(dataset, batch_size=1)):
            if isinstance(rest[0][0], str):
                image_filename = rest[0][0]
            else:
                image_filename = '%s.png' % str(batch_idx + 1).zfill(3)

            X_batch = Variable(X_batch.to(device=self.device))
            y_out = self.net(X_batch).cpu().data.numpy()

            io.imsave(os.path.join(export_path, image_filename), y_out[0, 1, :, :]) 
Developer: cosmic-cortex, Project: pytorch-UNet, Lines: 23, Source: model.py


Note: The torch.utils.data.DataLoader examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.