

Python sampler.SequentialSampler Method Code Examples

This article collects typical usage examples of the Python method torch.utils.data.sampler.SequentialSampler. If you have been wondering what sampler.SequentialSampler does, how it is used in practice, or where to find examples of it, the curated code examples below may help. You can also explore further usage examples from torch.utils.data.sampler, the module this method belongs to.


The following presents 9 code examples of sampler.SequentialSampler, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
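
Before the examples, a minimal sketch of the core behaviour (standard PyTorch API): SequentialSampler(data_source) simply yields the indices 0 through len(data_source) - 1 in order, which is why it serves as the deterministic building block under the batch samplers in the examples below.

from torch.utils.data.sampler import SequentialSampler

# SequentialSampler yields dataset indices in order: 0, 1, ..., len - 1.
data = ['a', 'b', 'c', 'd']
print(list(SequentialSampler(data)))  # [0, 1, 2, 3]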

Example 1: test_respect_order

# Required import: from torch.utils.data import sampler [as alias]
# Or: from torch.utils.data.sampler import SequentialSampler [as alias]
def test_respect_order(self):
        drop_uneven = False
        dataset = [i for i in range(10)]
        group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        sampler = SequentialSampler(dataset)

        expected = [
            [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
            [[0, 1, 3], [2, 4, 5], [6, 9], [7, 8]],
            [[0, 1, 3, 6], [2, 4, 5, 7], [8], [9]],
        ]

        for idx, batch_size in enumerate([1, 3, 4]):
            batch_sampler = GroupedBatchSampler(
                sampler, group_ids, batch_size, drop_uneven
            )
            result = list(batch_sampler)
            self.assertEqual(result, expected[idx]) 
Developer ID: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 20, Source: test_data_samplers.py

Example 2: test_number_of_iters_and_elements

# Required import: from torch.utils.data import sampler [as alias]
# Or: from torch.utils.data.sampler import SequentialSampler [as alias]
def test_number_of_iters_and_elements(self):
        for batch_size in [2, 3, 4]:
            for num_iterations in [4, 10, 20]:
                for drop_last in [False, True]:
                    dataset = [i for i in range(10)]
                    sampler = SequentialSampler(dataset)
                    batch_sampler = BatchSampler(
                        sampler, batch_size, drop_last=drop_last
                    )

                    iter_sampler = IterationBasedBatchSampler(
                        batch_sampler, num_iterations
                    )
                    assert len(iter_sampler) == num_iterations
                    for i, batch in enumerate(iter_sampler):
                        start = (i % len(batch_sampler)) * batch_size
                        end = min(start + batch_size, len(dataset))
                        expected = [x for x in range(start, end)]
                        self.assertEqual(batch, expected) 
Developer ID: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 21, Source: test_data_samplers.py

Example 3: test_distributed_batch_sampler

# Required import: from torch.utils.data import sampler [as alias]
# Or: from torch.utils.data.sampler import SequentialSampler [as alias]
def test_distributed_batch_sampler():
    sampler = SequentialSampler(list(range(15)))
    batch_sampler = BatchSampler(sampler, 10, False)

    distributed_sampler = DistributedBatchSampler(batch_sampler, num_replicas=4, rank=0)
    assert list(distributed_sampler) == [[0, 4, 8], [10, 14]]
    assert len(distributed_sampler) == 2

    distributed_sampler = DistributedBatchSampler(batch_sampler, num_replicas=4, rank=1)
    assert list(distributed_sampler) == [[1, 5, 9], [11]]
    assert len(distributed_sampler) == 2

    distributed_sampler = DistributedBatchSampler(batch_sampler, num_replicas=4, rank=2)
    assert list(distributed_sampler) == [[2, 6], [12]]
    assert len(distributed_sampler) == 2

    distributed_sampler = DistributedBatchSampler(batch_sampler, num_replicas=4, rank=3)
    assert list(distributed_sampler) == [[3, 7], [13]]
    assert len(distributed_sampler) == 2 
Developer ID: PetrochukM, Project: PyTorch-NLP, Lines of code: 21, Source: test_distributed_batch_sampler.py
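
The assertions above pin down the splitting rule: each replica keeps every num_replicas-th element of each batch, starting at its own rank, i.e. batch[rank::num_replicas]. A quick plain-Python sanity check of that reading (no PyTorch needed):

batch = list(range(10))          # the first batch produced above
assert batch[0::4] == [0, 4, 8]  # rank 0
assert batch[1::4] == [1, 5, 9]  # rank 1
assert batch[2::4] == [2, 6]     # rank 2
assert batch[3::4] == [3, 7]     # rank 3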

Example 4: get_dataloader

# Required import: from torch.utils.data import sampler [as alias]
# Or: from torch.utils.data.sampler import SequentialSampler [as alias]
def get_dataloader(self, batch_size, type, num_workers, shuffle):
        """
        get dataloader on train or dev dataset
        :param batch_size:
        :param type: 'train' or 'dev'
        :return:
        """
        data = self._data[type]
        dataset = CQA_Dataset(data['context'],
                              data['question'],
                              data['answer_range'],
                              self.meta_data,
                              self.global_config['preprocess'])
        if shuffle:
            sampler = SortedBatchSampler(dataset.get_lengths(), batch_size)
        else:
            sampler = SequentialSampler(dataset)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=batch_size,
                                                 sampler=sampler,
                                                 collate_fn=self.collect_fun,
                                                 num_workers=num_workers)
        return dataloader 
Developer ID: laddie132, Project: Match-LSTM, Lines of code: 25, Source: squad_dataset.py

Example 5: get_data_loader

# Required import: from torch.utils.data import sampler [as alias]
# Or: from torch.utils.data.sampler import SequentialSampler [as alias]
def get_data_loader(X_in, y_in, batch_size, extended_batch_sampler=True, epoch_size=25000, upsample=False, seed=42):
    """ Returns a dataloader that enables larger epochs on small datasets and
        has upsampling functionality.

    # Arguments:
        X_in: Inputs of the given dataset.
        y_in: Outputs of the given dataset.
        batch_size: Batch size.
        epoch_size: Number of samples in an epoch.
        upsample: Whether upsampling should be done. This flag should only be
            set on binary class problems.

    # Returns:
        DataLoader.
    """
    dataset = DeepMojiDataset(X_in, y_in)

    if extended_batch_sampler:
        batch_sampler = DeepMojiBatchSampler(y_in, batch_size, epoch_size=epoch_size, upsample=upsample, seed=seed)
    else:
        batch_sampler = BatchSampler(SequentialSampler(y_in), batch_size, drop_last=False)

    return DataLoader(dataset, batch_sampler=batch_sampler, num_workers=0) 
Developer ID: natashamjaques, Project: neural_chat, Lines of code: 25, Source: finetuning.py
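
For reference, the fallback branch above uses only stock PyTorch classes: a BatchSampler over a SequentialSampler batches indices in order, and with drop_last=False it keeps the short final batch. A minimal check of that behaviour:

from torch.utils.data.sampler import BatchSampler, SequentialSampler

batches = list(BatchSampler(SequentialSampler(range(7)), batch_size=3, drop_last=False))
print(batches)  # [[0, 1, 2], [3, 4, 5], [6]]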

Example 6: build_train_sampler

# Required import: from torch.utils.data import sampler [as alias]
# Or: from torch.utils.data.sampler import SequentialSampler [as alias]
def build_train_sampler(
    data_source, train_sampler, batch_size=32, num_instances=4, **kwargs
):
    """Builds a training sampler.

    Args:
        data_source (list): contains tuples of (img_path(s), pid, camid).
        train_sampler (str): sampler name (default: ``RandomSampler``).
        batch_size (int, optional): batch size. Default is 32.
        num_instances (int, optional): number of instances per identity in a
            batch (when using ``RandomIdentitySampler``). Default is 4.
    """
    assert train_sampler in AVAI_SAMPLERS, \
        'train_sampler must be one of {}, but got {}'.format(AVAI_SAMPLERS, train_sampler)

    if train_sampler == 'RandomIdentitySampler':
        sampler = RandomIdentitySampler(data_source, batch_size, num_instances)

    elif train_sampler == 'SequentialSampler':
        sampler = SequentialSampler(data_source)

    elif train_sampler == 'RandomSampler':
        sampler = RandomSampler(data_source)

    return sampler 
Developer ID: KaiyangZhou, Project: deep-person-reid, Lines of code: 27, Source: sampler.py
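
A hypothetical call, assuming the (img_path, pid, camid) convention from the docstring and that 'SequentialSampler' is listed in AVAI_SAMPLERS (both come from the surrounding project, not from this snippet alone):

# Hypothetical data_source following the (img_path, pid, camid) convention.
data_source = [('a.jpg', 0, 0), ('b.jpg', 0, 1), ('c.jpg', 1, 0)]
sampler = build_train_sampler(data_source, 'SequentialSampler')
print(list(sampler))  # [0, 1, 2]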

Example 7: Our_Dataloader

# Required import: from torch.utils.data import sampler [as alias]
# Or: from torch.utils.data.sampler import SequentialSampler [as alias]
def Our_Dataloader(dataset, batch_size, shuffle=True, num_workers=2, drop_last=True, max_iteration=100000000):
    """
    A near-infinite iterator: it runs for up to 100 million iterations, yielding one batch of data per iteration.
    :param dataset:         the dataset
    :param batch_size:      batch size
    :param max_iteration:   total number of iterations, 100 million by default; in practice it is more
                            flexible to decide when to stop while consuming the data
    :param shuffle:         whether to draw samples in random order
    :param num_workers:     number of worker processes for data loading
    :param drop_last:       whether to drop the last incomplete batch
    :return:                a DataLoader
    """
    if shuffle:
        sampler = RandomSampler(dataset)        # random sampler
    else:
        sampler = SequentialSampler(dataset)    # sequential sampler
    batch_sampler = BatchSampler_Our(sampler=sampler,
                                     batch_size=batch_size,
                                     max_iteration=max_iteration,
                                     drop_last=drop_last)
    loader = DataLoader(dataset=dataset,
                        batch_sampler=batch_sampler,
                        num_workers=num_workers,
                        collate_fn=BatchCollator(is_train=dataset.is_train))
    return loader 
Developer ID: yatengLG, Project: SSD-Pytorch, Lines of code: 23, Source: Dataloader.py

Example 8: test_respect_order_simple

# Required import: from torch.utils.data import sampler [as alias]
# Or: from torch.utils.data.sampler import SequentialSampler [as alias]
def test_respect_order_simple(self):
        drop_uneven = False
        dataset = [i for i in range(40)]
        group_ids = [i // 10 for i in dataset]
        sampler = SequentialSampler(dataset)
        for batch_size in [1, 3, 5, 6]:
            batch_sampler = GroupedBatchSampler(
                sampler, group_ids, batch_size, drop_uneven
            )
            result = list(batch_sampler)
            merged_result = list(itertools.chain.from_iterable(result))
            self.assertEqual(merged_result, dataset) 
Developer ID: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 14, Source: test_data_samplers.py

Example 9: test_respect_order_drop_uneven

# Required import: from torch.utils.data import sampler [as alias]
# Or: from torch.utils.data.sampler import SequentialSampler [as alias]
def test_respect_order_drop_uneven(self):
        batch_size = 3
        drop_uneven = True
        dataset = [i for i in range(10)]
        group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        sampler = SequentialSampler(dataset)
        batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)

        result = list(batch_sampler)

        expected = [[0, 1, 3], [2, 4, 5]]
        self.assertEqual(result, expected) 
Developer ID: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 14, Source: test_data_samplers.py


Note: The torch.utils.data.sampler.SequentialSampler examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.