

Python dataset.TensorDataset Method Code Examples

This article collects typical usage examples of the Python torch.utils.data.dataset.TensorDataset method. If you are unsure what dataset.TensorDataset does or how to use it in practice, the curated code examples below should help. You can also explore further usage examples of the containing module, torch.utils.data.dataset.


The following presents 10 code examples of the dataset.TensorDataset method, sorted by popularity by default.
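
Before the examples, here is a minimal, self-contained sketch of the basic API (tensor shapes and variable names are illustrative only): TensorDataset wraps one or more tensors that share the same first dimension and returns the matching slice of each tensor per index, so it pairs naturally with DataLoader.

import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import TensorDataset

features = torch.randn(8, 3)                 # 8 samples, 3 features each
labels = torch.randint(0, 2, (8,))           # one label per sample
ds = TensorDataset(features, labels)
print(ds[0])                                 # (features[0], labels[0])

loader = DataLoader(ds, batch_size=4, shuffle=False)
for batch_features, batch_labels in loader:
    print(batch_features.shape, batch_labels.shape)  # torch.Size([4, 3]) torch.Size([4])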

Example 1: score

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def score(self, X, y=None, sample_weight=None) -> float:
        loss_function = torch.nn.MSELoss()
        if self.autoencoder is None:
            raise NotFittedError
        if issparse(X):
            X = X.todense()
        self.autoencoder.eval()
        ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
        dataloader = DataLoader(ds, batch_size=self.batch_size, shuffle=False)
        loss = 0
        for index, batch in enumerate(dataloader):
            batch = batch[0]
            if self.cuda:
                batch = batch.cuda(non_blocking=True)
            output = self.autoencoder(batch)
            loss += float(loss_function(output, batch).item())
        return loss 
Author: vlukiyanov, Project: pt-sdae, Lines of code: 19, Source file: sklearn_api.py

Example 2: transform

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def transform(self, X):
        if self.autoencoder is None:
            raise NotFittedError
        if issparse(X):
            X = X.todense()
        self.autoencoder.eval()
        ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
        dataloader = DataLoader(ds, batch_size=self.batch_size, shuffle=False)
        features_encoder = [[] for _ in self.autoencoder.encoder]
        features_decoder = [[] for _ in self.autoencoder.decoder]
        for index, batch in enumerate(dataloader):
            batch = batch[0]
            if self.cuda:
                batch = batch.cuda(non_blocking=True)
            for index, unit in enumerate(self.autoencoder.encoder):
                batch = unit(batch)
                features_encoder[index].append(batch.detach().cpu())
            for index, unit in enumerate(self.autoencoder.decoder):
                batch = unit(batch)
                features_decoder[index].append(batch.detach().cpu())
        return np.concatenate(
            [torch.cat(x).numpy() for x in features_encoder + features_decoder[:-1]],
            axis=1,
        ) 
Author: vlukiyanov, Project: pt-sdae, Lines of code: 26, Source file: sklearn_api.py
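
The transform above relies on autoencoder.encoder and autoencoder.decoder being iterable containers of layers. A minimal sketch of that pattern, using a toy nn.Sequential stand-in rather than the pt-sdae model, to show how per-layer activations are collected and concatenated column-wise:

import torch
from torch import nn

encoder = nn.Sequential(nn.Linear(10, 8), nn.Linear(8, 4))   # toy stand-in for autoencoder.encoder
x = torch.randn(3, 10)
features = []
out = x
for layer in encoder:                 # nn.Sequential is iterable over its sub-modules
    out = layer(out)
    features.append(out.detach().cpu())
stacked = torch.cat(features, dim=1)  # concatenate per-layer outputs column-wise
print(stacked.shape)                  # torch.Size([3, 12]) == 8 + 4 columns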

Example 3: get_train_datasets

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def get_train_datasets(num_queries, num_materialized_samples):
    dicts, column_min_max_vals, min_val, max_val, labels_train, labels_test, max_num_joins, max_num_predicates, train_data, test_data = load_and_encode_train_data(
        num_queries, num_materialized_samples)
    train_dataset = make_dataset(*train_data, labels=labels_train, max_num_joins=max_num_joins,
                                 max_num_predicates=max_num_predicates)
    print("Created TensorDataset for training data")
    test_dataset = make_dataset(*test_data, labels=labels_test, max_num_joins=max_num_joins,
                                max_num_predicates=max_num_predicates)
    print("Created TensorDataset for validation data")
    return dicts, column_min_max_vals, min_val, max_val, labels_train, labels_test, max_num_joins, max_num_predicates, train_dataset, test_dataset 
Author: andreaskipf, Project: learnedcardinalities, Lines of code: 12, Source file: data.py

Example 4: load

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def load(self, filename, vocabs):
        features = self.load_features(filename, vocabs)
        x_tensor = torch.tensor(features['x'], dtype=torch.long)
        num_sequences_word = (x_tensor.size(0) // self.nctx) * self.nctx
        x_tensor = x_tensor.narrow(0, 0, num_sequences_word).view(-1, self.nctx)
        return TensorDataset(x_tensor, x_tensor) 
Author: dpressel, Project: mead-baseline, Lines of code: 8, Source file: transformer_utils.py
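
The narrow/view step above cuts a flat token stream into rows of nctx tokens, dropping the incomplete tail, so each TensorDataset item is a fixed-length context. A minimal sketch with made-up token values:

import torch

nctx = 4
tokens = torch.arange(10, dtype=torch.long)            # 10 tokens; the last 2 are dropped
usable = (tokens.size(0) // nctx) * nctx               # 8
contexts = tokens.narrow(0, 0, usable).view(-1, nctx)
print(contexts)
# tensor([[0, 1, 2, 3],
#         [4, 5, 6, 7]])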

Example 5: fit

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def fit(self, X, y=None):
        if issparse(X):
            X = X.todense()
        ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
        self.autoencoder = StackedDenoisingAutoEncoder(
            self.dimensions, final_activation=self.final_activation
        )
        if self.cuda:
            self.autoencoder.cuda()
        ae.pretrain(
            ds,
            self.autoencoder,
            cuda=self.cuda,
            epochs=self.pretrain_epochs,
            batch_size=self.batch_size,
            optimizer=self.optimiser_pretrain,
            scheduler=self.scheduler,
            corruption=0.2,
            silent=True,
        )
        ae_optimizer = self.optimiser_train(self.autoencoder)
        ae.train(
            ds,
            self.autoencoder,
            cuda=self.cuda,
            epochs=self.finetune_epochs,
            batch_size=self.batch_size,
            optimizer=ae_optimizer,
            scheduler=self.scheduler(ae_optimizer),
            corruption=self.corruption,
            silent=True,
        )
        return self 
Author: vlukiyanov, Project: pt-sdae, Lines of code: 35, Source file: sklearn_api.py

Example 6: _transform

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def _transform(X, autoencoder, batch_size, cuda):
    ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
    dataloader = DataLoader(ds, batch_size=batch_size, shuffle=False)
    features = []
    for batch in dataloader:
        batch = batch[0]
        if cuda:
            batch = batch.cuda(non_blocking=True)
        features.append(autoencoder.encoder(batch).detach().cpu())
    return torch.cat(features).numpy() 
Author: vlukiyanov, Project: pt-sdae, Lines of code: 12, Source file: sklearn_api.py

Example 7: load_kuramoto_data_old

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def load_kuramoto_data_old(batch_size=1, suffix=''):
    feat_train = np.load('data/old_kuramoto/feat_train' + suffix + '.npy')
    edges_train = np.load('data/old_kuramoto/edges_train' + suffix + '.npy')
    feat_valid = np.load('data/old_kuramoto/feat_valid' + suffix + '.npy')
    edges_valid = np.load('data/old_kuramoto/edges_valid' + suffix + '.npy')
    feat_test = np.load('data/old_kuramoto/feat_test' + suffix + '.npy')
    edges_test = np.load('data/old_kuramoto/edges_test' + suffix + '.npy')

    # [num_sims, num_atoms, num_timesteps, num_dims]
    num_atoms = feat_train.shape[1]

    # Reshape to: [num_sims, num_atoms * num_atoms]
    edges_train = np.reshape(edges_train, [-1, num_atoms ** 2])
    edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])
    edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])

    feat_train = torch.FloatTensor(feat_train)
    edges_train = torch.LongTensor(edges_train)
    feat_valid = torch.FloatTensor(feat_valid)
    edges_valid = torch.LongTensor(edges_valid)
    feat_test = torch.FloatTensor(feat_test)
    edges_test = torch.LongTensor(edges_test)

    # Exclude self edges
    off_diag_idx = np.ravel_multi_index(
        np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
        [num_atoms, num_atoms])
    edges_train = edges_train[:, off_diag_idx]
    edges_valid = edges_valid[:, off_diag_idx]
    edges_test = edges_test[:, off_diag_idx]

    train_data = TensorDataset(feat_train, edges_train)
    valid_data = TensorDataset(feat_valid, edges_valid)
    test_data = TensorDataset(feat_test, edges_test)

    train_data_loader = DataLoader(train_data, batch_size=batch_size)
    valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
    test_data_loader = DataLoader(test_data, batch_size=batch_size)

    return train_data_loader, valid_data_loader, test_data_loader 
Author: ethanfetaya, Project: NRI, Lines of code: 42, Source file: utils.py
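
The off_diag_idx computation above keeps every entry of the flattened num_atoms x num_atoms adjacency matrix except the diagonal, i.e. it drops self edges. A minimal sketch for num_atoms = 3:

import numpy as np

num_atoms = 3
off_diag_idx = np.ravel_multi_index(
    np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
    [num_atoms, num_atoms])
print(off_diag_idx)   # [1 2 3 5 6 7] -- every flat index except the diagonal (0, 4, 8)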

Example 8: make_dataset

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def make_dataset(samples, predicates, joins, labels, max_num_joins, max_num_predicates):
    """Add zero-padding and wrap as tensor dataset."""

    sample_masks = []
    sample_tensors = []
    for sample in samples:
        sample_tensor = np.vstack(sample)
        num_pad = max_num_joins + 1 - sample_tensor.shape[0]
        sample_mask = np.ones_like(sample_tensor).mean(1, keepdims=True)
        sample_tensor = np.pad(sample_tensor, ((0, num_pad), (0, 0)), 'constant')
        sample_mask = np.pad(sample_mask, ((0, num_pad), (0, 0)), 'constant')
        sample_tensors.append(np.expand_dims(sample_tensor, 0))
        sample_masks.append(np.expand_dims(sample_mask, 0))
    sample_tensors = np.vstack(sample_tensors)
    sample_tensors = torch.FloatTensor(sample_tensors)
    sample_masks = np.vstack(sample_masks)
    sample_masks = torch.FloatTensor(sample_masks)

    predicate_masks = []
    predicate_tensors = []
    for predicate in predicates:
        predicate_tensor = np.vstack(predicate)
        num_pad = max_num_predicates - predicate_tensor.shape[0]
        predicate_mask = np.ones_like(predicate_tensor).mean(1, keepdims=True)
        predicate_tensor = np.pad(predicate_tensor, ((0, num_pad), (0, 0)), 'constant')
        predicate_mask = np.pad(predicate_mask, ((0, num_pad), (0, 0)), 'constant')
        predicate_tensors.append(np.expand_dims(predicate_tensor, 0))
        predicate_masks.append(np.expand_dims(predicate_mask, 0))
    predicate_tensors = np.vstack(predicate_tensors)
    predicate_tensors = torch.FloatTensor(predicate_tensors)
    predicate_masks = np.vstack(predicate_masks)
    predicate_masks = torch.FloatTensor(predicate_masks)

    join_masks = []
    join_tensors = []
    for join in joins:
        join_tensor = np.vstack(join)
        num_pad = max_num_joins - join_tensor.shape[0]
        join_mask = np.ones_like(join_tensor).mean(1, keepdims=True)
        join_tensor = np.pad(join_tensor, ((0, num_pad), (0, 0)), 'constant')
        join_mask = np.pad(join_mask, ((0, num_pad), (0, 0)), 'constant')
        join_tensors.append(np.expand_dims(join_tensor, 0))
        join_masks.append(np.expand_dims(join_mask, 0))
    join_tensors = np.vstack(join_tensors)
    join_tensors = torch.FloatTensor(join_tensors)
    join_masks = np.vstack(join_masks)
    join_masks = torch.FloatTensor(join_masks)

    target_tensor = torch.FloatTensor(labels)

    return dataset.TensorDataset(sample_tensors, predicate_tensors, join_tensors, target_tensor, sample_masks,
                                 predicate_masks, join_masks) 
Author: andreaskipf, Project: learnedcardinalities, Lines of code: 54, Source file: data.py
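
The padding pattern above can be seen in isolation: each sample is stacked into a 2-D array, padded with zero rows up to a fixed length, and a one-column mask marks which rows are real. A minimal sketch with illustrative numbers (2 real rows of 3 features, padded to 4 rows):

import numpy as np

sample = [[1., 2., 3.], [4., 5., 6.]]
sample_tensor = np.vstack(sample)                                   # shape (2, 3)
max_rows = 4
num_pad = max_rows - sample_tensor.shape[0]
sample_mask = np.ones_like(sample_tensor).mean(1, keepdims=True)    # shape (2, 1), all ones
sample_tensor = np.pad(sample_tensor, ((0, num_pad), (0, 0)), 'constant')
sample_mask = np.pad(sample_mask, ((0, num_pad), (0, 0)), 'constant')
print(sample_tensor.shape, sample_mask.ravel())                     # (4, 3) [1. 1. 0. 0.]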

Example 9: load_kuramoto_data

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def load_kuramoto_data(batch_size=1, suffix=''):
    feat_train = np.load('data/feat_train' + suffix + '.npy')
    edges_train = np.load('data/edges_train' + suffix + '.npy')
    feat_valid = np.load('data/feat_valid' + suffix + '.npy')
    edges_valid = np.load('data/edges_valid' + suffix + '.npy')
    feat_test = np.load('data/feat_test' + suffix + '.npy')
    edges_test = np.load('data/edges_test' + suffix + '.npy')

    # [num_sims, num_atoms, num_timesteps, num_dims]
    num_atoms = feat_train.shape[1]

    # Normalize each feature dim. individually
    feat_max = feat_train.max(0).max(0).max(0)
    feat_min = feat_train.min(0).min(0).min(0)

    feat_max = np.expand_dims(np.expand_dims(np.expand_dims(feat_max, 0), 0), 0)
    feat_min = np.expand_dims(np.expand_dims(np.expand_dims(feat_min, 0), 0), 0)

    # Normalize to [-1, 1]
    feat_train = (feat_train - feat_min) * 2 / (feat_max - feat_min) - 1
    feat_valid = (feat_valid - feat_min) * 2 / (feat_max - feat_min) - 1
    feat_test = (feat_test - feat_min) * 2 / (feat_max - feat_min) - 1

    # Reshape to: [num_sims, num_atoms * num_atoms]
    edges_train = np.reshape(edges_train, [-1, num_atoms ** 2])
    edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])
    edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])

    feat_train = torch.FloatTensor(feat_train)
    edges_train = torch.LongTensor(edges_train)
    feat_valid = torch.FloatTensor(feat_valid)
    edges_valid = torch.LongTensor(edges_valid)
    feat_test = torch.FloatTensor(feat_test)
    edges_test = torch.LongTensor(edges_test)

    # Exclude self edges
    off_diag_idx = np.ravel_multi_index(
        np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
        [num_atoms, num_atoms])
    edges_train = edges_train[:, off_diag_idx]
    edges_valid = edges_valid[:, off_diag_idx]
    edges_test = edges_test[:, off_diag_idx]

    train_data = TensorDataset(feat_train, edges_train)
    valid_data = TensorDataset(feat_valid, edges_valid)
    test_data = TensorDataset(feat_test, edges_test)

    train_data_loader = DataLoader(train_data, batch_size=batch_size)
    valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
    test_data_loader = DataLoader(test_data, batch_size=batch_size)

    return train_data_loader, valid_data_loader, test_data_loader 
Author: ethanfetaya, Project: NRI, Lines of code: 54, Source file: utils.py

Example 10: load_motion_data

# Required import: from torch.utils.data import dataset [as alias]
# Or: from torch.utils.data.dataset import TensorDataset [as alias]
def load_motion_data(batch_size=1, suffix=''):
    feat_train = np.load('data/motion_train' + suffix + '.npy')
    feat_valid = np.load('data/motion_valid' + suffix + '.npy')
    feat_test = np.load('data/motion_test' + suffix + '.npy')
    adj = np.load('data/motion_adj' + suffix + '.npy')

    # NOTE: Already normalized

    # [num_samples, num_nodes, num_timesteps, num_dims]
    num_nodes = feat_train.shape[1]

    edges_train = np.repeat(np.expand_dims(adj.flatten(), 0),
                            feat_train.shape[0], axis=0)
    edges_valid = np.repeat(np.expand_dims(adj.flatten(), 0),
                            feat_valid.shape[0], axis=0)
    edges_test = np.repeat(np.expand_dims(adj.flatten(), 0),
                           feat_test.shape[0], axis=0)

    feat_train = torch.FloatTensor(feat_train)
    edges_train = torch.LongTensor(np.array(edges_train, dtype=np.int64))
    feat_valid = torch.FloatTensor(feat_valid)
    edges_valid = torch.LongTensor(np.array(edges_valid, dtype=np.int64))
    feat_test = torch.FloatTensor(feat_test)
    edges_test = torch.LongTensor(np.array(edges_test, dtype=np.int64))

    # Exclude self edges
    off_diag_idx = np.ravel_multi_index(
        np.where(np.ones((num_nodes, num_nodes)) - np.eye(num_nodes)),
        [num_nodes, num_nodes])
    edges_train = edges_train[:, off_diag_idx]
    edges_valid = edges_valid[:, off_diag_idx]
    edges_test = edges_test[:, off_diag_idx]

    train_data = TensorDataset(feat_train, edges_train)
    valid_data = TensorDataset(feat_valid, edges_valid)
    test_data = TensorDataset(feat_test, edges_test)

    train_data_loader = DataLoader(train_data, batch_size=batch_size)
    valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
    test_data_loader = DataLoader(test_data, batch_size=batch_size)

    return train_data_loader, valid_data_loader, test_data_loader 
Author: ethanfetaya, Project: NRI, Lines of code: 44, Source file: utils.py
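
A minimal sketch (with hypothetical shapes) of consuming the loaders returned by the NRI loaders above: each batch yields a (features, edges) pair in the order the tensors were passed to TensorDataset.

import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import TensorDataset

feat = torch.randn(6, 5, 49, 4)          # [num_samples, num_nodes, num_timesteps, num_dims]
edges = torch.randint(0, 2, (6, 20))     # [num_samples, num_nodes * (num_nodes - 1)] after removing self edges
loader = DataLoader(TensorDataset(feat, edges), batch_size=2)
for batch_feat, batch_edges in loader:
    print(batch_feat.shape, batch_edges.shape)   # torch.Size([2, 5, 49, 4]) torch.Size([2, 20])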


Note: The torch.utils.data.dataset.TensorDataset examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not republish without permission.