

Python data.shape Method Code Examples

This article collects typical code examples of data.shape as it appears in Python code built on torch.utils.data. If you are unsure what data.shape does, how to call it, or what real-world usage looks like, the examples selected below may help; you can also explore further usage examples of torch.utils.data. (Strictly speaking, shape is an attribute of the tensors or NumPy arrays that the data loaders yield, not a method of the torch.utils.data module itself.)


The following presents 15 code examples that use data.shape, ordered by popularity by default.
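Before the examples, here is a minimal, self-contained sketch (toy dataset and made-up sizes, purely for illustration) of where data.shape typically appears in a torch.utils.data pipeline:

import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy dataset: 100 RGB images of size 32x32 with integer class labels.
images = torch.randn(100, 3, 32, 32)
labels = torch.randint(0, 10, (100,))
loader = DataLoader(TensorDataset(images, labels), batch_size=16, shuffle=True)

for data, target in loader:
    # data.shape is torch.Size([16, 3, 32, 32]) for full batches; the last
    # batch may be smaller, which is why several examples below accumulate
    # dataset_size with data.shape[0] rather than with the nominal batch size.
    print(data.shape, target.shape)
    break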

Example 1: data_transfrom

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def data_transfrom(self,data,other):
        data=data.astype(np.float32)
        if self.train:
            shape=np.fromstring(other[0],np.uint16)
            data=data.reshape(shape)
            # Random crop
            _, w, h = data.shape
            x1 = np.random.randint(0, w - 224)
            y1 = np.random.randint(0, h - 224)
            data=data[:,x1:x1+224 ,y1:y1 + 224]
            # HorizontalFlip
            #TODO horizontal flip
        else:
            data = data.reshape([3, 224, 224])
        data = (data - mean) / std
        tensor = torch.Tensor(data)
        del data
        return tensor 
Author: hahnyuan, Project: nn_tools, Lines: 20, Source: imagenet.py
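The transform above depends on globals (mean, std) and on LMDB metadata from the surrounding project. A self-contained sketch of the same crop-and-normalize idea, with placeholder normalization constants (all values here are made up for illustration):

import numpy as np
import torch

mean, std = 0.5, 0.25  # placeholder normalization constants

def random_crop_to_tensor(data, crop=224):
    """Randomly crop a CHW float array to crop x crop and normalize it."""
    _, w, h = data.shape
    x1 = np.random.randint(0, w - crop)
    y1 = np.random.randint(0, h - crop)
    data = data[:, x1:x1 + crop, y1:y1 + crop]
    return torch.Tensor((data - mean) / std)

img = np.random.rand(3, 256, 300).astype(np.float32)
print(random_crop_to_tensor(img).shape)  # torch.Size([3, 224, 224])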

Example 2: _read_from_lmdb

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def _read_from_lmdb(self):
        self.cur.next()
        if not self.cur.key():
            self.cur.first()
        dataset = pb2.Dataset().FromString(self.cur.value())
        for datum in dataset.datums:
            data = np.fromstring(datum.data, np.uint8)
            try:
                data = self.data_transfrom(data, datum.other)
            except Exception:
                print('cannot trans', data.shape)
                continue
            target = int(datum.target)
            target = self.target_transfrom(target)
            self.data.put(data)
            self.target.put(target)
            # print 'read_from_lmdb', time.time()-r
        del dataset

    # def read_from_lmdb(self):
    #     process=multiprocessing.Process(target=self._read_from_lmdb)
    #     process.start() 
Author: hahnyuan, Project: nn_tools, Lines: 24, Source: imagenet.py
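One portability note on this example: np.fromstring is deprecated for raw binary input in current NumPy; np.frombuffer is the drop-in replacement for decoding the datum bytes. A minimal sketch of the modern call:

import numpy as np

raw = bytes(range(12))                      # stand-in for datum.data
data = np.frombuffer(raw, dtype=np.uint8)   # replaces the deprecated np.fromstring
print(data.shape)                           # (12,)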

Example 3: train

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
            epoch, batch_idx * args.batch_size, len(loader.dataset),
            100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Author: lianghongzhuo, Project: PointNetGPD, Lines: 27, Source: main_1v.py

Example 4: test

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for data, target, obj_name in loader:
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output, _ = model(data) # N*C
        test_loss += F.nll_loss(output, target, size_average=False).cpu().item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Author: lianghongzhuo, Project: PointNetGPD, Lines: 26, Source: main_1v.py
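F.nll_loss(..., size_average=False) is the pre-0.4 way of requesting a summed loss; recent PyTorch versions warn about it and expect reduction='sum' instead. A small equivalent sketch with random inputs:

import torch
import torch.nn.functional as F

output = torch.log_softmax(torch.randn(8, 5), dim=1)  # fake log-probabilities
target = torch.randint(0, 5, (8,))

# Summed negative log-likelihood with the current API:
loss_sum = F.nll_loss(output, target, reduction='sum')
print(loss_sum.item())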

Example 5: train

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
            epoch, batch_idx * len(data), len(loader.dataset),
            100. * batch_idx * len(data) / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Author: lianghongzhuo, Project: PointNetGPD, Lines: 27, Source: main_1v_gpd.py

Example 6: test

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for batch_idx, (data, target, obj_name) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output = model(data) # N*C
        test_loss += F.nll_loss(output, target, size_average=False).cpu().item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Author: lianghongzhuo, Project: PointNetGPD, Lines: 26, Source: main_fullv_gpd.py

Example 7: disparity_loader

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def disparity_loader(path):
    path_prefix = path.split('.')[0]
    # print(path_prefix)
    path1 = path_prefix + '_exception_assign_minus_1.npy'
    path2 = path_prefix + '.npy'
    path3 = path_prefix + '.pfm'
    import os.path as ospath
    if ospath.exists(path1):
        return np.load(path1)
    else:

        # from readpfm import readPFMreadPFM
        from readpfm import readPFM
        data, _ = readPFM(path3)
        np.save(path2, data)
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                if j - data[i][j] < 0:
                    data[i][j] = -1
        np.save(path1, data)
        return data 
Author: meteorshowers, Project: StereoNet-ActiveStereoNet, Lines: 23, Source: SecenFlowLoader.py

Example 8: disparity_loader

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def disparity_loader(path):
    path_prefix = path.split('.')[0]
    path1 = path_prefix + '_exception_assign_minus_1.npy'
    path2 = path_prefix + '.npy'
    path3 = path_prefix + '.pfm'
    import os.path as ospath
    if ospath.exists(path1):
        return np.load(path1)
    else:
        if ospath.exists(path2):
            data = np.load(path2)
        else:
            from readpfm import readPFM
            data, _ = readPFM(path3)
            np.save(path2, data)
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                if j - data[i][j] < 0:
                    data[i][j] = -1
        np.save(path1, data)
        return data 
Author: meteorshowers, Project: StereoNet-ActiveStereoNet, Lines: 23, Source: SceneFlowLoader_demo.py
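In both loaders the nested Python loop over data.shape[0] and data.shape[1] marks pixels whose disparity points outside the left image border (column index minus disparity is negative). Since each element's condition depends only on its own value and column index, the same pass can be written as one vectorized NumPy operation; this is a sketch, not taken from either project:

import numpy as np

def mark_invalid_disparities(data):
    """Set disparity to -1 wherever column_index - disparity < 0."""
    cols = np.arange(data.shape[1])[None, :]   # column indices, broadcast over rows
    out = data.copy()
    out[cols - out < 0] = -1
    return out

disp = np.random.rand(4, 6).astype(np.float32) * 5
print(mark_invalid_disparities(disp))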

Example 9: __init__

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def __init__(self, root, training=True):
        self.root = root
        self.training = training
        if self.training:
            self.filenames = train_files
        else:
            self.filenames = test_files
        for fn in self.filenames:
            fp = os.path.join(self.root, 'scenenn_seg_' + fn + '.hdf5')
            print(fp)
            with h5py.File(fp, 'r') as f:
                data = np.array(f['data'])
                label = np.array(f['label'])
                if not hasattr(self, 'data'):
                    self.data = data
                    self.label = label
                    self.num_points = data.shape[1]
                    self.num_channels = data.shape[2]
                elif data.shape[0] > 0:
                    self.data = np.concatenate((self.data, data))
                    self.label = np.concatenate((self.label, label)) 
Author: hkust-vgd, Project: pointwise, Lines: 23, Source: datasets.py

Example 10: train_discriminator

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def train_discriminator(optimizer, real_data, fake_data, discriminator, criterion):
    optimizer.zero_grad()
    
    # 1.1 Train on Real Data
    prediction_real = discriminator(real_data)
    y_real = Variable(torch.ones(prediction_real.shape[0], 1))
    if torch.cuda.is_available(): 
        D_real_loss = criterion(prediction_real, y_real.cuda())
    else: 
        D_real_loss = criterion(prediction_real, y_real)

    # 1.2 Train on Fake Data
    prediction_fake = discriminator(fake_data)
    y_fake = Variable(torch.zeros(prediction_fake.shape[0], 1))
    if torch.cuda.is_available(): 
        D_fake_loss = criterion(prediction_fake, y_fake.cuda())
    else: 
        D_fake_loss = criterion(prediction_fake, y_fake)
    
    D_loss = D_real_loss + D_fake_loss
    D_loss.backward()
    optimizer.step()
    
    # Return error
    return D_real_loss + D_fake_loss, prediction_real, prediction_fake, discriminator 
Author: aspuru-guzik-group, Project: selfies, Lines: 27, Source: GAN.py

Example 11: train_discriminator

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def train_discriminator(optimizer, real_data, fake_data, discriminator, criterion):
    optimizer.zero_grad()
    
    # 1.1 Train on Real Data
    prediction_real = discriminator(real_data)
    y_real = Variable(torch.ones(prediction_real.shape[0], 1))
    if torch.cuda.is_available(): 
        D_real_loss = criterion(prediction_real, y_real.cuda())
    else: 
        D_real_loss = criterion(prediction_real, y_real)

    # 1.2 Train on Fake Data
    prediction_fake = discriminator(fake_data)
    y_fake = Variable(torch.zeros(prediction_fake.shape[0], 1))
    if torch.cuda.is_available(): 
        D_fake_loss = criterion(prediction_fake, y_fake.cuda())
    else: 
        D_fake_loss = criterion(prediction_fake, y_fake)
    
    D_loss = D_real_loss + D_fake_loss
    D_loss.backward()
    optimizer.step()
    
    return D_real_loss + D_fake_loss, prediction_real, prediction_fake, discriminator 
Author: aspuru-guzik-group, Project: selfies, Lines: 26, Source: GAN.py
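prediction_real.shape[0] and prediction_fake.shape[0] size the label tensors so they always match the batch dimension of the discriminator output. Below is a minimal, self-contained call of the function above; the toy discriminator, the random "real" and "fake" batches, and all sizes are made up for illustration:

import torch
import torch.nn as nn
from torch.autograd import Variable

discriminator = nn.Sequential(nn.Linear(8, 16), nn.ReLU(),
                              nn.Linear(16, 1), nn.Sigmoid())
optimizer = torch.optim.Adam(discriminator.parameters(), lr=1e-3)
criterion = nn.BCELoss()

real_data = Variable(torch.randn(32, 8))
fake_data = Variable(torch.randn(32, 8))
if torch.cuda.is_available():
    # Match the function's behavior, which moves labels to the GPU.
    discriminator = discriminator.cuda()
    real_data, fake_data = real_data.cuda(), fake_data.cuda()

loss, p_real, p_fake, discriminator = train_discriminator(
    optimizer, real_data, fake_data, discriminator, criterion)
print(loss.item(), p_real.shape, p_fake.shape)   # scalar loss, (32, 1), (32, 1)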

Example 12: test

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def test(epoch):  # testing data
    model.eval()
    start_time = time.time()
    with torch.no_grad():
        for iloader, xtrain, ytrain in loadtest:
            iloader=iloader.item()
            listofpred0 = []
            cnt,aveloss=0,0
            for ind in range(0, xtrain.shape[-1] - sampleSize, sampleSize):
                output = model(xtrain[:, :,ind:ind + sampleSize].to(device))
                loss = criterion(output, (ytrain[:, ind:ind + sampleSize].to(device)))
                cnt += 1
                aveloss += float(loss)
                _,output = torch.max(output,1)
                listofpred0.append(output.reshape(-1))
            aveloss /= cnt
            print('loss for test:{},num{},epoch{}'.format(aveloss, iloader,epoch))
            ans0 = quan_mu_law_decode(np.concatenate(listofpred0))
            if not os.path.exists('vsCorpus/'): os.makedirs('vsCorpus/')
            sf.write(savemusic.format(iloader), ans0, sample_rate)
            print('test stored done', np.round(time.time() - start_time)) 
Author: ShichengChen, Project: WaveUNet, Lines: 23, Source: trainclassify.py
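The inner loop above walks along xtrain.shape[-1], the time axis, in non-overlapping windows of sampleSize; because the range end is exclusive, any trailing samples that do not fill a full window are dropped (and when the length is an exact multiple of sampleSize, the final full window is skipped as well). The indexing pattern in isolation, with a toy tensor and a made-up window size:

import torch

sampleSize = 4
xtrain = torch.randn(1, 2, 18)   # (batch, channels, time)

chunks = [xtrain[:, :, ind:ind + sampleSize]
          for ind in range(0, xtrain.shape[-1] - sampleSize, sampleSize)]
print(len(chunks), chunks[0].shape)   # 4 torch.Size([1, 2, 4])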

Example 13: find_bounds_clr

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def find_bounds_clr(model, loader, optimizer, criterion, device, dtype, min_lr=8e-6, max_lr=8e-5, step_size=2000,
                    mode='triangular', save_path='.'):
    model.train()
    correct1, correct5 = 0, 0
    scheduler = CyclicLR(optimizer, base_lr=min_lr, max_lr=max_lr, step_size_up=step_size, mode=mode)
    epoch_count = step_size // len(loader)  # Assuming step_size is multiple of batch per epoch
    accuracy = []
    for _ in trange(epoch_count):
        for batch_idx, (data, target) in enumerate(tqdm(loader)):
            if scheduler is not None:
                scheduler.step()
            data, target = data.to(device=device, dtype=dtype), target.to(device=device)

            optimizer.zero_grad()
            output = model(data)

            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            corr = correct(output, target)
            accuracy.append(corr[0] / data.shape[0])

    lrs = np.linspace(min_lr, max_lr, step_size)
    plt.plot(lrs, accuracy)
    plt.savefig(os.path.join(save_path, 'find_bounds_clr.pdf'))
    plt.show()
    np.save(os.path.join(save_path, 'acc.npy'), accuracy)
    return 
Author: Randl, Project: MobileNetV3-pytorch, Lines: 31, Source: run.py

Example 14: maskData

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def maskData(self, data):
        """

        Args:
            data:

        Returns:

        """

        msk = nib.load(self.mask)
        mskD = msk.get_data()
        if not np.all(np.bitwise_or(mskD == 0, mskD == 1)):
            raise ValueError("Mask has incorrect values.")
        # nVox = np.sum(mskD.flatten())
        if data.shape[0:3] != mskD.shape:
            raise ValueError((data.shape, mskD.shape))

        msk_f = mskD.flatten()
        msk_idx = np.where(msk_f == 1)[0]

        if len(data.shape) == 3:
            data_masked = data.flatten()[msk_idx]

        if len(data.shape) == 4:
            data = np.transpose(data, (3, 0, 1, 2))
            data_masked = np.zeros((data.shape[0], int(mskD.sum())))
            for i, x in enumerate(data):
                data_masked[i] = x.flatten()[msk_idx]

        img = data_masked

        return np.array(img) 
Author: rdevon, Project: cortex, Lines: 35, Source: nii_dataload.py
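Stripped of the NIfTI loading, the masking logic reduces to flattening the spatial dimensions and keeping the voxels where the binary mask is 1; for 4D input the array is first transposed to (t, x, y, z) so every time point is masked with the same index set. A NumPy-only sketch of the 4D branch on synthetic data (no nibabel required):

import numpy as np

data = np.random.rand(5, 4, 3, 7)                 # (x, y, z, t) volume
mskD = (np.random.rand(5, 4, 3) > 0.5).astype(np.uint8)

msk_idx = np.where(mskD.flatten() == 1)[0]        # linear indices of kept voxels
vols = np.transpose(data, (3, 0, 1, 2))           # (t, x, y, z)
data_masked = np.zeros((vols.shape[0], int(mskD.sum())))
for i, x in enumerate(vols):
    data_masked[i] = x.flatten()[msk_idx]

print(data_masked.shape)                          # (7, number_of_masked_voxels)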

Example 15: find_bounds_clr

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import shape [as alias]
def find_bounds_clr(model, loader, optimizer, criterion, device, dtype, min_lr=8e-6, max_lr=8e-5, step_size=2000,
                    mode='triangular', save_path='.'):
    model.train()
    correct1, correct5 = 0, 0
    scheduler = CyclicLR(optimizer, base_lr=min_lr, max_lr=max_lr, step_size=step_size, mode=mode)
    epoch_count = step_size // len(loader)  # Assuming step_size is multiple of batch per epoch
    accuracy = []
    for _ in trange(epoch_count):
        for batch_idx, (data, target) in enumerate(tqdm(loader)):
            if scheduler is not None:
                scheduler.batch_step()
            data, target = data.to(device=device, dtype=dtype), target.to(device=device)

            optimizer.zero_grad()
            output = model(data)

            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            corr = correct(output, target)
            accuracy.append(corr[0] / data.shape[0])

    lrs = np.linspace(min_lr, max_lr, step_size)
    plt.plot(lrs, accuracy)
    plt.savefig(os.path.join(save_path, 'find_bounds_clr.png'))
    plt.show()
    np.save(os.path.join(save_path, 'acc.npy'), accuracy)
    return 
Author: Randl, Project: MobileNetV2-pytorch, Lines: 31, Source: run.py


Note: The torch.utils.data.shape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various authors; copyright remains with the original authors. Please refer to each project's license before redistributing or using the code, and do not reproduce this compilation without permission.