当前位置: 首页>>代码示例>>Python>>正文


Python hdf5.H5PYDataset方法代码示例

本文整理汇总了Python中fuel.datasets.hdf5.H5PYDataset方法的典型用法代码示例。如果您正苦于以下问题:Python hdf5.H5PYDataset方法的具体用法?Python hdf5.H5PYDataset怎么用?Python hdf5.H5PYDataset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在fuel.datasets.hdf5的用法示例。


在下文中一共展示了hdf5.H5PYDataset方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: load_imgs

# 需要导入模块: from fuel.datasets import hdf5 [as 别名]
# 或者: from fuel.datasets.hdf5 import H5PYDataset [as 别名]
def load_imgs(ntrain=None, ntest=None, batch_size=128, data_file=None):
    """Open the train/test splits of an hdf5 file and wrap them in
    shuffled batch streams.

    Returns (tr_data, te_data, tr_stream, te_stream, ntrain, ntest).
    When ntrain/ntest are None the full split is used; explicit requests
    are clamped to the number of examples actually available.
    """
    start = time()
    print('LOADING DATASET...')
    path = os.path.join(data_file)
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))

    # Default to the whole split; never ask for more examples than exist.
    ntrain = tr_data.num_examples if ntrain is None else min(ntrain, tr_data.num_examples)
    ntest = te_data.num_examples if ntest is None else min(ntest, te_data.num_examples)
    print('name = %s, ntrain = %d, ntest = %d' % (data_file, ntrain, ntest))

    tr_stream = DataStream(
        tr_data,
        iteration_scheme=ShuffledScheme(examples=ntrain, batch_size=batch_size))
    te_stream = DataStream(
        te_data,
        iteration_scheme=ShuffledScheme(examples=ntest, batch_size=batch_size))

    print('%.2f secs to load data' % (time() - start))
    return tr_data, te_data, tr_stream, te_stream, ntrain, ntest
开发者ID:junyanz,项目名称:iGAN,代码行数:27,代码来源:load.py

示例2: load_imgs_seq

# 需要导入模块: from fuel.datasets import hdf5 [as 别名]
# 或者: from fuel.datasets.hdf5 import H5PYDataset [as 别名]
def load_imgs_seq(ntrain=None, ntest=None, batch_size=128, data_file=None):
    """Open the train/test splits of an hdf5 file and wrap them in
    sequential (deterministic-order) batch streams.

    Returns (tr_data, te_data, tr_stream, te_stream, ntrain, ntest).
    When ntrain/ntest are None the full split is used.
    """
    t = time()
    print('LOADING DATASET...')
    path = os.path.join(data_file)
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))

    # Default to the full split, and clamp explicit requests to what the
    # dataset actually contains -- mirrors load_imgs; without the clamp an
    # oversized request puts out-of-range indices into the scheme.
    if ntrain is None:
        ntrain = tr_data.num_examples
    else:
        ntrain = min(ntrain, tr_data.num_examples)
    if ntest is None:
        ntest = te_data.num_examples
    else:
        ntest = min(ntest, te_data.num_examples)

    tr_scheme = SequentialScheme(examples=ntrain, batch_size=batch_size)
    tr_stream = DataStream(tr_data, iteration_scheme=tr_scheme)

    te_scheme = SequentialScheme(examples=ntest, batch_size=batch_size)
    te_stream = DataStream(te_data, iteration_scheme=te_scheme)

    print('name = %s, ntrain = %d, ntest = %d' % (data_file, ntrain, ntest))
    print('%.2f seconds to load data' % (time() - t))

    return tr_data, te_data, tr_stream, te_stream, ntrain, ntest
开发者ID:junyanz,项目名称:iGAN,代码行数:24,代码来源:load.py

示例3: load_imgs_raw

# 需要导入模块: from fuel.datasets import hdf5 [as 别名]
# 或者: from fuel.datasets.hdf5 import H5PYDataset [as 别名]
def load_imgs_raw(ntrain=None, ntest=None, data_file=None):
    """Open the train/test splits of an hdf5 file without building streams.

    Returns (tr_data, te_data, ntrain, ntest). When ntrain/ntest are None
    the full split size is reported.
    """
    t = time()
    print('LOADING DATASET...')
    path = os.path.join(data_file)
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))

    # Default to the full split, and clamp explicit requests to the split
    # size, matching load_imgs so callers see consistent semantics.
    if ntrain is None:
        ntrain = tr_data.num_examples
    else:
        ntrain = min(ntrain, tr_data.num_examples)
    if ntest is None:
        ntest = te_data.num_examples
    else:
        ntest = min(ntest, te_data.num_examples)

    print('name = %s, ntrain = %d, ntest = %d' % (data_file, ntrain, ntest))
    print('%.2f seconds to load data' % (time() - t))

    return tr_data, te_data, ntrain, ntest
开发者ID:junyanz,项目名称:iGAN,代码行数:18,代码来源:load.py

示例4: faces

# 需要导入模块: from fuel.datasets import hdf5 [as 别名]
# 或者: from fuel.datasets.hdf5 import H5PYDataset [as 别名]
def faces(ntrain=None, nval=None, ntest=None, batch_size=128):
    """Load the faces dataset and build train/val/test streams.

    Train uses a shuffled scheme; validation and test use sequential
    schemes. Returns (tr_data, te_data, tr_stream, val_stream, te_stream).
    """
    path = os.path.join(data_dir, 'faces_364293_128px.hdf5')
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))

    # Default each count to the full split; clamp explicit requests so the
    # iteration schemes never index past the end of the dataset.
    if ntrain is None:
        ntrain = tr_data.num_examples
    else:
        ntrain = min(ntrain, tr_data.num_examples)
    if ntest is None:
        ntest = te_data.num_examples
    else:
        ntest = min(ntest, te_data.num_examples)
    if nval is None:
        # NOTE(review): the default comes from the *test* split size even
        # though the validation stream below iterates tr_data (kept from
        # the original) -- confirm this is intended.
        nval = te_data.num_examples
    else:
        nval = min(nval, tr_data.num_examples)

    tr_scheme = ShuffledScheme(examples=ntrain, batch_size=batch_size)
    tr_stream = DataStream(tr_data, iteration_scheme=tr_scheme)

    te_scheme = SequentialScheme(examples=ntest, batch_size=batch_size)
    te_stream = DataStream(te_data, iteration_scheme=te_scheme)

    # Validation batches are drawn from the training data.
    val_scheme = SequentialScheme(examples=nval, batch_size=batch_size)
    val_stream = DataStream(tr_data, iteration_scheme=val_scheme)
    return tr_data, te_data, tr_stream, val_stream, te_stream
开发者ID:Newmu,项目名称:dcgan_code,代码行数:23,代码来源:load.py

示例5: __init__

# 需要导入模块: from fuel.datasets import hdf5 [as 别名]
# 或者: from fuel.datasets.hdf5 import H5PYDataset [as 别名]
def __init__(self, path, which_set='train', load_size=None, crop_size=None, dtype=numpy.float32):
        """Open one split of an hdf5 dataset and record image-processing settings.

        path: location of the hdf5 file handed to H5PYDataset.
        which_set: name of the split to open (e.g. 'train').
        load_size / crop_size: stored for later use -- presumably consumed
            by other methods of this class (not visible here); confirm.
        dtype: stored target dtype for loaded data.
        """
        # Local import keeps fuel out of module import time -- presumably
        # so the module loads without fuel installed; verify.
        from fuel.datasets.hdf5 import H5PYDataset

        self._dtype = dtype
        self._load_size = load_size
        self._crop_size = crop_size
        self._data_set = H5PYDataset(path, which_sets=(which_set,)) 
开发者ID:wuhuikai,项目名称:GP-GAN,代码行数:9,代码来源:dataset.py

示例6: __init__

# 需要导入模块: from fuel.datasets import hdf5 [as 别名]
# 或者: from fuel.datasets.hdf5 import H5PYDataset [as 别名]
def __init__(self, h5filename, ntrain=None, ntest=None, batch_size=1, folds=None):
        """Wrap an hdf5 file as train/test H5PYDatasets.

        h5filename: path handed to H5PYDataset.
        ntrain / ntest: override the example counts; default to split sizes.
        batch_size: stored for later use by the caller.
        folds: None for a plain 'train'/'test' file, or a pair
            (num_folds, test_fold) selecting 'fold_<i>' sets; presumably
            test_fold == num_folds means "train on all folds, no test set"
            -- confirm against callers.
        """
        if folds is None:
            # BUG FIX: `notest` was only assigned in the else-branch, so the
            # folds=None path raised NameError at the self.te_data line
            # below. A plain train/test file always has a test set.
            notest = False
            te_sets = ('test',)
            tr_sets = ('train',)
        else:
            notest = (folds[0] == folds[1])
            te_sets = () if notest else ('fold_{}'.format(folds[1]),)
            # Every fold except the held-out one is used for training.
            tr_sets = tuple('fold_{}'.format(i) for i in range(folds[0]) if i != folds[1])
        self.batch_size = batch_size
        self.tr_data = H5PYDataset(h5filename, which_sets=tr_sets)
        self.te_data = None if notest else H5PYDataset(h5filename, which_sets=te_sets)
        self.ntrain = ntrain or self.tr_data.num_examples
        # Parentheses make the original precedence explicit: the conditional
        # binds loosest, so with no test set ntest is forced to 0.
        self.ntest = (ntest or self.te_data.num_examples) if self.te_data else 0
开发者ID:woshialex,项目名称:diagnose-heart,代码行数:15,代码来源:utils.py

示例7: install_and_load

# 需要导入模块: from fuel.datasets import hdf5 [as 别名]
# 或者: from fuel.datasets.hdf5 import H5PYDataset [as 别名]
def install_and_load(self):
        """Open the bundled WSJ0 hdf5 file and build per-split datasets.

        Populates self.h5file, self.subset (dict with 'train'/'valid'/'test'
        H5PYDataset views over the same open file) and self.is_loaded.
        """
        path = os.path.join(
            os.path.dirname(__file__), 'WSJ0', 'wsj0-danet.hdf5')
        self.h5file = h5py.File(path, 'r')
        # One H5PYDataset view per split, all backed by the same file handle.
        self.subset = {
            split: H5PYDataset(self.h5file, which_sets=(split,))
            for split in ('train', 'valid', 'test')
        }
        self.is_loaded = True
开发者ID:khaotik,项目名称:DaNet-Tensorflow,代码行数:15,代码来源:wsj0.py

示例8: get_dataset_iterator

# 需要导入模块: from fuel.datasets import hdf5 [as 别名]
# 或者: from fuel.datasets.hdf5 import H5PYDataset [as 别名]
def get_dataset_iterator(dataset, split, include_features=True, include_targets=False, unit_scale=True, label_transforms=False, return_length=False):
    """Get iterator for dataset, split, targets (labels) and scaling (from 255 to 1.0)"""
    # Requested sources, in the fixed order (features, then targets).
    sources = [name for name, wanted in
               (('features', include_features), ('targets', include_targets))
               if wanted]

    # "all"/"nontrain" are aliases for combined splits; anything else is a
    # literal split name.
    split_aliases = {
        'all': ('train', 'valid', 'test'),
        'nontrain': ('valid', 'test'),
    }
    splits = split_aliases.get(split, (split,))

    dataset_fname = find_in_data_path("{}.hdf5".format(dataset))
    h5_dataset = H5PYDataset(dataset_fname, which_sets=splits,
                             sources=sources)
    if unit_scale:
        # Rescale uint8 pixels into floatX (255 -> 1.0).
        h5_dataset.default_transformers = uint8_pixels_to_floatX(('features',))

    datastream = DataStream.default_stream(
        dataset=h5_dataset,
        iteration_scheme=SequentialExampleScheme(h5_dataset.num_examples))

    if label_transforms:
        # TODO: maybe refactor this common bit with get_custom_streams below
        datastream = AddLabelUncertainty(datastream, chance=0,
                                         which_sources=('targets',))
        datastream = RandomLabelStrip(datastream, chance=0,
                                      which_sources=('targets',))
        # HACK: allow variable stretch
        datastream = StretchLabels(datastream, length=128,
                                   which_sources=('targets',))

    epoch_iterator = datastream.get_epoch_iterator()
    if return_length:
        return epoch_iterator, h5_dataset.num_examples
    return epoch_iterator

# get images from dataset. numanchors=None to get all. image_size only needed for color conversion 
开发者ID:dribnet,项目名称:plat,代码行数:47,代码来源:fuel_helper.py


注:本文中的fuel.datasets.hdf5.H5PYDataset方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。