

Python h5py.File Method Code Examples

This article collects typical code examples of the h5py.File method in Python. If you are wondering how to call h5py.File, what it does, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the h5py package.


The following presents 15 code examples of the h5py.File method, sorted by popularity by default.
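
Before the examples, here is a minimal sketch of the basic h5py.File pattern: open a file for writing, create a dataset, then reopen the file read-only and load the data back. The file name 'example.h5' and dataset name 'data' are illustrative placeholders, not taken from any of the projects below.

import h5py
import numpy as np

# Write: create (or truncate) an HDF5 file and store one dataset.
with h5py.File('example.h5', 'w') as f:
    f.create_dataset('data', data=np.arange(10))

# Read: open the same file read-only and copy the dataset into memory.
with h5py.File('example.h5', 'r') as f:
    data = f['data'][:]  # the [:] slice reads the full dataset into a numpy array

print(data)  # -> [0 1 2 3 4 5 6 7 8 9]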

Example 1: download

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def download(self, sid):
        '''
        ny.data['hcp'].download(sid) downloads all the data understood by neuropythy for the given
        HCP subject id; the data are downloaded from Amazon S3 into the path given by the
        'hcp_auto_path' config item, and a list of the downloaded files is returned.
        '''
        # we can do this in quite a sneaky way: get the subject, get their filemap, force all the
        # paths in the subject to be downloaded using the pseudo-path, return the cache path!
        sub   = self.subjects[sid]
        fmap  = sub.meta_data['file_map']
        ppath = fmap.path
        fls   = []
        logging.info('Downloading HCP subject %s structure data...' % (sid,))
        for fl in six.iterkeys(fmap.data_files):
            logging.info('  * Downloading file %s for subject %s' % (fl, sid))
            try:
                fls.append(ppath.local_path(fl))
            except ValueError as e:
                if len(e.args) != 1 or not e.args[0].startswith('getpath:'): raise
                else: logging.info('    (File %s not found for subject %s)' % (fl, sid))
        logging.info('Subject %s download complete!' % (sid,))
        return fls
# we wrap this in a lambda so that it gets loaded when requested (in case the config changes between
# when this gets run and when the dataset gets requested) 
Developer: noahbenson, Project: neuropythy, Lines: 26, Source: hcp.py

Example 2: __init__

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def __init__(self, map_fname=None):
        """
        Args:
            map_fname (Optional[str]): Filename of the map. Defaults
                to :obj:`None`, meaning that the default location
                is used.
        """

        if map_fname is None:
            map_fname = os.path.join(
                data_dir(),
                'leike_ensslin_2019',
                'simple_cube.h5'
            )

        self._data = {}

        with h5py.File(map_fname, 'r') as f:
            self._data['mean'] = f['mean'][:]
            self._data['std'] = f['std'][:]

        self._shape = self._data['mean'].shape 
Developer: gregreen, Project: dustmaps, Lines: 24, Source: leike_ensslin_2019.py

Example 3: fetch

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def fetch(clobber=False):
    """
    Downloads the 3D dust map of Leike & Ensslin (2019).

    Args:
        clobber (Optional[bool]): If ``True``, any existing file will be
            overwritten, even if it appears to match. If ``False`` (the
            default), ``fetch()`` will attempt to determine if the dataset
            already exists. This determination is not 100% robust against data
            corruption.
    """
    dest_dir = os.path.join(data_dir(), 'leike_ensslin_2019')
    fname = os.path.join(dest_dir, 'simple_cube.h5')
    
    # Check if the HDF5 file already exists
    md5sum = 'f54e01c253453117e3770575bed35078'

    if (not clobber) and fetch_utils.check_md5sum(fname, md5sum):
        print('File appears to exist already. Call `fetch(clobber=True)` '
              'to force overwriting of existing file.')
        return

    # Download from the server
    url = 'https://zenodo.org/record/2577337/files/simple_cube.h5?download=1'
    fetch_utils.download_and_verify(url, md5sum, fname) 
Developer: gregreen, Project: dustmaps, Lines: 27, Source: leike_ensslin_2019.py

Example 4: __init__

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def __init__(self, bh_dir=None):
        """
        Args:
            bh_dir (Optional[str]): The directory containing the Burstein &
                Heiles dust map. Defaults to `None`, meaning that the default
                directory is used.
        """
        if bh_dir is None:
            bh_dir = os.path.join(data_dir_default, 'bh')

        f = h5py.File(os.path.join(bh_dir, 'bh.h5'), 'r')
        self._hinorth = f['hinorth'][:]
        self._hisouth = f['hisouth'][:]
        self._rednorth = f['rednorth'][:]
        self._redsouth = f['redsouth'][:]
        f.close() 
Developer: gregreen, Project: dustmaps, Lines: 18, Source: bh.py

Example 5: test_NDArrayIter_h5py

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def test_NDArrayIter_h5py():
    if not h5py:
        return

    data, labels = _init_NDArrayIter_data('ndarray')

    try:
        os.remove('ndarraytest.h5')
    except OSError:
        pass
    with h5py.File('ndarraytest.h5', 'w') as f:
        f.create_dataset('data', data=data)
        f.create_dataset('label', data=labels)
        
        _test_last_batch_handle(f['data'], f['label'])
        _test_last_batch_handle(f['data'], [])
        _test_last_batch_handle(f['data'])
    try:
        os.remove("ndarraytest.h5")
    except OSError:
        pass 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 23, Source: test_io.py

Example 6: read_data

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def read_data(data_fname):
  """ Read saved data in HDF5 format.

  Args:
    data_fname: The filename of the file from which to read the data.
  Returns:
    A dictionary whose keys will vary depending on dataset (but should
    always contain the keys 'train_data' and 'valid_data') and whose
    values are numpy arrays.
  """

  try:
    with h5py.File(data_fname, 'r') as hf:
      data_dict = {k: np.array(v) for k, v in hf.items()}
      return data_dict
  except IOError:
    print("Cannot open %s for reading." % data_fname)
    raise 
Developer: ringringyi, Project: DOTA_models, Lines: 20, Source: utils.py

Example 7: shuffle

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def shuffle(labels, num_epochs=50, path=None, start_time=time.time()):

    order_path = '{path}/order_{num_epochs}.h5' \
                       .format(path=path, num_epochs=num_epochs)
    if path is not None and os.path.isfile(order_path):
        with h5py.File(order_path, 'r') as f:
            order = f['order'][:]
    else:
        order = -np.ones([num_epochs, labels.size(0)], dtype=int)
        for epoch in range(num_epochs):
            order[epoch] = np.random.permutation(labels.size(0))
            print_freq = min([100, (num_epochs-1) // 5 + 1])
            print_me = (epoch == 0 or epoch == num_epochs-1 or (epoch+1) % print_freq == 0)
            if print_me:
                print('{epoch:4d}/{num_epochs:4d} e; '.format(epoch=epoch+1, num_epochs=num_epochs), end='')
                print('generate random order; {time:8.3f} s'.format(time=time.time()-start_time))
        
        if path is not None:
            with h5py.File(order_path, 'w') as f:
                f.create_dataset('order', data=order, compression='gzip', compression_opts=9)
    
    print('random order; {time:8.3f} s'.format(time=time.time()-start_time))
    return torch.from_numpy(order) 
Developer: kibok90, Project: cvpr2018-hnd, Lines: 25, Source: samplers.py

Example 8: __init__

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def __init__(self, path, ids, name='default',
                 max_examples=None, is_train=True):
        self._ids = list(ids)
        self.name = name
        self.is_train = is_train

        if max_examples is not None:
            self._ids = self._ids[:max_examples]

        filename = 'data.hdf5'

        file = os.path.join(path, filename)
        log.info("Reading %s ...", file)

        self.data = h5py.File(file, 'r')
        log.info("Reading Done: %s", file) 
Developer: clvrai, Project: SSGAN-Tensorflow, Lines: 18, Source: hdf5_loader.py

Example 9: save_hdf5

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def save_hdf5(X, y, path):
    """Save data as a HDF5 file.

    Args:
        X (numpy or scipy sparse matrix): Data matrix
        y (numpy array): Target vector.
        path (str): Path to the HDF5 file to save data.
    """

    with h5py.File(path, 'w') as f:
        is_sparse = 1 if sparse.issparse(X) else 0
        f['issparse'] = is_sparse
        f['target'] = y

        if is_sparse:
            if not sparse.isspmatrix_csr(X):
                X = X.tocsr()

            f['shape'] = np.array(X.shape)
            f['data'] = X.data
            f['indices'] = X.indices
            f['indptr'] = X.indptr
        else:
            f['data'] = X 
Developer: jeongyoonlee, Project: Kaggler, Lines: 26, Source: data_io.py
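
As a usage note for Example 9: a companion reader would reverse this layout by checking the 'issparse' flag and then either rebuilding a scipy CSR matrix from the stored 'data'/'indices'/'indptr'/'shape' datasets or reading the dense 'data' array directly. The load_hdf5 helper below is a hypothetical sketch based on the storage scheme above, not code from the Kaggler project.

import h5py
from scipy import sparse

def load_hdf5(path):
    # Hypothetical counterpart to save_hdf5 above; returns (X, y).
    with h5py.File(path, 'r') as f:
        y = f['target'][:]
        if f['issparse'][()]:
            # Rebuild the CSR matrix from its raw components.
            X = sparse.csr_matrix(
                (f['data'][:], f['indices'][:], f['indptr'][:]),
                shape=tuple(f['shape'][:]))
        else:
            X = f['data'][:]
    return X, y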

Example 10: __init__

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def __init__(self, ids, name='default',
                 max_examples=None, is_train=True):
        self._ids = list(ids)
        self.name = name
        self.is_train = is_train

        if max_examples is not None:
            self._ids = self._ids[:max_examples]

        filename = 'data.hdf5'

        file = os.path.join(__PATH__, filename)
        log.info("Reading %s ...", file)

        try:
            self.data = h5py.File(file, 'r+')
        except OSError:
            raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
        log.info("Reading Done: %s", file) 
Developer: clvrai, Project: Generative-Latent-Optimization-Tensorflow, Lines: 21, Source: cifar10.py

Example 11: __init__

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def __init__(self, n_pop=4, **specs):
        
        FortneyMarleyCahoyMix1.__init__(self, **specs)
        
        # number of category
        self.n_pop = int(n_pop)
        
        # read forecaster parameter file
        downloadsdir = get_downloads_dir()
        filename = 'fitting_parameters.h5'
        parampath = os.path.join(downloadsdir, filename)
        if not os.path.exists(parampath) and os.access(downloadsdir, os.W_OK|os.X_OK):
            fitting_url = 'https://raw.github.com/dsavransky/forecaster/master/fitting_parameters.h5'
            self.vprint("Fetching Forecaster fitting parameters from %s to %s" % (fitting_url, parampath))
            try:
                urlretrieve(fitting_url, parampath)
            except Exception:
                self.vprint("Error: Remote fetch failed. Fetch manually or see install instructions.")

        assert os.path.exists(parampath), 'fitting_parameters.h5 must exist in /.EXOSIMS/downloads'

        h5 = h5py.File(parampath, 'r')
        self.all_hyper = h5['hyper_posterior'][:]
        h5.close() 
Developer: dsavransky, Project: EXOSIMS, Lines: 26, Source: Forecaster.py

Example 12: save_h5_data_label_normal

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def save_h5_data_label_normal(h5_filename, data, label, normal,
        data_dtype='float32', label_dtype='uint8', normal_dtype='float32'):
    h5_fout = h5py.File(h5_filename, 'w')
    h5_fout.create_dataset(
            'data', data=data,
            compression='gzip', compression_opts=4,
            dtype=data_dtype)
    h5_fout.create_dataset(
            'normal', data=normal,
            compression='gzip', compression_opts=4,
            dtype=normal_dtype)
    h5_fout.create_dataset(
            'label', data=label,
            compression='gzip', compression_opts=1,
            dtype=label_dtype)
    h5_fout.close()


# Write numpy array data and label to h5_filename 
Developer: vinits5, Project: pointnet-registration-framework, Lines: 21, Source: data_prep_util.py

Example 13: load_matlab_file

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def load_matlab_file(path_file, name_field):
    """
    load '.mat' files
    inputs:
        path_file, string containing the file path
        name_field, string containing the field name (default='shape')
    warning:
        '.mat' files should be saved in the '-v7.3' format
    """
    db = h5py.File(path_file, 'r')
    ds = db[name_field]
    try:
        if 'ir' in ds.keys():
            data = np.asarray(ds['data'])
            ir = np.asarray(ds['ir'])
            jc = np.asarray(ds['jc'])
            out = sp.csc_matrix((data, ir, jc)).astype(np.float32)
    except AttributeError:
        # Transpose in case it is a dense matrix, because of the row- vs column-major ordering difference between Python and MATLAB
        out = np.asarray(ds).astype(np.float32).T

    db.close()

    return out 
Developer: muhanzhang, Project: IGMC, Lines: 26, Source: preprocessing.py

Example 14: write_amplitudes

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def write_amplitudes(t1, t2, filename="t_amplitudes.hdf5"):
    task_list = generate_max_task_list(t2.shape)
    if rank == 0:
        print("writing t amplitudes to file")
        feri = h5py.File(filename, 'w')
        ds_type = t2.dtype
        out_t1  = feri.create_dataset('t1', t1.shape, dtype=ds_type)
        out_t2  = feri.create_dataset('t2', t2.shape, dtype=ds_type)

        task_list = generate_max_task_list(t1.shape)
        for block in task_list:
            which_slice = [slice(*x) for x in block]
            out_t1[tuple(which_slice)] = t1[tuple(which_slice)]
        task_list = generate_max_task_list(t2.shape)
        for block in task_list:
            which_slice = [slice(*x) for x in block]
            out_t2[tuple(which_slice)] = t2[tuple(which_slice)]
        feri.close()
    return 
Developer: pyscf, Project: pyscf, Lines: 21, Source: kccsd_rhf.py

Example 15: read_eom_amplitudes

# Required import: import h5py [as alias]
# Or: from h5py import File [as alias]
def read_eom_amplitudes(vec_shape, filename="reom_amplitudes.hdf5", vec=None):
    task_list = generate_max_task_list(vec_shape)
    read_success = False
    return False, None  # TODO: find a way to make the amplitudes consistent
                        # with the signs of the eris/t-amplitudes when restarting
    print("attempting to read in eom amplitudes from file ", filename)
    if os.path.isfile(filename):
        print("reading eom amplitudes from file. shape=", vec_shape)
        feri = h5py.File(filename, 'r', driver='mpio', comm=MPI.COMM_WORLD)
        saved_v = feri['v']
        if vec is None:
            vec = np.empty(vec_shape,dtype=saved_v.dtype)
        assert(saved_v.shape == vec_shape)
        task_list = generate_max_task_list(vec.shape)
        for block in task_list:
            which_slice = [slice(*x) for x in block]
            vec[tuple(which_slice)] = saved_v[tuple(which_slice)]
        feri.close()
        read_success = True
    if vec is not None and vec_shape[-1] == 1:
        vec = vec.reshape(vec_shape[:-1])
    return read_success, vec 
Developer: pyscf, Project: pyscf, Lines: 24, Source: kccsd_rhf.py


Note: The h5py.File method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their developers; copyright of the source code belongs to the original authors. When redistributing or using the code, please refer to the corresponding project's license. Do not republish without permission.