當前位置: 首頁>>代碼示例>>Python>>正文


Python pandas.HDFStore方法代碼示例

本文整理匯總了Python中pandas.HDFStore方法的典型用法代碼示例。如果您正苦於以下問題:Python pandas.HDFStore方法的具體用法?Python pandas.HDFStore怎麽用?Python pandas.HDFStore使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在pandas的用法示例。


在下文中一共展示了pandas.HDFStore方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: iMain

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def iMain():
    """
    Read an hdf file generated by us to make sure
    we can recover its content and structure.
    Give the name of an hdf5 file as a command-line argument.
    """
    assert sys.argv, __doc__
    sFile = sys.argv[1]
    assert os.path.isfile(sFile)
    # Open read-only; the context manager guarantees the store is closed
    # (the original used Python 2 print statements and leaked the handle).
    with pandas.HDFStore(sFile, mode='r') as oHdfStore:
        print(oHdfStore.groups())
        mSignals = oHdfStore.select('/recipe/servings/mSignals', auto_close=False)
        print(mSignals)
        # Metadata is stored as a pytables node attribute on /recipe.
        print(oHdfStore.get_node('/recipe')._v_attrs.metadata[0]['sUrl'])
開發者ID:OpenTrading,項目名稱:OpenTrader,代碼行數:18,代碼來源:Omlette.py

示例2: _create_csi_index

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def _create_csi_index(store, key, column_name):
    """Create a CSI (completely sorted) index on a column in an HDF5 file.

    The column must have been already specified in the data_columns call to
    to_hdf or it won't be stored correctly in the HDF5 file.

    Parameters
    ----------
    store : :class:`pandas.HDFStore`
        An HDF5 file opened as an instance of a :class:`pandas.HDFStore`
        object.
    key : str
        The key of the DataFrame to use.
    column_name : str
        The column to add a CSI index to.
    """
    storer = store.get_storer(key)
    resolved_name = _map_column_name(storer, column_name)
    target = storer.table.colinstances[resolved_name]

    # Nothing to do if the column already carries a CSI index.
    if target.index.is_csi:
        return

    # Drop the existing (non-CSI) index and rebuild it fully sorted.
    target.remove_index()
    target.create_csindex()
開發者ID:kboone,項目名稱:avocado,代碼行數:25,代碼來源:utils.py

示例3: write_models

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def write_models(self, tag=None):
        """Write the models of the light curves to disk.

        The models will be stored in the features directory using the dataset's
        name and the given features tag. Note that for now the models are
        stored as individual tables in the HDF5 file because there doesn't
        appear to be a good way to store fixed length arrays in pandas.

        WARNING: This is not the best way to implement this, and there are
        definitely much better ways. This also isn't thread-safe at all.

        Parameters
        ----------
        tag : str (optional)
            The tag for this version of the features. By default, this will use
            settings['features_tag'].
        """
        models_path = self.get_models_path(tag=tag)

        # Use a context manager so the store is closed even if one of the
        # to_hdf calls raises (the original leaked the handle on error).
        with pd.HDFStore(models_path, "a") as store:
            for model_name, model in self.models.items():
                model.to_hdf(store, model_name, mode="a")
開發者ID:kboone,項目名稱:avocado,代碼行數:25,代碼來源:dataset.py

示例4: _write_pandas_data

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def _write_pandas_data(path: Path, entity_key: EntityKey, data: Union[PandasObj]):
    """Write data in a pandas format to an HDF file.

    This method currently supports :class:`pandas.DataFrame` objects, with
    or without columns, and :class:`pandas.Series` objects.

    """
    if not data.empty:
        metadata = {'is_empty': False}
        data_columns = None
    else:
        # The data is indexed, sometimes with no other columns, which leaves
        # an empty frame that store.put would silently fail to write in
        # table format — so materialize the index as regular columns.
        data = data.reset_index()
        if data.empty:
            raise ValueError("Cannot write an empty dataframe that does not have an index.")
        metadata = {'is_empty': True}
        data_columns = True

    with pd.HDFStore(str(path), complevel=9) as store:
        store.put(entity_key.path, data, format="table", data_columns=data_columns)
        # Metadata must be assigned through the storer's .attrs to persist.
        store.get_storer(entity_key.path).attrs.metadata = metadata
開發者ID:ihmeuw,項目名稱:vivarium,代碼行數:25,代碼來源:hdf.py

示例5: _store_bg_data

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def _store_bg_data(store, base_name, min_ph_delays_us, best_bg, best_th,
                   BG_data, BG_data_e):
    """Save background-estimation results under *base_name*.

    NOTE(review): *store* appears to be a pytables file handle
    (``create_carray`` is a pytables API) that is closed and then reopened
    as a :class:`pandas.HDFStore` to write the pandas objects — confirm
    against the caller before restructuring.
    """
    # Normalize base_name so it can be used as an HDF5 group prefix.
    if not base_name.endswith('/'):
        base_name = base_name + '/'
    store_name = store.filename
    group_name = '/' + base_name[:-1]
    # Raw arrays go in via pytables carrays (creating parent groups as needed).
    store.create_carray(group_name, 'min_ph_delays_us', obj=min_ph_delays_us,
                        createparents=True)
    for ph_sel, values in BG_data.items():
        store.create_carray(group_name, str(ph_sel), obj=values)
    for ph_sel, values in BG_data_e.items():
        store.create_carray(group_name, str(ph_sel) + '_err', obj=values)
    # Close the pytables handle before reopening the same file with pandas;
    # the two wrappers cannot hold the file open simultaneously.
    store.close()
    store = pd.HDFStore(store_name)
    store[base_name + 'best_bg'] = best_bg
    store[base_name + 'best_th'] = best_th
    store.close()
開發者ID:tritemio,項目名稱:FRETBursts,代碼行數:19,代碼來源:burstlib_ext.py

示例6: _load_bg_data

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def _load_bg_data(store, base_name, ph_streams):
    """Load background-estimation results previously saved under *base_name*.

    Reads the raw arrays through the pytables handle *store*, then reopens
    the same file as a pandas HDFStore to read the stored pandas objects.
    """
    if not base_name.endswith('/'):
        base_name += '/'
    store_name = store.filename
    group_name = '/' + base_name[:-1]

    min_ph_delays = store.get_node(group_name, 'min_ph_delays_us')[:]
    # One array per photon stream, plus the matching '_err' array.
    BG_data = {sel: store.get_node(group_name, str(sel))[:]
               for sel in ph_streams}
    BG_data_e = {sel: store.get_node(group_name, str(sel) + '_err')[:]
                 for sel in ph_streams}

    # Switch from the pytables handle to a pandas HDFStore on the same file.
    store.close()
    store = pd.HDFStore(store_name)
    best_bg = store[base_name + 'best_bg']
    best_th = store[base_name + 'best_th']
    store.close()

    return best_th, best_bg, BG_data, BG_data_e, min_ph_delays
開發者ID:tritemio,項目名稱:FRETBursts,代碼行數:20,代碼來源:burstlib_ext.py

示例7: save

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def save(_filename, _dataframe, **options):
    """Persist *_dataframe* to disk as HDF5 (default) or Pickle.

    Parameters
    ----------
    _filename : str
        Output path without extension (".h5" or ".pkl" is appended).
    _dataframe : pandas.DataFrame
        The data to write.
    **options
        dataname : str, optional — HDF5 table name (default "twint").
        type : str, optional — falsy for HDF5, "Pickle" for a pickle file;
        any other value just prints usage help.
    """
    if options.get("dataname"):
        _dataname = options.get("dataname")
    else:
        _dataname = "twint"

    if not options.get("type"):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Context manager closes the store even if the write raises
            # (the original leaked the handle on error).
            with pd.HDFStore(_filename + ".h5") as _store:
                _store[_dataname] = _dataframe
    elif options.get("type") == "Pickle":
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            _dataframe.to_pickle(_filename + ".pkl")
    else:
        print("""Please specify: filename, DataFrame, DataFrame name and type
              (HDF5, default, or Pickle)""")
開發者ID:twintproject,項目名稱:twint,代碼行數:21,代碼來源:panda.py

示例8: read

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def read(_filename, **options):
    """Load a DataFrame previously written by :func:`save`.

    Parameters
    ----------
    _filename : str
        Input path without extension (".h5" or ".pkl" is appended).
    **options
        dataname : str, optional — HDF5 table name (default "twint").
        type : str, optional — falsy for HDF5, "Pickle" for a pickle file;
        any other value just prints usage help and returns None.

    Returns
    -------
    pandas.DataFrame or None
    """
    if not options.get("dataname"):
        _dataname = "twint"
    else:
        _dataname = options.get("dataname")

    if not options.get("type"):
        # Close the store after reading; the original returned the frame
        # while leaving the HDF5 file handle open.
        with pd.HDFStore(_filename + ".h5") as _store:
            return _store[_dataname]
    elif options.get("type") == "Pickle":
        return pd.read_pickle(_filename + ".pkl")
    else:
        # Fixed the truncated usage message (missing closing parenthesis).
        print("""Please specify: DataFrame, DataFrame name (twint as default),
              filename and type (HDF5, default, or Pickle)""")
開發者ID:twintproject,項目名稱:twint,代碼行數:18,代碼來源:panda.py

示例9: _open_hdf5

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def _open_hdf5(self, file_path):
        """Return the file handle of an HDF5 file as an pd.HDFStore object

        Cache and return the file handle for the HDF5 file at <file_path>

        Args:
            file_path (str): The path of the desired file

        Return:
            The cached file handle
        """

        if (file_path not in self._file_handles or
                not self._file_handles[file_path].is_open):
            self._file_handles[file_path] = pd.HDFStore(file_path, 'r')

        return self._file_handles[file_path] 
開發者ID:LSSTDESC,項目名稱:gcr-catalogs,代碼行數:19,代碼來源:dc2_object.py

示例10: store_dataframes

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def store_dataframes(out_hdf, **kwargs):
    """Serialize keyword-argument DataFrames into the HDF file *out_hdf*.

    Each keyword names its table: ``matrix1=DataFrame(...)`` is written to
    table 'matrix1'. Compression defaults to zlib level 9 and can be
    overridden with the reserved keywords ``complevel`` and ``complib``.
    """
    # Pop the compression settings so they are not mistaken for tables.
    complevel = kwargs.pop('complevel', 9)
    complib = kwargs.pop('complib', 'zlib')

    if VERBOSE:
        print(now(), 'Storing %d DataFrames in file %s with compression settings %d %s...' % (len(kwargs), out_hdf, complevel, complib))

    store = pd.HDFStore(out_hdf, complevel=complevel, complib=complib)  # TODO: WRITE ONLY? it probably appends now
    for name, frame in kwargs.items():
        store[name] = frame
    store.close()

    if VERBOSE:
        print(now(), 'DataFrames stored in file.')
開發者ID:FRED-2,項目名稱:OptiType,代碼行數:19,代碼來源:hlatyper.py

示例11: test_write_tables

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def test_write_tables(df, store_name):
    """orca.write_tables should write step tables to an HDF5 store,
    at the top level when no iteration number is given and under a
    numeric prefix otherwise.
    """
    # Register the fixture frame in orca's global table registry.
    orca.add_table('table', df)

    # A step that merely declares 'table' as an injected dependency.
    @orca.step()
    def step(table):
        pass

    step_tables = orca.get_step_table_names(['step'])

    # iter_var=None: table is stored without a prefix.
    orca.write_tables(store_name, step_tables, None)
    with pd.HDFStore(store_name, mode='r') as store:
        assert 'table' in store
        pdt.assert_frame_equal(store['table'], df)

    # iter_var=1969: table is stored under the '1969/' prefix.
    orca.write_tables(store_name, step_tables, 1969)

    with pd.HDFStore(store_name, mode='r') as store:
        assert '1969/table' in store
        pdt.assert_frame_equal(store['1969/table'], df)
開發者ID:UDST,項目名稱:orca,代碼行數:21,代碼來源:test_orca.py

示例12: test_run_and_write_tables_out_tables_provided

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def test_run_and_write_tables_out_tables_provided(df, store_name):
    """orca.run with explicit out_base_tables/out_run_tables should write
    all base tables under 'base/' but only the requested run tables under
    the per-iteration prefix.
    """
    # Register three identical tables in orca's global registry.
    table_names = ['table', 'table2', 'table3']
    for t in table_names:
        orca.add_table(t, df)

    # A step that injects two of the tables (plus the iteration variable).
    @orca.step()
    def step(iter_var, table, table2):
        return

    # Run one iteration, snapshotting all three tables at the start but
    # only writing 'table' per iteration.
    orca.run(
        ['step'],
        iter_vars=range(1),
        data_out=store_name,
        out_base_tables=table_names,
        out_run_tables=['table'])

    with pd.HDFStore(store_name, mode='r') as store:

        # Every base table was snapshotted under 'base/'.
        for t in table_names:
            assert 'base/{}'.format(t) in store

        # Only the requested run table appears under the iteration prefix.
        assert '0/table' in store
        assert '0/table2' not in store
        assert '0/table3' not in store
開發者ID:UDST,項目名稱:orca,代碼行數:26,代碼來源:test_orca.py

示例13: write

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def write(self, frames):
        """
        Write the frames to the target HDF5 file, using the format used by
        ``pd.Panel.to_hdf``

        NOTE(review): relies on ``pd.Panel``, which was removed in pandas
        1.0 — this code requires an older pandas; confirm the pinned
        version before touching this path.

        Parameters
        ----------
        frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
            An iterable or other mapping of sid to the corresponding OHLCV
            pricing data.
        """
        # Open in 'w' mode (truncates) with the writer's configured
        # compression settings, and write all frames as one Panel.
        with HDFStore(self._path, 'w',
                      complevel=self._complevel, complib=self._complib) \
                as store:
            panel = pd.Panel.from_dict(dict(frames))
            panel.to_hdf(store, 'updates')
        # Reopen with raw pytables to stamp a format-version attribute on
        # the root node (not expressible through the pandas API).
        with tables.open_file(self._path, mode='r+') as h5file:
            h5file.set_node_attr('/', 'version', 0)
開發者ID:enigmampc,項目名稱:catalyst,代碼行數:20,代碼來源:minute_bars.py

示例14: calculate_bgnd_from_masked_fulldata

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def calculate_bgnd_from_masked_fulldata(masked_image_file):
    """Estimate a background image from a masked-video HDF5 file.

    - Opens the masked_image_file hdf5 file, reads the /full_data node and
      builds a "background" by taking the maximum value of each pixel over
      time.
    - Parses the file name to find a camera serial number.
    - Reads the pixel/um ratio from the masked_image_file.
    """
    import numpy as np
    from tierpsy.helper.params import read_unit_conversions

    # Pull unit conversions and the illumination flag from file metadata.
    _, (microns_per_pixel, xy_units), is_light_background = read_unit_conversions(masked_image_file)

    with pd.HDFStore(masked_image_file, 'r') as fid:
        assert is_light_background, \
        'MultiWell recognition is only available for brightfield at the moment'
        # Per-pixel maximum over time serves as the background estimate.
        background = np.max(fid.get_node('/full_data'), axis=0)

    camera_serial = parse_camera_serial(masked_image_file)

    return background, camera_serial, microns_per_pixel
開發者ID:ver228,項目名稱:tierpsy-tracker,代碼行數:23,代碼來源:helper.py

示例15: ow_plate_summary

# 需要導入模塊: import pandas [as 別名]
# 或者: from pandas import HDFStore [as 別名]
def ow_plate_summary(fname):
    """Summarize per-plate OW features from *fname*.

    Merges the event features with every column of the
    '/features_timeseries' table, aggregates them with WormStats using
    np.nanmean, and returns the summary as a single-element list of
    DataFrames (bookkeeping columns in extra_fields are dropped).
    """
    all_feats = read_feat_events(fname)

    with pd.HDFStore(fname, 'r') as fid:
        features_timeseries = fid['/features_timeseries']

    # Add each timeseries column as a raw value array.
    for col in features_timeseries:
        all_feats[col] = features_timeseries[col].values

    stats = WormStats()
    summary = pd.DataFrame(stats.getWormStats(all_feats, np.nanmean))

    # Keep only the real feature columns, preserving their order.
    feature_cols = [c for c in summary.columns if c not in stats.extra_fields]
    summary = summary.loc[:, feature_cols]

    return [summary]
#%%
開發者ID:ver228,項目名稱:tierpsy-tracker,代碼行數:21,代碼來源:process_ow.py


注:本文中的pandas.HDFStore方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。