

Python pandas.HDFStore class: code examples

This article collects typical usage examples of pandas.HDFStore in Python. If you are wondering what the HDFStore class does, how to use it, or are looking for concrete usage examples, the curated class code examples below may help.


Fifteen code examples of the HDFStore class are shown below, sorted by popularity by default.
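
Before the collected examples, here is a minimal sketch of the typical HDFStore open/write/read/close cycle; the file name example.h5 and the key my_table are placeholders, not taken from any of the projects below:

import pandas as pd
from pandas import HDFStore

# open (or create) an HDF5 file backed by PyTables
store = HDFStore("example.h5")

# write a DataFrame under a key; equivalent to store.put("my_table", df)
df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
store["my_table"] = df

# list stored keys and read the DataFrame back
print(store.keys())        # ['/my_table']
restored = store["my_table"]

# always close the file when done
store.close()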

Example 1: build_actualisation_groups

    def build_actualisation_groups(self, filename = None):
        '''
        Builds actualisation groups
        '''
        if filename is None:
            data_dir = CONF.get('paths', 'data_dir')
            fname = "actualisation_groups.h5"
            filename = os.path.join(data_dir, fname)

        store = HDFStore(filename)
        df = store['vars']
        coeff_list = sorted(unique(df['coeff'].dropna()))

        vars = dict()
        for coeff in coeff_list:
            vars[coeff] = list(df[ df['coeff']==coeff ]['var'])

        self.actualisation_vars = vars
        self.coeffs_df = store['names']
        self.coeffs_df['coeff'] = self.coeffs_df['coeff'].str.replace(' ','') # remove spaces

        yr = 1*self.survey_year
        self.coeffs_df['value'] = 1
        while yr < self.datesim_year:
            if yr in self.coeffs_df.columns:
                factor = self.coeffs_df[yr]
            else:
                factor = 1
            self.coeffs_df['value'] = self.coeffs_df['value']*factor
            yr += 1

        self.coeffs_df.set_index(['coeff'], inplace = True)
        store.close()
Author: Iliato, Project: openfisca-qt, Lines: 35, Source: Inflation.py

Example 2: SAVE_ChangeDictOrder

def SAVE_ChangeDictOrder(_processedEvents):
    '''Change the nesting order for the final HDF database - instead of correct/attention, it will go attention/present/correct etc'''


    h_path = "/Users/ryszardcetnarski/Desktop/Nencki/TD/HDF/"
    #Replace the '_EVENTS' because the path in the HDF must match exactly, otherwise nothing was being saved
    all_event_names = sorted([name.replace('_EVENTS', '') for name in events_names if bef_aft_dict[bef_aft_switch + '_mat'] in name])

    store = HDFStore(h_path +bef_aft_dict[bef_aft_switch+ '_hdf'])

    for _data, recording in zip(_processedEvents, all_event_names):
        print('I')
        sname = recording.rfind("/") +1
        subId = recording[sname:-4].replace("-", "_")

        store[subId + '/events/attention/correct'] = _data['correct']['attention'].convert_objects()
        store[subId + '/events/motor/correct'] = _data['correct']['motor'].convert_objects()

        store[subId + '/events/attention/incorrect'] = _data['incorrect']['attention'].convert_objects()
        store[subId + '/events/motor/incorrect'] = _data['incorrect']['motor'].convert_objects()

        #print(_data['incorrect']['motor'].convert_objects())

    store.close()
Author: ryscet, Project: TopDown, Lines: 26, Source: OpenData.py

Example 3: save_temp

def save_temp(dataframe, name = None, year = None, config_files_directory = default_config_files_directory):
    """
    Save a temporary table

    Parameters
    ----------
    dataframe : pandas DataFrame
                the dataframe to save
    name : string, default None
           name of the table (used to build the store path)
    year : integer, default None
           year of the data
    """
    if year is None:
        raise Exception("year is needed")
    if name is None:
        raise Exception("name is needed")
    hdf_file_path = get_tmp_file_path(config_files_directory = config_files_directory)
    store = HDFStore(hdf_file_path)
    log.info("{}".format(store))
    store_path = "{}/{}".format(year, name)

    if store_path in store.keys():
        del store["{}/{}".format(year, name)]

    dataframe.to_hdf(hdf_file_path, store_path)

    store.close()
    return True
Author: LouisePaulDelvaux, Project: openfisca-france-data, Lines: 29, Source: __init__.py

Example 4: func_wrapper

 def func_wrapper(*args, **kwargs):
     temporary_store = HDFStore(file_path)
     try:
         return func(*args, temporary_store = temporary_store, **kwargs)
     finally:
         gc.collect()
         temporary_store.close()
Author: openfisca, Project: openfisca-survey-manager, Lines: 7, Source: temporary.py
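
The snippet above is only the inner wrapper; below is a hedged sketch of a decorator that could produce it. The decorator name with_temporary_store, the file_path argument, and the fill_store usage are assumptions for illustration, not names taken from openfisca-survey-manager.

import gc

import pandas as pd
from pandas import HDFStore

def with_temporary_store(file_path):
    # hypothetical decorator factory: injects an open HDFStore as the
    # temporary_store keyword argument and guarantees it is closed afterwards
    def decorator(func):
        def func_wrapper(*args, **kwargs):
            temporary_store = HDFStore(file_path)
            try:
                return func(*args, temporary_store = temporary_store, **kwargs)
            finally:
                gc.collect()
                temporary_store.close()
        return func_wrapper
    return decorator

@with_temporary_store("temporary.h5")
def fill_store(temporary_store = None):
    temporary_store["dummy"] = pd.DataFrame({"x": [1, 2, 3]})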

Example 5: test

def test():

    directory = os.path.dirname(__file__)
    fname = os.path.join(directory, H5_FILENAME)
    store = HDFStore(fname)
    print(store)
    print(store.keys())
Author: Iliato, Project: openfisca-qt, Lines: 7, Source: age_structure.py

Example 6: main

def main():
    # the loaded data is a DataFrame
    genedata = load_gene_dataset()
    
    # randomly split the dataset to three folds
    # this code should be improved in the future
    kfold = 3.0
    data_kfold = {}
    train, fold1 = train_test_split(genedata, test_size=1/kfold)
    data_kfold['fold1'] = fold1
    fold3, fold2 = train_test_split(train, test_size=0.5)
    data_kfold['fold2'] = fold2
    data_kfold['fold3'] = fold3
    
    # now we want to train a network for each fold
    # store the results in h5 file
    geneStore = HDFStore('predGeneExp1.h5')
    for i, key in enumerate(data_kfold):
        print(key)
        test_data = data_kfold[key]
        X_val, y_val = get_input_output(test_data)
        keys = list(data_kfold.keys())  # list() so that remove() works under Python 3
        keys.remove(key)
        training_data = pd.concat([data_kfold[keys[0]],data_kfold[keys[1]]])
        X_train, y_train = get_input_output(training_data)
        print(keys)
        # use these data to train the network
        main_training(key, X_train, y_train, X_val, y_val, geneStore)
   
    # the h5 file must be closed after use
    geneStore.close()
Author: MingjunZhong, Project: bioinformatics-splicing, Lines: 31, Source: read_gene_data.py
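
The hand-rolled three-way split in main() (flagged by the author as code to improve) could be replaced with scikit-learn's KFold. A minimal sketch, under the assumption that genedata is the DataFrame returned by load_gene_dataset() and that the fold dictionary keeps the same 'foldN' keys:

from sklearn.model_selection import KFold

def split_into_folds(genedata, n_splits=3, seed=0):
    # KFold yields disjoint test-index arrays that together cover every row,
    # so each fold is a non-overlapping slice of the original DataFrame
    data_kfold = {}
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for i, (_, fold_index) in enumerate(kf.split(genedata), start=1):
        data_kfold['fold%d' % i] = genedata.iloc[fold_index]
    return data_kfold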

Example 7: AddSeqComp

def AddSeqComp(mypath):
    """ Loads TestLogAll.h5 from the specified path, then calls 
    MeasurementGroupTools.AddSeqComp to recalculate seq components using FFT  

    Input:  Directory of the measurement campaign, e.g.: "aLabView2"
    Output: Results1.h5, Results1.pdf in the data subdirs.
    """
    from pandas import HDFStore, ExcelWriter
    import MeasurementGroupTools as mgt

    h5logs = HDFStore(mypath + "\\" + 'TestLogsAll.h5')
    TestLog = h5logs['TestLogsAll']

    dirs = TestLog[u'DirName'].unique()
    for dname in dirs:
        mysubdirpath = mypath + "\\" + dname
        print "Processing: " + dname
        mgt.AddSeqComp(mysubdirpath, TestLog, dname)

    h5logs.put('TestLogsAll',TestLog)
    h5logs.close()

    writer = ExcelWriter(mypath + "\\" + 'TestLogsAll.xlsx')
    TestLog.to_excel(writer,'TestLogsAll') # the second argument defines sheet name
    writer.save()

    return
Author: jbebic, Project: pv-inverters-islanding-tests, Lines: 27, Source: MeasurementCampaignTools.py

Example 8: show_temp

def show_temp(config_files_directory = default_config_files_directory):

    hdf_file_path = get_tmp_file_path(config_files_directory = config_files_directory)
    store = HDFStore(hdf_file_path)

    log.info("{}".format(store))
    store.close()
Author: LouisePaulDelvaux, Project: openfisca-france-data, Lines: 7, Source: __init__.py

Example 9: build_from_openfisca

def build_from_openfisca( directory = None):

    df_age_final = None
    for yr in range(2006,2010):
        simulation = SurveySimulation()
        simulation.set_config(year = yr)
        simulation.set_param()
        simulation.set_survey()


        df_age = get_age_structure(simulation)
        df_age[yr] = df_age['wprm']
        del df_age['wprm']
        if df_age_final is None:
            df_age_final = df_age
        else:
            df_age_final = df_age_final.merge(df_age)

    if directory is None:
        directory = os.path.dirname(__file__)

    fname = os.path.join(directory, H5_FILENAME)
    store = HDFStore(fname)
    print(df_age_final.dtypes)
    store.put("openfisca", df_age_final)
    store.close()
Author: Iliato, Project: openfisca-qt, Lines: 26, Source: age_structure.py

Example 10: download

def download():
    """ Convenience method that downloads all the weather data required
    for the machine learning examples.
    """
    reader = GSODDataReader()
    year_list = range(2001, 2012)
    austin = reader.collect_data(year_list, exact_station=True,
        station_name='AUSTIN CAMP MABRY', state='TX', country='US')
    houston = reader.collect_data(year_list, exact_station=True,
        station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')
    new_york = reader.collect_data(year_list, exact_station=True,
        station_name='NEW YORK/LA GUARDIA', state='NY', country='US')
    newark = reader.collect_data(year_list, exact_station=True,
        station_name='NEWARK INTL AIRPORT', state='NJ', country='US')
    punta_arenas = reader.collect_data(year_list, exact_station=True,
        station_name='PUNTA ARENAS', country='CH')
    wellington = reader.collect_data(year_list, exact_station=True,
        station_name='WELLINGTON AIRPORT', country='NZ')
    store = HDFStore('weather.h5')
    store['austin'] = austin
    store['houston'] = houston
    store['nyc'] = new_york
    store['newark'] = newark
    store['punta_arenas'] = punta_arenas
    store['wellington'] = wellington
    store.close()
Author: JuergenNeubauer, Project: pygotham, Lines: 26, Source: ml_data.py
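
Reading the file back later only requires reopening it with the same keys; a small sketch, assuming weather.h5 was produced by download() above:

from pandas import HDFStore

store = HDFStore('weather.h5')
print(store.keys())          # one '/<station>' entry per DataFrame saved above
austin = store['austin']
wellington = store['wellington']
store.close()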

Example 11: _get

 def _get(self, path):
     s = HDFStore(self.path)
     d = None
     if path in s:
         d = s[path]
     s.close()
     return d
Author: cfriedline, Project: gypsy_moth, Lines: 7, Source: hdfstorehelper.py

Example 12: convert_fiducial

def convert_fiducial(filename, output_type="csv"):
    '''
    Converts the fiducial comparison HDF5 files into a CSV file.

    Parameters
    ----------
    filename : str
        HDF5 file.
    output_type : str, optional
        Type of file to output.
    '''

    store = HDFStore(filename)
    data_columns = dict()
    for key in store.keys():
        data = store[key].sort_index(axis=1)  # DataFrame.sort() was removed from pandas; sort_index(axis=1) sorts the columns by label
        mean_data = data.mean(axis=1)
        data_columns[key[1:]] = mean_data
    store.close()

    df = DataFrame(data_columns)

    output_name = "".join(filename.split(".")[:-1]) + "." + output_type

    df.to_csv(output_name)
Author: keflavich, Project: TurbuStat, Lines: 25, Source: convert_results.py

Example 13: in_store

 def in_store(self, path):
     s = HDFStore(self.path)
     val = False
     if path in s:
         val = True
     s.close()
     return val
Author: cfriedline, Project: gypsy_moth, Lines: 7, Source: hdfstorehelper.py

Example 14: test_chunk

def test_chunk():
    print "debut"
    writer = None
    years = range(2011,2012)
    filename = destination_dir+'output3.h5'
    store = HDFStore(filename)
    for year in years:
        yr = str(year)
#        fname = "Agg_%s.%s" %(str(yr), "xls")
        simu = SurveySimulation()
        simu.set_config(year = yr)
        simu.set_param()
        import time

        tps = {}
        for nb_chunk in range(1,5):
            deb_chunk = time.clock()
            simu.set_config(survey_filename='C:\\Til\\output\\to_run_leg.h5', num_table=3, chunks_count=nb_chunk ,
                            print_missing=False)
            simu.compute()
            tps[nb_chunk] = time.clock() - deb_chunk

            voir = simu.output_table.table3['foy']
            print(len(voir))
            pdb.set_trace()
            agg3 = Aggregates()
            agg3.set_simulation(simu)
            agg3.compute()
            df1 = agg3.aggr_frame
            print(df1.to_string())

    print(tps)
    store.close()
Author: Iliato, Project: openfisca-qt, Lines: 33, Source: chunk.py

Example 15: put

 def put(self, path, obj):
     s = HDFStore(self.path)
     if path in s:
         print "updating %s" % path
         s.remove(path)
     s[path] = obj
     s.close()
Author: lindsaymiles, Project: whitebark_pine, Lines: 7, Source: hdfstorehelper.py
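
Examples 11, 13 and 15 are all methods of a module named hdfstorehelper.py; below is a hedged sketch of how the three methods could sit together in one small wrapper class. The class name HDFStoreHelper and its constructor are assumptions for illustration, not taken from the source projects.

from pandas import HDFStore

class HDFStoreHelper(object):
    def __init__(self, path):
        # path of the backing HDF5 file
        self.path = path

    def put(self, path, obj):
        # overwrite-or-create semantics, as in Example 15
        s = HDFStore(self.path)
        if path in s:
            print("updating %s" % path)
            s.remove(path)
        s[path] = obj
        s.close()

    def _get(self, path):
        # return the stored object, or None if the key is missing (Example 11)
        s = HDFStore(self.path)
        d = s[path] if path in s else None
        s.close()
        return d

    def in_store(self, path):
        # membership test without keeping the file open (Example 13)
        s = HDFStore(self.path)
        val = path in s
        s.close()
        return val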


Note: The pandas.HDFStore class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow the license of the corresponding project. Do not reproduce without permission.