本文整理汇总了Python中opus_core.store.attribute_cache.AttributeCache.load_table方法的典型用法代码示例。如果您正苦于以下问题:Python AttributeCache.load_table方法的具体用法?Python AttributeCache.load_table怎么用?Python AttributeCache.load_table使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类opus_core.store.attribute_cache.AttributeCache的用法示例。
在下文中一共展示了AttributeCache.load_table方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test
# 需要导入模块: from opus_core.store.attribute_cache import AttributeCache [as 别名]
# 或者: from opus_core.store.attribute_cache.AttributeCache import load_table [as 别名]
def test(self):
    """Export the 'test_logical' DBF fixture table into the attribute cache
    and verify that both columns round-trip for the export year.

    AttributeCache resolves paths through the global SimulationState's
    current time, so the year is set before loading and restored afterwards
    (in a finally block, so a failing assertion cannot leak the changed year
    into other tests).
    """
    opus_core_path = OpusPackage().get_opus_core_path()
    dbf_directory = os.path.join(opus_core_path, 'tests', 'data', 'dbf')
    table_name = 'test_logical'
    cache_directory = self._temp_dir
    year = 1000
    exporter = ExportDbfTableToCacheCommand(
        dbf_directory=dbf_directory,
        table_name=table_name,
        cache_directory=cache_directory,
        year=year,
    )
    exporter.execute()
    attribute_cache = AttributeCache(cache_directory=cache_directory)
    old_time = SimulationState().get_current_time()
    SimulationState().set_current_time(year)
    try:
        values = attribute_cache.load_table(table_name)
        self.assertEqual(set(['keyid', 'works']), set(values.keys()))
        self.assert_(ma.allequal(array([1, 2, 3, 4, 5]), values['keyid']))
        # 'works' is a logical DBF column: True/False/missing map to 1/0/-1.
        # NOTE(review): the -1 for the third row presumably encodes a missing
        # logical value in the DBF fixture — confirm against the exporter.
        self.assert_(ma.allequal(array([1, 1, -1, 0, 0]), values['works']))
    finally:
        # Always restore the globally shared simulation time.
        SimulationState().set_current_time(old_time)
示例2: MergeCache
# 需要导入模块: from opus_core.store.attribute_cache import AttributeCache [as 别名]
# 或者: from opus_core.store.attribute_cache.AttributeCache import load_table [as 别名]
class MergeCache(Model):
    """Merge multiple years of one cache directory into a single one that can be used
    for example for a warm start."""

    def __init__(self, directory):
        # All reads and writes go through one AttributeCache rooted at 'directory'.
        self.storage = AttributeCache(directory)

    def run(self, year, cleanup_settings=None):
        """Merge all cached tables into ``year``.

        ``cleanup_settings`` maps table names to lists of attribute names to
        delete from that table before it is re-written; ``None`` means no
        cleanup.  Computed tables are always removed after the merge.
        """
        # Use None as the default: a mutable {} default would be shared
        # across calls and could accumulate state.
        if cleanup_settings is None:
            cleanup_settings = {}
        SimulationState().set_current_time(year)
        tables = self.storage.get_table_names()
        # cleanup
        for table in tables:
            tabdata = self.storage.load_table(table)
            if table in cleanup_settings:
                for attr in cleanup_settings[table]:
                    if attr in tabdata:
                        logger.log_status('Deleting attribute %s in %s.' % (attr, table))
                        del tabdata[attr]
            # Re-writing under the current year performs the actual merge.
            self.storage.write_table(table, tabdata)
        logger.log_status('Deleting all computed tables.')
        self.storage.delete_computed_tables()
        logger.log_status('Cache directory merged into %s' % year)
示例3: DataStructureModel
# 需要导入模块: from opus_core.store.attribute_cache import AttributeCache [as 别名]
# 或者: from opus_core.store.attribute_cache.AttributeCache import load_table [as 别名]
class DataStructureModel(Model):
    """
    Checks the structure of datasets in a given cache (or run cache) when compared to a reference cache.
    It writes out all columns that are missing as well as those that are not present in the reference cache.
    It can also compare the sizes of the datasets.
    """

    def __init__(self, reference_location=None):
        """
        "reference_location" is the directory of the reference cache and should include the year.
        If it is None, the simulation directory in its start year is taken.
        """
        if reference_location is None:
            reference_location = os.path.join(SimulationState().get_cache_directory(),
                                              "%s" % SimulationState().get_start_time())
        self.reference_storage = flt_storage(reference_location)

    def run(self, directory=None, check_size=True):
        """
        "directory" is the cache to be compared to the reference. It should not include the year
        as the model checks all years.
        Set "check_size" to False if no size check of the datasets is required.

        Returns a pandas DataFrame with one row per table that differs from
        the reference (missing columns, extra columns, and — if checked —
        differing sizes).
        """
        if directory is None:
            directory = SimulationState().get_cache_directory()
        self.cache = AttributeCache(directory)
        year_orig = SimulationState().get_current_time()
        years = self.years_in_cache()
        SimulationState().set_current_time(years[0])
        storages = {}
        for year in years:
            storages[year] = flt_storage(os.path.join(self.cache.get_storage_location(), '%s' % year))
        df = pd.DataFrame(columns=["Table", "Less-than-ref", "More-than-ref", "Year", "Size", "Size-ref"])
        tables = self.cache.get_table_names()
        for table in tables:
            columns_list = self.cache.get_column_names(table)
            # Builtin set replaces the long-deprecated sets.Set.
            columns = set(columns_list)
            ref_columns_list = self.reference_storage.get_column_names(table, lowercase=True)
            ref_columns = set(ref_columns_list)
            more = columns.difference(ref_columns)
            less = ref_columns.difference(columns)
            samesize = True
            if check_size:
                # Compare sizes via the first column of each table; assumes
                # all columns of a table have equal length.
                table_size = self.cache.load_table(table, columns_list[0])[columns_list[0]].size
                reftable_size = self.reference_storage.load_table(table, ref_columns_list[0])[ref_columns_list[0]].size
                if table_size != reftable_size:  # '!=' replaces Python-2-only '<>'
                    samesize = False
            if len(more) == 0 and len(less) == 0 and samesize:
                continue
            df.loc[df.shape[0]] = [table, ', '.join(less), ', '.join(more), '', 0, 0]
            if len(more) == 0 and samesize:
                continue
            # if there are columns in the "more" column, write out the corresponding years
            columns_and_years = self.cache._get_column_names_and_years(table)
            more_years = []
            for col, year in columns_and_years:
                if col in more:
                    more_years.append(year)
            df.loc[df.shape[0] - 1, "Year"] = ', '.join(np.unique(np.array(more_years).astype("str")))
            if not samesize:  # there is difference in table sizes
                df.loc[df.shape[0] - 1, "Size"] = table_size
                df.loc[df.shape[0] - 1, "Size-ref"] = reftable_size
        if not check_size or (df['Size'].sum() == 0 and df['Size-ref'].sum() == 0):
            # remove the size columns if not used
            del df['Size']
            del df['Size-ref']
        if df.shape[0] > 0:
            logger.log_status("Differences in data structure relative to %s:" % self.reference_storage.get_storage_location())
            logger.log_status(df)
        else:
            logger.log_status("Data structure corresponds to the one in %s" % self.reference_storage.get_storage_location())
        # Restore the globally shared simulation time (year_orig was saved
        # above but never restored in the original code).
        SimulationState().set_current_time(year_orig)
        return df

    def years_in_cache(self):
        # Sorted list of all year subdirectories found in the cache.
        return self.cache._get_sorted_list_of_years(start_with_current_year=False)