本文整理汇总了Python中opus_core.session_configuration.SessionConfiguration.replace_dataset方法的典型用法代码示例。如果您正苦于以下问题:Python SessionConfiguration.replace_dataset方法的具体用法?Python SessionConfiguration.replace_dataset怎么用?Python SessionConfiguration.replace_dataset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类opus_core.session_configuration.SessionConfiguration的用法示例。
在下文中一共展示了SessionConfiguration.replace_dataset方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setUp
# 需要导入模块: from opus_core.session_configuration import SessionConfiguration [as 别名]
# 或者: from opus_core.session_configuration.SessionConfiguration import replace_dataset [as 别名]
def setUp(self):
    """Build small in-memory household and gridcell datasets and register
    them in the session's dataset pool for the tests below."""
    storage = StorageFactory().get_storage('dict_storage')

    # Ten households; grid_id runs 0..9 (note: includes 0, i.e. one
    # household outside the 1-based gridcell id range).
    household_table = {
        'household_id': arange(1, 11),
        'grid_id': arange(0, 10),
        'lucky': array([1, 0, 1, 0, 1, 1, 1, 1, 0, 0]),
    }
    # Fifteen gridcells with a 0/1 filter and sampling weights.
    gridcell_table = {
        'grid_id': arange(1, 16),
        'filter': array([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1]),
        'weight': array([0.1, 9, 15, 2, 5, 1, 6, 2.1, .3, 4, 3, 1, 10, 8, 7]),
    }
    storage.write_table(table_name='households', table_data=household_table)
    storage.write_table(table_name='gridcells', table_data=gridcell_table)

    dataset_pool = SessionConfiguration(in_storage=storage).get_dataset_pool()

    self.households = Dataset(in_storage=storage,
                              in_table_name='households',
                              id_name="household_id",
                              dataset_name="household")
    self.gridcells = Dataset(in_storage=storage,
                             in_table_name='gridcells',
                             id_name="grid_id",
                             dataset_name="gridcell")

    # Make sure the pool hands out exactly these instances.
    dataset_pool.replace_dataset('household', self.households)
    dataset_pool.replace_dataset('gridcell', self.gridcells)
示例2: run
# 需要导入模块: from opus_core.session_configuration import SessionConfiguration [as 别名]
# 或者: from opus_core.session_configuration.SessionConfiguration import replace_dataset [as 别名]
def run(self, demographic_data_file,
        household_dataset,
        person_dataset,
        year=None,
        keep_attributes=None,
        keep_attributes_p=None,
        fill_value=-1,
        demographic_attributes=None,
        demographic_attributes_p=None,
        dataset_pool=None
        ):
    """Replace the household and person datasets in the dataset pool with
    data loaded for the given year from an external hdf5 demographic file,
    carrying selected attributes over from the existing datasets.

    demographic_data_file: an hdf5 file that contains households and persons data
                           in pandas DataFrame format.
                           Run paris/scripts/prepare_demographic_data.py to create
                           the file.
    household_dataset: opus dataset of household
    person_dataset: opus dataset of person
    year: integer, optional; defaults to the current simulation year
    keep_attributes: list, attributes to keep from household dataset
    keep_attributes_p: list, attributes to keep from person dataset
    fill_value: fill attributes with fill_value for new households
    demographic_attributes: dictionary, attributes to load from external
                            demographic file. The key of the dictionary is
                            attribute name for household data, its value is
                            the expression to compute the attribute value.
                            See unittest for example.
    demographic_attributes_p: Same as demographic_attributes, for persons
    dataset_pool: opus DatasetPool object, optional
    """
    if dataset_pool is None:
        dataset_pool = SessionConfiguration().get_dataset_pool()
    # Resolve a relative file name against the pool's storage location.
    if not os.path.isabs(demographic_data_file):
        demographic_data_file = os.path.join(dataset_pool.get_storage().get_storage_location(), demographic_data_file)
    if year is None:
        year = SimulationState().get_current_time()
    ## this relies on the order of household_id in
    ## households data and persons attributes summarized
    ## by household_id is the same
    # NOTE(review): fh is never closed in the visible code — verify against
    # the full source whether the handle is released elsewhere.
    fh = h5py.File(demographic_data_file, 'r')
    year_str = str(year)
    # The hdf5 file is organized as one group per simulation year.
    dmgh_current = fh[year_str]
    household_id = household_dataset.get_id_name()[0]
    person_id = person_dataset.get_id_name()[0]
    # Convert the hdf5 compound arrays into opus Datasets...
    hhs_new = compound_array_to_dataset(dmgh_current['household'],
                                        table_name='households',
                                        id_name=household_id,
                                        dataset_name=household_dataset.dataset_name)
    ps_new = compound_array_to_dataset(dmgh_current['person'],
                                       table_name='persons',
                                       id_name=person_id,
                                       dataset_name=person_dataset.dataset_name)
    # ...and make them the authoritative datasets in the pool.
    dataset_pool.replace_dataset(household_dataset.dataset_name, hhs_new)
    dataset_pool.replace_dataset(person_dataset.dataset_name, ps_new)
    hh_ids = hhs_new[household_id]
    n_hhs = hh_ids.size
    results = {}
    results[household_id] = hh_ids
    # Evaluate each configured expression against the new household data.
    for k, v in demographic_attributes.iteritems():
        results[k] = hhs_new.compute_variables(v)
    logger.log_status( ('Loaded demographic characteristics {0} for {1} ' +\
                        'households from external file {2}.').format(
                            demographic_attributes.keys(), n_hhs,
                            demographic_data_file) )
    p_ids = ps_new[person_id]
    n_ps = p_ids.size
    results_p = {}
    results_p[person_id] = p_ids
    # Same evaluation for person-level expressions.
    for k, v in demographic_attributes_p.iteritems():
        results_p[k] = ps_new.compute_variables(v)
    logger.log_status( ('Loaded demographic characteristics {0} for {1} ' +\
                        'persons from external file {2}.').format(
                            demographic_attributes_p.keys(), n_ps,
                            demographic_data_file) )
    # Carry kept household attributes over; households that are new in the
    # external file (not in household_dataset) get fill_value.
    is_existing = in1d(hh_ids, household_dataset[household_id])
    for attr in keep_attributes:
        dtype = household_dataset[attr].dtype
        values = fill_value * ones(n_hhs, dtype=dtype)
        values[is_existing] = household_dataset.get_attribute_by_id(attr,
                                                                    hh_ids[is_existing])
        results[attr] = values
    # Carry kept person attributes over; string-typed attributes need an
    # empty array of the right dtype instead of fill_value * ones.
    is_existing = in1d(p_ids, person_dataset[person_id])
    for attr in keep_attributes_p:
        if attr in person_dataset.get_known_attribute_names():
            dtype = person_dataset[attr].dtype
            if dtype.type is string_:
                values = empty(n_ps, dtype=dtype)
            else:
    # ......... remainder of this example omitted by the source page .........
示例3: run
# 需要导入模块: from opus_core.session_configuration import SessionConfiguration [as 别名]
# 或者: from opus_core.session_configuration.SessionConfiguration import replace_dataset [as 别名]
def run(self, demographic_data_file,
        household_dataset,
        year=None,
        keep_attributes=None,
        fill_value=-1,
        demographic_attributes=None,
        dataset_pool=None
        ):
    """Replace the household dataset in the dataset pool with data loaded
    for the given year from an external hdf5 demographic file, carrying
    selected attributes over from the existing household dataset.

    demographic_data_file: an hdf5 file that contains households and persons data
                           in pandas DataFrame format.
                           Run paris/scripts/prepare_demographic_data.py to create
                           the file.
    household_dataset: opus dataset of household
    year: integer, optional; defaults to the current simulation year
    keep_attributes: list, attributes to keep from household dataset
    fill_value: fill attributes with fill_value for new households
    demographic_attributes: dictionary, attributes to load from external
                            demographic file. The key of the dictionary is
                            attribute name for household data, its value is
                            the expression to compute the attribute value.
                            See unittest for example.
    dataset_pool: opus DatasetPool object, optional

    Returns the rebuilt household Dataset (also registered in the pool).
    """
    if dataset_pool is None:
        dataset_pool = SessionConfiguration().get_dataset_pool()
    hh_ds = household_dataset
    if year is None:
        year = SimulationState().get_current_time()
    ## this relies on the order of household_id in
    ## households data and persons attributes summarized
    ## by household_id is the same
    fh = h5py.File(demographic_data_file, 'r')
    # FIX: the original leaked the hdf5 handle; close it once all data has
    # been read. NOTE(review): assumes compound_array_to_dataset copies the
    # hdf5 data into in-memory arrays — confirm before relying on the
    # close() placement.
    try:
        # The hdf5 file is organized as one group per simulation year.
        dmgh_current = fh[str(year)]
        hhs_new = compound_array_to_dataset(dmgh_current['household'],
                                            table_name='households',
                                            id_name=hh_ds.get_id_name(),
                                            dataset_name=hh_ds.dataset_name)
        ps = compound_array_to_dataset(dmgh_current['person'],
                                       table_name='persons',
                                       id_name='person_id',
                                       dataset_name='person')
        # Make the freshly loaded data authoritative in the pool.
        dataset_pool.replace_dataset(hh_ds.dataset_name, hhs_new)
        dataset_pool.replace_dataset('person', ps)
        hh_ids = hhs_new['household_id']
        n_hhs = hh_ids.size
        results = {}
        results['household_id'] = hh_ids
        # Evaluate each configured expression against the new household data.
        for k, v in demographic_attributes.iteritems():
            results[k] = hhs_new.compute_variables(v)
        logger.log_status( ('Loaded demographic characteristics {} for {} ' +\
                            'households from external file {}.').format(
                                demographic_attributes.keys(), n_hhs,
                                demographic_data_file) )
        # Carry kept attributes over; households that are new in the external
        # file (not in hh_ds) get fill_value.
        is_existing = in1d(hh_ids, hh_ds['household_id'])
        for attr in keep_attributes:
            dtype = hh_ds[attr].dtype
            values = fill_value * ones(n_hhs, dtype=dtype)
            values[is_existing] = hh_ds.get_attribute_by_id(attr,
                                                            hh_ids[is_existing])
            results[attr] = values
    finally:
        fh.close()
    # Rebuild the household dataset from the merged attribute table.
    storage = StorageFactory().get_storage('dict_storage')
    table_name = 'households'
    storage.write_table(table_name=table_name,
                        table_data=results)
    new_hh_ds = Dataset(in_storage=storage,
                        in_table_name=table_name,
                        id_name=household_dataset.get_id_name(),
                        dataset_name=household_dataset.dataset_name)
    household_dataset = new_hh_ds
    # dataset_pool is guaranteed non-None here (created above if missing),
    # so the original `if dataset_pool is not None` guard was dead code.
    dataset_pool.replace_dataset(household_dataset.dataset_name,
                                 household_dataset)
    return household_dataset