This article collects typical usage examples of the Python class opus_core.session_configuration.SessionConfiguration. If you are wondering what the SessionConfiguration class does, how to use it, or what it looks like in real code, the curated class examples below should help.
The 15 code examples of the SessionConfiguration class shown below are drawn from real projects and are sorted by popularity by default.
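Before the individual examples, here is a minimal sketch of the pattern nearly all of them share: SessionConfiguration is a process-wide singleton, created once with new_instance=True and afterwards retrieved anywhere else by calling the bare constructor. The package order below is a placeholder, and the AttributeCache import path is assumed from the usual opus_core layout rather than taken from any one example.

from opus_core.session_configuration import SessionConfiguration
from opus_core.store.attribute_cache import AttributeCache  # assumed import path

# Create (or reset) the process-wide configuration singleton.
SessionConfiguration(new_instance=True,
                     package_order=['urbansim', 'opus_core'],  # packages searched for variables, in order
                     in_storage=AttributeCache())

# Anywhere else in the same process, the bare constructor returns that same instance.
dataset_pool = SessionConfiguration().get_dataset_pool()
households = dataset_pool.get_dataset('household')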
Example 1: run
def run(self, config, year, *args, **kwargs):
    """This is the main entry point. It gets the appropriate configuration info from the
    travel_model_configuration part of this config, and then copies the specified
    UrbanSim data into files for the travel model to read.
    """
    cache_directory = config['cache_directory']
    simulation_state = SimulationState()
    simulation_state.set_cache_directory(cache_directory)
    simulation_state.set_current_time(year)
    attribute_cache = AttributeCache()
    dataset_pool = SessionConfiguration(new_instance=True,
                                        package_order=config['dataset_pool_configuration'].package_order,
                                        in_storage=attribute_cache).get_dataset_pool()
    #cache_storage = AttributeCache().get_flt_storage_for_year(year_for_base_year_cache)
    #datasets = DatasetFactory().create_datasets_from_flt(config.get('datasets_to_preload', {}),
    #                                                     "urbansim",
    #                                                     additional_arguments={'in_storage': attribute_cache})
    zone_set = dataset_pool.get_dataset('travel_zone')
    self.prepare_for_run(config['travel_model_configuration'], year)
    self.create_travel_model_input_file(config=config,
                                        year=year,
                                        zone_set=zone_set,
                                        datasets=dataset_pool,
                                        *args, **kwargs)
Example 2: run
def run(self, year=None, years_to_run=[]):
    if year not in years_to_run:
        return
    logger.log_status("Finding buildings with over-assigned households...")
    dataset_pool = SessionConfiguration().get_dataset_pool()
    building = dataset_pool.get_dataset('building')
    assigned_households = building.compute_variables('_assigned_hh = building.number_of_agents(household)')
    building_type_ids = building.get_attribute('building_type_id')
    overassigned = building.compute_variables('_overassigned = _assigned_hh > building.residential_units')
    idx_overassigned = where(overassigned)[0]
    num_overassigned = len(idx_overassigned)
    logger.log_status("Found %d overassigned buildings" % num_overassigned)
    # grow each over-assigned building to fit its assigned households
    new_res_units = building.get_attribute('_assigned_hh')[idx_overassigned]
    building.modify_attribute('residential_units', new_res_units, idx_overassigned)
    # convert over-assigned single-family buildings (types 1 and 2) to type 3
    idx_sf_overassigned = where(logical_and(overassigned, logical_or(building_type_ids == 1,
                                                                     building_type_ids == 2)))[0]
    new_building_ids = ones(idx_sf_overassigned.size, dtype="i4") * 3
    building.modify_attribute('building_type_id', new_building_ids, idx_sf_overassigned)
    # recalculate overassignment to see how we did
    overassigned = building.compute_variables('_overassigned = _assigned_hh > building.residential_units')
    idx_overassigned = where(overassigned)[0]
    num_overassigned = len(idx_overassigned)
    logger.log_status("%d overassigned remain" % num_overassigned)
Example 3: __init__
def __init__(self, config):
    ss = SimulationState(new_instance=True)
    ss.set_current_time(config['base_year'])
    ss.set_cache_directory(config['cache_directory'])
    SessionConfiguration(new_instance=True,
                         package_order=config['dataset_pool_configuration'].package_order,
                         in_storage=AttributeCache())
    #if not os.path.exists(config['cache_directory']): ## if cache exists, it will automatically skip
    cacher = CreateBaseyearCache()
    cache_dir = cacher.run(config)
    if 'estimation_database_configuration' in config:
        db_server = DatabaseServer(config['estimation_database_configuration'])
        db = db_server.get_database(config['estimation_database_configuration'].database_name)
        out_storage = StorageFactory().get_storage(
            'sql_storage',
            storage_location=db)
    else:
        output_cache = os.path.join(config['cache_directory'], str(config['base_year'] + 1))
        out_storage = StorageFactory().get_storage('flt_storage', storage_location=output_cache)
    dataset_pool = SessionConfiguration().get_dataset_pool()
    households = dataset_pool.get_dataset("household")
    buildings = dataset_pool.get_dataset("building")
    zones = dataset_pool.get_dataset("zone")
    zone_ids = zones.get_id_attribute()
    capacity_attribute_name = "residential_units"  #_of_use_id_%s" % id
    capacity_variable_name = "%s=sanfrancisco.zone.aggregate_%s_from_building" % \
                             (capacity_attribute_name, capacity_attribute_name)
    buildings.compute_variables("sanfrancisco.building.zone_id", dataset_pool=dataset_pool)
    zones.compute_variables(capacity_variable_name, dataset_pool=dataset_pool)
    building_zone_id = buildings.get_attribute('zone_id')
    # is_household_unplaced = datasets['household'].get_attribute("building_id") <= 0
    is_household_unplaced = 1  # all households are unplaced
    household_building_id = zeros(households.size(), dtype='int32') - 1  # datasets['household'].get_attribute("building_id")
    for zone_id in zone_ids:
        capacity = zones.get_attribute_by_id(capacity_attribute_name, zone_id)
        is_household_in_this_zone = (households.get_attribute('zone_id') == zone_id)
        is_unplaced_household_in_this_zone = is_household_in_this_zone * is_household_unplaced
        is_building_in_this_zone = (building_zone_id == zone_id)
        # if not is_household_in_this_zone.sum() <= capacity:
        if capacity == 0 or is_household_in_this_zone.sum() == 0:
            print "WARNING: zone %s has %s households but only %s units" % (zone_id, is_household_in_this_zone.sum(), capacity)
            continue
        # sample a building for each unplaced household, weighted by each building's share of zone capacity
        prob = buildings.get_attribute(capacity_attribute_name) * is_building_in_this_zone / array(capacity, dtype=float64)
        r = random(sum(is_unplaced_household_in_this_zone))
        prob_cumsum = ncumsum(prob)
        index_to_bldg = searchsorted(prob_cumsum, r)
        household_building_id[where(is_unplaced_household_in_this_zone)] = buildings.get_attribute_by_index('building_id', index_to_bldg)
    # import pdb;pdb.set_trace()
    households.set_values_of_one_attribute('building_id', household_building_id)
    households.write_dataset(out_table_name='households', out_storage=out_storage)
Example 4: _compute_variable_for_prior_year
def _compute_variable_for_prior_year(self, dataset, full_name, time, resources=None):
    """Create a new dataset for this variable, compute the variable, and then return
    the values for this variable."""
    calling_dataset_pool = SessionConfiguration().get_dataset_pool()
    calling_time = SimulationState().get_current_time()
    SimulationState().set_current_time(time)
    # Do not flush any variables when computing dependencies for a lag variable.
    prior_flush_state = SimulationState().get_flush_datasets()
    SimulationState().set_flush_datasets(False)
    try:
        # Get an empty dataset pool with same search paths.
        my_dataset_pool = DatasetPool(
            package_order=calling_dataset_pool.get_package_order(),
            storage=AttributeCache()
        )
        try:
            ds = dataset.empty_dataset_like_me(in_storage=AttributeCache())
        except FileNotFoundError:
            ## necessary when a dataset is not cached, but created on-the-fly, e.g. submarket
            ds = my_dataset_pool.get_dataset(dataset.dataset_name)
        # Don't pass any datasets via resources, since they may be from a different time.
        my_resources = Resources(resources)
        for key in my_resources:
            if isinstance(key, Dataset):
                del my_resources[key]
        ds.compute_variables(full_name, my_dataset_pool, resources=my_resources)
        values = ds.get_attribute(full_name)
        return values
    finally:
        SimulationState().set_current_time(calling_time)
        SimulationState().set_flush_datasets(prior_flush_state)
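Examples 4, 9, and 14 all rely on the same save-and-restore idiom: move SimulationState back to the target year, build a fresh DatasetPool over the cache, compute, and restore the clock in a finally block so an exception cannot leave the process stuck in the past. Distilled to its core as a sketch ('gridcell' is a placeholder dataset name):

# Sketch of the time-travel idiom; 'gridcell' is a placeholder dataset name.
calling_time = SimulationState().get_current_time()
SimulationState().set_current_time(calling_time - 1)  # step back one simulated year
try:
    pool = DatasetPool(package_order=SessionConfiguration().get_dataset_pool().get_package_order(),
                       storage=AttributeCache())
    prior_gridcells = pool.get_dataset('gridcell')
finally:
    SimulationState().set_current_time(calling_time)  # always restore the caller's year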
Example 5: __init__
def __init__(self,
             resources=None,
             in_storage=None,
             out_storage=None,
             in_table_name=None,
             out_table_name=None,
             attributes=None,
             id_name=None,
             nchunks=None,
             debuglevel=0
             ):
    try:
        debug = SessionConfiguration().get('debuglevel', 0)
    except:
        debug = 0
    debug = DebugPrinter(debug)
    if debuglevel > debug.flag:
        debug.flag = debuglevel
    debug.print_debug("Creating object %s.%s" % (self.__class__.__module__, self.__class__.__name__), 2)
    resources = ResourceFactory().get_resources_for_dataset(
        self.dataset_name,
        resources=resources,
        in_storage=in_storage,
        in_table_name_pair=(in_table_name, self.in_table_name_default),
        attributes_pair=(attributes, self.attributes_default),
        out_storage=out_storage,
        out_table_name_pair=(out_table_name, self.out_table_name_default),
        id_name_pair=(id_name, self.id_name_default),
        nchunks_pair=(nchunks, self.nchunks_default),
        debug_pair=(debug, None),
    )
    CoreDataset.__init__(self, resources=resources)
Example 6: test_create_tripgen_travel_model_input_files
def test_create_tripgen_travel_model_input_files(self):
    in_storage = StorageFactory().get_storage(
        'sql_storage',
        storage_location=self.database)
    sc = SessionConfiguration(new_instance=True,
                              package_order=['urbansim', 'psrc'],
                              in_storage=in_storage)
    dataset_pool = sc.get_dataset_pool()
    TravelModelInputFileWriter().run(self.tempdir_path, 2000, dataset_pool)
    logger.log_status('tazdata path: ', self.tempdir_path)
    # expected values - data format: {income_group: [[zone_id, value], ...]}
    expected_tazdata = {1: [[1, 1], [1, 2]],
                        2: [[2, 2]],
                        3: [],
                        4: [[2, 2]]
                        }
    # get real data from file
    real_tazdata = {1: [], 2: [], 3: [], 4: []}
    # income groups 1 to 4
    for i in [1, 2, 3, 4]:
        tazdata_file = open(os.path.join(self.tempdir_path, 'tripgen', 'inputtg', 'tazdata.mf9%s' % i), 'r')
        for a_line in tazdata_file.readlines():
            if a_line[0].isspace():
                numbers = a_line.split()
                zone_id = int(numbers[0])
                job_zone_id = int(numbers[1])
                real_tazdata[i].append([zone_id, job_zone_id])
    for group in expected_tazdata.keys():
        self.assertEqual(real_tazdata[group], expected_tazdata[group],
                         "income group %d, columns did not match up." % group)
Example 7: _do_flush_dependent_variables_if_required
def _do_flush_dependent_variables_if_required(self):
    try:
        if not SessionConfiguration().get('flush_variables', False):
            return
    except:
        return
    from opus_core.datasets.interaction_dataset import InteractionDataset
    dataset = self.get_dataset()
    dependencies = self.get_current_dependencies()
    my_dataset_name = dataset.get_dataset_name()
    for iattr in range(len(dependencies)):  # iterate over dependent variables
        dep_item = dependencies[iattr][0]
        if isinstance(dep_item, str):
            depvar_name = VariableName(dep_item)
        else:
            depvar_name = dep_item.get_variable_name()  # dep_item should be an instance of AttributeBox
        dataset_name = depvar_name.get_dataset_name()
        if dataset_name == my_dataset_name:
            ds = dataset
        else:
            ds = SessionConfiguration().get_dataset_from_pool(dataset_name)
            #ds = dataset_pool.get_dataset('dataset_name')
        if not isinstance(ds, InteractionDataset):
            short_name = depvar_name.get_alias()
            if short_name not in ds.get_id_name():
                ds.flush_attribute(depvar_name)
Example 8: __init__
def __init__(self, name_of_dataset_to_merge, in_table_name, attribute_cache, years_to_merge, *args, **kwargs):
    """Create a dataset that contains this many years of data from this dataset.
    Years are from current year backwards, inclusive.
    """
    self.name_of_dataset_to_merge = name_of_dataset_to_merge
    self.years_to_merge = years_to_merge
    self._validate_primary_attributes_same_for_all_years(name_of_dataset_to_merge, in_table_name, attribute_cache, years_to_merge)
    # Add 'year' to id_names.
    dataset_for_current_year = SessionConfiguration().get_dataset_from_pool(
        self.name_of_dataset_to_merge)
    id_names = dataset_for_current_year.get_id_name() + ['year']
    self.base_id_name = dataset_for_current_year.get_id_name()
    # Masquerade as a dataset of the right type (important for computing the right variables).
    dataset_name = dataset_for_current_year.get_dataset_name()
    AbstractDataset.__init__(self,
                             id_name=id_names,
                             in_table_name=in_table_name,
                             dataset_name=dataset_name,
                             *args, **kwargs)
    coord_system = dataset_for_current_year.get_coordinate_system()
    if coord_system is not None:
        self._coordinate_system = coord_system
Example 9: _compute_variable_for_prior_year
def _compute_variable_for_prior_year(self, dataset, full_name, time, resources=None):
    """Create a new dataset for this variable, compute the variable, and then return
    the values for this variable."""
    calling_dataset_pool = SessionConfiguration().get_dataset_pool()
    calling_time = SimulationState().get_current_time()
    SimulationState().set_current_time(time)
    try:
        # Get an empty dataset pool with same search paths.
        my_dataset_pool = DatasetPool(
            package_order=calling_dataset_pool.get_package_order(),
            storage=AttributeCache())
        ds = dataset.empty_dataset_like_me(in_storage=AttributeCache())
        # Don't pass any datasets via resources, since they may be from a different time.
        my_resources = Resources(resources)
        for key in my_resources:
            if isinstance(key, Dataset):
                del my_resources[key]
        ds.compute_variables(full_name, my_dataset_pool, resources=my_resources)
        values = ds.get_attribute(full_name)
        return values
    finally:
        SimulationState().set_current_time(calling_time)
Example 10: __init__
def __init__(self, config):
    if 'estimation_database_configuration' in config:
        db_server = DatabaseServer(config['estimation_database_configuration'])
        db = db_server.get_database(config['estimation_database_configuration'].database_name)
        out_storage = StorageFactory().build_storage_for_dataset(
            type='sql_storage', storage_location=db)
    else:
        out_storage = StorageFactory().get_storage(
            type='flt_storage',
            storage_location=os.path.join(config['cache_directory'], str(config['base_year'] + 1)))
    simulation_state = SimulationState()
    simulation_state.set_cache_directory(config['cache_directory'])
    simulation_state.set_current_time(config['base_year'])
    attribute_cache = AttributeCache()
    SessionConfiguration(new_instance=True,
                         package_order=config['dataset_pool_configuration'].package_order,
                         in_storage=attribute_cache)
    if not os.path.exists(os.path.join(config['cache_directory'], str(config['base_year']))):
        #raise RuntimeError, "datasets uncached; run prepare_estimation_data.py first"
        CacheScenarioDatabase().run(config, unroll_gridcells=False)
    for dataset_name in config['datasets_to_preload']:
        SessionConfiguration().get_dataset_from_pool(dataset_name)
    households = SessionConfiguration().get_dataset_from_pool("household")
    household_ids = households.get_id_attribute()
    workers = households.get_attribute("workers")
    hh_ids = []
    member_ids = []
    is_worker = []
    job_ids = []
    # create one person record per worker in each household
    for i in range(households.size()):
        if workers[i] > 0:
            hh_ids += [household_ids[i]] * workers[i]
            member_ids += range(1, workers[i] + 1)
            is_worker += [1] * workers[i]
            job_ids += [-1] * workers[i]
    in_storage = StorageFactory().get_storage('dict_storage')
    persons_table_name = 'persons'
    in_storage.write_table(
        table_name=persons_table_name,
        table_data={
            'person_id': arange(len(hh_ids)) + 1,
            'household_id': array(hh_ids),
            'member_id': array(member_ids),
            'is_worker': array(is_worker),
            'job_id': array(job_ids),
        },
    )
    persons = PersonDataset(in_storage=in_storage, in_table_name=persons_table_name)
    persons.write_dataset(out_storage=out_storage, out_table_name=persons_table_name)
Example 11: run
def run(self):
    """Recomputes the household "county" attribute from the county of the parcel
    of the building each household occupies.
    """
    dataset_pool = SessionConfiguration().get_dataset_pool()
    household_set = dataset_pool.get_dataset("household")
    household_set.delete_one_attribute("county")
    county = household_set.compute_variables(
        "_county = household.disaggregate(parcel.county_id, intermediates=[building])"
    )
    household_set.add_primary_attribute(name="county", data=county)
Example 12: setup_environment
def setup_environment(cache_directory, year, package_order, additional_datasets={}):
    gc.collect()
    ss = SimulationState(new_instance=True)
    ss.set_cache_directory(cache_directory)
    ss.set_current_time(year)
    ac = AttributeCache()
    storage = ac.get_flt_storage_for_year(year)
    sc = SessionConfiguration(new_instance=True,
                              package_order=package_order,
                              in_storage=ac)
    logger.log_status("Set up environment for year %s, using cache directory %s." % (year, storage.get_storage_location()))
    dp = sc.get_dataset_pool()
    for name, ds in additional_datasets.iteritems():
        dp.replace_dataset(name, ds)
    return dp
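A hypothetical call to the helper above; the cache path, year, and package order are placeholders:

dataset_pool = setup_environment('/urbansim_cache/run_42', 2005,
                                 ['urbansim', 'opus_core'])
zones = dataset_pool.get_dataset('zone')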
Example 13: test_create_tripgen_travel_model_input_file
def test_create_tripgen_travel_model_input_file(self):
    in_storage = StorageFactory().get_storage(
        'sql_storage',
        storage_location=self.database)
    sc = SessionConfiguration(new_instance=True,
                              package_order=['urbansim', 'psrc'],
                              in_storage=in_storage)
    dataset_pool = sc.get_dataset_pool()
    #zone_set = dataset_pool.get_dataset('zone')
    #hh_set = dataset_pool.get_dataset('household')
    #job_set = dataset_pool.get_dataset('job')
    #taz_col_set = dataset_pool.get_dataset('constant_taz_column')
    TravelModelInputFileWriter().run(self.tempdir_path, 2000, dataset_pool)
    logger.log_status('tazdata path: ', self.tempdir_path)
    # expected values - data format: {zone: {column_var: value}}
    expected_tazdata = {1: {101: 19.9,
                            102: 2., 103: 0., 104: 1., 105: 0.,
                            106: 3., 107: 11., 109: 1.,
                            110: 0., 111: 0., 112: 0., 113: 0., 114: 0.,
                            115: 0., 116: 0., 117: 0., 118: 0., 119: 0.,
                            120: 2., 121: 42., 122: 0., 123: 0., 124: 11.},
                        2: {101: 29.9,
                            102: 0., 103: 2., 104: 1., 105: 3.,
                            106: 1., 107: 3., 109: 0.,
                            110: 0., 111: 0., 112: 0., 113: 3., 114: 0.,
                            115: 0., 116: 0., 117: 0., 118: 1., 119: 1.,
                            120: 0., 121: 241., 122: 0., 123: 0., 124: 3.}}
    # get real data from file
    real_tazdata = {1: {}, 2: {}}
    tazdata_file = open(os.path.join(self.tempdir_path, 'tripgen', 'inputtg', 'tazdata.ma2'), 'r')
    for a_line in tazdata_file.readlines():
        if a_line[0].isspace():
            numbers = a_line.replace(':', ' ').split()  # data line format: 1 101: 15.5
            zone_id = int(numbers[0])
            column_var = int(numbers[1])
            value = float(numbers[2])
            if value != -1:
                real_tazdata[zone_id][column_var] = value
    for zone in expected_tazdata.keys():
        for col_var in expected_tazdata[zone].keys():
            self.assertAlmostEqual(real_tazdata[zone][col_var], expected_tazdata[zone][col_var], 3,
                                   "zone %d, column variable %d did not match up." % (zone, col_var))
Example 14: _get_attribute_for_year
def _get_attribute_for_year(self, dataset_name, attribute_name, year):
    """Return the attribute values for this year."""
    calling_dataset_pool = SessionConfiguration().get_dataset_pool()
    calling_time = SimulationState().get_current_time()
    SimulationState().set_current_time(year)
    try:
        my_dataset_pool = DatasetPool(
            package_order=calling_dataset_pool.get_package_order(),
            storage=AttributeCache())
        dataset = my_dataset_pool.get_dataset(dataset_name)
        attribute_name = attribute_name.replace('DDDD', repr(year))
        dataset.compute_variables(attribute_name, my_dataset_pool)
        values = dataset.get_attribute(attribute_name)
        return values
    finally:
        SimulationState().set_current_time(calling_time)
Example 15: prepare_for_run
def prepare_for_run(self, control_total_dataset_name=None, control_total_table=None, control_total_storage=None):
    if (control_total_storage is None) or ((control_total_table is None) and (control_total_dataset_name is None)):
        dataset_pool = SessionConfiguration().get_dataset_pool()
        self.control_totals = dataset_pool.get_dataset('annual_%s_control_total' % self.dataset.get_dataset_name())
        return self.control_totals
    if not control_total_dataset_name:
        control_total_dataset_name = DatasetFactory().dataset_name_for_table(control_total_table)
    self.control_totals = DatasetFactory().search_for_dataset(control_total_dataset_name,
                                                              package_order=SessionConfiguration().package_order,
                                                              arguments={'in_storage': control_total_storage,
                                                                         'in_table_name': control_total_table,
                                                                         'id_name': []
                                                                         }
                                                              )
    return self.control_totals