This article collects typical usage examples of the Python method opus_core.datasets.dataset.DatasetSubset.get_id_attribute. If you are wondering what exactly DatasetSubset.get_id_attribute does, how to call it, and what real code that uses it looks like, the curated examples below should help. You can also explore the containing class, opus_core.datasets.dataset.DatasetSubset, for further usage.
The following presents 5 code examples of the DatasetSubset.get_id_attribute method, sorted by popularity by default.
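Before the full examples, here is a minimal, self-contained sketch of the pattern they all share: wrap a parent Dataset in a DatasetSubset over selected row indices, then call get_id_attribute to obtain the identifiers of just those rows. The table and attribute names below are invented for illustration, and the setup mirrors the common opus_core in-memory test pattern; treat it as an assumed sketch, not canonical API documentation.

from numpy import array
from opus_core.storage_factory import StorageFactory
from opus_core.datasets.dataset import Dataset, DatasetSubset

# Hypothetical in-memory table with three households (illustrative data).
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name='households',
                    table_data={'household_id': array([5, 7, 9]),
                                'grid_id': array([1, 1, 2])})

households = Dataset(in_storage=storage, in_table_name='households',
                     id_name='household_id', dataset_name='household')

# A subset is defined by row indices into the parent dataset (here rows 0 and 2).
subset = DatasetSubset(households, array([0, 2]))

# get_id_attribute returns the id values of the subset's rows: array([5, 9]).
ids = subset.get_id_attribute()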
Example 1: _do_run
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import get_id_attribute [as alias]
def _do_run(self, location_set, agent_set, agents_index, data_objects=None, resources=None):
    location_id_name = location_set.get_id_name()[0]
    jobsubset = DatasetSubset(agent_set, agents_index)
    if jobsubset.size() <= 0:
        return array([], dtype='int32')
    # unplace jobs
    agent_set.set_values_of_one_attribute(location_id_name,
                                          resize(array([-1.0]), jobsubset.size()), agents_index)
    sector_ids = jobsubset.get_attribute("sector_id")
    sectors = unique(sector_ids)
    counts = ndimage_sum(ones((jobsubset.size(),)), labels=sector_ids.astype('int32'), index=sectors.astype('int32'))
    if sectors.size <= 1:
        counts = array([counts])
    variables = map(lambda x: "number_of_jobs_of_sector_" + str(int(x)), sectors)
    compute_variables = map(lambda var: self.variable_package + "." +
                            location_set.get_dataset_name() + "." + var, variables)
    if data_objects is not None:
        self.dataset_pool.add_datasets_if_not_included(data_objects)
    self.dataset_pool.add_datasets_if_not_included({agent_set.get_dataset_name(): agent_set})
    location_set.compute_variables(compute_variables, dataset_pool=self.dataset_pool)
    if self.filter is None:
        location_index = arange(location_set.size())
    else:
        filter_values = location_set.compute_variables([self.filter], dataset_pool=self.dataset_pool)
        location_index = where(filter_values > 0)[0]
    if location_index.size <= 0:
        logger.log_status("No locations available. Nothing to be done.")
        return array([])
    location_subset = DatasetSubset(location_set, location_index)
    i = 0
    for sector in sectors:
        distr = location_subset.get_attribute(variables[i])
        if ma.allclose(distr.sum(), 0):
            uniform_prob = 1.0 / distr.size
            distr = resize(array([uniform_prob], dtype='float64'), distr.size)
            logger.log_warning("Probabilities in scaling model for sector " + str(sector) + " sum to 0.0. Substituting uniform distribution!")
#            random_sample = sample(location_set.get_attribute("grid_id"), k=int(counts[i]), \
#                                   probabilities = distr)
        distr = distr / float(distr.sum())
        random_sample = probsample_replace(location_subset.get_id_attribute(), size=int(counts[i]),
                                           prob_array=distr)
        idx = where(sector_ids == sector)[0]
        # modify job locations
        agent_set.set_values_of_one_attribute(location_id_name, random_sample, agents_index[idx])
        i += 1
    return agent_set.get_attribute_by_index(location_id_name, agents_index)
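The heart of this example is the per-sector loop: the number of jobs of each sector per location becomes a probability distribution, and the location ids returned by get_id_attribute are drawn with replacement according to it. Below is a minimal plain-numpy sketch of that one step, with invented data and numpy.random.choice standing in for opus_core's probsample_replace:

import numpy as np

location_ids = np.array([101, 102, 103, 104])   # stands in for location_subset.get_id_attribute()
jobs_in_sector = np.array([10., 0., 30., 60.])  # stands in for number_of_jobs_of_sector_<x>

distr = jobs_in_sector
if np.allclose(distr.sum(), 0):
    # degenerate case handled by the model: fall back to a uniform distribution
    distr = np.ones_like(distr) / distr.size
distr = distr / distr.sum()

# draw 5 location ids with replacement, proportional to distr
# (numpy.random.choice plays the role of opus_core's probsample_replace)
sampled = np.random.choice(location_ids, size=5, replace=True, p=distr)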
Example 2: _do_run
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import get_id_attribute [as alias]
def _do_run(self, location_set, agent_set, agents_index, resources=None):
    location_id_name = location_set.get_id_name()[0]
    asubset = DatasetSubset(agent_set, agents_index)
    if asubset.size() <= 0:
        return array([], dtype='int32')
    # unplace agents
    agent_set.modify_attribute(location_id_name,
                               resize(array([-1]), asubset.size()), agents_index)
    if self.filter is None:
        location_index = arange(location_set.size())
    else:
        filter_values = location_set.compute_variables([self.filter], dataset_pool=self.dataset_pool)
        location_index = where(filter_values > 0)[0]
    if location_index.size <= 0:
        logger.log_status("No locations available. Nothing to be done.")
        return array([])
    location_subset = DatasetSubset(location_set, location_index)
    if self.consider_capacity:
        location_set.compute_variables([self.capacity_attribute],
                                       dataset_pool=self.dataset_pool)
        weights = location_subset[self.capacity_attribute]
        if self.number_of_agents_attribute is not None:
            location_set.compute_variables([self.number_of_agents_attribute],
                                           dataset_pool=self.dataset_pool)
            # weight by remaining capacity: capacity minus agents already placed, clipped at zero
            weights = clip(weights - location_subset[self.number_of_agents_attribute],
                           0, location_subset[self.capacity_attribute])
    else:
        weights = ones(location_subset.size())
    if weights.sum() <= 0:
        logger.log_status("Locations' capacity sums to zero. Nothing to be done.")
        return array([])
    distr = weights / float(weights.sum())
    random_sample = probsample_replace(location_subset.get_id_attribute(), size=asubset.size(),
                                       prob_array=distr)
    agent_set.modify_attribute(location_id_name, random_sample, agents_index)
    return agent_set.get_attribute_by_index(location_id_name, agents_index)
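When consider_capacity is set, the sampling weights are each location's remaining capacity rather than its raw capacity. Isolating just that weighting step with invented numbers (the comments map the arrays back to the model's configured attributes):

import numpy as np

capacity = np.array([10, 4, 0, 6])        # values of self.capacity_attribute
already_placed = np.array([3, 6, 0, 6])   # values of self.number_of_agents_attribute

# remaining capacity per location, never negative: here [7, 0, 0, 0]
weights = np.clip(capacity - already_placed, 0, capacity)

if weights.sum() > 0:
    distr = weights / float(weights.sum())   # probabilities for probsample_replace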
Example 3: HouseholdTransitionModel
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import get_id_attribute [as alias]
class HouseholdTransitionModel(Model):
    """Creates and removes households from household_set. New households are duplicated from the existing
    households, keeping the joint distribution of all characteristics.
    """
    model_name = "Household Transition Model"

    def __init__(self, location_id_name="grid_id", dataset_pool=None, debuglevel=0):
        self.debug = DebugPrinter(debuglevel)
        self.location_id_name = location_id_name
        self.dataset_pool = self.create_dataset_pool(dataset_pool, ["urbansim", "opus_core"])

    def run(self, year, household_set, control_totals, characteristics, resources=None):
        self._do_initialize_for_run(household_set)
        control_totals.get_attribute("total_number_of_households")  # to make sure they are loaded
        self.characteristics = characteristics
        self.all_categories = self.characteristics.get_attribute("characteristic")
        self.all_categories = array(map(lambda x: x.lower(), self.all_categories))
        self.scaled_characteristic_names = get_distinct_names(self.all_categories).tolist()
        self.marginal_characteristic_names = copy(control_totals.get_id_name())
        index_year = self.marginal_characteristic_names.index("year")
        self.marginal_characteristic_names.remove("year")
        idx = where(control_totals.get_attribute("year") == year)[0]
        self.control_totals_for_this_year = DatasetSubset(control_totals, idx)
        self._do_run_for_this_year(household_set)
        return self._update_household_set(household_set)

    def _update_household_set(self, household_set):
        index_of_duplicated_hhs = household_set.duplicate_rows(self.mapping_existing_hhs_to_new_hhs)
        household_set.modify_attribute(name=self.location_id_name,
                                       data=-1 * ones((index_of_duplicated_hhs.size,),
                                                      dtype=household_set.get_data_type(self.location_id_name)),
                                       index=index_of_duplicated_hhs)
        household_set.remove_elements(self.remove_households)
        if self.new_households[self.location_id_name].size > 0:
            max_id = household_set.get_id_attribute().max()
            self.new_households[self.household_id_name] = concatenate((self.new_households[self.household_id_name],
                                                                       arange(max_id + 1, max_id + self.new_households[self.location_id_name].size + 1)))
            household_set.add_elements(self.new_households, require_all_attributes=False)
        difference = household_set.size() - self.household_size
        self.debug.print_debug("Difference in number of households: %s"
                               " (original %s, new %s, created %s, deleted %s)"
                               % (difference,
                                  self.household_size,
                                  household_set.size(),
                                  self.new_households[self.household_id_name].size + self.mapping_existing_hhs_to_new_hhs.size,
                                  self.remove_households.size),
                               3)
        if self.location_id_name in household_set.get_attribute_names():
            self.debug.print_debug("Number of unplaced households: %s"
                                   % where(household_set.get_attribute(self.location_id_name) <= 0)[0].size,
                                   3)
        return difference

    def _do_initialize_for_run(self, household_set):
        self.household_id_name = household_set.get_id_name()[0]
        self.new_households = {
            self.location_id_name: array([], dtype=household_set.get_data_type(self.location_id_name, int32)),
            self.household_id_name: array([], dtype=household_set.get_data_type(self.household_id_name, int32))
        }
        self.remove_households = array([], dtype='int32')
        self.household_size = household_set.size()
        self.max_id = household_set.get_id_attribute().max()
        self.arrays_from_categories = {}
        self.arrays_from_categories_mapping = {}
        self.mapping_existing_hhs_to_new_hhs = array([], dtype=household_set.get_data_type(self.household_id_name, int32))

    def _do_run_for_this_year(self, household_set):
        self.household_set = household_set
        groups = self.control_totals_for_this_year.get_id_attribute()
        self.create_arrays_from_categories(self.household_set)
        all_characteristics = self.arrays_from_categories.keys()
        self.household_set.load_dataset_if_not_loaded(attributes=all_characteristics)  # prevent lazy loading to save runtime
        idx_shape = []
        number_of_combinations = 1
        num_attributes = len(all_characteristics)
        for iattr in range(num_attributes):
            attr = all_characteristics[iattr]
            max_bins = self.arrays_from_categories[attr].max() + 1
            idx_shape.append(max_bins)
            number_of_combinations = number_of_combinations * max_bins
            if attr not in self.new_households.keys():
                self.new_households[attr] = array([], dtype=self.household_set.get_data_type(attr, float32))
        self.number_of_combinations = int(number_of_combinations)
        idx_tmp = indices(tuple(idx_shape))
        categories_index = zeros((self.number_of_combinations, num_attributes))
        for i in range(num_attributes):  # create indices of all combinations
            categories_index[:, i] = idx_tmp[i].ravel()
        categories_index_mapping = {}
        for i in range(self.number_of_combinations):
            categories_index_mapping[tuple(categories_index[i, :].tolist())] = i

        def get_category(values):
            bins = map(lambda x, y: self.arrays_from_categories[x][int(y)], all_characteristics, values)
            try:
# ... (rest of the code omitted) ...
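A compact piece of reasoning in _do_run_for_this_year is worth isolating: it enumerates every combination of characteristic bins with numpy's indices and maps each combination tuple to a flat index, so households can later be counted per combined category. A self-contained sketch of the same enumeration, with invented bin counts:

import numpy as np

idx_shape = (2, 3)   # e.g. 2 income bins x 3 age bins (illustrative)
num_attributes = len(idx_shape)
number_of_combinations = int(np.prod(idx_shape))

idx_tmp = np.indices(idx_shape)
categories_index = np.zeros((number_of_combinations, num_attributes), dtype=int)
for i in range(num_attributes):   # create indices of all combinations
    categories_index[:, i] = idx_tmp[i].ravel()

# map each combination tuple, e.g. (1, 2), to its flat index
categories_index_mapping = {tuple(categories_index[i]): i
                            for i in range(number_of_combinations)}
# categories_index_mapping[(1, 2)] == 5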
Example 4: run
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import get_id_attribute [as alias]
def run(self, in_storage, out_storage=None, business_dsname="business", zone_dsname=None):
    dataset_pool = DatasetPool(storage=in_storage, package_order=['psrc_parcel', 'urbansim_parcel', 'urbansim', 'opus_core'])
    seed(1)
    allbusinesses = dataset_pool.get_dataset(business_dsname)
    parcels = dataset_pool.get_dataset('parcel')
    buildings = dataset_pool.get_dataset('building')
    parcels.compute_variables(["urbansim_parcel.parcel.residential_units",
                               "number_of_buildings = parcel.number_of_agents(building)",
                               "non_residential_sqft = (parcel.aggregate(building.non_residential_sqft)).astype(int32)",
                               "number_of_res_buildings = parcel.aggregate(urbansim_parcel.building.is_residential)",
                               "number_of_nonres_buildings = parcel.aggregate(urbansim_parcel.building.is_non_residential)",
                               "number_of_mixed_use_buildings = parcel.aggregate(urbansim_parcel.building.is_generic_building_type_6)"
                               ],
                              dataset_pool=dataset_pool)
    restypes = [12, 4, 19, 11, 34, 10, 33]
    reslutypes = [13, 14, 15, 24]
    is_valid_business = ones(allbusinesses.size(), dtype='bool8')
    parcels_not_matched = logical_and(in1d(allbusinesses["parcel_id"], parcels.get_id_attribute(), invert=True), allbusinesses["parcel_id"] > 0)
    if parcels_not_matched.sum() > 0:
        is_valid_business[where(parcels_not_matched)] = False
        logger.log_warning(message="No parcel exists for %s businesses (%s jobs)" % (parcels_not_matched.sum(),
                           allbusinesses[self.number_of_jobs_attr][where(parcels_not_matched)].sum()))
    zero_parcel = allbusinesses["parcel_id"] <= 0
    if zero_parcel.sum() > 0:
        is_valid_business[where(zero_parcel)] = False
        logger.log_warning(message="%s businesses (%s jobs) located on zero parcel_id" % (zero_parcel.sum(),
                           allbusinesses[self.number_of_jobs_attr][where(zero_parcel)].sum()))
    zero_size = logical_and(is_valid_business, allbusinesses[self.number_of_jobs_attr].round() == 0)
    if sum(zero_size) > 0:
        is_valid_business[where(zero_size)] = False
        logger.log_warning(message="%s businesses are of size 0." % sum(zero_size))
    businesses = DatasetSubset(allbusinesses, index=where(is_valid_business)[0])
    parcels.add_attribute(name="number_of_workplaces", data=parcels.sum_dataset_over_ids(businesses, constant=1))
    # classify each parcel by its building mix ("buildings_code")
    has_single_res_buildings = logical_and(parcels["number_of_buildings"] == 1, parcels["number_of_res_buildings"] == 1)  # 1 (1 residential)
    parcels.add_attribute(data=has_single_res_buildings.astype("int32"), name="buildings_code")
    has_mult_res_buildings = logical_and(parcels["number_of_buildings"] > 1, parcels["number_of_nonres_buildings"] == 0)  # 2 (mult residential)
    parcels.modify_attribute("buildings_code", data=2*ones(has_mult_res_buildings.sum()), index=where(has_mult_res_buildings))
    has_single_nonres_buildings = logical_and(logical_and(parcels["number_of_buildings"] == 1, parcels["number_of_nonres_buildings"] == 1), parcels["number_of_mixed_use_buildings"] == 0)  # 3 (1 non-res)
    parcels.modify_attribute("buildings_code", data=3*ones(has_single_nonres_buildings.sum()), index=where(has_single_nonres_buildings))
    has_mult_nonres_buildings = logical_and(logical_and(parcels["number_of_buildings"] > 1, parcels["number_of_res_buildings"] == 0), parcels["number_of_mixed_use_buildings"] == 0)  # 4 (mult non-res)
    parcels.modify_attribute("buildings_code", data=4*ones(has_mult_nonres_buildings.sum()), index=where(has_mult_nonres_buildings))
    has_single_mixed_buildings = logical_and(parcels["number_of_buildings"] == 1, parcels["number_of_mixed_use_buildings"] == 1)  # 5 (1 mixed-use)
    parcels.modify_attribute("buildings_code", data=5*ones(has_single_mixed_buildings.sum()), index=where(has_single_mixed_buildings))
    has_mult_mixed_buildings = logical_and(parcels["number_of_buildings"] > 1,
                                           logical_or(logical_and(parcels["number_of_res_buildings"] > 0, parcels["number_of_nonres_buildings"] > 0),
                                                      logical_or(parcels["number_of_mixed_use_buildings"] > 1,
                                                                 logical_and(parcels["number_of_res_buildings"] == 0,
                                                                             parcels["number_of_mixed_use_buildings"] > 0))))  # 6 (mult mixed-use)
    parcels.modify_attribute("buildings_code", data=6*ones(has_mult_mixed_buildings.sum()), index=where(has_mult_mixed_buildings))
    has_no_building_res_lutype = logical_and(parcels["number_of_buildings"] == 0, in1d(parcels["land_use_type_id"], reslutypes))  # 7 (vacant with res LU type)
    parcels.modify_attribute("buildings_code", data=7*ones(has_no_building_res_lutype.sum()), index=where(has_no_building_res_lutype))
    has_no_building_nonres_lutype = logical_and(parcels["number_of_buildings"] == 0, in1d(parcels["land_use_type_id"], reslutypes) == 0)  # 8 (vacant with non-res LU type)
    parcels.modify_attribute("buildings_code", data=8*ones(has_no_building_nonres_lutype.sum()), index=where(has_no_building_nonres_lutype))
    business_sizes = businesses[self.number_of_jobs_attr].round().astype("int32")
    business_location = {}
    business_location1wrkpl = zeros(businesses.size(), dtype="int32")
    business_location1wrkplres = zeros(businesses.size(), dtype="int32")
    business_ids = businesses.get_id_attribute()
    # sample one building for cases when sampling is required
    for ibusid in range(businesses.size()):
        idx = where(buildings['parcel_id'] == businesses['parcel_id'][ibusid])[0]
        bldgids = buildings['building_id'][idx]
        business_location[business_ids[ibusid]] = bldgids
        if bldgids.size == 1:
            business_location1wrkpl[ibusid] = bldgids[0]
        elif bldgids.size > 1:
            business_location1wrkpl[ibusid] = bldgids[sample_noreplace(arange(bldgids.size), 1)]
            if buildings['residential_units'][idx].sum() > 0:
                # residential buildings are sampled with probabilities proportional to residential units
                business_location1wrkplres[ibusid] = bldgids[probsample_noreplace(arange(bldgids.size), 1, prob_array=buildings['residential_units'][idx])]
            else:
                business_location1wrkplres[ibusid] = business_location1wrkpl[ibusid]
    home_based = zeros(business_sizes.sum(), dtype="bool8")
    job_building_id = zeros(business_sizes.sum(), dtype="int32")
    job_array_labels = business_ids.repeat(business_sizes)
    job_assignment_case = zeros(business_sizes.sum(), dtype="int32")
    processed_bindicator = zeros(businesses.size(), dtype="bool8")
    business_codes = parcels.get_attribute_by_id("buildings_code", businesses["parcel_id"])
    business_nworkplaces = parcels.get_attribute_by_id("number_of_workplaces", businesses["parcel_id"])
    logger.log_status("Total number of jobs: %s" % home_based.size)
    # 1. 1-2 worker business in 1 residential building
    idx_sngl_wrk_1bld_fit = where(logical_and(business_sizes < 3, business_codes == 1))[0]
    jidx = in1d(job_array_labels, business_ids[idx_sngl_wrk_1bld_fit])
    home_based[jidx] = True
    job_building_id[jidx] = business_location1wrkpl[idx_sngl_wrk_1bld_fit].repeat(business_sizes[idx_sngl_wrk_1bld_fit])
    job_assignment_case[jidx] = 1
    processed_bindicator[idx_sngl_wrk_1bld_fit] = True
    logger.log_status("1. %s jobs (%s businesses) set as home-based due to 1-2 worker x 1 residential building fit." % (
                      business_sizes[idx_sngl_wrk_1bld_fit].sum(), idx_sngl_wrk_1bld_fit.size))
    # 2. 1-2 worker business in multiple residential buildings
    idx_sngl_wrk_multbld_fit = where(logical_and(logical_and(processed_bindicator == 0, business_sizes < 3), business_codes == 2))[0]
    jidx = in1d(job_array_labels, business_ids[idx_sngl_wrk_multbld_fit])
    home_based[jidx] = True
# ... (rest of the code omitted) ...
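The job bookkeeping above relies on a compact numpy idiom: repeat expands one row per business into one label per job, and in1d then addresses all jobs of any chosen set of businesses at once. Here is that idiom isolated with toy values (all numbers invented):

import numpy as np

business_ids = np.array([11, 12, 13])
business_sizes = np.array([2, 1, 3])        # jobs per business

# one label per job: [11, 11, 12, 13, 13, 13]
job_array_labels = business_ids.repeat(business_sizes)

# select all jobs belonging to businesses 11 and 13
selected = np.array([11, 13])
jidx = np.in1d(job_array_labels, selected)  # [True, True, False, True, True, True]

# assign each selected business's building id to all of its jobs
job_building_id = np.zeros(business_sizes.sum(), dtype=np.int32)
building_for_selected = np.array([501, 503])   # illustrative building ids
job_building_id[jidx] = building_for_selected.repeat(business_sizes[np.in1d(business_ids, selected)])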
Example 5: EstablishmentReappearanceModel
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import get_id_attribute [as alias]
# ... (beginning of the code omitted) ...
    else:
        filter_indicator = 1
    to_reappear = np.array([], dtype=np.int32)
    # log header
    if PrettyTable is not None:
        status_log = PrettyTable()
        status_log.set_field_names(column_names + ["actual", "target", "difference", "action", "N", "note"])
    else:
        logger.log_status("\t".join(column_names + ["actual", "target", "difference", "action", "N", "note"]))
    error_log = ''
    error_num = 1

    def log_status():
        ## log status
        action = "0"
        N = "0"
        if lucky_index is not None:
            if actual_num < target_num:
                action = "+" + str(action_num)
                N = "+" + str(lucky_index.size)
            if actual_num > target_num:
                action = "-" + str(action_num)
                N = "-" + str(lucky_index.size)
        cat = [str(self.control_totals[col][index]) for col in column_names]
        cat += [str(actual_num), str(target_num), str(diff), action, N, error_str]
        if PrettyTable is not None:
            status_log.add_row(cat)
        else:
            logger.log_status("\t".join(cat))

    for index, control_total_id in enumerate(self.control_totals.get_id_attribute()):
        target_num = target[index]
        actual_num = actual[index]
        action_num = 0
        n_num = 0
        diff = target_num - actual_num
        accounting = self.dataset[self.dataset_accounting_attribute]
        lucky_index = None
        error_str = ''
        if actual_num < target_num:
            indicator = self.dataset[id_name] == control_total_id
            n_indicator = indicator.sum()
            # do sampling from legitimate records
            legit_index = np.where(np.logical_and(indicator, filter_indicator))[0]
            legit_size = sum(accounting[legit_index])
            if legit_size > diff:
                ## there are more establishments marked as 'disappeared' than the gap
                ## between target and actual, so sampling is required
                mean_size = float(legit_size) / n_indicator if n_indicator != 0 else 1
                n = int(np.ceil(diff / mean_size))
                i = 0
                while diff > 0 and action_num < diff:
                    if n > 1:  # adjust the number of records to sample in each iteration
                        n = int(np.ceil((diff - action_num) / (mean_size * STEP_SIZE**i)))
                    sampleable_index = legit_index[np.logical_not(np.in1d(legit_index, to_reappear))]
                    if n < sampleable_index.size:
                        lucky_index = sample_noreplace(sampleable_index, n)
                    else:
# ... (rest of the code omitted) ...
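The excerpt is truncated above, but the shape of the loop is clear: it tops up a category in batches, estimating how many records to draw from the mean record size, sampling without replacement from indices not yet chosen, and shrinking the batch geometrically via STEP_SIZE as the gap closes. A standalone approximation of that control flow, with numpy.random.choice standing in for opus_core's sample_noreplace and all values invented:

import numpy as np

STEP_SIZE = 2                                # geometric shrink factor (assumed value)
accounting = np.array([3, 1, 4, 2, 5, 1])    # e.g. jobs per establishment
legit_index = np.arange(accounting.size)     # candidates eligible to reappear
diff = 7                                     # target minus actual

to_reappear = np.array([], dtype=np.int32)
action_num = 0
mean_size = float(accounting[legit_index].sum()) / legit_index.size
n = int(np.ceil(diff / mean_size))
i = 0
while action_num < diff:
    if n > 1:   # re-estimate the batch size from the remaining gap
        n = int(np.ceil((diff - action_num) / (mean_size * STEP_SIZE**i)))
    sampleable = legit_index[~np.in1d(legit_index, to_reappear)]
    if sampleable.size == 0:
        break   # nothing left to sample
    picked = np.random.choice(sampleable, size=min(n, sampleable.size), replace=False)
    to_reappear = np.concatenate((to_reappear, picked.astype(np.int32)))
    action_num += accounting[picked].sum()
    i += 1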