

Python dataset.DatasetSubset Class Code Examples

This article collects typical usage examples of the Python class opus_core.datasets.dataset.DatasetSubset. If you have been wondering what exactly DatasetSubset does, how to use it, or what real-world code that uses it looks like, the curated class examples below should help.


The 15 DatasetSubset code examples below are sorted by popularity by default. You can upvote the ones you like or find useful; your feedback helps the site surface better Python code examples.
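Before the examples, it helps to see the recurring pattern in isolation: a DatasetSubset is built from an existing Dataset plus an integer index array, and it then behaves like a dataset restricted to those rows, supporting size(), get_attribute(), write_dataset(), and so on, as the examples below show. The following minimal sketch assumes a working opus_core installation; the dict_storage table, the household attributes, and the persons > 2 filter are invented for illustration and are not taken from any of the projects below.

from numpy import array, where
from opus_core.storage_factory import StorageFactory
from opus_core.datasets.dataset import Dataset, DatasetSubset

# Build a tiny in-memory dataset (hypothetical table and attribute names).
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name='households',
                    table_data={'household_id': array([1, 2, 3, 4]),
                                'persons': array([1, 3, 5, 2])})
household_set = Dataset(in_storage=storage, in_table_name='households',
                        id_name='household_id', dataset_name='household')

# The pattern used throughout this page: compute an index array,
# then wrap the parent dataset and the index in a DatasetSubset.
idx = where(household_set.get_attribute('persons') > 2)[0]
large_households = DatasetSubset(household_set, idx)

print(large_households.size())                    # 2
print(large_households.get_attribute('persons'))  # [3 5]

Every example that follows is this same pattern embedded in a larger model: where() or get_id_index() produces the index, and DatasetSubset provides the per-area, per-year, or per-chunk view of the parent dataset.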

Example 1: run

 def run(self, year, job_set, control_totals, job_building_types, data_objects=None, resources=None):
     self._do_initialize_for_run(job_set, job_building_types, data_objects)
     large_area_ids = control_totals.get_attribute("large_area_id")
     jobs_large_area_ids = job_set.compute_variables("washtenaw.job.large_area_id")
     unique_large_areas = unique(large_area_ids)
     is_year = control_totals.get_attribute("year")==year
     all_jobs_index = arange(job_set.size())
     sectors = unique(control_totals.get_attribute("sector_id")[is_year])
     self._compute_sector_variables(sectors, job_set)
     for area in unique_large_areas:
         idx = where(logical_and(is_year, large_area_ids == area))[0]
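         # wrap the control-total rows for this year and this large area in a subset view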
         self.control_totals_for_this_year = DatasetSubset(control_totals, idx)
         jobs_index = where(jobs_large_area_ids == area)[0]
         jobs_for_this_area = DatasetSubset(job_set, jobs_index)
         logger.log_status("ETM for area %s (currently %s jobs)" % (area, jobs_for_this_area.size()))
         last_remove_idx = self.remove_jobs.size
         self._do_run_for_this_year(jobs_for_this_area)
         add_jobs_size = self.new_jobs[self.location_id_name].size-self.new_jobs["large_area_id"].size
         remove_jobs_size = self.remove_jobs.size-last_remove_idx
         logger.log_status("add %s, remove %s, total %s" % (add_jobs_size, remove_jobs_size,
                                                            jobs_for_this_area.size()+add_jobs_size-remove_jobs_size))
         self.new_jobs["large_area_id"] = concatenate((self.new_jobs["large_area_id"],
                 array(add_jobs_size*[area], dtype="int32")))
         # transform indices of jobs to be removed into indices of the whole dataset
         self.remove_jobs[last_remove_idx:self.remove_jobs.size] = all_jobs_index[jobs_index[self.remove_jobs[last_remove_idx:self.remove_jobs.size]]]
     self._update_job_set(job_set)
     idx_new_jobs = arange(job_set.size()-self.new_jobs["large_area_id"].size, job_set.size())
     jobs_large_area_ids = job_set.compute_variables("washtenaw.job.large_area_id")
     jobs_large_area_ids[idx_new_jobs] = self.new_jobs["large_area_id"]
     job_set.delete_one_attribute("large_area_id")
     job_set.add_attribute(jobs_large_area_ids, "large_area_id", metadata=AttributeType.PRIMARY)
     # return an index of new jobs
     return arange(job_set.size()-self.new_jobs["large_area_id"].size, job_set.size())  
Author: psrc, Project: urbansim, Lines: 33, Source: regional_employment_transition_model.py

Example 2: estimate_mu

 def estimate_mu(self):
     iout = -1
     self.values_from_mr = {}
     for quantity in self.observed_data.get_quantity_objects():
         dataset_name = quantity.get_dataset_name()
         variable = quantity.get_variable_name()
         iout += 1
         dimension_reduced = False
         quantity_ids = quantity.get_dataset().get_id_attribute()
         for i in range(self.number_of_runs):
             ds = self._compute_variable_for_one_run(i, variable, dataset_name, self.get_calibration_year(), quantity)
             if isinstance(ds, InteractionDataset):
                 ds = ds.get_flatten_dataset()
             if i == 0: # first run
                 self.mu[iout] = zeros((self.y[iout].size, self.number_of_runs), dtype=float32)
                 ids = ds.get_id_attribute()
             else:
                 if ds.size() > ids.shape[0]:
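                     # this run produced more rows than the first run; keep only the ids seen there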
                     ds = DatasetSubset(ds, ds.get_id_index(ids))
                     dimension_reduced = True
             scale = self.get_scales(ds, i+1, variable)
             matching_index = ds.get_id_index(quantity_ids)
             values = scale[matching_index] * ds.get_attribute(variable)[matching_index]
             self.mu[iout][:,i] = try_transformation(values, quantity.get_transformation())
             
         self.values_from_mr[variable.get_expression()] = self.mu[iout]
         if dimension_reduced:
             self.y[iout] = self.y[iout][quantity.get_dataset().get_id_index(ids)]
Author: janowicz, Project: urbansim_drcog, Lines: 28, Source: bayesian_melding.py

Example 3: choose_agents_to_move_from_overfilled_locations

 def choose_agents_to_move_from_overfilled_locations(self, capacity,
                                                     agent_set, agents_index, agents_locations):
     """Agents with the smallest number of units should move again.
     """
     if capacity is None:
         return array([], dtype='int32')
     index_valid_agents_locations = where(agents_locations > 0)[0]
     valid_agents_locations = agents_locations[index_valid_agents_locations].astype("int32")
     unique_locations = unique(valid_agents_locations).astype("int32")
     index_consider_capacity = self.choice_set.get_id_index(unique_locations)
     capacity_of_affected_locations = capacity[index_consider_capacity]
     overfilled = where(capacity_of_affected_locations < 0)[0]
     movers = array([], dtype='int32')
     indexed_individuals = DatasetSubset(agent_set, agents_index[index_valid_agents_locations])
     ordered_agent_indices = self.get_agents_order(indexed_individuals)
     sizes = indexed_individuals.get_attribute(self.units_full_name)[ordered_agent_indices]
     choice_ids = self.choice_set.get_id_attribute()
     for loc in overfilled:
         agents_to_move = where(valid_agents_locations == choice_ids[index_consider_capacity[loc]])[0]
         if agents_to_move.size > 0:
             n = int(-1*capacity_of_affected_locations[loc])
             this_sizes = sizes[agents_to_move]
             csum = this_sizes[arange(this_sizes.size-1,-1,-1)].cumsum() # cumulative sum of reversed sizes (increasing)
             csum = csum[arange(csum.size-1, -1,-1)] # reverse back so csum decreases along the original order
             w = where(csum < n)[0]
             if w.size < agents_to_move.size: # add one more agent so that the cumulative sum exceeds n
                 w = concatenate((array([agents_to_move.size-w.size-1]), w))
             idx = ordered_agent_indices[agents_to_move[w]]
             movers = concatenate((movers, idx))
     return movers
Author: christianurich, Project: VIBe2UrbanSim, Lines: 30, Source: building_location_choice_model.py

Example 4: run

 def run(self, location_set, development_event_set, *args, **kwargs):
     changed_indices, processed_development_event_indices = \
                     EventsCoordinator.run(self, location_set, 
                                            development_event_set, *args, **kwargs)
     if development_event_set is not None:
         subset = DatasetSubset(development_event_set, processed_development_event_indices)
         subset.write_dataset(out_storage=AttributeCache())
     return (changed_indices, processed_development_event_indices)                               
Author: psrc, Project: urbansim, Lines: 8, Source: events_coordinator_and_storing.py

Example 5: run

    def run(self, current_year_emme2_dir, current_year, dataset_pool, config=None):
        """Writes to the an emme2 input file in the [current_year_emme2_dir]/tripgen/inputtg/tazdata.ma2.
        """
        
        missing_dataset = ''
        try:
            missing_dataset = 'constant_taz_column'
            taz_col_set = dataset_pool.get_dataset("constant_taz_column")
            taz_col_set.load_dataset()
            missing_dataset = 'zone'
            zone_set = dataset_pool.get_dataset("zone")
            zone_set.load_dataset()
            missing_dataset = 'household'
            household_set = dataset_pool.get_dataset("household")
        except:
            raise Exception("Dataset %s is missing from dataset_pool" % missing_dataset)
        
        """specify travel input file name: [current_year_emme2_dir]/tripgen/inputtg/tazdata.ma2 """
        full_path = os.path.join(current_year_emme2_dir, 'tripgen', 'inputtg')
        if not os.path.exists(full_path):
            os.makedirs(full_path)
        tm_input_file = os.path.join(full_path, 'tazdata.ma2')
        
        tm_year = self._decade_floor(current_year)
        
        logger.log_status("calculating entries for emme2 input file")
        taz_col_set.compute_variables("zone_id=constant_taz_column.taz")
        current_taz_col = DatasetSubset(taz_col_set, index=where(taz_col_set.get_attribute("year")==tm_year)[0])
        
        current_taz_col._id_names = ['taz']
        current_taz_col._create_id_mapping()
        zone_set.join(current_taz_col, "pctmf", join_attribute='zone_id')
        zone_set.join(current_taz_col, "gqi", join_attribute='zone_id')
        zone_set.join(current_taz_col, "gqn", join_attribute='zone_id')
        zone_set.join(current_taz_col, "fteuniv", join_attribute='zone_id')
        zone_set.join(current_taz_col, "den", new_name='density', join_attribute='zone_id')

        value_122 = zeros(zone_set.size())
        index_122 = zone_set.try_get_id_index(array([58,59,60,71,72,73,84,85,86,150,251,266,489,578,687,688,797,868]))
        value_122[index_122[index_122 != -1]] = 1
        zone_set.add_attribute(data=value_122, name="v122")
        
        value_123 = zeros(zone_set.size())
        index_123 = zone_set.try_get_id_index(array([531,646,847,850,888,894,899,910]))
        value_123[index_123[index_123 != -1]] = 1
        zone_set.add_attribute(data=value_123, name="v123")
        
        value_124 = logical_not(value_122 + value_123)
        zone_set.add_attribute(data=value_124, name="v124")
                
        """specify which variables are passing from urbansim to travel model; the order matters"""
        variables_list = self.get_variables_list(dataset_pool)
        
        zone_set.compute_variables(variables_list, dataset_pool=dataset_pool )

        return self._write_to_file(zone_set, variables_list, tm_input_file)
Author: christianurich, Project: VIBe2UrbanSim, Lines: 56, Source: travel_model_input_file_writer.py

Example 6: run

    def run( self, vacancy_table, history_table, year, location_set, dataset_pool=None, resources=None ):
        self.dataset_pool=dataset_pool
        building_types = self.dataset_pool.get_dataset('building_type')
        target_vacancy_this_year = DatasetSubset(vacancy_table, index=where(vacancy_table.get_attribute("year")==year)[0])
        building_type_ids = target_vacancy_this_year.get_attribute('building_type_id')
        building_type_idx = building_types.get_id_index(building_type_ids)
        self.used_building_types = DatasetSubset(building_types, index=building_type_idx)
        project_types =  self.used_building_types.get_attribute('building_type_name')
        is_residential = self.used_building_types.get_attribute('is_residential')
        unit_names =  where(is_residential, 'residential_units', 'non_residential_sqft')
        specific_unit_names =  where(is_residential, 'residential_units', '_sqft')
        rates =  target_vacancy_this_year.get_attribute('target_total_vacancy')
        self.project_units = {}
        self.project_specific_units = {}
        target_rates = {}
        for i in range(self.used_building_types.size()):
            self.project_units[project_types[i]] = unit_names[i]
            if is_residential[i]:
                self.project_specific_units[project_types[i]] = specific_unit_names[i]
            else:
                self.project_specific_units[project_types[i]] = "%s%s" % (project_types[i], specific_unit_names[i])
            target_rates[building_type_ids[i]] = rates[i]
            
        self._compute_vacancy_and_total_units_variables(location_set, project_types, resources)
        self.pre_check( location_set, target_vacancy_this_year, project_types)
    
        projects = None
        for project_type_id, target_vacancy_rate in target_rates.iteritems():
            # determine current-year vacancy rates
            project_type = building_types.get_attribute_by_id('building_type_name', project_type_id)
            vacant_units_sum = location_set.get_attribute(self.variable_for_vacancy[project_type]).sum()
            units_sum = float( location_set.get_attribute(self.variable_for_total_units[project_type]).sum() )
            should_develop_units = int(round(max( 0, ( target_vacancy_rate * units_sum - vacant_units_sum ) /
                                         ( 1 - target_vacancy_rate ) )))
            logger.log_status(project_type + ": vacant units: %d, should be vacant: %f, sum units: %d"
                          % (vacant_units_sum, target_vacancy_rate * units_sum, units_sum))

            if not should_develop_units:
                logger.log_note(("Will not build any " + project_type + " units, because the current vacancy of %d units\n"
                             + "is more than the %d units desired for the vacancy rate of %f.")
                            % (vacant_units_sum,
                               target_vacancy_rate * units_sum,
                               target_vacancy_rate))
            #create projects
            if should_develop_units > 0:
                this_project = self._create_projects(should_develop_units, project_type, project_type_id, history_table,
                                                               location_set, units_sum, resources)
                if projects is None:
                    projects = this_project
                else:
                    projects.join_by_rows(this_project, change_ids_if_not_unique=True)
        return projects
Author: (not listed), Project: (not listed), Lines: 52, Source: (not listed)

Example 7: run

    def run( self, model_configuration, vacancy_table, history_table, year, 
             location_set, resources=None):
        large_area_ids = vacancy_table.get_attribute("large_area_id")
        locations_large_area_ids = location_set.compute_variables("washtenaw.%s.large_area_id" % location_set.get_dataset_name())
        unique_large_areas = unique(large_area_ids)
        self._compute_vacancy_variables(location_set, 
                                        model_configuration['development_project_types'], 
                                        resources)

        projects = {}
        for area in unique_large_areas:
            location_index = where(locations_large_area_ids == area)[0]
            locations_for_this_area = DatasetSubset(location_set, location_index)
            logger.log_status("DPLCM for area %s", area)
            target_residential_vacancy_rate, target_non_residential_vacancy_rate = self._get_target_vacancy_rates(vacancy_table, year, area)
            for project_type in model_configuration['development_project_types']:
                # determine current-year vacancy rates
                vacant_units_sum = locations_for_this_area.get_attribute(self.variable_for_vacancy[project_type]).sum()
                units_sum = float( locations_for_this_area.get_attribute(self.units_variable[project_type]).sum() )
                vacant_rate = self.safe_divide(vacant_units_sum, units_sum)
                if model_configuration['development_project_types'][project_type]['residential']:
                    target_vacancy_rate = target_residential_vacancy_rate
                else:
                    target_vacancy_rate = target_non_residential_vacancy_rate
                should_develop_units = int(round(max( 0, ( target_vacancy_rate * units_sum - vacant_units_sum ) /
                                             ( 1 - target_vacancy_rate ) )))
                logger.log_status(project_type + ": vacant units: %d, should be vacant: %f, sum units: %d, will develop: %d"
                          % (vacant_units_sum, target_vacancy_rate * units_sum, units_sum, should_develop_units))
                #create projects
                if should_develop_units > 0:
                    project_dataset = self._create_projects(should_develop_units, project_type, history_table,
                                                                   locations_for_this_area, units_sum, 
                                                                   model_configuration['development_project_types'], 
                                                                   resources)
                    project_dataset.add_attribute(array(project_dataset.size()*[area]), "large_area_id", 
                                                  metadata=AttributeType.PRIMARY)
                    if (project_type not in projects.keys()) or (projects[project_type] is None):
                        projects[project_type] = project_dataset
                    else:
                        projects[project_type].join_by_rows(project_dataset, change_ids_if_not_unique=True)
 
        for project_type in model_configuration['development_project_types']:
            if project_type not in projects.keys():
                projects[project_type] = None
            if projects[project_type] is None:
                size = 0
            else:
                projects[project_type].add_submodel_categories()
                size = projects[project_type].size()
            logger.log_status("%s %s projects to be built" % (size, project_type))  
        return projects
Author: christianurich, Project: VIBe2UrbanSim, Lines: 51, Source: regional_development_project_transition_model.py

Example 8: run

    def run(self, year, household_set, control_totals, characteristics, resources=None):
#        self.person_set = person_set
        self._do_initialize_for_run(household_set)
        control_totals.get_attribute("total_number_of_households") # to make sure they are loaded
        self.characteristics = characteristics
        self.all_categories = self.characteristics.get_attribute("characteristic")
        self.all_categories = array(map(lambda x: x.lower(), self.all_categories))
        self.scaled_characteristic_names = get_distinct_names(self.all_categories).tolist()
        self.marginal_characteristic_names = copy(control_totals.get_id_name())
        index_year = self.marginal_characteristic_names.index("year")
        self.marginal_characteristic_names.remove("year")
        self.marginal_characteristic_names.remove(self.subarea_id_name)
        region_ids = control_totals.get_attribute(self.subarea_id_name)
        households_region_ids = household_set.compute_one_variable_with_unknown_package(variable_name="%s" % (self.subarea_id_name), dataset_pool=self.dataset_pool)

        unique_regions = unique(region_ids)
        is_year = control_totals.get_attribute("year")==year
        all_households_index = arange(household_set.size())
        for area in unique_regions:
            idx = where(logical_and(is_year, region_ids == area))[0]
            self.control_totals_for_this_year = DatasetSubset(control_totals, idx)
            households_index = where(households_region_ids == area)[0]
            if households_index.size == 0:
                continue
            households_for_this_area = DatasetSubset(household_set, households_index)
            logger.log_status("HTM for area %s (currently %s households)" % (area, households_for_this_area.size()))
            last_remove_idx = self.remove_households.size
            last_new_hhs_idx = self.mapping_existing_hhs_to_new_hhs.size
            self._do_run_for_this_year(households_for_this_area)
            add_hhs_size = self.new_households[self.location_id_name].size-self.new_households[self.subarea_id_name].size+self.mapping_existing_hhs_to_new_hhs.size-last_new_hhs_idx
            remove_hhs_size = self.remove_households.size-last_remove_idx
            logger.log_status("add %s, remove %s, total %s" % (add_hhs_size, remove_hhs_size,
                                                               households_for_this_area.size()+add_hhs_size-remove_hhs_size
                                                               ))
            self.new_households[self.subarea_id_name] = concatenate((self.new_households[self.subarea_id_name],
                                            array((self.new_households[self.location_id_name].size-self.new_households[self.subarea_id_name].size)*[area], dtype="int32")))
            # transform indices of households to be removed into indices of the whole dataset
            self.remove_households[last_remove_idx:self.remove_households.size] = all_households_index[households_index[self.remove_households[last_remove_idx:self.remove_households.size]]]
            # do the same for households to be duplicated
            self.mapping_existing_hhs_to_new_hhs[last_new_hhs_idx:self.mapping_existing_hhs_to_new_hhs.size] = all_households_index[households_index[self.mapping_existing_hhs_to_new_hhs[last_new_hhs_idx:self.mapping_existing_hhs_to_new_hhs.size]]]
            
        self._update_household_set(household_set)
        idx_new_households = arange(household_set.size()-self.new_households[self.subarea_id_name].size, household_set.size())
        #household_region_ids = household_set.compute_variables("urbansim_parcel.household.%s" % self.subarea_id_name)
        #household_region_ids[idx_new_households] = self.new_households[self.subarea_id_name]
        region_ids = household_set.get_attribute(self.subarea_id_name).copy()
        household_set.delete_one_attribute(self.subarea_id_name)
        household_set.add_attribute(region_ids, self.subarea_id_name, metadata=AttributeType.PRIMARY)
        # return an index of new households
        return idx_new_households
Author: psrc, Project: urbansim, Lines: 50, Source: subarea_household_transition_model.py

Example 9: run

    def run(self, chunk_specification, dataset, dataset_index=None, result_array_type=float32, **kwargs):
        """ 'chunk_specification' - determines number of chunks to use when computing over
                the dataset set.
            'dataset' - an object of class Dataset that is to be chunked.
            'dataset_index' - index of individuals in dataset to be chunked.
            'result_array_type' - type of the resulting array. Can be any numerical type of numpy array.
            **kwargs - keyword arguments.
            The method chunks dataset_index in the desired number of chunks (minimum is 1) and for each chunk it calls the method
            'run_chunk'. The order of the individuals entering the chunking is determined by the method 'get_agents_order'.
        """
        if dataset_index is None:
            dataset_index = arange(dataset.size())
        if not isinstance(dataset_index,ndarray):
            dataset_index=array(dataset_index)
        logger.log_status("Total number of individuals: %s" % dataset_index.size)
        result_array = zeros(dataset_index.size, dtype=result_array_type)

        if dataset_index.size <= 0:
            logger.log_status("Nothing to be done.")
            return result_array

        all_indexed_individuals = DatasetSubset(dataset, dataset_index)
        ordered_agent_indices = self.get_agents_order(all_indexed_individuals)  # set order of individuals in chunks

        # TODO: Remove next six lines after we inherit chunk specification as a text string.
        if (chunk_specification is None):
            chunk_specification = {'nchunks':1}
        chunker = ChunkSpecification(chunk_specification)
        self.number_of_chunks = chunker.nchunks(dataset_index)
        chunksize = int(ceil(all_indexed_individuals.size()/float(self.number_of_chunks)))
        for ichunk in range(self.number_of_chunks):
            logger.start_block("%s chunk %d out of %d."
                               % (self.model_short_name, (ichunk+1), self.number_of_chunks))
            self.index_of_current_chunk = ichunk
            try:
                chunk_agent_indices = ordered_agent_indices[arange((ichunk*chunksize),
                                                                   min((ichunk+1)*chunksize,
                                                                       all_indexed_individuals.size()))]
                logger.log_status("Number of agents in this chunk: %s" % chunk_agent_indices.size)
                result_array[chunk_agent_indices] = self.run_chunk(dataset_index[chunk_agent_indices],
                                                                   dataset, **kwargs).astype(result_array_type)
            finally:
                logger.end_block()

        return result_array
Author: christianurich, Project: VIBe2UrbanSim, Lines: 45, Source: chunk_model.py

Example 10: run

    def run(self, agent_set, **kwargs):

        large_areas = agent_set.get_attribute(self.large_area_id_name)
        valid_large_area = where(large_areas > 0)[0]
        result = array([], dtype="int32")  # initialize here so the concatenation below works even with no valid areas
        if valid_large_area.size > 0:
            unique_large_areas = unique(large_areas[valid_large_area])
            cond_array = zeros(agent_set.size(), dtype="bool8")
            cond_array[valid_large_area] = True
            for area in unique_large_areas:
                new_index = where(logical_and(cond_array, large_areas == area))[0]
                agent_subset =  DatasetSubset(agent_set, new_index)
                logger.log_status("ARM for area %s (%s agents)" % (area, agent_subset.size()))
                this_result = AgentRelocationModel.run(self, agent_subset, **kwargs)
                result = concatenate((result, new_index[this_result]))
        no_large_area = where(large_areas <= 0)[0]
        result = concatenate((result, no_large_area))
        return result
Author: christianurich, Project: VIBe2UrbanSim, Lines: 18, Source: regional_agent_relocation_model.py

Example 11: _write_input_file_1

    def _write_input_file_1(self, current_year_emme2_dir, input_dir, current_year, dataset_pool, config=None):
        missing_dataset = ''
        try:
            missing_dataset = 'group_quarter'
            taz_col_set = dataset_pool.get_dataset("group_quarter")
            taz_col_set.load_dataset()
            missing_dataset = 'zone'
            zone_set = dataset_pool.get_dataset("zone")
            zone_set.load_dataset()
            missing_dataset = 'household'
            household_set = dataset_pool.get_dataset("household")
        except:
            raise Exception("Dataset %s is missing from dataset_pool" % missing_dataset)
        
        """specify travel input file name """
        if not os.path.exists(input_dir):
            os.makedirs(input_dir)
        tm_input_file = os.path.join(input_dir, 'tazdata.in')
        
        tm_year = self._get_tm_year(current_year, taz_col_set)
        
        logger.log_status("calculating entries for emme%s input file" % self.emme_version)
        taz_col_set.compute_variables("zone_id=group_quarter.taz")
        current_taz_col = DatasetSubset(taz_col_set, index=where(taz_col_set.get_attribute("year")==tm_year)[0])
        
        current_taz_col._id_names = ['taz']
        current_taz_col._create_id_mapping()
        zone_set.join(current_taz_col, "gqdorm", join_attribute='zone_id')
        zone_set.join(current_taz_col, "gqmil", join_attribute='zone_id')
        zone_set.join(current_taz_col, "gqoth", join_attribute='zone_id')
        zone_set.join(current_taz_col, "fteuniv", join_attribute='zone_id')
              
        """specify which variables are passing from urbansim to travel model; the order matters"""
        variables_list = self.get_variables_list(dataset_pool)
        
        zone_set.compute_variables(variables_list, dataset_pool=dataset_pool )

        return self._write_to_file(zone_set, variables_list, tm_input_file, tm_year)
Author: (not listed), Project: (not listed), Lines: 38, Source: (not listed)

Example 12: run

 def run(self, year, household_set, control_totals, characteristics, resources=None):
     self._do_initialize_for_run(household_set)
     control_totals.get_attribute("total_number_of_households") # to make sure they are loaded
     self.characteristics = characteristics
     self.all_categories = self.characteristics.get_attribute("characteristic")
     self.all_categories = array(map(lambda x: x.lower(), self.all_categories))
     self.scaled_characteristic_names = get_distinct_names(self.all_categories).tolist()
     self.marginal_characteristic_names = copy(control_totals.get_id_name())
     index_year = self.marginal_characteristic_names.index("year")
     self.marginal_characteristic_names.remove("year")
     idx = where(control_totals.get_attribute("year")==year)[0]
     self.control_totals_for_this_year = DatasetSubset(control_totals, idx)
     self._do_run_for_this_year(household_set)
     return self._update_household_set(household_set)
Author: psrc, Project: urbansim, Lines: 14, Source: household_transition_model.py

Example 13: _do_run

    def _do_run(self, location_set, agent_set, agents_index, data_objects=None, resources=None):
        location_id_name = location_set.get_id_name()[0]
        jobsubset = DatasetSubset(agent_set, agents_index)
        if jobsubset.size() <= 0:
            return array([], dtype='int32')
        #unplace jobs
        agent_set.set_values_of_one_attribute(location_id_name, 
                                              resize(array([-1.0]), jobsubset.size()), agents_index)
        sector_ids = jobsubset.get_attribute("sector_id")
        sectors = unique(sector_ids)
        counts = ndimage_sum(ones((jobsubset.size(),)), labels=sector_ids.astype('int32'), index=sectors.astype('int32'))
        if sectors.size <= 1:
            counts = array([counts])
        variables = map(lambda x: "number_of_jobs_of_sector_"+str(int(x)), sectors)
        compute_variables = map(lambda var: self.variable_package + "." + 
            location_set.get_dataset_name()+ "." + var, variables)
        if data_objects is not None:
            self.dataset_pool.add_datasets_if_not_included(data_objects)
        self.dataset_pool.add_datasets_if_not_included({agent_set.get_dataset_name():agent_set})
        location_set.compute_variables(compute_variables, dataset_pool=self.dataset_pool)
        if self.filter is None:
            location_index = arange(location_set.size())
        else:
            filter_values = location_set.compute_variables([self.filter], dataset_pool=self.dataset_pool)
            location_index = where(filter_values > 0)[0]
        if location_index.size <= 0:
            logger.log_status("No locations available. Nothing to be done.")
            return array([])
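        # restrict the sampling universe to the locations selected above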
        location_subset = DatasetSubset(location_set, location_index)
        i=0
        for sector in sectors:
            distr = location_subset.get_attribute(variables[i])
            if ma.allclose(distr.sum(), 0):
                uniform_prob = 1.0/distr.size
                distr = resize(array([uniform_prob], dtype='float64'), distr.size)
                logger.log_warning("Probabilities in scaling model for sector " + str(sector) + " sum to 0.0.  Substituting uniform distribution!")
#                random_sample = sample(location_set.get_attribute("grid_id"), k=int(counts[i]), \
#                                   probabilities = distr)
            distr = distr/float(distr.sum())
            random_sample = probsample_replace(location_subset.get_id_attribute(), size=int(counts[i]), 
                                       prob_array=distr)
            idx = where(sector_ids == sector)[0]
            #modify job locations
            agent_set.set_values_of_one_attribute(location_id_name, random_sample, agents_index[idx])
            i+=1
        return agent_set.get_attribute_by_index(location_id_name, agents_index)
Author: psrc, Project: urbansim, Lines: 46, Source: scaling_jobs_model.py

Example 14: run

    def run(self, year=None,
            dataset_pool=None,  **kwargs):
        """
        """
        if dataset_pool is None:
            dataset_pool = SessionConfiguration().get_dataset_pool()

        if year is None:
            year = SimulationState().get_current_time()
        
        this_year_index = where(self.scheduled_events.get_attribute('year')==year)[0]
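        # view of just the events scheduled for the simulated year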
        scheduled_events_for_this_year = DatasetSubset(self.scheduled_events, this_year_index)
        scheduled_events_for_this_year.load_dataset_if_not_loaded()
        column_names = list(set( self.scheduled_events.get_known_attribute_names() ) - set( [ 'year', 'action', 'attribute', 'amount', 'event_id', '_hidden_id_'] ))
        column_names.sort()
#        column_values = dict([ (name, scheduled_events_for_this_year.get_attribute(name)) for name in column_names])
        
        for index in range(scheduled_events_for_this_year.size()):
            indicator = ones( self.dataset.size(), dtype='bool' )
            event_attr = {}
            for attribute in column_names:
                if attribute in self.dataset.get_known_attribute_names():
                    dataset_attribute = self.dataset.get_attribute(attribute)
                else:
                    ## this is done inside the loop because some action may delete computed attributes, such as dataset.add_elements()
                    try:
                        dataset_attribute = self.dataset.compute_one_variable_with_unknown_package(attribute, dataset_pool=dataset_pool)
                    except:
                        raise ValueError, "attribute %s used in scheduled events dataset can not be found in dataset %s" % (attribute, self.dataset.get_dataset_name())
                
#                if attribute in column_names: 
                aval = scheduled_events_for_this_year.get_attribute(attribute)[index]
                if aval == -1:
                    continue    # ignore if column value is -1
                else:
                    indicator *= dataset_attribute == aval
                    event_attr.update({attribute:aval})
            
            #agents in dataset satisfying all conditions are identified by indicator
            legit_index = where(indicator)[0]
            
            this_event = scheduled_events_for_this_year.get_data_element(index)
            if not hasattr(this_event, 'attribute'):
                action_attr_name = ''
            else:
                action_attr_name = this_event.attribute
            action_function = getattr(self, '_' + this_event.action.strip().lower())
            action_function( amount=this_event.amount,
                             attribute=action_attr_name,
                             dataset=self.dataset, 
                             index=legit_index,
                             data_dict=event_attr )
            
            self.post_run(self.dataset, legit_index, **kwargs)

        return self.dataset
Author: christianurich, Project: VIBe2UrbanSim, Lines: 56, Source: scheduled_events_model.py

Example 15: _convert_lccm_input

    def _convert_lccm_input(self, flt_directory_in, flt_directory_out):
        gc.collect()
        t1 = time()
        lc =  LandCoverDataset(in_storage = StorageFactory().get_storage('flt_storage', storage_location = flt_directory_in), 
            out_storage = StorageFactory().get_storage('flt_storage', storage_location = flt_directory_out))
#        lc.get_header() # added 23 june 2009 by mm
        mask = lc.get_mask()
        idx = where(mask==0)[0]
        lcsubset = DatasetSubset(lc, idx)
        print "Converting:"
        lcsubset.write_dataset(attributes=["relative_x"], out_table_name="land_covers")
        lc.delete_one_attribute("relative_x")
        lcsubset.write_dataset(attributes=["relative_y"], out_table_name="land_covers")
        lc.delete_one_attribute("relative_y")
        lc.flush_dataset()
        gc.collect()
#        lc_names = lc.get_primary_attribute_names()
        for attr in lc.get_primary_attribute_names():
            print "   ", attr
            lcsubset.write_dataset(attributes=[attr], out_table_name="land_covers")
            lc.delete_one_attribute(attr)
        logger.log_status("Data conversion done. " + str(time()-t1) + " s")
Author: christianurich, Project: VIBe2UrbanSim, Lines: 22, Source: lc_convert2.py


Note: The opus_core.datasets.dataset.DatasetSubset class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers, and the source code is copyrighted by its original authors; consult the corresponding project's License before distributing or using it. Do not reproduce without permission.