

Python DebugPrinter.print_debug Method Code Examples

This article collects typical usage examples of the Python method opus_core.misc.DebugPrinter.print_debug. If you are wondering how exactly DebugPrinter.print_debug is used, what it is for, or what real calls to it look like, the curated code examples below should help. You can also explore further usage examples of opus_core.misc.DebugPrinter, the class this method belongs to.


A total of 15 code examples of DebugPrinter.print_debug are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
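Before the individual excerpts, here is a minimal, self-contained sketch of the pattern that recurs in every example below: a DebugPrinter is constructed with a debug level, and each call to print_debug passes a message together with that message's own verbosity level. The class below is a hypothetical stand-in written for illustration, not the actual opus_core.misc implementation; it assumes a message is emitted only when its level does not exceed the configured threshold (example 14 assigns self.debug.flag = debuglevel, which suggests the threshold is stored in a flag attribute).

# Hypothetical stand-in that mimics the DebugPrinter usage seen in the
# examples below; the real class lives in opus_core.misc and may differ.
class ToyDebugPrinter(object):
    def __init__(self, flag=0):
        # verbosity threshold; the examples pass this in as 'debuglevel'
        self.flag = flag

    def print_debug(self, message, level):
        # assumed behaviour: only emit messages whose level fits the threshold
        if level <= self.flag:
            print("DEBUG (level %s): %s" % (level, message))

debug = ToyDebugPrinter(3)
debug.print_debug("Creating DevelopmentGroupDataset object.", 2)  # printed
debug.print_debug("Number of events: 42", 3)                      # printed
debug.print_debug("Compute variables ...", 4)                     # suppressed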

Example 1: __init__

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
    def __init__(self, resources=None, in_storage=None, out_storage=None,
                  in_table_name=None, attributes=None,
                  out_table_name=None, id_name=None,
                  nchunks=None, other_in_table_names=None,
                  debuglevel=0):
        debug = DebugPrinter(debuglevel)
        debug.print_debug("Creating DevelopmentGroupDataset object.",2)
        resources = ResourceCreatorDevelopmentGroups().get_resources_for_dataset(
            resources = resources,
            in_storage = in_storage,
            out_storage = out_storage,
            in_table_name = in_table_name,
            out_table_name = out_table_name,
            attributes = attributes,
            id_name = id_name,
            id_name_default = self.id_name_default,
            nchunks = nchunks,
            debug = debug
            )

        Dataset.__init__(self,resources = resources)

        if isinstance(other_in_table_names,list):
            for place_name in other_in_table_names: #load other tables
                ds = Dataset(resources = resources)
                ds.load_dataset(in_table_name=place_name)
                self.connect_datasets(ds)
Developer ID: christianurich, Project: VIBe2UrbanSim, Lines: 29, Source: development_group_dataset.py

Example 2: __init__

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
    def __init__(self, resources=None, dataset1=None, dataset2=None, index1=None, index2=None,
                 debuglevel=0):
        debug = DebugPrinter(debuglevel)
        debug.print_debug("Creating object %s.%s" % (self.__class__.__module__, self.__class__.__name__), 2)

        local_resources = Resources(resources)
        local_resources.merge_if_not_None({"dataset1":dataset1,
            "dataset2":dataset2, "debug":debug,
            "index1":index1, "index2":index2})
        CoreInteractionDataset.__init__(self, resources = local_resources)
     
     
Developer ID: christianurich, Project: VIBe2UrbanSim, Lines: 12, Source: interactions.py

Example 3: __init__

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
    def __init__(
        self,
        categories=array([1]),
        resources=None,
        what=None,
        attribute_name=None,
        data=None,
        names=None,
        in_storage=None,
        out_storage=None,
        in_table_name=None,
        attributes=None,
        out_table_name=None,
        id_name=None,
        nchunks=None,
        debuglevel=0,
    ):
        """
        'what' must be a string, such as 'residential' or 'commercial'.
        """
        debug = DebugPrinter(debuglevel)
        debug.print_debug("Creating DevelopmentProjectDataset object for %s projects." % what, 2)

        self.categories = categories
        self.what = what
        self.attribute_name = attribute_name
        attributes_default = AttributeType.PRIMARY
        dataset_name = "development_project"
        nchunks_default = 1

        if data is not None:
            in_storage = StorageFactory().get_storage("dict_storage")

            in_storage.write_table(table_name="development_projects", table_data=data)
            in_table_name = "development_projects"

        resources = ResourceFactory().get_resources_for_dataset(
            dataset_name,
            resources=resources,
            in_storage=in_storage,
            out_storage=out_storage,
            in_table_name_pair=(in_table_name, None),
            out_table_name_pair=(out_table_name, None),
            attributes_pair=(attributes, attributes_default),
            id_name_pair=(id_name, self.id_name_default),
            nchunks_pair=(nchunks, nchunks_default),
            debug_pair=(debug, None),
        )

        self.category_variable_name = resources.get("category_variable_name", self.category_variable_name_default)
        Dataset.__init__(self, resources=resources)
Developer ID: psrc, Project: urbansim, Lines: 53, Source: development_project_dataset.py

Example 4: run

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
    def run(self, projects, types, units, year=0, location_id_name="grid_id", debuglevel=0):
        debug = DebugPrinter(debuglevel)
        grid_ids_for_any_project = array([], dtype=int32)
        grid_ids_by_project_type = {}
        for project_type in types:
            grid_ids_by_project_type[project_type] = array([], dtype=int32)
            if projects[project_type] is not None:
                grid_ids_by_project_type[project_type] = projects[project_type].get_attribute(location_id_name)
            grid_ids_for_any_project = unique(concatenate((grid_ids_for_any_project, 
                                                                  grid_ids_by_project_type[project_type])))
        grid_ids_for_any_project = grid_ids_for_any_project[where(grid_ids_for_any_project>0)]
        if not len(grid_ids_for_any_project): return
        
        result_data = {location_id_name: grid_ids_for_any_project, 
                       "scheduled_year":(year*ones((grid_ids_for_any_project.size,))).astype(int32)}
        for unit in units:
            result_data[unit] = zeros((grid_ids_for_any_project.size,), dtype=int32)
        for project_type in types:
            result_data["%s_improvement_value" % project_type] = zeros((grid_ids_for_any_project.size,), dtype=int32)
            
        grid_idx=0
        for grid_id in grid_ids_for_any_project:
            for i in range(0,len(types)):
                project_type = types[i]
                my_projects = projects[project_type]
                w = where(my_projects.get_attribute(location_id_name) == grid_id)[0]
                if w.size>0:
                    unit_variable = units[i]
                    result_data[unit_variable][grid_idx] = \
                        my_projects.get_attribute_by_index( 
                            my_projects.get_attribute_name(), w).sum()
                    result_data["%s_improvement_value" % project_type][grid_idx] = \
                        my_projects.get_attribute_by_index( 
                            "improvement_value", w).sum()
            grid_idx += 1  
        
        storage = StorageFactory().get_storage('dict_storage')

        eventset_table_name = 'development_events_generated'        
        storage.write_table(table_name=eventset_table_name, table_data=result_data)

        eventset = DevelopmentEventDataset(
            in_storage = storage, 
            in_table_name = eventset_table_name, 
            id_name = [location_id_name, "scheduled_year"],
            ) 
                                      
        debug.print_debug("Number of events: " + str(grid_ids_for_any_project.size), 3)
        return eventset
Developer ID: psrc, Project: urbansim, Lines: 51, Source: development_event_transition_model.py

Example 5: __init__

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
    def __init__(self, resources=None, what="household", in_storage=None,
                 in_table_name=None, out_storage=None, out_table_name=None, 
                 id_name=None, nchunks=None, debuglevel=0):
        ## TODO remove "what" argument
        
        debug = DebugPrinter(debuglevel)
        debug.print_debug("Creating ControlTotalDataset object for "+what+".",2)
        
        if not self.in_table_name_default:
            self.in_table_name_default = "annual_" + what + "_control_totals"
        if not self.out_table_name_default:         
            self.out_table_name_default = "annual_" + what + "_control_totals"
            
        attributes_default = AttributeType.PRIMARY
        #dataset_name = "control_total"
        nchunks_default = 1

        resources = ResourceFactory().get_resources_for_dataset(
            self.dataset_name,
            resources=resources,
            in_storage=in_storage,
            out_storage=out_storage,
            in_table_name_pair=(in_table_name,self.in_table_name_default),
            attributes_pair=(None, attributes_default),
            out_table_name_pair=(out_table_name, self.out_table_name_default),
            id_name_pair=(id_name,self.id_name_default),
            nchunks_pair=(nchunks,nchunks_default),
            debug_pair=(debug,None)
            )
        
        table_name = resources["in_table_name"]
        if resources['id_name'] is None or len(resources['id_name'])== 0:
            #if both self.id_name_default and id_name argument in __init__ is unspecified, 
            #ControlTotalDataset would use all attributes not beginning with "total"
            #as id_name
            id_names = []
            column_names = resources["in_storage"].get_column_names(table_name)
            for column_name in column_names:
                if not re.search('^total', column_name):
                    id_names.append(column_name)
            resources.merge({"id_name":resources["id_name"] + id_names})

        Dataset.__init__(self, resources = resources)
Developer ID: christianurich, Project: VIBe2UrbanSim, Lines: 45, Source: control_total_dataset.py
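The id_name fallback in the excerpt above keeps every column whose name does not begin with "total". A quick stand-alone illustration of that filter, using made-up column names:

import re

column_names = ["year", "building_use_id", "total_number_of_businesses"]
id_names = [name for name in column_names if not re.search('^total', name)]
print(id_names)   # ['year', 'building_use_id']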

Example 6: ActiveDevelopmentsModel

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
class ActiveDevelopmentsModel(Model):
    """
    
    If you have questions, contact Jesse Ayers at MAG:  [email protected]
    
    """

    model_name = "Active Developments Model"
    model_short_name = "ADM"

    def __init__(self, debuglevel=0):
        self.debug = DebugPrinter(debuglevel)
        self.debuglevel = debuglevel

    def run(
        self,
        percent_active_development=100,
        build_minimum_units=False,
        year=None,
        start_year=None,
        dataset_pool=None,
        capacity_this_year_variable="mag_zone.active_development.capacity_this_year",
    ):
        # General TODO:
        #    - deal w/ "other_spaces" columns
        #    - look at generalizing the weight used when building units
        #    - build unit test for minimum build feature

        # LIST OF MODEL ASSUMPTIONS:
        #    - TODO: can i generalize the need for these pre-defined variables?
        #    - the model expects variables to exist that correspond to this naming pattern
        #      for every is_developing building_type_name in the building_types dataset:
        #        - total_<building_type_name>_units_col
        #        - occupied_<building_type_name>_units_col
        #    - building_type_name must be unique, lowercase, contain no spaces
        #    - target_vacancy.is_developing defines which building_types are considered

        # Minimum build feature
        #    - The user can specify 2 additional columns in the building_types dataset:
        #        - adm_minimum_annual_build_units
        #        - adm_minimum_annual_build_max_year
        #    - If these fields are present, and the "build_minimum_units" run option is set to True
        #        - The model will utilize the information in the fields to build the minimum # of units annually
        #          specified in the building_types table up to the maximum year specified in the table.  This feature
        #          is designed to simulate the case when demand is too low to build new units, some will be built anyway

        # CURRENT LIST OF KNOWN ISSUES:
        #    -

        # Get current simulation year
        if year is None:
            simulation_year = SimulationState().get_current_time()
        else:
            simulation_year = year

        # only run if start_year
        if start_year:
            if start_year > simulation_year:
                return

        # Get the percent_active_development
        # convert it to a float
        percent_active_development = percent_active_development / 100.0

        # Get the dataset pool
        if dataset_pool is None:
            dataset_pool = SessionConfiguration().get_dataset_pool()
        else:
            dataset_pool = dataset_pool

        # get the active_developments dataset, subset it for actually active projects
        # compute some variables
        developments_dataset = dataset_pool.get_dataset("active_developments")
        active_developments_capacity = developments_dataset.compute_variables([capacity_this_year_variable])
        # TODO: need to further filter active developments, not only by start_year<=simulation_year,
        #       but also by whether they are built out, etc.
        active_developments_index = where(developments_dataset.get_attribute("start_year") <= simulation_year)[0]
        active_developments_capacity_this_year = active_developments_capacity[active_developments_index]
        # debug help
        self.debug.print_debug("\n*** BEGIN DEBUG INFO:", 1)
        self.debug.print_debug("len(active_developments_index) = %s" % len(active_developments_index), 1)
        self.debug.print_debug("len(active_developments_index) = %s" % len(active_developments_index), 1)
        self.debug.print_debug(
            "len(active_developments_capacity_this_year) = %s" % len(active_developments_capacity_this_year), 1
        )
        self.debug.print_debug("END DEBUG INFO ***\n", 1)

        # get the target_vacancy_rates dataset
        target_vacancy_rates_dataset = dataset_pool.get_dataset("target_vacancy")
        # get target vacancy rates for this simulation_year
        this_year_index = where(target_vacancy_rates_dataset.get_attribute("year") == simulation_year)[0]
        target_vacancies_for_this_year = DatasetSubset(target_vacancy_rates_dataset, this_year_index)
        # get some columns
        bldg_types = target_vacancies_for_this_year.get_attribute("building_type_id")
        tgt_vacancies = target_vacancies_for_this_year.get_attribute("target_vacancy")
        # get unique building types
        unique_building_types = unique1d(bldg_types)
        # build a dictionary containing building_type_id:{'target_vacancy_rate':<float>}
        developing_building_types_info = {}
        for unique_building_type in unique_building_types:
#......... part of the code omitted here .........
Developer ID: psrc, Project: urbansim, Lines: 103, Source: active_developments_model.py

Example 7: Variable

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]

#......... part of the code omitted here .........
        except:
            return
        from opus_core.datasets.interaction_dataset import InteractionDataset
        dataset = self.get_dataset()
        dependencies = self.get_current_dependencies()
        my_dataset_name = dataset.get_dataset_name()
        for iattr in range(len(dependencies)): # iterate over dependent variables
            dep_item = dependencies[iattr][0]
            if isinstance(dep_item, str):
                depvar_name = VariableName(dep_item)
            else:
                depvar_name = dep_item.get_variable_name() # dep_item should be an instance of AttributeBox
            dataset_name = depvar_name.get_dataset_name()
            if dataset_name == my_dataset_name:
                ds = dataset
            else:
                ds = SessionConfiguration().get_dataset_from_pool(dataset_name)
                #ds = dataset_pool.get_dataset('dataset_name')
            if not isinstance(ds, InteractionDataset):
                short_name = depvar_name.get_alias()
                if short_name not in ds.get_id_name():   
                    ds.flush_attribute(depvar_name)
        
    def compute(self, dataset_pool):
        """Returns the result of this variable.  Private use only."""
        raise NotImplementedError, "compute() method not implemented for this variable."
    
    def is_lag_variable(self):
        """Not a lag variable unless this function has been overridden to return True"""
        return False

    def _compute_and_check(self, dataset_pool):
        if has_this_method(self, "pre_check"):
            self.debug.print_debug("Running pre_check() for " + self.__class__.__module__,4)
            self.pre_check(dataset_pool)
        else:
            self.debug.print_debug("No pre_check() defined for " + self.__class__.__module__,4)
        values = self.compute(dataset_pool)
        if has_this_method(self, "post_check"):
            self.debug.print_debug("Running post_check() for " + self.__class__.__module__,4)
            self.post_check(values, dataset_pool)
        else:
            self.debug.print_debug("No post_check() defined for " + self.__class__.__module__,4)
        return values
        
    def compute_with_dependencies(self, dataset_pool, arguments={}):
        self._solve_dependencies(dataset_pool)
        if self.should_check(arguments):
            self.debug.print_debug("Computing and checking " + self.__class__.__module__,3)
            values = self._compute_and_check(dataset_pool)
        else:
            values = self.compute(dataset_pool)
        self.number_of_compute_runs += 1
        if self._return_type:
            return self._cast_values(values, arguments)
        return values

    if longlong == int32:
        __long_size = 2**31 - 1
    else:
        __long_size = 2**63 - 1
        
    _max_storable_value = {"bool8":1,
                            "int8":2**7 - 1,
                            "uint8":2**8 - 1,
                            "int16":2**15 - 1,
Developer ID: christianurich, Project: VIBe2UrbanSim, Lines: 70, Source: variable.py

Example 8: RateBasedModel

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
class RateBasedModel(Model):
    """Chooses agents for relocation (according to probabilities computed by the probabilities class).
    It includes all jobs that are unplaced. If probabilities is set to None, only unplaced agents are chosen.
    The run method returns indices of the chosen agents.
    """
    model_name = 'Rate Based Model'
    
    def __init__(self,
                 probabilities = "opus_core.upc.rate_based_probabilities",
                 choices = "opus_core.random_choices",
                 model_name = None,
                 debuglevel=0,
                 resources=None
                 ):
        if model_name is not None:
            self.model_name = model_name
        self.debug = DebugPrinter(debuglevel)
        self.upc_sequence = None
        if probabilities is not None:
            self.upc_sequence = UPCFactory().get_model(utilities=None,
                                                       probabilities=probabilities,
                                                       choices=choices,
                                                       debuglevel=debuglevel)
        self.resources = merge_resources_if_not_None(resources)
        
    def run(self, agent_set, 
            resources=None, 
            reset_attribute_value={}):
        self.resources.merge(resources)
        
        if agent_set.size()<=0:
            agent_set.get_id_attribute()
            if agent_set.size()<= 0:
                self.debug.print_debug("Nothing to be done.",2)
                return array([], dtype='int32')

        if self.upc_sequence and (self.upc_sequence.probability_class.rate_set or self.resources.get('rate_set', None)):
            self.resources.merge({agent_set.get_dataset_name():agent_set}) #to be compatible with old-style one-relocation_probabilities-module-per-model
            self.resources.merge({'agent_set':agent_set})
            choices = self.upc_sequence.run(resources=self.resources)
            # choices have value 1 for agents that should be relocated, otherwise 0.
            movers_indices = where(choices>0)[0]
        else:
            movers_indices = array([], dtype='int32')

        if reset_attribute_value and movers_indices.size > 0:
            for key, value in reset_attribute_value.items():
                agent_set.modify_attribute(name=key, 
                                           data=resize(asarray(value), movers_indices.size),
                                           index=movers_indices)            
        
        logger.log_status("Number of agents sampled based on rates: " + str(movers_indices.size))
        return movers_indices

    def prepare_for_run(self, what=None, 
                        rate_dataset_name="rate",
                        rate_storage=None, 
                        rate_table=None, 
                        probability_attribute=None,
                        sample_rates=False, 
                        n=100, 
                        multiplicator=1, 
                        flush_rates=True):
        """
        what - unused, argument kept to be compatible with old code 
        """
        from opus_core.datasets.dataset_factory import DatasetFactory
        from opus_core.session_configuration import SessionConfiguration
        
        if (rate_storage is None) or ((rate_table is None) and (rate_dataset_name is None)):
            return self.resources
        if not rate_dataset_name:
            rate_dataset_name = DatasetFactory().dataset_name_for_table(rate_table)
        
        rates = DatasetFactory().search_for_dataset(rate_dataset_name,
                                                    package_order=SessionConfiguration().package_order,
                                                    arguments={'in_storage':rate_storage, 
                                                               'in_table_name':rate_table,
                                                           }
                                                    )
        if probability_attribute is not None:
            rates.probability_attribute = probability_attribute
        if sample_rates:
            cache_storage=None
            if flush_rates:
                cache_storage=rate_storage
            rates.sample_rates(n=n, cache_storage=cache_storage,
                                multiplicator=multiplicator)
        self.resources.merge({rate_dataset_name:rates}) #to be compatible with old-style one-relocation_probabilities-module-per-model
        self.resources.merge({'rate_set':rates})
        return self.resources
Developer ID: psrc, Project: urbansim, Lines: 93, Source: rate_based_model.py
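In the run method above, the upc sequence returns a choice of 1 for every agent that should be relocated and 0 otherwise, and the model keeps the indices of the 1s. A tiny stand-alone illustration of that selection step, with an invented choice array:

from numpy import array, where

choices = array([0, 1, 0, 1, 1])       # 1 = agent selected for relocation
movers_indices = where(choices > 0)[0]
print(movers_indices)                  # [1 3 4]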

Example 9: upc_sequence

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
class upc_sequence(object):
    """
        Invokes computation of utilities, probabilities and choices.
    """

    def __init__(self, utility_class=None, probability_class=None, choice_class=None, resources=None, debuglevel=0):
        """utility_class, probability_class, choice_class are objects of the corresponding classes.
            They must have a method 'run'.
        """
        self.utility_class = utility_class
        self.probability_class = probability_class
        self.choice_class = choice_class
        self.resources = resources
        if self.resources == None:
            self.resources = Resources()
        self.utilities = None
        self.probabilities = None
        self.choices = None
        self.debug = DebugPrinter(debuglevel)

    def run(self, data=None, coefficients=None, resources=None):
        local_resources = Resources()
        if resources:
            local_resources.merge(resources)
        last_result = self.compute_utilities(data=data, coefficients=coefficients, resources=local_resources)
        this_result = self.compute_probabilities(resources=local_resources)
        if this_result is not None:
            last_result = this_result
        this_result = self.compute_choices(resources=local_resources)
        if this_result is not None:
            last_result = this_result
        return last_result

    def compute_utilities(self, data=None, coefficients=None, resources=None):
        if self.utility_class is None:
            self.debug.print_debug("No utilities class given.", 10)
            return None
        self.debug.print_debug("compute_utilities ...", 3)
        self.utilities = self.utility_class.run(data, coefficients, resources=resources)
        return self.utilities

    def compute_probabilities(self, resources=None):
        if self.probability_class is None:
            self.debug.print_debug("No probabilities class given.", 10)
            return None
        self.debug.print_debug("compute_probabilities ...", 3)
        self.probabilities = self.probability_class.run(self.utilities, resources=resources)
        return self.probabilities

    def compute_choices(self, resources=None):
        if self.choice_class is None:
            self.debug.print_debug("No choices class given.", 10)
            return None
        self.debug.print_debug("compute_choices ...", 3)
        self.choices = self.choice_class.run(self.probabilities, resources=resources)
        return self.choices

    def get_utilities(self):
        return self.utilities

    def get_probabilities(self):
        return self.probabilities

    def write_probability_sums(self):
        self.probability_class.check_sum(self.probabilities)

    def get_choices(self):
        return self.choices

    def get_choice_histogram(self, min=None, max=None, bins=None):
        """Give an array that represents a histogram of choices."""
        if max == None:
            max = self.choices.max() + 1
        if min == None:
            min = self.choices.min()
        if bins == None:
            bins = max - min
        return histogram(self.get_choices(), min, max, bins)

    def get_probabilities_sum(self):
        """Return probabilities sum along the first axis.
        """
        probs = self.get_probabilities()
        if probs.ndim < 2:
            return probs.sum()
        return reshape(sum(probs, 0), probs.shape[1])

    def plot_choice_histograms(self, capacity, main=""):
        self.plot_histogram(numrows=2)
        self.plot_histogram_with_capacity(capacity)

    def plot_histogram(self, main="", numrows=1, numcols=1, fignum=1):
        """Plot a histogram of choices and probability sums. Expects probabilities as (at least) a 2D array.
        """
        from matplotlib.pylab import bar, xticks, yticks, title, text, axis, figure, subplot

        probabilities = self.get_probabilities()
        if probabilities.ndim < 2:
            raise StandardError, "probabilities must have at least 2 dimensions."
        alts = probabilities.shape[1]
#......... part of the code omitted here .........
Developer ID: apdjustino, Project: DRCOG_Urbansim, Lines: 103, Source: upc_sequence.py
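The upc_sequence excerpt above chains three pluggable components, each exposing a run method: utilities feed probabilities, which feed choices. The self-contained sketch below mimics that chaining with toy stand-in classes; they are illustrations only, assuming a simple linear-utility / softmax / argmax setup, whereas the real opus_core utility, probability and choice classes work with coefficients and Resources objects and are usually wired together via UPCFactory, as in example 8.

# Toy stand-ins for the utility/probability/choice classes driven by
# upc_sequence; each exposes run(), matching the interface in the excerpt above.
import numpy

class ToyUtilities(object):
    def run(self, data, coefficients, resources=None):
        # linear utilities: one row per chooser, one column per alternative
        return numpy.dot(data, coefficients)

class ToyProbabilities(object):
    def run(self, utilities, resources=None):
        # softmax over alternatives, row by row
        exp_u = numpy.exp(utilities - utilities.max(axis=1, keepdims=True))
        return exp_u / exp_u.sum(axis=1, keepdims=True)

class ToyChoices(object):
    def run(self, probabilities, resources=None):
        # pick the highest-probability alternative for each chooser
        return probabilities.argmax(axis=1)

# same utilities -> probabilities -> choices order that upc_sequence.run follows
data = numpy.array([[1.0, 0.5], [0.2, 2.0]])
coefficients = numpy.array([[0.3, 0.7, 0.1], [0.6, 0.2, 0.9]])
utilities = ToyUtilities().run(data, coefficients)
probabilities = ToyProbabilities().run(utilities)
choices = ToyChoices().run(probabilities)
print(choices)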

Example 10: BusinessTransitionModel

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
class BusinessTransitionModel(Model):
    """Creates and removes businesses from business_set."""

    model_name = "Business Transition Model"
    location_id_name = "building_id"
    variable_package = "urbansim_parcel"

    def __init__(self, debuglevel=0):
        self.debug = DebugPrinter(debuglevel)

    def run(self, year, business_set,
            control_totals,
            data_objects=None,
            resources=None):
        business_id_name = business_set.get_id_name()[0]
        control_totals.get_attribute("total_number_of_businesses")
        idx = where(control_totals.get_attribute("year")==year)
        sectors = unique(control_totals.get_attribute_by_index("building_use_id", idx))
        max_id = business_set.get_id_attribute().max()
        business_size = business_set.size()
        new_businesses = {self.location_id_name:array([], dtype='int32'),
                          "building_use_id":array([], dtype='int32'),
                          business_id_name:array([], dtype='int32'),
                          "sqft":array([], dtype=int32),
                          "employees":array([], dtype=int32),}
        compute_resources = Resources(data_objects)
#        compute_resources.merge({job_building_types.get_dataset_name():job_building_types, "debug":self.debug})
        business_set.compute_variables(
            map(lambda x: "%s.%s.is_sector_%s"
                    % (self.variable_package, business_set.get_dataset_name(), x),
                sectors),
            resources = compute_resources)
        remove_businesses = array([], dtype='int32')

        for sector in sectors:
            total_businesses = control_totals.get_data_element_by_id((year,sector)).total_number_of_businesses
            is_in_sector = business_set.get_attribute("is_sector_%s" % sector)
            diff = int(total_businesses - is_in_sector.astype(int8).sum())

            if diff < 0: #
                w = where(is_in_sector == 1)[0]
                sample_array, non_placed, size_non_placed = \
                    get_array_without_non_placed_agents(business_set, w, -1*diff,
                                                         self.location_id_name)
                remove_businesses = concatenate((remove_businesses, non_placed,
                                           sample_noreplace(sample_array, max(0,abs(diff)-size_non_placed))))

            if diff > 0: #
                new_businesses[self.location_id_name]=concatenate((new_businesses[self.location_id_name],zeros((diff,), dtype="int32")))
                new_businesses["building_use_id"]=concatenate((new_businesses["building_use_id"],
                                                               sector*ones((diff,), dtype="int32")))

                available_business_index = where(is_in_sector)[0]
                sampled_business = probsample_replace(available_business_index, diff, None)

                new_businesses["sqft"] = concatenate((new_businesses["sqft"],
                                                     business_set.get_attribute("sqft")[sampled_business]))
                new_businesses["employees"] = concatenate((new_businesses["employees"],
                                                           business_set.get_attribute("employees")[sampled_business]))

                new_max_id = max_id+diff
                new_businesses[business_id_name]=concatenate((new_businesses[business_id_name], arange(max_id+1, new_max_id+1)))
                max_id = new_max_id

        business_set.remove_elements(remove_businesses)
        business_set.add_elements(new_businesses, require_all_attributes=False)
        difference = business_set.size()-business_size
        self.debug.print_debug("Difference in number of businesses: %s (original %s,"
            " new %s, created %s, deleted %s)"
                % (difference,
                   business_size,
                   business_set.size(),
                   new_businesses[business_id_name].size,
                   remove_businesses.size),
            3)
        self.debug.print_debug("Number of unplaced businesses: %s"
            % where(business_set.get_attribute(self.location_id_name) <=0)[0].size,
            3)
        return difference

    def prepare_for_run(self, storage, in_table_name, id_name, **kwargs):
        from urbansim.datasets.control_total_dataset import ControlTotalDataset
        control_totals = ControlTotalDataset(in_storage=storage,
                                             in_table_name=in_table_name,
                                             id_name=id_name
                                         )
#        sample_control_totals(storage, control_totals, **kwargs)
        return control_totals
Developer ID: christianurich, Project: VIBe2UrbanSim, Lines: 90, Source: business_transition_model.py

Example 11: DevelopmentEventTransitionModel

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
class DevelopmentEventTransitionModel(Model):
    """From given types of development projects, e.g. 'residential' or 'commercial', create
    development events, one for a gridcell. Only placed projects are considered.
    It returns an object of class DevelopmentEventDataset.
    """
    def __init__(self, resources=None, debuglevel=0):
        self.debug = DebugPrinter(debuglevel)
        self.resources = resources
        self.model_name = "Development Event Transition Model"
        
    def run(self, developments, year=0, landuse_types=None, units=None, resources=None):
#        landuse_types = ['residential', 'commercial', 'industrial', 'governmental']
#        units=['residential_units', 'commercial_sqft','industrial_sqft','governmental_sqft']
        
        if not isinstance(resources, Resources):
            resources = Resources()

        grid_ids_for_project = array([], dtype=int32)
        if developments is not None:
            grid_ids_for_project = developments.get_attribute("grid_id")
        grid_ids_for_project = unique(grid_ids_for_project)
        grid_ids_for_project = grid_ids_for_project[where(grid_ids_for_project>0)]
        
        if len(grid_ids_for_project)==0: return
        sizes = grid_ids_for_project.size
        result_data = {"grid_id": grid_ids_for_project, 
                       "scheduled_year":(year*ones((sizes,), dtype=int16)),
                       "development_type_id": zeros((sizes,),dtype=int16),
                   }
        
        for unit in units:
            result_data[unit] = zeros((sizes,), dtype=int32)
        for project_type in landuse_types:
            result_data["%s_improvement_value" % project_type] = zeros((sizes,), dtype=int32)
            
        grid_idx=0
        for grid_id in grid_ids_for_project:
            w = where(developments.get_attribute('grid_id') == grid_id)[0]
            if w.size>0:
                result_data["development_type_id"][grid_idx] = \
                    developments.get_attribute_by_index("development_type_id", w[0])
                for unit_variable in units:
                    result_data[unit_variable][grid_idx] = \
                        developments.get_attribute_by_index(unit_variable , w).sum()
                    result_data["%s_improvement_value" % unit_variable.split('_')[0]][grid_idx] = \
                        developments.get_attribute_by_index("improvement_value", w).sum()
            grid_idx += 1
            
        storage = StorageFactory().get_storage('dict_storage')

        eventset_table_name = 'eventset'        
        storage.write_table(
                table_name=eventset_table_name,
                table_data=result_data,
            )
        
        eventset = DevelopmentEventDataset(
            in_storage = storage,
            in_table_name = eventset_table_name, 
            id_name=['grid_id', 'scheduled_year'],
            )
            
        self.debug.print_debug('Number of events: ' + str(grid_ids_for_project.size), 3)
        
        return eventset

    def prepare_for_run(self, model_configuration):
        all_types = []
        all_units = []
        for atype in model_configuration['landuse_development_types']:
            all_types.append(atype)
            all_units.append(model_configuration['landuse_development_types'][atype]['units'])
        return  (all_types, all_units)
Developer ID: christianurich, Project: VIBe2UrbanSim, Lines: 75, Source: development_event_transition_model.py

Example 12: HouseholdTransitionModel

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
class HouseholdTransitionModel(Model):
    """Creates and removes households from household_set. New households are duplicated from the existing households, keeping 
       the joint distribution of all characteristics. 
    """

    model_name = "Household Transition Model"

    def __init__(self, location_id_name="grid_id", dataset_pool=None, debuglevel=0):
        self.debug = DebugPrinter(debuglevel)
        self.location_id_name = location_id_name
        self.dataset_pool = self.create_dataset_pool(dataset_pool, ["urbansim", "opus_core"])

    def run(self, year, household_set, control_totals, characteristics, resources=None):
        self._do_initialize_for_run(household_set)
        control_totals.get_attribute("total_number_of_households") # to make sure they are loaded
        self.characteristics = characteristics
        self.all_categories = self.characteristics.get_attribute("characteristic")
        self.all_categories = array(map(lambda x: x.lower(), self.all_categories))
        self.scaled_characteristic_names = get_distinct_names(self.all_categories).tolist()
        self.marginal_characteristic_names = copy(control_totals.get_id_name())
        index_year = self.marginal_characteristic_names.index("year")
        self.marginal_characteristic_names.remove("year")
        idx = where(control_totals.get_attribute("year")==year)[0]
        self.control_totals_for_this_year = DatasetSubset(control_totals, idx)
        self._do_run_for_this_year(household_set)
        return self._update_household_set(household_set)
        
    def _update_household_set(self, household_set):
        index_of_duplicated_hhs = household_set.duplicate_rows(self.mapping_existing_hhs_to_new_hhs)
        household_set.modify_attribute(name=self.location_id_name, data=-1 * ones((index_of_duplicated_hhs.size,), 
                                                                              dtype=household_set.get_data_type(self.location_id_name)),
                                                                          index=index_of_duplicated_hhs)
        household_set.remove_elements(self.remove_households)
        if self.new_households[self.location_id_name].size > 0:
            max_id = household_set.get_id_attribute().max()
            self.new_households[self.household_id_name]=concatenate((self.new_households[self.household_id_name],
                                                             arange(max_id+1, max_id+self.new_households[self.location_id_name].size+1)))
            household_set.add_elements(self.new_households, require_all_attributes=False)

        difference = household_set.size()-self.household_size
        self.debug.print_debug("Difference in number of households: %s"
            " (original %s, new %s, created %s, deleted %s)"
                % (difference,
                   self.household_size,
                   household_set.size(),
                   self.new_households[self.household_id_name].size + self.mapping_existing_hhs_to_new_hhs.size,
                   self.remove_households.size),
            3)
        if self.location_id_name in household_set.get_attribute_names():
            self.debug.print_debug("Number of unplaced households: %s"
                % where(household_set.get_attribute(self.location_id_name) <=0)[0].size,
                3)
        return difference

    def _do_initialize_for_run(self, household_set):
        self.household_id_name = household_set.get_id_name()[0]
        self.new_households = {
           self.location_id_name:array([], dtype=household_set.get_data_type(self.location_id_name, int32)),
           self.household_id_name:array([], dtype=household_set.get_data_type(self.household_id_name, int32))
                   }
        self.remove_households = array([], dtype='int32')
        self.household_size = household_set.size()
        self.max_id = household_set.get_id_attribute().max()
        self.arrays_from_categories = {}
        self.arrays_from_categories_mapping = {}
        self.mapping_existing_hhs_to_new_hhs = array([], dtype=household_set.get_data_type(self.household_id_name, int32))
        
    def _do_run_for_this_year(self, household_set):
        self.household_set = household_set
        groups = self.control_totals_for_this_year.get_id_attribute()
        self.create_arrays_from_categories(self.household_set)

        all_characteristics = self.arrays_from_categories.keys()
        self.household_set.load_dataset_if_not_loaded(attributes = all_characteristics) # prevents from lazy loading to save runtime
        idx_shape = []
        number_of_combinations=1
        num_attributes=len(all_characteristics)
        for iattr in range(num_attributes):
            attr = all_characteristics[iattr]
            max_bins = self.arrays_from_categories[attr].max()+1
            idx_shape.append(max_bins)
            number_of_combinations=number_of_combinations*max_bins
            if attr not in self.new_households.keys():
                self.new_households[attr] = array([], dtype=self.household_set.get_data_type(attr, float32))

        self.number_of_combinations = int(number_of_combinations)
        idx_tmp = indices(tuple(idx_shape))
        
        categories_index = zeros((self.number_of_combinations,num_attributes))

        for i in range(num_attributes): #create indices of all combinations
            categories_index[:,i] = idx_tmp[i].ravel()

        categories_index_mapping = {}
        for i in range(self.number_of_combinations):
            categories_index_mapping[tuple(categories_index[i,].tolist())] = i

        def get_category(values):
            bins = map(lambda x, y: self.arrays_from_categories[x][int(y)], all_characteristics, values)
            try:
#......... part of the code omitted here .........
Developer ID: psrc, Project: urbansim, Lines: 103, Source: household_transition_model.py

Example 13: create_from_parcel_and_development_template

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]

#......... part of the code omitted here .........
                                                                          in_storage=storage,
                                                                          in_table_name='development_project_proposals',
                                                                          )
        return development_project_proposals
    
    def _compute_filter(proposals):
        if filter_attribute is not None:
            proposals.compute_variables(filter_attribute, dataset_pool=dataset_pool,
                                                          resources=Resources(resources))
            filter_index = where(proposals.get_attribute(filter_attribute) > 0)[0]
            return filter_index
        return None
    
    def _subset_by_filter(proposals):
        filter_index = _compute_filter(proposals)
        if filter_index is not None:
            proposals.subset_by_index(filter_index, flush_attributes_if_not_loaded=False)
        return proposals


    if parcel_index is not None:
        index1 = parcel_index
    else:
        index1 = arange(parcel_dataset.size())

    if template_index is not None:
        index2 = template_index
    else:
        index2 = arange(development_template_dataset.size())

    has_constraint_dataset = True
    try:
        constraints = dataset_pool.get_dataset("development_constraint") 
        constraints.load_dataset_if_not_loaded()
    except:
        has_constraint_dataset = False

    if has_constraint_dataset:
        constraint_types = unique(constraints.get_attribute("constraint_type"))  #unit_per_acre, far etc
        development_template_dataset.compute_variables(map(lambda x: "%s.%s" % (template_opus_path, x), constraint_types), dataset_pool)
            
        parcel_dataset.get_development_constraints(constraints, dataset_pool, 
                                                   index=index1, 
                                                   consider_constraints_as_rules=consider_constraints_as_rules)
        generic_land_use_type_ids = development_template_dataset.compute_variables("urbansim_parcel.development_template.generic_land_use_type_id",
                                                       dataset_pool=dataset_pool)
    parcel_ids = parcel_dataset.get_id_attribute()
    template_ids = development_template_dataset.get_id_attribute()
    
    proposal_parcel_ids = array([],dtype="int32")
    proposal_template_ids = array([],dtype="int32")
    logger.start_block("Combine parcels, templates and constraints")
    for i_template in index2:
        this_template_id = template_ids[i_template]
        fit_indicator = ones(index1.size, dtype="bool8")
        if has_constraint_dataset:
            generic_land_use_type_id = generic_land_use_type_ids[i_template]
            for constraint_type, constraint in parcel_dataset.development_constraints[generic_land_use_type_id].iteritems():
                template_attribute = development_template_dataset.get_attribute(constraint_type)[i_template]  #density converted to constraint variable name
                if template_attribute == 0:
                    continue
                min_constraint = constraint[:, 0].copy()
                max_constraint = constraint[:, 1].copy()
                ## treat -1 as unconstrainted
                w_unconstr = min_constraint == -1
                if w_unconstr.any():
                    min_constraint[w_unconstr] = template_attribute
                
                w_unconstr = max_constraint == -1
                if w_unconstr.any():
                    max_constraint[w_unconstr] = template_attribute

                fit_indicator = logical_and(fit_indicator, 
                                            logical_and(template_attribute >= min_constraint,
                                                        template_attribute <= max_constraint))
                

                if constraint_type == "units_per_acre":
                    res_units_capacity = parcel_dataset.get_attribute("parcel_sqft")[index1] * max_constraint / 43560.0 
                    debug.print_debug("template_id %s (GLU ID %s) max total residential capacity %s, %s of them fit constraints " % (this_template_id, generic_land_use_type_id, res_units_capacity.sum(), (res_units_capacity * fit_indicator).sum() ), 12)
                else:
                    non_res_capacity = parcel_dataset.get_attribute("parcel_sqft")[index1] * max_constraint
                    debug.print_debug("template_id %s (GLU ID %s) max total non residential capacity %s, %s of them fit constraints " % (this_template_id, generic_land_use_type_id, non_res_capacity.sum(), (non_res_capacity * fit_indicator).sum() ), 12)
                
        proposal_parcel_ids = concatenate((proposal_parcel_ids, parcel_ids[index1[fit_indicator]]))
        proposal_template_ids = concatenate( (proposal_template_ids, resize(array([this_template_id]), fit_indicator.sum())))
        
    logger.end_block()
    proposals = _create_project_proposals(proposal_parcel_ids, proposal_template_ids)
    proposals = _subset_by_filter(proposals)

    # eliminate proposals with zero units_proposed
    units_proposed = proposals.compute_variables([proposed_units_variable], dataset_pool = dataset_pool)
    where_up_greater_zero = where(units_proposed > 0)[0]
    if where_up_greater_zero.size > 0:
        proposals.subset_by_index(where_up_greater_zero, flush_attributes_if_not_loaded=False)
    
    logger.log_status("proposal set created with %s proposals." % proposals.size())
    #proposals.flush_dataset_if_low_memory_mode()
    return proposals
Developer ID: christianurich, Project: VIBe2UrbanSim, Lines: 104, Source: development_project_proposal_dataset.py
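The constraint loop in the excerpt above treats -1 as "unconstrained" by substituting the template's own attribute value before testing the min/max bounds, then combines the two tests with logical_and. A small stand-alone sketch of that step, with invented numbers:

from numpy import array, logical_and

template_attribute = 12.0                   # e.g. units_per_acre for one template
min_constraint = array([-1.0,  5.0, 20.0])  # -1 means no lower bound
max_constraint = array([30.0, -1.0, 25.0])  # -1 means no upper bound

min_constraint[min_constraint == -1] = template_attribute
max_constraint[max_constraint == -1] = template_attribute

fit_indicator = logical_and(template_attribute >= min_constraint,
                            template_attribute <= max_constraint)
print(fit_indicator)                        # [ True  True False]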

Example 14: RegressionModel

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
class RegressionModel(ChunkModel):

    model_name = "Regression Model"
    model_short_name = "RM"

    def __init__(self, regression_procedure="opus_core.linear_regression",
                  submodel_string=None,
                  run_config=None, estimate_config=None, debuglevel=0, dataset_pool=None):
 
        self.debug = DebugPrinter(debuglevel)

        self.dataset_pool = self.create_dataset_pool(dataset_pool)

        self.regression = RegressionModelFactory().get_model(name=regression_procedure)
        if self.regression == None:
            raise StandardError, "No regression procedure given."

        self.submodel_string = submodel_string

        self.run_config = run_config
        if self.run_config == None:
            self.run_config = Resources()
        if not isinstance(self.run_config,Resources) and isinstance(self.run_config, dict):
            self.run_config = Resources(self.run_config)

        self.estimate_config = estimate_config
        if self.estimate_config == None:
            self.estimate_config = Resources()
        if not isinstance(self.estimate_config,Resources) and isinstance(self.estimate_config, dict):
            self.estimate_config = Resources(self.estimate_config)
            
        self.data = {}
        self.coefficient_names = {}
        ChunkModel.__init__(self)
        self.get_status_for_gui().initialize_pieces(3, pieces_description = array(['initialization', 'computing variables', 'submodel: 1']))

    def run(self, specification, coefficients, dataset, index=None, chunk_specification=None,
            data_objects=None, run_config=None, initial_values=None, procedure=None, debuglevel=0):
        """'specification' is of type EquationSpecification,
            'coefficients' is of type Coefficients,
            'dataset' is of type Dataset,
            'index' are indices of individuals in dataset for which
                        the model runs. If it is None, the whole dataset is considered.
            'chunk_specification' determines  number of chunks in which the simulation is processed.
            'data_objects' is a dictionary where each key is the name of an data object
            ('zone', ...) and its value is an object of class  Dataset.
           'run_config' is of type Resources, it gives additional arguments for the run.
           If 'procedure' is given, it overwrites the regression_procedure of the constructor.
           'initial_values' is an array of the initial values of the results. It will be overwritten
           by the results for those elements that are handled by the model (defined by submodels in the specification).
           By default the results are initialized with 0.
            'debuglevel' overwrites the constructor 'debuglevel'.
        """
        self.debug.flag = debuglevel
        if run_config == None:
            run_config = Resources()
        if not isinstance(run_config,Resources) and isinstance(run_config, dict):
            run_config = Resources(run_config)
        self.run_config = run_config.merge_with_defaults(self.run_config)
        self.run_config.merge({"debug":self.debug})
        if data_objects is not None:
            self.dataset_pool.add_datasets_if_not_included(data_objects)
        self.dataset_pool.replace_dataset(dataset.get_dataset_name(), dataset)
        if procedure is not None: 
            self.regression = RegressionModelFactory().get_model(name=procedure)
        if initial_values is None:
            self.initial_values = zeros((dataset.size(),), dtype=float32)
        else:
            self.initial_values = zeros((dataset.size(),), dtype=initial_values.dtype)
            self.initial_values[index] = initial_values
            
        if dataset.size()<=0: # no data loaded yet
            dataset.get_id_attribute()
        if index == None:
            index = arange(dataset.size())
            
        result = ChunkModel.run(self, chunk_specification, dataset, index, float32,
                                 specification=specification, coefficients=coefficients)
        return result

    def run_chunk (self, index, dataset, specification, coefficients):
        self.specified_coefficients = SpecifiedCoefficients().create(coefficients, specification, neqs=1)
        compute_resources = Resources({"debug":self.debug})
        submodels = self.specified_coefficients.get_submodels()
        self.get_status_for_gui().update_pieces_using_submodels(submodels=submodels, leave_pieces=2)
        self.map_agents_to_submodels(submodels, self.submodel_string, dataset, index,
                                      dataset_pool=self.dataset_pool, resources = compute_resources)
        variables = self.specified_coefficients.get_full_variable_names_without_constants()
        self.debug.print_debug("Compute variables ...",4)
        self.increment_current_status_piece()
        dataset.compute_variables(variables, dataset_pool = self.dataset_pool, resources = compute_resources)
        data = {}
        coef = {}
        outcome=self.initial_values[index].copy()
        for submodel in submodels:
            coef[submodel] = SpecifiedCoefficientsFor1Submodel(self.specified_coefficients,submodel)
            self.coefficient_names[submodel] = coef[submodel].get_coefficient_names_without_constant()[0,:]
            self.debug.print_debug("Compute regression for submodel " +str(submodel),4)
            self.increment_current_status_piece()
            self.data[submodel] = dataset.create_regression_data(coef[submodel],
#......... part of the code omitted here .........
Developer ID: apdjustino, Project: DRCOG_Urbansim, Lines: 103, Source: regression_model.py

Example 15: BusinessTransitionModel

# Required import: from opus_core.misc import DebugPrinter [as alias]
# Or: from opus_core.misc.DebugPrinter import print_debug [as alias]
class BusinessTransitionModel(Model):
    """Creates and removes businesses from business_set."""
    
    model_name = "Business Transition Model"
    location_id_name = "building_id"
    variable_package = "sanfrancisco"
    
    def __init__(self, debuglevel=0):
        self.debug = DebugPrinter(debuglevel)
        
    def run(self, year, business_set, 
            control_totals, 
            dataset_pool=None, 
            resources=None):
        self.business_id_name = business_set.get_id_name()[0]
        control_for_businesses = False # If this is False, it is controlled for jobs
        if "total_number_of_businesses" in control_totals.get_known_attribute_names():
            control_for_businesses = True
            control_totals.get_attribute("total_number_of_businesses")
        else:
            control_totals.get_attribute("total_number_of_jobs")
        idx = where(control_totals.get_attribute("year")==year)
        sectors = unique(control_totals.get_attribute_by_index("sector_id", idx))
        self.max_id = business_set.get_id_attribute().max()
        business_size = business_set.size()
        self.new_businesses = {self.location_id_name:array([], dtype='int32'), 
                          "sector_id":array([], dtype='int32'),
                          self.business_id_name:array([], dtype='int32'), 
                          "sqft":array([], dtype=int32),
                          "employment":array([], dtype='int32'),
                          "activity_id":array([], dtype='int32')}

        business_set.compute_variables(
            map(lambda x: "%s.%s.is_of_sector_%s" 
                    % (self.variable_package, business_set.get_dataset_name(), x), sectors), 
                dataset_pool=dataset_pool, resources = resources)
        self.remove_businesses = array([], dtype='int32')
            
        for sector in sectors:
            b_is_in_sector = business_set.get_attribute("is_of_sector_%s" % sector)
            if control_for_businesses:
                total_businesses = control_totals.get_data_element_by_id((year,sector)).total_number_of_businesses
                diff = int(total_businesses - b_is_in_sector.astype(int8).sum())
                self._do_sector_for_businesses(sector, diff, business_set, b_is_in_sector)
            else:
                total_jobs = control_totals.get_data_element_by_id((year,sector)).total_number_of_jobs
                diff = int(total_jobs - business_set.get_attribute_by_index("employment", b_is_in_sector).sum())
                self._do_sector_for_jobs(sector, diff, business_set, b_is_in_sector)
             
        business_set.remove_elements(self.remove_businesses)
        business_set.add_elements(self.new_businesses, require_all_attributes=False)
        difference = business_set.size()-business_size
        self.debug.print_debug("Difference in number of businesses: %s (original %s,"
            " new %s, created %s, deleted %s)" 
                % (difference, 
                   business_size, 
                   business_set.size(), 
                   self.new_businesses[self.business_id_name].size, 
                   self.remove_businesses.size), 
            3)
        self.debug.print_debug("Number of unplaced businesses: %s" 
            % where(business_set.get_attribute(self.location_id_name) <=0)[0].size, 
            3)
        return difference
    
    def _do_sector_for_businesses(self, sector, diff, business_set, is_in_sector):
        available_business_index = where(is_in_sector)[0]
        if diff < 0: #
            sample_array, non_placed, size_non_placed = \
                get_array_without_non_placed_agents(business_set, available_business_index, -1*diff, 
                                                     self.location_id_name)
            self.remove_businesses = concatenate((self.remove_businesses, non_placed, 
                                       sample_noreplace(sample_array, max(0,abs(diff)-size_non_placed))))
                            
        if diff > 0: #
            self.new_businesses[self.location_id_name]=concatenate((self.new_businesses[self.location_id_name],zeros((diff,))))
            self.new_businesses["sector_id"]=concatenate((self.new_businesses["sector_id"], sector*ones((diff,))))
            sampled_business = probsample_replace(available_business_index, diff, None)
            self.new_businesses["sqft"] = concatenate((self.new_businesses["sqft"],
                                                 business_set.get_attribute("sqft")[sampled_business]))
            self.new_businesses["employment"] = concatenate((self.new_businesses["employment"],
                                                       business_set.get_attribute("employment")[sampled_business]))
            self.new_businesses["activity_id"] = concatenate((self.new_businesses["activity_id"],
                                                       business_set.get_attribute("activity_id")[sampled_business]))
            
            new_max_id = self.max_id+diff
            self.new_businesses[self.business_id_name]=concatenate((self.new_businesses[self.business_id_name], 
                                                                    arange(self.max_id+1, new_max_id+1)))
            self.max_id = new_max_id
                
    def _do_sector_for_jobs(self, sector, diff, business_set, b_is_in_sector):
        # diff is a difference in jobs (not businesses)
        employment = business_set.get_attribute('employment')
        available_business_index = where(b_is_in_sector)[0]
        if diff < 0: #
            placed, non_placed, size_non_placed = \
                get_array_without_non_placed_agents(business_set, available_business_index, -1*available_business_index.size, 
                                                     self.location_id_name)
            consider_for_removing = concatenate((permutation(non_placed), permutation(placed)))
            empl_cumsum = cumsum(employment[consider_for_removing])
#......... part of the code omitted here .........
Developer ID: christianurich, Project: VIBe2UrbanSim, Lines: 103, Source: business_transition_model.py


Note: The opus_core.misc.DebugPrinter.print_debug examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors; for distribution and use, refer to the License of the corresponding project. Do not reproduce without permission.