

Python SessionConfiguration.compute_variables Method Code Examples

This article collects typical usage examples of the Python method opus_core.session_configuration.SessionConfiguration.compute_variables. If you are wondering what SessionConfiguration.compute_variables does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples of SessionConfiguration, the class in opus_core.session_configuration that defines this method.


Four code examples of the SessionConfiguration.compute_variables method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
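
Before turning to the examples, here is a minimal sketch of the setup pattern they all share: create a SessionConfiguration bound to an attribute cache, pull a dataset from its pool, and call compute_variables on that dataset. The cache path, year, package order, dataset name, and variable expression below are placeholders, and the SimulationState and AttributeCache import paths are assumptions based on the usual opus_core layout.

# Minimal sketch (not taken from the examples below); paths, years, dataset and
# variable names are hypothetical.
from opus_core.session_configuration import SessionConfiguration
from opus_core.simulation_state import SimulationState             # assumed import path
from opus_core.store.attribute_cache import AttributeCache         # assumed import path

SimulationState().set_cache_directory('/path/to/base_year_data')   # hypothetical cache
SimulationState().set_current_time(2010)                           # hypothetical year
SessionConfiguration(new_instance=True,
                     package_order=['urbansim', 'opus_core'],      # assumed package order
                     in_storage=AttributeCache())

dataset = SessionConfiguration().get_dataset_from_pool('zone')     # hypothetical dataset
values = dataset.compute_variables(names=['urbansim.zone.population'])  # hypothetical variable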

Example 1: run

# Required import: from opus_core.session_configuration import SessionConfiguration [as alias]
# Or: from opus_core.session_configuration.SessionConfiguration import compute_variables [as alias]
 def run(self, year, condition=None, max_iter=10):
     """
     'year' is the current year of the simulation.
     'condition' should be a boolean expression defined on any dataset.
     The method iterates over the given models until all values of the expression are True. 
     'max_iter' gives the maximum number of iterations to run, if 'condition' is not fulfilled.
     If it is None, there is no limit and thus, the condition must be fulfilled in order to terminate.
     If 'condition' is None, the set of models is run only once.
     """
     self.config['years'] = (year, year)
     if condition is None:
         return self.model_system.run_in_same_process(self.config)
     dataset_pool = SessionConfiguration().get_dataset_pool()
     variable_name = VariableName(condition)
     dataset = dataset_pool.get_dataset(variable_name.get_dataset_name())
     condition_value = dataset.compute_variables(variable_name, dataset_pool=dataset_pool)
     result = None
     iter = 1
     while not alltrue(condition_value):
         result = self.model_system.run_in_same_process(self.config)
         if max_iter is not None and iter > max_iter:
             break
         iter = iter + 1
         # force to recompute the condition
         dataset = SessionConfiguration().get_dataset_pool().get_dataset(variable_name.get_dataset_name())
         dataset.delete_computed_attributes()
         condition_value = dataset.compute_variables(variable_name, 
                                                     dataset_pool=SessionConfiguration().get_dataset_pool())
     if not alltrue(condition_value):
         logger.log_status('%s did not converge. Maximum number of iterations (%s) reached.' % (self.model_name, max_iter))
     else:
         logger.log_status('%s converged in %s iterations.' % (self.model_name, iter-1))  
     return result
Developer ID: apdjustino, Project: DRCOG_Urbansim, Lines of code: 35, Source file: iterative_meta_model.py
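
The condition handling inside run() is the part that exercises compute_variables: the expression is wrapped in a VariableName, the owning dataset is looked up in the pool, and the computed values are tested with alltrue. A condensed sketch of just that step follows, assuming SessionConfiguration has already been initialized against a cache as in the sketch after the introduction; the condition expression is a placeholder, the VariableName import path is assumed from the usual opus_core layout, and alltrue is assumed to be numpy's.

from numpy import alltrue                                          # assumed source of alltrue
from opus_core.session_configuration import SessionConfiguration
from opus_core.variables.variable_name import VariableName         # assumed import path

condition = 'urbansim.zone.number_of_jobs >= 0'                    # hypothetical boolean expression
dataset_pool = SessionConfiguration().get_dataset_pool()
variable_name = VariableName(condition)
# find the dataset the expression is defined on and compute it there
dataset = dataset_pool.get_dataset(variable_name.get_dataset_name())
condition_value = dataset.compute_variables(variable_name, dataset_pool=dataset_pool)
condition_met = alltrue(condition_value)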

Example 2: _test_generate_results

# Required import: from opus_core.session_configuration import SessionConfiguration [as alias]
# Or: from opus_core.session_configuration.SessionConfiguration import compute_variables [as alias]
    def _test_generate_results(self, indicator_name, dataset_name, expression, source):

        # grab the first base_year_data in results_manager/simulation_runs and
        # fetch the year for it
        base_year = self.project.find("results_manager/simulation_runs/run[@name='base_year_data']/end_year")
        if base_year is None:
            return False, "Project doesn't have any base year data to check against"

        start_year = int(base_year.text)
        result_generator = OpusResultGenerator(self.project)
        result_generator.set_data(
               source_data_name = 'base_year_data',
               indicator_name = indicator_name,
               dataset_name = dataset_name,
               years = [start_year,],
               indicator_definition = (expression, source))

        interface = IndicatorFrameworkInterface(self.project)
        src_data = interface.get_source_data(source_data_name = 'base_year_data', years = [start_year,])
        SimulationState().set_current_time(start_year)
        SimulationState().set_cache_directory(src_data.cache_directory)
        SessionConfiguration(
            new_instance = True,
            package_order = src_data.dataset_pool_configuration.package_order,
            in_storage = AttributeCache())


        dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
        if isinstance(dataset,InteractionDataset):
            # create a subset if it's an interaction dataset...
            dataset_arguments = {
                 'index1':numpy.random.randint(0,dataset.dataset1.size(), size=100),
                 'index2':numpy.random.randint(0,dataset.dataset2.size(), size=100)
            }
            SessionConfiguration().delete_datasets()
            dataset = SessionConfiguration().get_dataset_from_pool(dataset_name,
                                                                   dataset_arguments = dataset_arguments)

        try:
            dataset.compute_variables(names = [expression])
            return True, None
        except Exception, e:
            type, value, tb = sys.exc_info()
            stack_dump = ''.join(traceback.format_exception(type, value, tb))
            errors = "{}\n\n{}".format(e, stack_dump)
            return False, errors
Developer ID: janowicz, Project: urbansim_drcog, Lines of code: 48, Source file: variable_validator.py
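
The core of the check above can be condensed into a small helper: fetch the dataset from the pool and see whether compute_variables accepts the expression. This is only a sketch, assuming SessionConfiguration has already been initialized against a cache as in the earlier examples; the dataset name and expression passed at the bottom are placeholders.

import sys
import traceback
from opus_core.session_configuration import SessionConfiguration

def expression_is_valid(dataset_name, expression):
    # try to compute the expression on the pooled dataset; report the traceback on failure
    dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
    try:
        dataset.compute_variables(names=[expression])
        return True, None
    except Exception:
        return False, ''.join(traceback.format_exception(*sys.exc_info()))

ok, errors = expression_is_valid('zone', 'urbansim.zone.population')  # hypothetical inputs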

Example 3: visualize

# Required import: from opus_core.session_configuration import SessionConfiguration [as alias]
# Or: from opus_core.session_configuration.SessionConfiguration import compute_variables [as alias]
    def visualize(self, 
                  indicators_to_visualize,
                  computed_indicators):
        """Create a map for the given indicator, save it to the cache
        directory's 'indicators' sub-directory."""
        
        #TODO: eliminate this example indicator stuff
        example_indicator = computed_indicators[indicators_to_visualize[0]]
        source_data = example_indicator.source_data        
        dataset_to_attribute_map = {}
        
        package_order = source_data.get_package_order()
            
            
        self._create_input_stores(years = source_data.years)

        for name, computed_indicator in computed_indicators.items():
            if name not in indicators_to_visualize: continue
            
            if computed_indicator.source_data != source_data:
                raise Exception('result templates in indicator batch must all be the same.')
            dataset_name = computed_indicator.indicator.dataset_name
            if dataset_name == 'parcel':
                raise Exception('Cannot create a Matplotlib map for parcel dataset. Please plot at a higher geographic aggregation')
            if dataset_name not in dataset_to_attribute_map:
                dataset_to_attribute_map[dataset_name] = []
            dataset_to_attribute_map[dataset_name].append(name)
        
        viz_metadata = []
        for dataset_name, indicator_names in dataset_to_attribute_map.items():  
            attributes = [(name,computed_indicators[name].get_computed_dataset_column_name())
                          for name in indicator_names]                  
            for year in source_data.years:
                SessionConfiguration(
                    new_instance = True,
                    package_order = package_order,
                    in_storage = AttributeCache()) 
                SimulationState().set_cache_directory(source_data.cache_directory)
                SimulationState().set_current_time(year)
                dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
                dataset.load_dataset()

                if dataset.get_coordinate_system() is not None:
                    dataset.compute_variables(names = dataset.get_coordinate_system())
                
                for indicator_name, computed_name in attributes:
                        
                    indicator = computed_indicators[indicator_name]
                    
                    table_data = self.input_stores[year].load_table(
                        table_name = dataset_name,
                        column_names = [computed_name])

                    if computed_name in table_data:

                        table_name = self.get_name(
                            dataset_name = dataset_name,
                            years = [year],
                            attribute_names = [indicator_name])
                        
                        if self.scale: 
                            min_value, max_value = self.scale
                        else:
                            min_value, max_value = (None, None)
                        
                        file_path = os.path.join(self.storage_location,
                                             table_name+ '.' + self.get_file_extension())
                        
                        dataset.add_attribute(name = str(computed_name), 
                                              data = table_data[computed_name])
                        
                        if not os.path.exists(file_path):
                            dataset.plot_map(
                                 name = str(computed_name),
                                 min_value = min_value, 
                                 max_value = max_value, 
                                 file = str(file_path), 
                                 my_title = str(indicator_name), 
                                 #filter = where(table_data[computed_name] != -1)
                                 #filter = 'urbansim.gridcell.is_fully_in_water'                                 
                            )
                        
        #                    self.plot_map(dataset = dataset, 
        #                                  attribute_data = table_data[computed_name], 
        #                                  min_value = min_value, 
        #                                  max_value = max_value, 
        #                                  file = file_path, 
        #                                  my_title = indicator_name, 
        #                                  filter = where(table_data[computed_name] != -1))
        
                        metadata = ([indicator_name], table_name, [year])
                        viz_metadata.append(metadata)
                    else:
                        logger.log_warning('There is no computed indicator %s'%computed_name)
                
        visualization_representations = []
        for indicator_names, table_name, years in viz_metadata:
            visualization_representations.append(
                self._get_visualization_metadata(
                    computed_indicators = computed_indicators,
#......... remainder of the code omitted .........
Developer ID: apdjustino, Project: DRCOG_Urbansim, Lines of code: 103, Source file: matplotlib_map.py
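
The year loop above follows a pattern worth isolating: SessionConfiguration is rebuilt for every year so that the dataset pool reads that year's cached attributes, and the coordinate-system variables needed for mapping are computed via compute_variables. The sketch below keeps only that skeleton; the years, cache path, package order, and dataset name are placeholders, and the SimulationState and AttributeCache import paths are assumed as before.

from opus_core.session_configuration import SessionConfiguration
from opus_core.simulation_state import SimulationState             # assumed import path
from opus_core.store.attribute_cache import AttributeCache         # assumed import path

for year in [2010, 2015, 2020]:                                    # hypothetical years
    # a fresh SessionConfiguration makes the pool read this year's cached attributes
    SessionConfiguration(new_instance=True,
                         package_order=['urbansim', 'opus_core'],  # assumed package order
                         in_storage=AttributeCache())
    SimulationState().set_cache_directory('/path/to/run_cache')    # hypothetical cache
    SimulationState().set_current_time(year)
    dataset = SessionConfiguration().get_dataset_from_pool('zone')  # hypothetical dataset
    dataset.load_dataset()
    if dataset.get_coordinate_system() is not None:
        # compute the coordinate attributes the plotting code relies on
        dataset.compute_variables(names=dataset.get_coordinate_system())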

Example 4: visualize

# Required import: from opus_core.session_configuration import SessionConfiguration [as alias]
# Or: from opus_core.session_configuration.SessionConfiguration import compute_variables [as alias]
    def visualize(self, indicators_to_visualize, computed_indicators):
        """Create a map for the given indicator, save it to the cache
        directory's 'indicators' sub-directory."""

        # TODO: eliminate this example indicator stuff
        example_indicator = computed_indicators[indicators_to_visualize[0]]
        source_data = example_indicator.source_data
        dataset_to_attribute_map = {}

        package_order = source_data.get_package_order()

        self._create_input_stores(years=source_data.years)

        for name, computed_indicator in computed_indicators.items():
            if name not in indicators_to_visualize:
                continue

            if computed_indicator.source_data != source_data:
                raise Exception("result templates in indicator batch must all be the same.")
            dataset_name = computed_indicator.indicator.dataset_name
            if dataset_name not in dataset_to_attribute_map:
                dataset_to_attribute_map[dataset_name] = []
            dataset_to_attribute_map[dataset_name].append(name)

        viz_metadata = []
        for dataset_name, indicator_names in dataset_to_attribute_map.items():
            attributes = [
                (name, computed_indicators[name].get_computed_dataset_column_name()) for name in indicator_names
            ]

            for year in source_data.years:
                SessionConfiguration(new_instance=True, package_order=package_order, in_storage=AttributeCache())
                SimulationState().set_cache_directory(source_data.cache_directory)
                SimulationState().set_current_time(year)
                dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
                dataset.load_dataset()

                if dataset.get_coordinate_system() is not None:
                    dataset.compute_variables(names=dataset.get_coordinate_system())

                for indicator_name, computed_name in attributes:

                    indicator = computed_indicators[indicator_name]

                    table_data = self.input_stores[year].load_table(
                        table_name=dataset_name, column_names=[computed_name]
                    )

                    if computed_name in table_data:

                        table_name = self.get_name(
                            dataset_name=dataset_name, years=[year], attribute_names=[indicator_name]
                        )

                        if self.scale:
                            min_value, max_value = self.scale
                        else:
                            min_value, max_value = (None, None)

                        file_path = os.path.join(
                            self.storage_location, "anim_" + table_name + "." + MapnikMap.get_file_extension(self)
                        )

                        dataset.add_attribute(name=str(computed_name), data=table_data[computed_name])

                        dataset.plot_map(
                            name=str(computed_name),
                            min_value=min_value,
                            max_value=max_value,
                            file=str(file_path),
                            my_title=str(indicator_name),
                            color_list=self.color_list,
                            range_list=self.range_list,
                            label_list=self.label_list,
                            is_animation=True,
                            year=year,
                            resolution=self.resolution,
                            page_dims=self.page_dims,
                            map_lower_left=self.map_lower_left,
                            map_upper_right=self.map_upper_right,
                            legend_lower_left=self.legend_lower_left,
                            legend_upper_right=self.legend_upper_right
                            # filter = where(table_data[computed_name] != -1)
                            # filter = 'urbansim.gridcell.is_fully_in_water'
                        )

                        # metadata = ([indicator_name], table_name, [year])
                        # viz_metadata.append(metadata)
                    else:
                        logger.log_warning("There is no computed indicator %s" % computed_name)

            for indicator_name, computed_name in attributes:
                self.create_animation(
                    dataset_name=dataset_name,
                    year_list=source_data.years,
                    indicator_name=str(indicator_name),
                    viz_metadata=viz_metadata,
                )

        visualization_representations = []
#......... remainder of the code omitted .........
Developer ID: psrc, Project: urbansim, Lines of code: 103, Source file: mapnik_animated_map.py


Note: The opus_core.session_configuration.SessionConfiguration.compute_variables examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective authors, who retain copyright over the source code; please consult each project's license before redistributing or using the code, and do not reproduce this page without permission.