

Python SessionConfiguration._remove_dataset Method Code Examples

This article collects typical usage examples of the Python method opus_core.session_configuration.SessionConfiguration._remove_dataset. If you are unsure what SessionConfiguration._remove_dataset does, how to call it, or where it is used, the curated examples below should help. You can also explore further usage examples of its containing class, opus_core.session_configuration.SessionConfiguration.


Three code examples of the SessionConfiguration._remove_dataset method are shown below, ordered by popularity by default.
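Before the examples, here is a minimal sketch of the call pattern they all share. _remove_dataset is a private method on the dataset pool that SessionConfiguration manages; it drops one loaded dataset from the pool by name, without flushing it to the attribute cache. This is a sketch only: the package order is an assumed placeholder, the import paths are the usual opus_core ones, and since the method is private it may change between opus_core versions.

from opus_core.session_configuration import SessionConfiguration
from opus_core.store.attribute_cache import AttributeCache

# Minimal sketch, not a complete program: the package order below is an
# assumed placeholder, and an attribute cache directory must already be
# configured (Example 1 shows the SimulationState setup that does this).
dataset_pool = SessionConfiguration(new_instance=True,
                                    package_order=['urbansim', 'opus_core'],
                                    in_storage=AttributeCache()).get_dataset_pool()

hh = dataset_pool.get_dataset('household')     # loads the dataset into the pool
# ... use hh ...
dataset_pool._remove_dataset(hh.dataset_name)  # drops it from the pool again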

Example 1: run

# Required import: from opus_core.session_configuration import SessionConfiguration [as alias]
# Or: from opus_core.session_configuration.SessionConfiguration import _remove_dataset [as alias]
    def run(self, config, year, storage_type='sql'):
        """Compute household and person attributes for the given year and
        export them to openAMOS via SQL or CSV storage.
        """
        
        tm_config = config['travel_model_configuration']
        database_server_config = tm_config.get("database_server_configuration", 'simtravel_database_server')
        database_name = tm_config.get("database_name", 'mag_zone_baseyear')
        
        cache_directory = config['cache_directory']
        simulation_state = SimulationState()
        simulation_state.set_cache_directory(cache_directory)
        simulation_state.set_current_time(year)
        attribute_cache = AttributeCache()
        dataset_pool = SessionConfiguration(new_instance=True,
                                            package_order=config['dataset_pool_configuration'].package_order,
                                            in_storage=attribute_cache).get_dataset_pool()

        if storage_type == 'sql':
            db_server = DatabaseServer(DatabaseConfiguration(
                database_name=database_name,
                database_configuration=database_server_config))
            if not db_server.has_database(database_name):
                print("Database doesn't exist; creating one")
                db_server.create_database(database_name)
            db = db_server.get_database(database_name)
            output_storage = sql_storage(storage_location=db)
        elif storage_type == 'csv':
            csv_directory = os.path.join(cache_directory, 'csv', str(year))
            output_storage = csv_storage(storage_location=csv_directory)
        else:
            raise ValueError("Unsupported output storage type: {}".format(storage_type))

        logger.start_block('Compute and export data to openAMOS...')

        hh = dataset_pool.get_dataset('household')
        hh_recs = dataset_pool.get_dataset('households_recs')
        #hh_recs.add_attribute(0,"htaz1")
        #hh_recs.flush_dataset()
        #syn_hh = dataset_pool.get_dataset('synthetic_household')

        hh_variables = ['houseid=household.household_id',
                        "hhsize=household.number_of_agents(person)",
                        "one=(household.household_id>0).astype('i')",
                        "inclt35k=(household.income<35000).astype('i')",
                        "incge35k=(household.income>=35000).astype('i')",
                        "incge50k=(household.income>=50000).astype('i')",
                        "incge75k=(household.income>=75000).astype('i')",
                        "incge100k=(household.income>=100000).astype('i')",
                        "inc35t50=((household.income>=35000) & (household.income<50000)).astype('i')",
                        "inc50t75=((household.income>=50000) & (household.income<75000)).astype('i')",
                        "inc75t100=((household.income>=75000) & (household.income<100000)).astype('i')",
                        'htaz = ((houseid>0)*(household.disaggregate(building.zone_id)-100) + (houseid<=0)*0)',
                        #'htaz = ((houseid>0) & (htaz1>100))*(htaz1-100)+((houseid>0) & (htaz1==-1))*1122',
                        "withchild = (household.aggregate(person.age<18)>0).astype('i')",
                        "noc = household.aggregate(person.age<18)",
                        "numadlt = household.aggregate(person.age>=18)",
                        "hinc=household.income",
                        "wif=household.workers",
                        #"wif=household.aggregate(mag_zone.person.is_employed)",
                        'numwrkr=household.workers',
                        #'numwrkr=household.aggregate(mag_zone.person.is_employed)',
                        'nwrkcnt=household.number_of_agents(person) - household.workers',
                        #'nwrkcnt=household.number_of_agents(person) - household.aggregate(mag_zone.person.is_employed)',

                        'yrbuilt=mag_zone.household.yrbuilt',
                        'mag_zone.household.sparent',
                        'mag_zone.household.rur',
                        'mag_zone.household.urb',
                        'zonetid = household.disaggregate(building.zone_id)',
                        ]
        
        self.prepare_attributes(hh, hh_variables)
        attrs_to_export = hh_recs.get_known_attribute_names()

        self.write_dataset(hh, attrs_to_export, output_storage)
        # Drop the household dataset from the pool once exported, to free memory.
        dataset_pool._remove_dataset(hh.dataset_name)

        persons = dataset_pool.get_dataset('person')
        persons.out_table_name_default = 'persons'

        # Recoding invalid work and school locations to some random valid values
        persons_recs = dataset_pool.get_dataset('persons_recs')
        persons_recs.add_attribute(persons['person_id'],"personuniqueid")
        persons_recs.add_attribute(persons['marriage_status'],"marstat")
        persons_recs.add_attribute(persons['student_status'],"schstat")


        """
        persons_recs.add_attribute(persons['wtaz0'],"htaz_act")
        persons_recs.add_attribute(0,"wtaz_rec")
        persons_recs.add_attribute(0,"wtaz_rec1")
        persons_recs.add_attribute(0,"wtaz_rec2")

        persons_recs.add_attribute(0,"wtaz1_1")
        persons_recs.add_attribute(0,"wtaz1_2")
        persons_recs.add_attribute(0,"wtaz1_3")
        #persons_recs.add_attribute(persons['student_status'],"schstat")
        """
#......... remainder of the code omitted .........
Contributor: , Project: , Lines of code: 103, Source:
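Example 1 uses _remove_dataset for memory management: each large table is computed, exported, and then immediately dropped from the pool so the person tables that follow do not pile on top of the household data. Condensed to its essentials, the write-then-remove pattern looks like the sketch below; self.write_dataset is the method's own helper, shown here only as it is called above, and the dataset name list is illustrative.

# Hedged sketch of the export loop distilled from Example 1.
for name in ['household', 'person']:
    dataset = dataset_pool.get_dataset(name)
    self.write_dataset(dataset, dataset.get_known_attribute_names(), output_storage)
    dataset_pool._remove_dataset(dataset.dataset_name)  # free memory before the next table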

Example 2: post_run

# Required import: from opus_core.session_configuration import SessionConfiguration [as alias]
# Or: from opus_core.session_configuration.SessionConfiguration import _remove_dataset [as alias]
    def post_run(self):
        dataset_pool = SessionConfiguration().get_dataset_pool()
        # Drop the temporary shifter datasets from the shared pool after the model has run.
        dataset_pool._remove_dataset('price_shifter')
        dataset_pool._remove_dataset('cost_shifter')
Contributor: , Project: , Lines of code: 6, Source:
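Example 2 is the teardown half of a setup/teardown pair: the model's run() presumably registers the temporary price_shifter and cost_shifter datasets in the shared pool, and post_run() removes them so later models never see stale shifters. A slightly more defensive variant of the same cleanup, using has_dataset (seen in Example 3) to tolerate a run() that never created them:

    def post_run(self):
        # Defensive cleanup sketch: guard each removal with has_dataset so the
        # teardown also works when run() skipped creating a shifter.
        dataset_pool = SessionConfiguration().get_dataset_pool()
        for name in ('price_shifter', 'cost_shifter'):
            if dataset_pool.has_dataset(name):
                dataset_pool._remove_dataset(name)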

Example 3: range

# Required import: from opus_core.session_configuration import SessionConfiguration [as alias]
# Or: from opus_core.session_configuration.SessionConfiguration import _remove_dataset [as alias]
    
    if refinements is None:
        refinements = dataset_pool.get_dataset('refinement')
        years = refinements.get_attribute('year')
        if start_year is None: start_year = years.min()
        if end_year is None: end_year = years.max()

    for year in range(start_year, end_year+1):
        logger.start_block("Doing refinement for %s" % year )
        simulation_state.set_current_time(year)
        
        ## Reload refinements from the original refinement_directory or the dataset pool, in case the refinement model has changed them
        if refinements_storage is not None:
            refinements = DatasetFactory().search_for_dataset('refinement', package_order, arguments={'in_storage':refinements_storage})
        else:
            refinements = dataset_pool.get_dataset('refinement')
            
        if options.backup:
            src_dir = os.path.join(options.cache_directory, str(year))
            dst_dir = os.path.join(options.cache_directory, 'backup', str(year))
            if os.path.exists(src_dir):
                logger.log_status("Backing up %s to %s" % (src_dir, dst_dir))
                copytree(src_dir, dst_dir)
        RefinementModel().run(refinements, current_year=year, dataset_pool=dataset_pool)
        if dataset_pool.has_dataset('refinement'):
            # Remove refinements before flushing so they are not written to the cache.
            dataset_pool._remove_dataset('refinement')
        dataset_pool.flush_loaded_datasets()
        dataset_pool.remove_all_datasets()
        logger.end_block()
Contributor: psrc, Project: urbansim, Lines of code: 32, Source: do_refinement.py
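The ordering in Example 3's per-year cleanup is the point to notice: the refinement dataset is removed before flush_loaded_datasets(), because flushing writes every dataset still in the pool back to the cache, and the refinement table must not be cached into the simulation data. Annotated, the idiom distilled from the loop above is:

# Per-year pool hygiene from Example 3; the order of the three steps matters.
if dataset_pool.has_dataset('refinement'):
    dataset_pool._remove_dataset('refinement')  # 1. drop what must not be cached
dataset_pool.flush_loaded_datasets()            # 2. flush the remaining datasets to the cache
dataset_pool.remove_all_datasets()              # 3. empty the pool before the next year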


Note: The opus_core.session_configuration.SessionConfiguration._remove_dataset examples in this article were collected from open-source code hosted on GitHub and similar platforms. Copyright in the snippets remains with their original authors; consult each project's license before redistributing them.