This article collects typical usage examples of the Python method opus_core.datasets.dataset.DatasetSubset.write_dataset. If you are wondering what DatasetSubset.write_dataset does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of the containing class, opus_core.datasets.dataset.DatasetSubset.
The following presents 5 code examples of DatasetSubset.write_dataset, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
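Before the collected examples, here is a minimal, self-contained sketch of the typical pattern: build a dataset, wrap a subset of its rows in DatasetSubset, and write the subset out with write_dataset. It is not taken from any of the projects below; the table name 'parcels', the attribute values, and the use of the in-memory 'dict_storage' backend are illustrative assumptions about a basic Opus setup.
from numpy import array, arange
from opus_core.storage_factory import StorageFactory
from opus_core.datasets.dataset import Dataset, DatasetSubset

# Build a tiny in-memory dataset (table name and values are made up for illustration).
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name='parcels',
                    table_data={'parcel_id': array([1, 2, 3, 4]),
                                'acres': array([0.5, 1.2, 2.0, 0.8])})
parcels = Dataset(in_storage=storage, in_table_name='parcels',
                  id_name='parcel_id', dataset_name='parcel')

# Keep only the first two members and write the subset out as its own table.
subset = DatasetSubset(parcels, arange(2))
subset.write_dataset(out_storage=storage, out_table_name='parcels_subset')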
Example 1: run
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import write_dataset [as alias]
def run(self, location_set, development_event_set, *args, **kwargs):
    changed_indices, processed_development_event_indices = \
        EventsCoordinator.run(self, location_set,
                              development_event_set, *args, **kwargs)
    if development_event_set is not None:
        # Write only the events that were actually processed to the attribute cache.
        subset = DatasetSubset(development_event_set, processed_development_event_indices)
        subset.write_dataset(out_storage=AttributeCache())
    return (changed_indices, processed_development_event_indices)
Example 2: _convert_lccm_input
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import write_dataset [as alias]
def _convert_lccm_input(self, flt_directory_in, flt_directory_out):
    gc.collect()
    t1 = time()
    lc = LandCoverDataset(in_storage=StorageFactory().get_storage('flt_storage', storage_location=flt_directory_in),
                          out_storage=StorageFactory().get_storage('flt_storage', storage_location=flt_directory_out))
    # lc.get_header() # added 23 june 2009 by mm
    # Restrict the output to cells that are not masked out (mask == 0).
    mask = lc.get_mask()
    idx = where(mask == 0)[0]
    lcsubset = DatasetSubset(lc, idx)
    print "Converting:"
    lcsubset.write_dataset(attributes=["relative_x"], out_table_name="land_covers")
    lc.delete_one_attribute("relative_x")
    lcsubset.write_dataset(attributes=["relative_y"], out_table_name="land_covers")
    lc.delete_one_attribute("relative_y")
    lc.flush_dataset()
    gc.collect()
    # lc_names = lc.get_primary_attribute_names()
    # Write one attribute at a time and delete it afterwards to keep memory usage low.
    for attr in lc.get_primary_attribute_names():
        print "  ", attr
        lcsubset.write_dataset(attributes=[attr], out_table_name="land_covers")
        lc.delete_one_attribute(attr)
    logger.log_status("Data conversion done. " + str(time() - t1) + " s")
Example 3: LandCoverDataset
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import write_dataset [as alias]
#years = [1995, 1999]
#years = [2002]
#years = sys.argv[3]
years = [2007, 2007]
lc1 = LandCoverDataset(in_storage=StorageFactory().get_storage('flt_storage',
                           storage_location=os.path.join(flt_directory_in, str(years[0]))),
                       out_storage=StorageFactory().get_storage('flt_storage',
                           storage_location=os.path.join(flt_directory_out, str(years[0]))))
agents_index = where(lc1.get_attribute(index_attribute))[0]
lc1subset = DatasetSubset(lc1, agents_index)
print "Writing set 1:"
for attr in lc1.get_primary_attribute_names():
    print "  ", attr
    lc1subset.write_dataset(attributes=[attr], out_table_name="land_covers")
    lc1.delete_one_attribute(attr)  # leaving this line in causes the processing of every other input data file; commenting it out causes a memory error
lc2 = LandCoverDataset(in_storage=StorageFactory().get_storage('flt_storage',
                           storage_location=os.path.join(flt_directory_in, str(years[1]))),
                       out_storage=StorageFactory().get_storage('flt_storage',
                           storage_location=os.path.join(flt_directory_out, str(years[1]))))
lc2subset = DatasetSubset(lc2, agents_index)
print "Writing set 2:"
for attr in lc2.get_primary_attribute_names():
    print "  ", attr
    lc2subset.write_dataset(attributes=[attr], out_table_name="land_covers")
    lc2.delete_one_attribute(attr)  # leaving this line in causes the processing of every other input data file; commenting it out causes a memory error
logger.log_status("Data storage done. " + str(time() - t1) + " s")
Example 4: str
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import write_dataset [as alias]
test_flag = options.test_flag
# shutil.rmtree(flt_directory_out)
# os.mkdir(flt_directory_out)
logger.log_status("Convert input data from ", str(input_year))
lc = LandCoverDataset(in_storage=StorageFactory().get_storage('flt_storage', storage_location=flt_directory_in),
                      out_storage=StorageFactory().get_storage('flt_storage', storage_location=flt_directory_out))
lc.get_header() # added 23 june 2009 by mm
# Restrict the output to cells that are not masked out (mask == 0).
mask = lc.get_mask()
idx = where(mask == 0)[0]
lcsubset = DatasetSubset(lc, idx)
print "Converting:"
lcsubset.write_dataset(attributes=["relative_x"], out_table_name="land_covers")
#lcsubset.write_dataset(attributes=["relative_x"], out_table_name="land_covers",
#                       valuetypes=valuetypes)
lc.delete_one_attribute("relative_x")
lcsubset.write_dataset(attributes=["relative_y"], out_table_name="land_covers")
#lcsubset.write_dataset(attributes=["relative_y"], out_table_name="land_covers",
#                       valuetypes=valuetypes)
lc.delete_one_attribute("relative_y")
# srcdir = os.path.join(flt_directory_out, "land_covers", "computed")
# shutil.move(os.path.join(srcdir,"relative_x.li4"), os.path.join(flt_directory_out, "land_covers"))
# shutil.move(os.path.join(srcdir,"relative_y.li4"), os.path.join(flt_directory_out, "land_covers"))
# shutil.rmtree(srcdir)
for attr in lc.get_primary_attribute_names():
    print "  ", attr
    lcsubset.write_dataset(attributes=[attr], out_table_name="land_covers")
#    lcsubset.write_dataset(attributes=[attr], out_table_name="land_covers",
Example 5: ScenarioDatabase
# Required import: from opus_core.datasets.dataset import DatasetSubset [as alias]
# Or: from opus_core.datasets.dataset.DatasetSubset import write_dataset [as alias]
        'db_output_database':None,
        'cache_directory':cache_directory,
        'base_year':2000,
        'tables_to_cache':[
            'gridcells',
            # 'households',
            # 'jobs',
            ]})
#CacheScenarioDatabase().run(gridcell_config)
# step 2: cache water demand data by year
dbcon = ScenarioDatabase(database_name="water_demand_seattle2")
print "Create Storage object."
from opus_core.storage_factory import StorageFactory
storage = StorageFactory().get_storage(type="mysql_storage", storage_location=dbcon)
from waterdemand.datasets.consumption_dataset import ConsumptionDataset
consumption_types = ['wrmr', 'wcsr', 'wrsr'] #'wcmr'
for consumption_type in consumption_types:
    consumption = ConsumptionDataset(in_storage=storage, in_table_name=consumption_type + '_grid')
    # Split the consumption records by billing year and cache each year separately.
    for year in range(1990, 2001):
        print "%s %s" % (consumption_type, year)
        year_index = where(consumption.get_attribute("billyear") == year)[0]  # take the index array, as in the examples above
        out_storage = StorageFactory().get_storage(type="flt_storage", storage_location=os.path.join(cache_directory, str(year)))
        consumption_subset = DatasetSubset(consumption, year_index)
        consumption_subset.write_dataset(out_storage=out_storage, out_table_name=consumption_type.lower())