This article collects and summarizes typical code examples of the Python method pygeoc.utils.FileClass.is_file_exists. If you are wondering what exactly FileClass.is_file_exists does, how to use it, or where to find usage examples, the curated code samples below may help. You can also explore further usage examples of its containing class, pygeoc.utils.FileClass.
The following 13 code examples of FileClass.is_file_exists are presented below, sorted by popularity by default.
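Before turning to the examples, a minimal sketch of the method itself may help. As the examples below illustrate, FileClass.is_file_exists(path) returns a boolean instead of raising an exception, so callers typically pair it with an explicit warning or fallback. The file path below is hypothetical:

from pygeoc.utils import FileClass

dem_path = 'data/dem.tif'  # hypothetical path for demonstration
if not FileClass.is_file_exists(dem_path):
    print('WARNING: %s does not exist!' % dem_path)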
Example 1: calculate_environment
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def calculate_environment(self):
if not self.modelrun: # no evaluate done
self.economy = self.worst_econ
self.environment = self.worst_env
return
rfile = self.modelout_dir + os.path.sep + self.bmps_info['ENVEVAL']
if not FileClass.is_file_exists(rfile):
        time.sleep(5)  # sleep 5 seconds to wait for the output
if not FileClass.is_file_exists(rfile):
            print('WARNING: Although the SEIMS model ran successfully, the desired output: %s'
                  ' cannot be found!' % rfile)
self.economy = self.worst_econ
self.environment = self.worst_env
return
base_amount = self.bmps_info['BASE_ENV']
if StringClass.string_match(rfile.split('.')[-1], 'tif'): # Raster data
rr = RasterUtilClass.read_raster(rfile)
soil_erosion_amount = rr.get_sum() / self.timerange # unit: year
# reduction rate of soil erosion
self.environment = (base_amount - soil_erosion_amount) / base_amount
elif StringClass.string_match(rfile.split('.')[-1], 'txt'): # Time series data
sed_sum = read_simulation_from_txt(self.modelout_dir) # TODO, fix it later, lj
self.environment = (base_amount - sed_sum) / base_amount
else:
self.economy = self.worst_econ
self.environment = self.worst_env
return
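The sleep-and-recheck logic in Example 1 can be generalized into a small polling helper. Below is a minimal sketch built on the same pattern; wait_for_file and its parameters are hypothetical and not part of pygeoc:

import time

from pygeoc.utils import FileClass

def wait_for_file(path, timeout=30, interval=5):
    """Poll until the file at `path` appears or `timeout` seconds elapse."""
    waited = 0
    while not FileClass.is_file_exists(path):
        if waited >= timeout:
            return False
        time.sleep(interval)
        waited += interval
    return True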
Example 2: mask_origin_delineated_data
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def mask_origin_delineated_data(cfg):
"""Mask the original delineated data by Subbasin raster."""
subbasin_tau_file = cfg.taudems.subbsn
geodata2dbdir = cfg.dirs.geodata2db
UtilClass.mkdir(geodata2dbdir)
mask_file = cfg.spatials.mask
RasterUtilClass.get_mask_from_raster(subbasin_tau_file, mask_file)
# Total 12 raster files
original_files = [cfg.taudems.subbsn, cfg.taudems.d8flow, cfg.taudems.stream_raster,
cfg.taudems.slp, cfg.taudems.filldem, cfg.taudems.d8acc,
cfg.taudems.stream_order, cfg.taudems.dinf, cfg.taudems.dinf_d8dir,
cfg.taudems.dinf_slp, cfg.taudems.dinf_weight,
cfg.taudems.dist2stream_d8]
# output masked files
output_files = [cfg.taudems.subbsn_m, cfg.taudems.d8flow_m, cfg.taudems.stream_m,
cfg.spatials.slope, cfg.spatials.filldem, cfg.spatials.d8acc,
cfg.spatials.stream_order, cfg.spatials.dinf, cfg.spatials.dinf_d8dir,
cfg.spatials.dinf_slp, cfg.spatials.dinf_weight,
cfg.spatials.dist2stream_d8]
default_values = list()
for i in range(len(original_files)):
default_values.append(DEFAULT_NODATA)
# other input rasters need to be masked
# soil and landuse
FileClass.check_file_exists(cfg.soil)
FileClass.check_file_exists(cfg.landuse)
original_files.append(cfg.soil)
output_files.append(cfg.spatials.soil_type)
default_values.append(cfg.default_soil)
original_files.append(cfg.landuse)
output_files.append(cfg.spatials.landuse)
default_values.append(cfg.default_landuse)
# Additional raster file
for k, v in cfg.additional_rs.items():
org_v = v
if not FileClass.is_file_exists(org_v):
v = cfg.spatial_dir + os.path.sep + org_v
if not FileClass.is_file_exists(v):
                print('WARNING: The additional file %s MUST be located in '
                      'SPATIAL_DATA_DIR, or provided as a full file path!' % k)
continue
original_files.append(v)
output_files.append(cfg.dirs.geodata2db + os.path.sep + k + '.tif')
default_values.append(DEFAULT_NODATA)
config_file = cfg.logs.mask_cfg
# run mask operation
print('Mask original delineated data by Subbasin raster...')
SpatialDelineation.mask_raster_cpp(cfg.seims_bin, mask_file, original_files,
output_files, default_values, config_file)
Example 3: get_psa_config
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def get_psa_config():
"""Parse arguments.
Returns:
cf: ConfigParse object of *.ini file
mtd: Parameters sensitivity method name, currently, 'morris' and 'fast' are supported.
"""
# define input arguments
parser = argparse.ArgumentParser(description="Execute parameters sensitivity analysis.")
parser.add_argument('-ini', type=str, help="Full path of configuration file")
# add mutually group
psa_group = parser.add_mutually_exclusive_group()
psa_group.add_argument('-morris', action='store_true', help='Run Morris Screening method')
psa_group.add_argument('-fast', action='store_true', help='Run FAST variant-based method')
# parse arguments
args = parser.parse_args()
ini_file = args.ini
psa_mtd = 'morris' # Default
if args.fast:
psa_mtd = 'fast'
elif args.morris:
psa_mtd = 'morris'
if not FileClass.is_file_exists(ini_file):
        raise ImportError('Configuration file does not exist: %s' % ini_file)
cf = ConfigParser()
cf.read(ini_file)
return cf, psa_mtd
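Assuming get_psa_config() is called from a command-line entry script, a typical invocation might look like the following (the script name is hypothetical):

# python psa_main.py -ini /path/to/psa.ini -morris
cf, psa_mtd = get_psa_config()  # psa_mtd == 'morris'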
Example 4: calculate_sensitivity
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def calculate_sensitivity(self):
"""Calculate Morris elementary effects.
    It is worth noting that evaluate_models() may return several
    output variables, hence we should calculate each of them separately.
"""
if not self.psa_si:
if FileClass.is_file_exists(self.cfg.outfiles.psa_si_json):
with open(self.cfg.outfiles.psa_si_json, 'r') as f:
self.psa_si = UtilClass.decode_strs_in_dict(json.load(f))
return
if not self.objnames:
if FileClass.is_file_exists('%s/objnames.pickle' % self.cfg.psa_outpath):
            with open('%s/objnames.pickle' % self.cfg.psa_outpath, 'rb') as f:
self.objnames = pickle.load(f)
if self.output_values is None or len(self.output_values) == 0:
self.evaluate_models()
if self.param_values is None or len(self.param_values) == 0:
self.generate_samples()
if not self.param_defs:
self.read_param_ranges()
row, col = self.output_values.shape
assert (row == self.run_count)
for i in range(col):
print(self.objnames[i])
if self.cfg.method == 'morris':
tmp_Si = morris_alz(self.param_defs,
self.param_values,
self.output_values[:, i],
conf_level=0.95, print_to_console=True,
num_levels=self.cfg.morris.num_levels,
grid_jump=self.cfg.morris.grid_jump)
elif self.cfg.method == 'fast':
tmp_Si = fast_alz(self.param_defs, self.output_values[:, i],
print_to_console=True)
else:
raise ValueError('%s method is not supported now!' % self.cfg.method)
self.psa_si[i] = tmp_Si
# print(self.psa_si)
# Save as json, which can be loaded by json.load()
json_data = json.dumps(self.psa_si, indent=4, cls=SpecialJsonEncoder)
with open(self.cfg.outfiles.psa_si_json, 'w') as f:
f.write(json_data)
self.output_psa_si()
Example 5: __init__
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def __init__(self, bin_dir='', model_dir='', nthread=4, lyrmtd=0,
host='127.0.0.1', port=27017, scenario_id=-1, calibration_id=-1,
version='OMP', nprocess=1, mpi_bin='', hosts_opt='-f', hostfile='',
**kwargs): # Allow any other keyword arguments
# Derived from input arguments
args_dict = dict()
    if 'args_dict' in kwargs:  # Prefer 'args_dict' if it is provided.
args_dict = kwargs['args_dict']
bin_dir = args_dict['bin_dir'] if 'bin_dir' in args_dict else bin_dir
model_dir = args_dict['model_dir'] if 'model_dir' in args_dict else model_dir
self.version = args_dict['version'] if 'version' in args_dict else version
suffix = '.exe' if sysstr == 'Windows' else ''
if self.version == 'MPI':
self.seims_exec = bin_dir + os.path.sep + 'seims_mpi' + suffix
else:
self.seims_exec = bin_dir + os.path.sep + 'seims_omp' + suffix
    if not FileClass.is_file_exists(self.seims_exec):  # If OpenMP is not supported, fall back to `seims`!
self.seims_exec = bin_dir + os.path.sep + 'seims' + suffix
self.seims_exec = os.path.abspath(self.seims_exec)
self.model_dir = os.path.abspath(model_dir)
self.nthread = args_dict['nthread'] if 'nthread' in args_dict else nthread
self.lyrmtd = args_dict['lyrmtd'] if 'lyrmtd' in args_dict else lyrmtd
self.host = args_dict['host'] if 'host' in args_dict else host
self.port = args_dict['port'] if 'port' in args_dict else port
self.scenario_id = args_dict['scenario_id'] if 'scenario_id' in args_dict else scenario_id
self.calibration_id = args_dict[
'calibration_id'] if 'calibration_id' in args_dict else calibration_id
self.nprocess = args_dict['nprocess'] if 'nprocess' in args_dict else nprocess
self.mpi_bin = args_dict['mpi_bin'] if 'mpi_bin' in args_dict else mpi_bin
self.hosts_opt = args_dict['hosts_opt'] if 'hosts_opt' in args_dict else hosts_opt
self.hostfile = args_dict['hostfile'] if 'hostfile' in args_dict else hostfile
# Concatenate executable command
self.cmd = self.Command
self.run_success = False
self.output_dir = self.OutputDirectory
# Read model data from MongoDB
self.db_name = os.path.split(self.model_dir)[1]
self.outlet_id = self.OutletID
self.start_time, self.end_time = self.SimulatedPeriod
# Data maybe used after model run
self.timespan = dict()
self.obs_vars = list() # Observation types at the outlet
self.obs_value = dict() # Observation value, key: DATETIME, value: value list of obs_vars
self.sim_vars = list() # Simulation types at the outlet, which is part of obs_vars
self.sim_value = dict() # Simulation value, same as obs_value
# The format of sim_obs_dict:
# {VarName: {'UTCDATETIME': [t1, t2, ..., tn],
# 'Obs': [o1, o2, ..., on],
# 'Sim': [s1, s2, ..., sn]},
# ...
# }
self.sim_obs_dict = dict()
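A concrete instance of the sim_obs_dict format documented above might look like this (values are hypothetical):

{'Q': {'UTCDATETIME': [datetime(2012, 1, 1), datetime(2012, 1, 2)],
       'Obs': [1.25, 1.50],
       'Sim': [1.18, 1.62]}}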
Example 6: __init__
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def __init__(self, cf, method='morris'):
"""Initialization."""
self.method = method
# 1. SEIMS model related
self.model = ParseSEIMSConfig(cf)
# 2. Common settings of parameters sensitivity analysis
if 'PSA_Settings' not in cf.sections():
        raise ValueError("The [PSA_Settings] section MUST exist in the *.ini file.")
self.evaluate_params = list()
if cf.has_option('PSA_Settings', 'evaluateparam'):
eva_str = cf.get('PSA_Settings', 'evaluateparam')
self.evaluate_params = StringClass.split_string(eva_str, ',')
else:
self.evaluate_params = ['Q'] # Default
self.param_range_def = 'morris_param_rng.def' # Default
if cf.has_option('PSA_Settings', 'paramrngdef'):
self.param_range_def = cf.get('PSA_Settings', 'paramrngdef')
self.param_range_def = self.model.model_dir + os.path.sep + self.param_range_def
if not FileClass.is_file_exists(self.param_range_def):
raise IOError('Ranges of parameters MUST be provided!')
if not (cf.has_option('PSA_Settings', 'psa_time_start') and
cf.has_option('PSA_Settings', 'psa_time_end')):
raise ValueError("Start and end time of PSA MUST be specified in [PSA_Settings].")
try:
# UTCTIME
tstart = cf.get('PSA_Settings', 'psa_time_start')
tend = cf.get('PSA_Settings', 'psa_time_end')
self.psa_stime = StringClass.get_datetime(tstart)
self.psa_etime = StringClass.get_datetime(tend)
except ValueError:
        raise ValueError('The time format MUST be "YYYY-MM-DD HH:MM:SS".')
if self.psa_stime >= self.psa_etime:
raise ValueError("Wrong time settings in [PSA_Settings]!")
# 3. Parameters settings for specific sensitivity analysis methods
self.morris = None
self.fast = None
if self.method == 'fast':
self.fast = FASTConfig(cf)
self.psa_outpath = '%s/PSA-FAST-N%dM%d' % (self.model.model_dir,
self.fast.N, self.fast.M)
elif self.method == 'morris':
self.morris = MorrisConfig(cf)
self.psa_outpath = '%s/PSA-Morris-N%dL%dJ%d' % (self.model.model_dir,
self.morris.N,
self.morris.num_levels,
self.morris.grid_jump)
# Do not remove psa_outpath if already existed
UtilClass.mkdir(self.psa_outpath)
self.outfiles = PSAOutputs(self.psa_outpath)
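For reference, a [PSA_Settings] section consistent with the options parsed above might look like the following (all values are hypothetical):

[PSA_Settings]
evaluateparam = Q
paramrngdef = morris_param_rng.def
psa_time_start = 2012-01-01 00:00:00
psa_time_end = 2012-12-31 23:59:59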
Example 7: ParamDefs
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def ParamDefs(self):
"""Read cali_param_rng.def file
name,lower_bound,upper_bound
e.g.,
Param1,0,1
Param2,0.5,1.2
Param3,-1.0,1.0
Returns:
a dictionary containing:
- names - the names of the parameters
- bounds - a list of lists of lower and upper bounds
- num_vars - a scalar indicating the number of variables
(the length of names)
"""
# read param_defs.json if already existed
if self.param_defs:
return self.param_defs
# read param_range_def file and output to json file
client = ConnectMongoDB(self.cfg.model.host, self.cfg.model.port)
conn = client.get_conn()
db = conn[self.cfg.model.db_name]
collection = db['PARAMETERS']
names = list()
bounds = list()
num_vars = 0
if not FileClass.is_file_exists(self.cfg.param_range_def):
        raise ValueError('Parameter definition file %s does'
                         ' not exist!' % self.cfg.param_range_def)
items = read_data_items_from_txt(self.cfg.param_range_def)
for item in items:
if len(item) < 3:
continue
# find parameter name, print warning message if not existed
cursor = collection.find({'NAME': item[0]}, no_cursor_timeout=True)
if not cursor.count():
            print('WARNING: parameter %s does not exist!' % item[0])
continue
num_vars += 1
names.append(item[0])
bounds.append([float(item[1]), float(item[2])])
self.param_defs = {'names': names, 'bounds': bounds, 'num_vars': num_vars}
return self.param_defs
Example 8: output_wgs84_geojson
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def output_wgs84_geojson(cfg):
"""Convert ESRI shapefile to GeoJson based on WGS84 coordinate."""
src_srs = RasterUtilClass.read_raster(cfg.dem).srs
proj_srs = src_srs.ExportToProj4()
if not proj_srs:
        raise ValueError('The source raster %s has no'
                         ' coordinate system, which is required!' % cfg.dem)
# print(proj_srs)
wgs84_srs = 'EPSG:4326'
geo_json_dict = {'reach': [cfg.vecs.reach, cfg.vecs.json_reach],
'subbasin': [cfg.vecs.subbsn, cfg.vecs.json_subbsn],
'basin': [cfg.vecs.bsn, cfg.vecs.json_bsn],
'outlet': [cfg.vecs.outlet, cfg.vecs.json_outlet]}
for jsonName, shp_json_list in list(geo_json_dict.items()):
        # delete the GeoJSON file if it already exists
if FileClass.is_file_exists(shp_json_list[1]):
os.remove(shp_json_list[1])
VectorUtilClass.convert2geojson(shp_json_list[1], proj_srs, wgs84_srs,
shp_json_list[0])
Example 9: generate_samples
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def generate_samples(self):
"""Sampling and write to a single file and MongoDB 'PARAMETERS' collection"""
if self.param_values is None or len(self.param_values) == 0:
if FileClass.is_file_exists(self.cfg.outfiles.param_values_txt):
self.param_values = numpy.loadtxt(self.cfg.outfiles.param_values_txt)
self.run_count = len(self.param_values)
return
if not self.param_defs:
self.read_param_ranges()
if self.cfg.method == 'morris':
self.param_values = morris_spl(self.param_defs, self.cfg.morris.N,
self.cfg.morris.num_levels, self.cfg.morris.grid_jump,
optimal_trajectories=self.cfg.morris.optimal_t,
local_optimization=self.cfg.morris.local_opt)
elif self.cfg.method == 'fast':
self.param_values = fast_spl(self.param_defs, self.cfg.fast.N, self.cfg.fast.M)
else:
raise ValueError('%s method is not supported now!' % self.cfg.method)
self.run_count = len(self.param_values)
# Save as txt file, which can be loaded by numpy.loadtxt()
numpy.savetxt(self.cfg.outfiles.param_values_txt,
self.param_values, delimiter=' ', fmt='%.4f')
Example 10: get_cali_config
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def get_cali_config():
"""Parse arguments.
Returns:
cf: ConfigParse object of *.ini file
mtd: Calibration method name, currently, 'nsga2' is supported.
"""
# define input arguments
parser = argparse.ArgumentParser(description="Execute parameters calibration.")
parser.add_argument('-ini', type=str, help="Full path of configuration file")
# add mutually group
psa_group = parser.add_mutually_exclusive_group()
psa_group.add_argument('-nsga2', action='store_true', help='Run NSGA-II method')
# parse arguments
args = parser.parse_args()
ini_file = args.ini
psa_mtd = 'nsga2' # Default
if args.nsga2:
psa_mtd = 'nsga2'
if not FileClass.is_file_exists(ini_file):
        raise ImportError('Configuration file does not exist: %s' % ini_file)
cf = ConfigParser()
cf.read(ini_file)
return cf, psa_mtd
Example 11: watershed_delineation
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False,
workingdir=None, mpi_bin=None, bin_dir=None,
logfile=None, runtime_file=None, hostfile=None):
"""Watershed Delineation."""
# 1. Check directories
if not os.path.exists(dem):
        TauDEM.error('DEM: %s does not exist!' % dem)
dem = os.path.abspath(dem)
if workingdir is None:
workingdir = os.path.dirname(dem)
namecfg = TauDEMFilesUtils(workingdir)
workingdir = namecfg.workspace
UtilClass.mkdir(workingdir)
# 2. Check log file
if logfile is not None and FileClass.is_file_exists(logfile):
os.remove(logfile)
# 3. Get predefined intermediate file names
filled_dem = namecfg.filldem
flow_dir = namecfg.d8flow
slope = namecfg.slp
flow_dir_dinf = namecfg.dinf
slope_dinf = namecfg.dinf_slp
dir_code_dinf = namecfg.dinf_d8dir
weight_dinf = namecfg.dinf_weight
acc = namecfg.d8acc
stream_raster = namecfg.stream_raster
default_outlet = namecfg.outlet_pre
modified_outlet = namecfg.outlet_m
stream_skeleton = namecfg.stream_pd
acc_with_weight = namecfg.d8acc_weight
stream_order = namecfg.stream_order
ch_network = namecfg.channel_net
ch_coord = namecfg.channel_coord
stream_net = namecfg.streamnet_shp
subbasin = namecfg.subbsn
dist2_stream_d8 = namecfg.dist2stream_d8
# 4. perform calculation
UtilClass.writelog(logfile, '[Output] %d..., %s' % (10, 'pitremove DEM...'), 'a')
TauDEM.pitremove(np, dem, filled_dem, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(20, 'Calculating D8 and Dinf flow direction...'), 'a')
TauDEM.d8flowdir(np, filled_dem, flow_dir, slope, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
TauDEM.dinfflowdir(np, filled_dem, flow_dir_dinf, slope_dinf, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
DinfUtil.output_compressed_dinf(flow_dir_dinf, dir_code_dinf, weight_dinf)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (30, 'D8 flow accumulation...'), 'a')
TauDEM.aread8(np, flow_dir, acc, None, None, False, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(40, 'Generating stream raster initially...'), 'a')
min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(acc)
TauDEM.threshold(np, acc, stream_raster, mean_accum, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (50, 'Moving outlet to stream...'), 'a')
if outlet_file is None:
outlet_file = default_outlet
TauDEM.connectdown(np, flow_dir, acc, outlet_file, wtsd=None,
workingdir=workingdir, mpiexedir=mpi_bin, exedir=bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
TauDEM.moveoutletstostrm(np, flow_dir, stream_raster, outlet_file,
modified_outlet, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(60, 'Generating stream skeleton...'), 'a')
TauDEM.peukerdouglas(np, filled_dem, stream_skeleton, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(70, 'Flow accumulation with outlet...'), 'a')
tmp_outlet = None
if singlebasin:
tmp_outlet = modified_outlet
TauDEM.aread8(np, flow_dir, acc_with_weight, tmp_outlet, stream_skeleton, False,
workingdir, mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
if thresh <= 0: # find the optimal threshold using dropanalysis function
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(75, 'Drop analysis to select optimal threshold...'), 'a')
min_accum, max_accum, mean_accum, std_accum = \
RasterUtilClass.raster_statistics(acc_with_weight)
if mean_accum - std_accum < 0:
minthresh = mean_accum
else:
minthresh = mean_accum - std_accum
maxthresh = mean_accum + std_accum
numthresh = 20
logspace = 'true'
drp_file = namecfg.drptxt
TauDEM.dropanalysis(np, filled_dem, flow_dir, acc_with_weight,
acc_with_weight, modified_outlet, minthresh, maxthresh,
numthresh, logspace, drp_file, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
if not FileClass.is_file_exists(drp_file):
        #......... part of the code is omitted here .........
Example 12: evaluate_models
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def evaluate_models(self):
"""Run SEIMS for objective output variables, and write out.
"""
if self.output_values is None or len(self.output_values) == 0:
if FileClass.is_file_exists(self.cfg.outfiles.output_values_txt):
self.output_values = numpy.loadtxt(self.cfg.outfiles.output_values_txt)
return
assert (self.run_count > 0)
# model configurations
model_cfg_dict = self.model.ConfigDict
# Parameters to be evaluated
input_eva_vars = self.cfg.evaluate_params
# split tasks if needed
    task_num = self.run_count // 480  # In our cluster, the maximum number of workers is 96.
if task_num == 0:
split_seqs = [range(self.run_count)]
else:
split_seqs = numpy.array_split(numpy.arange(self.run_count), task_num + 1)
split_seqs = [a.tolist() for a in split_seqs]
# Loop partitioned tasks
run_model_stime = time.time()
exec_times = list() # execute time of all model runs
for idx, cali_seqs in enumerate(split_seqs):
cur_out_file = '%s/outputs_%d.txt' % (self.cfg.outfiles.output_values_dir, idx)
if FileClass.is_file_exists(cur_out_file):
continue
model_cfg_dict_list = list()
for i, caliid in enumerate(cali_seqs):
tmpcfg = deepcopy(model_cfg_dict)
tmpcfg['calibration_id'] = caliid
model_cfg_dict_list.append(tmpcfg)
try: # parallel on multiprocessor or clusters using SCOOP
from scoop import futures
output_models = list(futures.map(create_run_model, model_cfg_dict_list))
        except (ImportError, ImportWarning):  # fall back to serial execution
output_models = list(map(create_run_model, model_cfg_dict_list))
time.sleep(0.1) # Wait a moment in case of unpredictable file system error
# Read observation data from MongoDB only once
        if len(output_models) < 1:  # This should never happen, but check just in case.
continue
obs_vars, obs_data_dict = output_models[0].ReadOutletObservations(input_eva_vars)
if (len(obs_vars)) < 1: # Make sure the observation data exists.
continue
# Loop the executed models
eva_values = list()
for imod, mod_obj in enumerate(output_models):
# Read executable timespan of each model run
exec_times.append(mod_obj.GetTimespan())
# Set observation data since there is no need to read from MongoDB.
if imod != 0:
mod_obj.SetOutletObservations(obs_vars, obs_data_dict)
# Read simulation
mod_obj.ReadTimeseriesSimulations(self.cfg.psa_stime, self.cfg.psa_etime)
# Calculate NSE, R2, RMSE, PBIAS, RSR, ln(NSE), NSE1, and NSE3
self.objnames, obj_values = mod_obj.CalcTimeseriesStatistics(mod_obj.sim_obs_dict)
eva_values.append(obj_values)
# delete model output directory for saving storage
rmtree(mod_obj.output_dir)
if not isinstance(eva_values, numpy.ndarray):
eva_values = numpy.array(eva_values)
numpy.savetxt(cur_out_file, eva_values, delimiter=' ', fmt='%.4f')
        # Save as pickle data for further usage. Do not save all models, which may be very large!
cur_model_out_file = '%s/models_%d.pickle' % (self.cfg.outfiles.output_values_dir, idx)
with open(cur_model_out_file, 'wb') as f:
pickle.dump(output_models, f)
exec_times = numpy.array(exec_times)
numpy.savetxt('%s/exec_time_allmodelruns.txt' % self.cfg.psa_outpath,
exec_times, delimiter=' ', fmt='%.4f')
print('Running time of all SEIMS models:\n'
'\tIO\tCOMP\tSIMU\tRUNTIME\n'
'MAX\t%s\n'
'MIN\t%s\n'
'AVG\t%s\n'
'SUM\t%s\n' % ('\t'.join('%.3f' % v for v in exec_times.max(0)),
'\t'.join('%.3f' % v for v in exec_times.min(0)),
'\t'.join('%.3f' % v for v in exec_times.mean(0)),
'\t'.join('%.3f' % v for v in exec_times.sum(0))))
print('Running time of executing SEIMS models: %.2fs' % (time.time() - run_model_stime))
    # Save objective names as pickle data for further usage
with open('%s/objnames.pickle' % self.cfg.psa_outpath, 'wb') as f:
pickle.dump(self.objnames, f)
# load the first part of output values
self.output_values = numpy.loadtxt('%s/outputs_0.txt' % self.cfg.outfiles.output_values_dir)
if task_num == 0:
import shutil
shutil.move('%s/outputs_0.txt' % self.cfg.outfiles.output_values_dir,
self.cfg.outfiles.output_values_txt)
shutil.rmtree(self.cfg.outfiles.output_values_dir)
return
for idx in range(1, task_num + 1):
tmp_outputs = numpy.loadtxt('%s/outputs_%d.txt' % (self.cfg.outfiles.output_values_dir,
idx))
self.output_values = numpy.concatenate((self.output_values, tmp_outputs))
numpy.savetxt(self.cfg.outfiles.output_values_txt,
self.output_values, delimiter=' ', fmt='%.4f')
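Note that the try/except around the scoop import lets the same code fall back to serial execution when SCOOP is unavailable. To actually run the model evaluations in parallel, the script would be launched through SCOOP's runner, e.g. (script name and worker count are hypothetical):

# python -m scoop -n 8 run_sensitivity.py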
Example 13: read_param_ranges
# Required import: from pygeoc.utils import FileClass [as alias]
# Or: from pygeoc.utils.FileClass import is_file_exists [as alias]
def read_param_ranges(self):
"""Read param_rng.def file
name,lower_bound,upper_bound,group,dist
(group and dist are optional)
e.g.,
Param1,0,1[,Group1][,dist1]
Param2,0,1[,Group2][,dist2]
Param3,0,1[,Group3][,dist3]
Returns:
a dictionary containing:
- names - the names of the parameters
- bounds - a list of lists of lower and upper bounds
- num_vars - a scalar indicating the number of variables
(the length of names)
- groups - a list of group names (strings) for each variable
- dists - a list of distributions for the problem,
None if not specified or all uniform
"""
# read param_defs.json if already existed
if not self.param_defs:
if FileClass.is_file_exists(self.cfg.outfiles.param_defs_json):
with open(self.cfg.outfiles.param_defs_json, 'r') as f:
self.param_defs = UtilClass.decode_strs_in_dict(json.load(f))
return
# read param_range_def file and output to json file
client = ConnectMongoDB(self.model.host, self.model.port)
conn = client.get_conn()
db = conn[self.model.db_name]
collection = db['PARAMETERS']
names = list()
bounds = list()
groups = list()
dists = list()
num_vars = 0
items = read_data_items_from_txt(self.cfg.param_range_def)
for item in items:
if len(item) < 3:
continue
# find parameter name, print warning message if not existed
cursor = collection.find({'NAME': item[0]}, no_cursor_timeout=True)
if not cursor.count():
            print('WARNING: parameter %s does not exist!' % item[0])
continue
num_vars += 1
names.append(item[0])
bounds.append([float(item[1]), float(item[2])])
# If the fourth column does not contain a group name, use
# the parameter name
if len(item) >= 4:
groups.append(item[3])
else:
groups.append(item[0])
if len(item) >= 5:
dists.append(item[4])
else:
dists.append('unif')
if groups == names:
groups = None
elif len(set(groups)) == 1:
        raise ValueError('Only one group defined, results will not be meaningful')
# setting dists to none if all are uniform
# because non-uniform scaling is not needed
if all([d == 'unif' for d in dists]):
dists = None
self.param_defs = {'names': names, 'bounds': bounds,
'num_vars': num_vars, 'groups': groups, 'dists': dists}
# Save as json, which can be loaded by json.load()
json_data = json.dumps(self.param_defs, indent=4, cls=SpecialJsonEncoder)
with open(self.cfg.outfiles.param_defs_json, 'w') as f:
f.write(json_data)
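For illustration, a two-line definition file such as the following (parameter names are hypothetical and must exist in the PARAMETERS collection):

Param_A,0,1,Group1
Param_B,0.5,1.2,Group2

would produce the dictionary below; groups is kept because it differs from names, while dists is reset to None since all entries default to 'unif':

{'names': ['Param_A', 'Param_B'],
 'bounds': [[0.0, 1.0], [0.5, 1.2]],
 'num_vars': 2,
 'groups': ['Group1', 'Group2'],
 'dists': None}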