This article collects typical usage examples of the Python method opus_core.services.run_server.run_manager.RunManager.add_row_to_history. If you are wondering what RunManager.add_row_to_history does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples for the containing class, opus_core.services.run_server.run_manager.RunManager.
Four code examples of RunManager.add_row_to_history are shown below, sorted by popularity by default.
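All four examples reduce to the same call pattern. The sketch below distills it from how the examples on this page use the API, not from opus_core documentation; the import path for ServicesDatabaseConfiguration, the placeholder path, years, and run name are assumptions:

from opus_core.services.run_server.run_manager import RunManager
# Assumed import path -- verify against your opus_core version.
from opus_core.database_management.configurations.services_database_configuration import ServicesDatabaseConfiguration

# Connect to the services database and reserve a fresh run_id.
run_manager = RunManager(ServicesDatabaseConfiguration())
run_id = run_manager._get_new_run_id()

# Resource dictionary; the keys mirror those used in the examples below.
resources = {
    'cache_directory': '/path/to/run/cache',  # placeholder
    'description': '',
    'years': (2000, 2010),  # (start_year, end_year)
}

# Register the run in the run_activity table with a final status of 'done'.
run_manager.add_row_to_history(run_id=run_id,
                               resources=resources,
                               status='done',
                               run_name='my_run')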
Example 1: on_buttonBox_accepted
# Required import: from opus_core.services.run_server.run_manager import RunManager [as alias]
# Or: from opus_core.services.run_server.run_manager.RunManager import add_row_to_history [as alias]
def on_buttonBox_accepted(self):
    path = str(self.lePath.text())
    if not os.path.exists(path):
        msg = 'Cannot import, %s does not exist' % path
        logger.log_warning(msg)
        MessageBox.warning(mainwindow=self, text=msg, detailed_text='')
    else:
        cache_directory = path
        # Years are inferred from four-digit subdirectory names in the cache.
        years = []
        for dir in os.listdir(cache_directory):
            if len(dir) == 4 and dir.isdigit():
                years.append(int(dir))
        if not years:
            msg = 'Cannot import, %s has no run data' % path
            logger.log_warning(msg)
            MessageBox.warning(mainwindow=self, text=msg, detailed_text='')
        else:
            start_year = min(years)
            end_year = max(years)
            project_name = os.environ['OPUSPROJECTNAME']
            run_name = os.path.basename(path)
            server_config = ServicesDatabaseConfiguration()
            run_manager = RunManager(server_config)
            run_id = run_manager._get_new_run_id()
            resources = {
                'cache_directory': cache_directory,
                'description': '',
                'years': (start_year, end_year),
                'project_name': project_name,
            }
            try:
                # Register the imported run as already finished.
                run_manager.add_row_to_history(run_id=run_id,
                                               resources=resources,
                                               status='done',
                                               run_name=run_name)
                update_available_runs(self.project)
                logger.log_status('Added run %s of project %s to run_activity table'
                                  % (run_name, project_name))
            except:
                errorInfo = formatExceptionInfo()
                logger.log_error(errorInfo)
                MessageBox.error(mainwindow=self,
                                 text='Could not add run %s of project %s to run_activity table'
                                      % (run_name, project_name),
                                 detailed_text=errorInfo)
    self.close()
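Examples 1 and 2 both infer the run's year range by scanning the cache directory for four-digit, all-digit subdirectory names. A minimal sketch of that recurring loop as a standalone helper (hypothetical, not part of opus_core):

import os

def get_year_range(cache_directory):
    # Collect subdirectories whose names look like years, e.g. '2005'.
    years = [int(d) for d in os.listdir(cache_directory)
             if len(d) == 4 and d.isdigit()]
    if not years:
        return None  # no run data; the caller decides how to report this
    return min(years), max(years)

Example 1 surfaces the empty case to the user via a warning dialog; Example 2's per-run loop instead relies on its try/except to skip such directories, since min() on an empty list raises ValueError.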
Example 2: add_runs_to_services_db_from_disk
# Required import: from opus_core.services.run_server.run_manager import RunManager [as alias]
# Or: from opus_core.services.run_server.run_manager.RunManager import add_row_to_history [as alias]
def add_runs_to_services_db_from_disk(projects=None):
    server_config = ServicesDatabaseConfiguration()
    if server_config.protocol == 'sqlite':
        datapath = paths.OPUS_DATA_PATH
        for project_name in os.listdir(datapath):
            if projects is not None and project_name not in projects: continue
            if not os.path.isdir(os.path.join(datapath, project_name)): continue
            os.environ['OPUSPROJECTNAME'] = project_name
            # Start from a clean run_activity table for this project.
            server = DatabaseServer(server_config)
            server.drop_database(database_name='run_activity')
            server.close()
            run_manager = RunManager(server_config)
            baseyear_directory = os.path.join(datapath, project_name, 'base_year_data')
            if os.path.exists(baseyear_directory):
                years = []
                for dir in os.listdir(baseyear_directory):
                    if len(dir) == 4 and dir.isdigit():
                        years.append(int(dir))
                start_year = min(years)
                end_year = max(years)
                run_name = 'base_year_data'
                run_id = run_manager._get_new_run_id()
                resources = {
                    'cache_directory': baseyear_directory,
                    'description': 'base year data',
                    'years': (start_year, end_year),
                }
                logger.log_status('Adding run %s of project %s to run_activity table'
                                  % (run_name, project_name))
                run_manager.add_row_to_history(run_id=run_id,
                                               resources=resources,
                                               status='done',
                                               run_name=run_name)
            data_directory = os.path.join(datapath, project_name, 'runs')
            if not os.path.exists(data_directory): continue
            for run_name in os.listdir(data_directory):
                try:
                    cache_directory = os.path.join(data_directory, run_name)
                    if not os.path.isdir(cache_directory): continue
                    years = []
                    for dir in os.listdir(cache_directory):
                        if len(dir) == 4 and dir.isdigit():
                            years.append(int(dir))
                    start_year = min(years)
                    end_year = max(years)
                    run_id = run_manager._get_new_run_id()
                    resources = {
                        'cache_directory': cache_directory,
                        'description': '',
                        'years': (start_year, end_year),
                    }
                    logger.log_status('Adding run %s of project %s to run_activity table'
                                      % (run_name, project_name))
                    run_manager.add_row_to_history(run_id=run_id,
                                                   resources=resources,
                                                   status='done',
                                                   run_name=run_name)
                except:
                    # Skip runs whose cache cannot be parsed (e.g. no year subdirectories).
                    pass
Example 3: OptionGroup
# Required import: from opus_core.services.run_server.run_manager import RunManager [as alias]
# Or: from opus_core.services.run_server.run_manager.RunManager import add_row_to_history [as alias]
if __name__ == "__main__":
    try:
        import wingdbstub
    except ImportError:
        pass
    option_group = OptionGroup()
    parser = option_group.parser
    (options, args) = parser.parse_args()
    run_manager = RunManager(option_group.get_services_database_configuration(options))
    if options.configuration_path is not None:
        opus_path = options.configuration_path
        try:
            config = get_config_from_opus_path(opus_path)
        except ImportError:
            # Fall back to importing run_configuration directly from the module path.
            import_stmt = 'from %s import run_configuration as config' % opus_path
            exec(import_stmt)
        config['cache_directory'] = options.cache_directory
    # Check whether this run_id is already registered in run_activity.
    results = run_manager.storage.GetResultsFromQuery(
        "SELECT * from run_activity WHERE run_id = %s" % options.run_id)
    if len(results) > 1 and not options.force:
        print "WARNING: run_id %s exists in run_activity. Use --force to override." % options.run_id
        sys.exit()
    elif options.force:
        # Remove the stale row before re-registering the run.
        run_manager.services_db.execute(
            run_manager.services_db.delete(run_manager.services_db.c.run_id == options.run_id))
    run_manager.add_row_to_history(options.run_id, config, "started")
Example 4: Calibration
# Required import: from opus_core.services.run_server.run_manager import RunManager [as alias]
# Or: from opus_core.services.run_server.run_manager.RunManager import add_row_to_history [as alias]
#......... some code omitted here .........
        default_kwargs.update(optimizer_kwargs)
        results = optimizer_func(self.target_func, copy(init_v), **default_kwargs)
        duration = time.time() - t0
        if results_pickle_prefix is not None:
            pickle_file = "{}_{}.pickle".format(results_pickle_prefix, optimizer)
            pickle_file = os.path.join(self.log_directory, pickle_file)
            pickle.dump(results, open(pickle_file, "wb"))
        if is_parallelizable:
            set_parallel(False)
        logger.log_status("init target_func: {}".format(self.target_func(init_v)))
        logger.log_status("end target_func: {}".format(results[:]))  # which one?
        logger.log_status("outputs from optimizer: {}".format(results))
        logger.log_status("Execution time: {}".format(duration))

    def init_run(self, create_baseyear_cache=True):
        """Initialize a run; returns (run_id, cache_directory)."""
        ## avoid invoking start_run from the command line -
        option_group = StartRunOptionGroup()
        option_group.parser.set_defaults(xml_configuration=self.xml_config,
                                         scenario_name=self.scenario)
        # run_id, cache_directory = start_run(option_group)
        options, args = option_group.parse()
        self.run_manager = RunManager(option_group.get_services_database_configuration(options))
        resources = XMLConfiguration(self.xml_config).get_run_configuration(self.scenario)
        insert_auto_generated_cache_directory_if_needed(resources)
        cache_directory = resources["cache_directory"]
        self.run_manager.setup_new_run(cache_directory, resources)
        run_id, cache_directory = self.run_manager.run_id, self.run_manager.get_current_cache_directory()
        self.run_manager.add_row_to_history(run_id, resources, "done")
        if create_baseyear_cache:
            self.run_manager.create_baseyear_cache(resources)
        ## good for testing
        # run_id = 275
        # cache_directory = '/home/lmwang/opus/data/paris_zone/runs/run_275.2012_05_26_00_20'
        assert run_id is not None
        assert cache_directory is not None
        return run_id, cache_directory

    def update_parameters(self, est_v, cache_directory, simulation_state,
                          dataset_pool, calib_datasets, *args, **kwargs):
        i_est_v = 0
        current_year = simulation_state.get_current_time()
        simulation_state.set_current_time(self.base_year)
        simulation_state.set_cache_directory(cache_directory)
        for dataset_name, calib in calib_datasets.iteritems():
            dataset, calib_attr, index = calib
            if type(calib_attr) == str:
                # Scatter the next index.size estimated values into this attribute.
                dtype = dataset[calib_attr].dtype
                dataset[calib_attr][index] = est_v[i_est_v:i_est_v + index.size].astype(dtype)
                i_est_v += index.size
            elif type(calib_attr) in (list, tuple):
                for attr in calib_attr:
                    dtype = dataset[attr].dtype
                    dataset[attr][index] = est_v[i_est_v:i_est_v + index.size].astype(dtype)
                    i_est_v += index.size
            else:
                raise TypeError, "Unrecognized data type in calib_datasets"
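update_parameters walks a flat vector of estimated values and scatters successive slices back into dataset attributes, advancing an offset by index.size each time. A self-contained numpy sketch of that bookkeeping, with made-up arrays standing in for opus_core datasets:

import numpy as np

# Flat vector of estimated values, as an optimizer would produce.
est_v = np.array([1.5, 2.5, 3.5, 10.0, 20.0])

# Two "attributes" to update and the indices to write into, mirroring
# dataset[calib_attr][index] in update_parameters above.
attr_a = np.zeros(4, dtype=np.float64)
attr_b = np.zeros(3, dtype=np.int32)
index_a = np.array([0, 2, 3])
index_b = np.array([1, 2])

i_est_v = 0
for arr, index in ((attr_a, index_a), (attr_b, index_b)):
    # Cast each slice to the target dtype, exactly as the method does.
    arr[index] = est_v[i_est_v:i_est_v + index.size].astype(arr.dtype)
    i_est_v += index.size

print(attr_a)  # [1.5 0.  2.5 3.5]
print(attr_b)  # [ 0 10 20]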