本文整理汇总了Python中soma_workflow.client.Helper.list_failed_jobs方法的典型用法代码示例。如果您正苦于以下问题:Python Helper.list_failed_jobs方法的具体用法?Python Helper.list_failed_jobs怎么用?Python Helper.list_failed_jobs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类soma_workflow.client.Helper
的用法示例。
在下文中一共展示了Helper.list_failed_jobs方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_result
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
def test_result(self):
    """Run the wrong-native-spec PBS example workflow.

    The workflow must reach WORKFLOW_DONE with no failed job; exactly
    one aborted job is expected unless paths are managed locally.
    """
    wf = self.wf_examples.example_wrong_native_spec_pbs()
    self.wf_id = self.wf_ctrl.submit_workflow(
        workflow=wf, name=self.__class__.__name__)
    # File transfers only apply to transfer-based path management.
    needs_transfer = self.path_management in (self.FILE_TRANSFER,
                                              self.SHARED_TRANSFER)
    if needs_transfer:
        Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
    # Block until the whole workflow has terminated.
    Helper.wait_workflow(self.wf_id, self.wf_ctrl)
    if needs_transfer:
        Helper.transfer_output_files(self.wf_id, self.wf_ctrl)
    status = self.wf_ctrl.workflow_status(self.wf_id)
    self.assertTrue(status == constants.WORKFLOW_DONE,
                    "workflow status : %s. Expected : %s" %
                    (status, constants.WORKFLOW_DONE))
    n_failed = len(Helper.list_failed_jobs(self.wf_id, self.wf_ctrl))
    self.assertTrue(n_failed == 0,
                    "nb failed jobs : %i. Expected : %i" %
                    (n_failed, 0))
    n_failed_aborted = len(Helper.list_failed_jobs(
        self.wf_id, self.wf_ctrl, include_aborted_jobs=True))
    # With local paths the bad native spec aborts no job; otherwise one.
    expected = 0 if self.path_management == self.LOCAL_PATH else 1
    self.assertTrue(n_failed_aborted == expected,
                    "nb failed jobs including aborted : %i. "
                    "Expected : %i" % (n_failed_aborted, expected))
示例2: _get_workflow_status
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
def _get_workflow_status(self):
    """Map the soma-workflow state of the managed workflow onto the
    Runner status constants (RUNNING / FAILED / SUCCESS)."""
    sw_status = self._workflow_controller.workflow_status(
        self._workflow_id)
    if sw_status in (sw.constants.WORKFLOW_IN_PROGRESS,
                     sw.constants.WORKFLOW_NOT_STARTED):
        return Runner.RUNNING
    # Workflow terminated: any failed, aborted or user-killed job
    # counts as a failure of the whole run.
    failed = Helper.list_failed_jobs(
        self._workflow_id, self._workflow_controller,
        include_aborted_jobs=True,
        include_user_killed_jobs=True)
    if failed:
        return Runner.FAILED
    return Runner.SUCCESS
示例3: test_result
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
def test_result(self):
    """Submit ``nb`` independent sleep jobs, wait for completion and
    check that the workflow is done, that no job failed or aborted and
    that every regularly finished job produced empty stdout/stderr."""
    nb = 20
    time_sleep = 1
    workflow = self.wf_examples.example_n_jobs(nb=nb, time=time_sleep)
    self.wf_id = self.wf_ctrl.submit_workflow(
        workflow=workflow,
        name=self.__class__.__name__)
    # Transfer input files if file transfer
    if self.path_management == self.FILE_TRANSFER or \
            self.path_management == self.SHARED_TRANSFER:
        Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
    # Wait for the workflow to finish
    Helper.wait_workflow(self.wf_id, self.wf_ctrl)
    # Transfer output files if file transfer
    if self.path_management == self.FILE_TRANSFER or \
            self.path_management == self.SHARED_TRANSFER:
        Helper.transfer_output_files(self.wf_id, self.wf_ctrl)
    status = self.wf_ctrl.workflow_status(self.wf_id)
    self.assertTrue(status == constants.WORKFLOW_DONE,
                    "workflow status : %s. Expected : %s" %
                    (status, constants.WORKFLOW_DONE))
    nb_failed_jobs = len(Helper.list_failed_jobs(
        self.wf_id,
        self.wf_ctrl))
    self.assertTrue(nb_failed_jobs == 0,
                    "nb failed jobs : %i. Expected : %i" %
                    (nb_failed_jobs, 0))
    nb_failed_aborted_jobs = len(Helper.list_failed_jobs(
        self.wf_id,
        self.wf_ctrl,
        include_aborted_jobs=True))
    self.assertTrue(nb_failed_aborted_jobs == 0,
                    "nb failed jobs including aborted : %i. Expected : %i"
                    % (nb_failed_aborted_jobs, 0))
    (jobs_info, transfers_info, workflow_status, workflow_queue,
     tmp_files) = self.wf_ctrl.workflow_elements_status(self.wf_id)
    for (job_id, tmp_status, queue, exit_info, dates) in jobs_info:
        job_list = self.wf_ctrl.jobs([job_id])
        job_name, job_command, job_submission_date = job_list[job_id]
        self.tested_job = job_id
        if exit_info[0] == constants.FINISHED_REGULARLY:
            # To check job standard out and standard err
            job_stdout_file = tempfile.NamedTemporaryFile(
                prefix="job_soma_out_log_",
                suffix=repr(job_id),
                delete=False)
            job_stdout_file = job_stdout_file.name
            job_stderr_file = tempfile.NamedTemporaryFile(
                prefix="job_soma_outerr_log_",
                suffix=repr(job_id),
                delete=False)
            job_stderr_file = job_stderr_file.name
            try:
                self.wf_ctrl.retrieve_job_stdouterr(job_id,
                                                    job_stdout_file,
                                                    job_stderr_file)
                # Read the logs through context managers: the original
                # built the assertion messages with bare open(...).read()
                # calls, which ran even on success and leaked the file
                # handles.
                with open(job_stdout_file) as f:
                    stdout_contents = f.read()
                # Test stdout
                self.assertTrue(stdout_contents == "",
                                "job stdout not empty : file: %s, "
                                "contents:\n%s" %
                                (job_stdout_file, stdout_contents))
                with open(job_stderr_file) as f:
                    stderr_contents = f.read()
                # Test no stderr
                self.assertTrue(stderr_contents == "",
                                "job stderr not empty : file %s, "
                                "contents:\n%s" %
                                (job_stderr_file, stderr_contents))
            finally:
                os.unlink(job_stdout_file)
                os.unlink(job_stderr_file)
    del self.tested_job
示例4: test_result
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
def test_result(self):
"""Run the "simple" example workflow and check the result.

Checks (as far as visible here -- this example is truncated by the
source page): the workflow reaches WORKFLOW_DONE, no job failed or
aborted, and for each regularly finished job the retrieved stdout
matches the reference data while stderr is empty; output files are
compared against model files for local and transfer path management.
"""
workflow = self.wf_examples.example_simple()
self.wf_id = self.wf_ctrl.submit_workflow(
workflow=workflow,
name=self.__class__.__name__)
# Transfer input files if file transfer
if self.path_management == self.FILE_TRANSFER or \
self.path_management == self.SHARED_TRANSFER:
Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
# Wait for the workflow to finish
Helper.wait_workflow(self.wf_id, self.wf_ctrl)
# Transfer output files if file transfer
if self.path_management == self.FILE_TRANSFER or \
self.path_management == self.SHARED_TRANSFER:
Helper.transfer_output_files(self.wf_id, self.wf_ctrl)
status = self.wf_ctrl.workflow_status(self.wf_id)
self.assertTrue(status == constants.WORKFLOW_DONE,
"workflow status : %s. Expected : %s" %
(status, constants.WORKFLOW_DONE))
nb_failed_jobs = len(Helper.list_failed_jobs(self.wf_id,
self.wf_ctrl))
self.assertTrue(nb_failed_jobs == 0,
"nb failed jobs : %i. Expected : %i" %
(nb_failed_jobs, 0))
nb_failed_aborted_jobs = len(Helper.list_failed_jobs(
self.wf_id,
self.wf_ctrl,
include_aborted_jobs=True))
self.assertTrue(nb_failed_aborted_jobs == 0,
"nb failed jobs including aborted : %i. Expected : %i"
% (nb_failed_aborted_jobs, 0))
# Inspect the per-job status returned by the controller.
(jobs_info, transfers_info, workflow_status, workflow_queue,
tmp_files) = self.wf_ctrl.workflow_elements_status(self.wf_id)
for (job_id, tmp_status, queue, exit_info, dates) in jobs_info:
job_list = self.wf_ctrl.jobs([job_id])
job_name, job_command, job_submission_date = job_list[job_id]
self.tested_job = job_id
if exit_info[0] == constants.FINISHED_REGULARLY:
# To check job standard out and standard err
job_stdout_file = tempfile.NamedTemporaryFile(
prefix="job_soma_out_log_",
suffix=repr(job_id),
delete=False)
job_stdout_file = job_stdout_file.name
job_stderr_file = tempfile.NamedTemporaryFile(
prefix="job_soma_outerr_log_",
suffix=repr(job_id),
delete=False)
job_stderr_file = job_stderr_file.name
try:
self.wf_ctrl.retrieve_job_stdouterr(job_id,
job_stdout_file,
job_stderr_file)
if job_name == 'job1':
# Test stdout
isSame, msg = identical_files(
job_stdout_file,
self.wf_examples.lo_stdout[1])
self.assertTrue(isSame, msg)
# Test no stderr
msg = "job stderr not empty : cf %s\n" \
"stderr:\n---\n%s\n---" \
% (job_stderr_file, open(job_stderr_file).read())
self.assertTrue(os.stat(job_stderr_file).st_size == 0,
msg)
# Test output files
if self.path_management == self.LOCAL_PATH:
isSame, msg = identical_files(
self.wf_examples.lo_out_model_file[11],
self.wf_examples.lo_file[11])
self.assertTrue(isSame, msg)
isSame, msg = identical_files(
self.wf_examples.lo_out_model_file[12],
self.wf_examples.lo_file[12])
self.assertTrue(isSame, msg)
if self.path_management == self.FILE_TRANSFER or \
self.path_management == self.SHARED_TRANSFER:
isSame, msg = identical_files(
self.wf_examples.lo_out_model_file[11],
self.wf_examples.tr_file[11].client_path)
self.assertTrue(isSame, msg)
isSame, msg = identical_files(
self.wf_examples.lo_out_model_file[12],
self.wf_examples.tr_file[12].client_path)
self.assertTrue(isSame, msg)
# For unknown reason, it raises some errors
# http://stackoverflow.com/questions/10496758/unexpected-end-of-file-and-error-importing-function-definition-error-running
# isSame, msg = identical_files(job_stderr_file,self.wf_examples.lo_stderr[1])
# self.failUnless(isSame == True)
if job_name in ['job2', 'job3', 'job4']:
job_nb = int(job_name[3])
# Test stdout
# ......... part of the code is omitted here (truncated by the source page) .........
示例5: test_result
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
def test_result(self):
    """Run the native-spec PBS example workflow and check that it
    completes with no failed or aborted job, that each regularly
    finished job's stdout matches the reference data with empty stderr,
    and that the produced output files match the model files."""
    workflow = self.wf_examples.example_native_spec_pbs()
    self.wf_id = self.wf_ctrl.submit_workflow(
        workflow=workflow, name=self.__class__.__name__)
    transfer_mode = self.path_management in (self.FILE_TRANSFER,
                                             self.SHARED_TRANSFER)
    # Transfer input files if file transfer
    if transfer_mode:
        Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
    # Wait for the workflow to finish
    Helper.wait_workflow(self.wf_id, self.wf_ctrl)
    # Transfer output files if file transfer
    if transfer_mode:
        Helper.transfer_output_files(self.wf_id, self.wf_ctrl)
    status = self.wf_ctrl.workflow_status(self.wf_id)
    self.assertTrue(status == constants.WORKFLOW_DONE,
                    "workflow status : %s. Expected : %s"
                    % (status, constants.WORKFLOW_DONE))
    n_failed = len(Helper.list_failed_jobs(self.wf_id, self.wf_ctrl))
    self.assertTrue(n_failed == 0,
                    "nb failed jobs : %i. Expected : %i" % (n_failed, 0))
    n_failed_aborted = len(Helper.list_failed_jobs(
        self.wf_id, self.wf_ctrl, include_aborted_jobs=True))
    self.assertTrue(n_failed_aborted == 0,
                    "nb failed jobs including aborted : %i. Expected : %i"
                    % (n_failed_aborted, 0))
    jobs_info = self.wf_ctrl.workflow_elements_status(self.wf_id)[0]
    for job_id, tmp_status, queue, exit_info, dates in jobs_info:
        job_name = self.wf_ctrl.jobs([job_id])[job_id][0]
        self.tested_job = job_id
        if exit_info[0] != constants.FINISHED_REGULARLY:
            continue
        # Dump the job's stdout/stderr into temp files for inspection.
        stdout_path = tempfile.NamedTemporaryFile(
            prefix="job_soma_out_log_", suffix=repr(job_id),
            delete=False).name
        stderr_path = tempfile.NamedTemporaryFile(
            prefix="job_soma_outerr_log_", suffix=repr(job_id),
            delete=False).name
        try:
            self.wf_ctrl.retrieve_job_stdouterr(job_id, stdout_path,
                                                stderr_path)
            # Test stdout
            same, message = identical_files(
                stdout_path, self.wf_examples.lo_stdout[1])
            self.assertTrue(same, message)
            # Test no stderr
            self.assertTrue(os.stat(stderr_path).st_size == 0,
                            "job stderr not empty : cf %s" % stderr_path)
            # Test output files
            if self.path_management == self.LOCAL_PATH:
                for idx in (11, 12):
                    same, message = identical_files(
                        self.wf_examples.lo_out_model_file[idx],
                        self.wf_examples.lo_file[idx])
                    self.assertTrue(same, message)
            if transfer_mode:
                for idx in (11, 12):
                    same, message = identical_files(
                        self.wf_examples.lo_out_model_file[idx],
                        self.wf_examples.tr_file[idx].client_path)
                    self.assertTrue(same, message)
        finally:
            os.unlink(stdout_path)
            os.unlink(stderr_path)
    del self.tested_job
示例6: test_result
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
def test_result(self):
    """Run the simple-exception example workflow: one job raises, so
    one failed job and four failed-or-aborted jobs are expected; the
    failing job's stdout and the last stderr line are checked against
    reference data."""
    workflow = self.wf_examples.example_simple_exception1()
    self.wf_id = self.wf_ctrl.submit_workflow(
        workflow=workflow, name=self.__class__.__name__)
    transfer_mode = self.path_management in (self.FILE_TRANSFER,
                                             self.SHARED_TRANSFER)
    # Transfer input files if file transfer
    if transfer_mode:
        Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
    # Wait for the workflow to finish
    Helper.wait_workflow(self.wf_id, self.wf_ctrl)
    # Transfer output files if file transfer
    if transfer_mode:
        Helper.transfer_output_files(self.wf_id, self.wf_ctrl)
    status = self.wf_ctrl.workflow_status(self.wf_id)
    self.assertTrue(status == constants.WORKFLOW_DONE,
                    "workflow status : %s. Expected : %s" %
                    (status, constants.WORKFLOW_DONE))
    n_failed = len(Helper.list_failed_jobs(self.wf_id, self.wf_ctrl))
    self.assertTrue(n_failed == 1,
                    "nb failed jobs : %i. Expected : %i" % (n_failed, 1))
    n_failed_aborted = len(Helper.list_failed_jobs(
        self.wf_id, self.wf_ctrl, include_aborted_jobs=True))
    self.assertTrue(n_failed_aborted == 4,
                    "nb failed jobs including aborted : %i. Expected : %i"
                    % (n_failed_aborted, 4))
    jobs_info = self.wf_ctrl.workflow_elements_status(self.wf_id)[0]
    for job_id, tmp_status, queue, exit_info, dates in jobs_info:
        job_name = self.wf_ctrl.jobs([job_id])[job_id][0]
        self.tested_job = job_id
        if exit_info[0] != constants.FINISHED_REGULARLY:
            continue
        # Keep the retrieved stdout/stderr in named temporary files.
        stdout_path = tempfile.NamedTemporaryFile(
            prefix="job_soma_out_log_", suffix=repr(job_id),
            delete=False).name
        stderr_path = tempfile.NamedTemporaryFile(
            prefix="job_soma_outerr_log_", suffix=repr(job_id),
            delete=False).name
        try:
            self.wf_ctrl.retrieve_job_stdouterr(job_id, stdout_path,
                                                stderr_path)
            if job_name == 'job1 with exception':
                # Test stdout
                same, message = identical_files(
                    stdout_path,
                    self.wf_examples.lo_stdout_exception_model)
                self.assertTrue(same, message)
                # Test the last line of stderr
                with open(stderr_path) as stream:
                    lines = stream.readlines()
                expected_error = 'Exception: Paf Boum Boum Bada Boum !!!\n'
                self.assertTrue(lines[-1] == expected_error,
                                "Job exception : %s. Expected : %s" %
                                (lines[-1], expected_error))
        finally:
            os.unlink(stdout_path)
            os.unlink(stderr_path)
    del self.tested_job
示例7: test_result
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
def test_result(self):
    """Run the special-transfer example workflow and check that it
    finishes without failed or aborted jobs, and that the
    "dir_contents" and "multi file format test" jobs produce the
    expected stdout with empty stderr.

    Unlike the other examples, input/output transfers are performed
    unconditionally: this workflow is built around file transfers.
    """
    workflow = self.wf_examples.example_special_transfer()
    self.wf_id = self.wf_ctrl.submit_workflow(
        workflow=workflow, name=self.__class__.__name__)
    # Transfer input files
    Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
    # Wait for the workflow to finish
    Helper.wait_workflow(self.wf_id, self.wf_ctrl)
    # Transfer output files
    Helper.transfer_output_files(self.wf_id, self.wf_ctrl)
    # Note: the original queried workflow_status() both before and
    # after the output transfer and discarded the first result; the
    # dead first query was removed.
    status = self.wf_ctrl.workflow_status(self.wf_id)
    self.assertTrue(
        status == constants.WORKFLOW_DONE,
        "workflow status : %s. Expected : %s" % (status, constants.WORKFLOW_DONE)
    )
    nb_failed_jobs = len(Helper.list_failed_jobs(self.wf_id, self.wf_ctrl))
    self.assertTrue(nb_failed_jobs == 0,
                    "nb failed jobs : %i. Expected : %i" % (nb_failed_jobs, 0))
    nb_failed_aborted_jobs = len(Helper.list_failed_jobs(
        self.wf_id, self.wf_ctrl, include_aborted_jobs=True))
    self.assertTrue(
        nb_failed_aborted_jobs == 0,
        "nb failed jobs including aborted : %i. Expected : %i"
        % (nb_failed_aborted_jobs, 0),
    )
    (jobs_info, transfers_info, workflow_status, workflow_queue,
     tmp_files) = self.wf_ctrl.workflow_elements_status(self.wf_id)
    for (job_id, tmp_status, queue, exit_info, dates) in jobs_info:
        job_list = self.wf_ctrl.jobs([job_id])
        job_name, job_command, job_submission_date = job_list[job_id]
        self.tested_job = job_id
        if exit_info[0] == constants.FINISHED_REGULARLY:
            # To check job standard out and standard err
            job_stdout_file = tempfile.NamedTemporaryFile(
                prefix="job_soma_out_log_", suffix=repr(job_id),
                delete=False).name
            job_stderr_file = tempfile.NamedTemporaryFile(
                prefix="job_soma_outerr_log_", suffix=repr(job_id),
                delete=False).name
            try:
                self.wf_ctrl.retrieve_job_stdouterr(
                    job_id, job_stdout_file, job_stderr_file)
                if job_name == "dir_contents":
                    # Test job standard out. The log is only read, so
                    # open it read-only ("r+" in the original requested
                    # write access that was never used).
                    with open(job_stdout_file, "r") as f:
                        dir_contents = f.readlines()
                    dir_path_in = self.wf_examples.lo_in_dir
                    full_path_list = []
                    for element in os.listdir(dir_path_in):
                        full_path_list.append(
                            os.path.join(dir_path_in, element))
                    dir_contents_model = list_contents(full_path_list, [])
                    self.assertTrue(
                        sorted(dir_contents) == sorted(dir_contents_model))
                    # Test no stderr
                    self.assertTrue(
                        os.stat(job_stderr_file).st_size == 0,
                        "job stderr not empty : cf %s" % job_stderr_file)
                if job_name == "multi file format test":
                    # Test job standard out
                    isSame, msg = identical_files(
                        job_stdout_file, self.wf_examples.lo_mff_stdout)
                    self.assertTrue(isSame, msg)
                    # Test no stderr
                    self.assertTrue(
                        os.stat(job_stderr_file).st_size == 0,
                        "job stderr not empty : cf %s" % job_stderr_file)
            finally:
                os.unlink(job_stdout_file)
                os.unlink(job_stderr_file)
    del self.tested_job
示例8: test_result
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
def test_result(self):
    """Run the special-command example workflow: building it must emit
    exactly one single-quote UserWarning; the workflow must finish with
    no failed or aborted job and each finished job's stdout must match
    the local or remote reference with empty stderr."""
    # Cause all warnings to always be triggered.
    warnings.simplefilter("always")
    with warnings.catch_warnings(record=True) as caught:
        # Trigger a warning.
        workflow = self.wf_examples.example_special_command()
        # Verify some things
        self.assertTrue(len(caught) == 1)
        self.assertTrue(issubclass(caught[-1].category, UserWarning))
        self.assertTrue("contains single quote. It could fail using DRMAA"
                        in str(caught[-1].message))
    self.wf_id = self.wf_ctrl.submit_workflow(
        workflow=workflow, name=self.__class__.__name__)
    # Transfer input files if file transfer
    if self.path_management in (self.FILE_TRANSFER,
                                self.SHARED_TRANSFER):
        Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
    # Wait for the workflow to finish
    Helper.wait_workflow(self.wf_id, self.wf_ctrl)
    status = self.wf_ctrl.workflow_status(self.wf_id)
    self.assertTrue(status == constants.WORKFLOW_DONE,
                    "workflow status : %s. Expected : %s" %
                    (status, constants.WORKFLOW_DONE))
    # TODO : sometimes raises an error
    # because status = "workflow_in_progress"
    n_failed = len(Helper.list_failed_jobs(self.wf_id, self.wf_ctrl))
    self.assertTrue(n_failed == 0,
                    "nb failed jobs : %i. Expected : %i" % (n_failed, 0))
    n_failed_aborted = len(Helper.list_failed_jobs(
        self.wf_id, self.wf_ctrl, include_aborted_jobs=True))
    self.assertTrue(n_failed_aborted == 0,
                    "nb failed jobs including aborted : %i. Expected : %i"
                    % (n_failed_aborted, 0))
    jobs_info = self.wf_ctrl.workflow_elements_status(self.wf_id)[0]
    for job_id, tmp_status, queue, exit_info, dates in jobs_info:
        job_list = self.wf_ctrl.jobs([job_id])
        job_name, job_command, job_submission_date = job_list[job_id]
        self.tested_job = job_id
        if exit_info[0] != constants.FINISHED_REGULARLY:
            continue
        # To check job standard out and standard err
        stdout_path = tempfile.NamedTemporaryFile(
            prefix="job_soma_out_log_", suffix=repr(job_id),
            delete=False).name
        stderr_path = tempfile.NamedTemporaryFile(
            prefix="job_soma_outerr_log_", suffix=repr(job_id),
            delete=False).name
        try:
            self.wf_ctrl.retrieve_job_stdouterr(job_id, stdout_path,
                                                stderr_path)
            # Test job stdout against the local or remote reference.
            if self.path_management == self.LOCAL_PATH:
                reference = self.wf_examples.lo_stdout_command_local
            else:
                reference = self.wf_examples.lo_stdout_command_remote
            same, message = identical_files(stdout_path, reference)
            self.assertTrue(same, message)
            # Test no stderr
            self.assertTrue(os.stat(stderr_path).st_size == 0,
                            "job stderr not empty : cf %s" % stderr_path)
        finally:
            os.unlink(stdout_path)
            os.unlink(stderr_path)
    del self.tested_job
示例9: Job
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
echo %s
""" % test_bash_script
# NOTE(review): this snippet is truncated at the top by the source page --
# the opening of the script string (and the definitions of fileout,
# test_bash_script, test_filepath, cur_work_dir, tmp_work_dir_path,
# cur_file_dir) lie above the visible excerpt.
fileout.write(filecontent)
fileout.close()
os.chdir(cur_work_dir)
# Two jobs sharing the same working directory: one creates a file with
# `touch`, the other runs a helper script.
job1 = Job(command=[u"touch", test_filepath],
name="epac_job_test",
working_directory=tmp_work_dir_path)
job2 = Job(command=["%s/readfile" % cur_file_dir, test_bash_script],
name="epac_job_test",
working_directory=tmp_work_dir_path)
soma_workflow = Workflow(jobs=[job1, job2])
# Connect to the local machine as the computing resource.
resource_id = socket.gethostname()
controller = WorkflowController(resource_id, "", "")
## run soma-workflow
## =================
wf_id = controller.submit_workflow(workflow=soma_workflow,
name="epac workflow")
# Block until the workflow terminates, then check for failures.
Helper.wait_workflow(wf_id, controller)
nb_failed_jobs = len(Helper.list_failed_jobs(wf_id, controller))
if nb_failed_jobs > 0:
raise ValueError("Soma-workflow error, cannot use working directory")
# The touch job must have created the file inside the working directory.
if not os.path.isfile(os.path.join(tmp_work_dir_path, test_filepath)):
raise ValueError("Soma-workflow cannot define working directory")
else:
# NOTE(review): Python 2 print statement -- this snippet predates Python 3.
print "OK for creating new file in working directory"
示例10: test_result
# 需要导入模块: from soma_workflow.client import Helper [as 别名]
# 或者: from soma_workflow.client.Helper import list_failed_jobs [as 别名]
def test_result(self):
    """Run the fake T1 pipeline sized to the scheduler (10 iterations
    per processor when the scheduler reports a processor count, 100
    otherwise) and check that the workflow finishes with no failed or
    aborted job and that every regularly finished job has empty stdout
    and stderr."""
    if hasattr(self.wf_ctrl.scheduler_config, 'get_proc_nb'):
        n_iter = 10 * self.wf_ctrl.scheduler_config.get_proc_nb()
    else:
        n_iter = 100
    workflow = self.wf_examples.example_fake_pipelineT1(n_iter)
    self.wf_id = self.wf_ctrl.submit_workflow(
        workflow=workflow, name=self.__class__.__name__)
    transfer_mode = self.path_management in (self.FILE_TRANSFER,
                                             self.SHARED_TRANSFER)
    # Transfer input files if file transfer
    if transfer_mode:
        Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
    # Wait for the workflow to finish
    Helper.wait_workflow(self.wf_id, self.wf_ctrl)
    # Transfer output files if file transfer
    if transfer_mode:
        Helper.transfer_output_files(self.wf_id, self.wf_ctrl)
    status = self.wf_ctrl.workflow_status(self.wf_id)
    self.assertTrue(status == constants.WORKFLOW_DONE)
    self.assertTrue(len(Helper.list_failed_jobs(
        self.wf_id, self.wf_ctrl)) == 0)
    self.assertTrue(len(Helper.list_failed_jobs(
        self.wf_id, self.wf_ctrl, include_aborted_jobs=True)) == 0)
    jobs_info = self.wf_ctrl.workflow_elements_status(self.wf_id)[0]
    for job_id, tmp_status, queue, exit_info, dates in jobs_info:
        job_list = self.wf_ctrl.jobs([job_id])
        job_name, job_command, job_submission_date = job_list[job_id]
        self.tested_job = job_id
        if exit_info[0] != constants.FINISHED_REGULARLY:
            continue
        # To check job standard out and standard err
        stdout_path = tempfile.NamedTemporaryFile(
            prefix="job_soma_out_log_", suffix=repr(job_id),
            delete=False).name
        stderr_path = tempfile.NamedTemporaryFile(
            prefix="job_soma_outerr_log_", suffix=repr(job_id),
            delete=False).name
        try:
            self.wf_ctrl.retrieve_job_stdouterr(job_id, stdout_path,
                                                stderr_path)
            # Test stdout
            self.assertTrue(os.stat(stdout_path).st_size == 0,
                            "job stdout not empty : cf %s" % stdout_path)
            # Test no stderr
            self.assertTrue(os.stat(stderr_path).st_size == 0,
                            "job stderr not empty : cf %s" % stderr_path)
        finally:
            # The temp files may already be gone if retrieval failed.
            if os.path.exists(stdout_path):
                os.unlink(stdout_path)
            if os.path.exists(stderr_path):
                os.unlink(stderr_path)
    del self.tested_job