本文整理汇总了Python中soma_workflow.client.Helper.unserialize方法的典型用法代码示例。如果您正苦于以下问题:Python Helper.unserialize方法的具体用法?Python Helper.unserialize怎么用?Python Helper.unserialize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类soma_workflow.client.Helper
的用法示例。
在下文中一共展示了Helper.unserialize方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
Example 1: test_serialization
# Required import: from soma_workflow.client import Helper
# Usage: from soma_workflow.client.Helper import unserialize
def test_serialization(self):
    """Round-trip each example workflow through Helper.serialize and
    Helper.unserialize, asserting that all workflow attributes survive.

    Exercises local, file-transfer and shared-resource-path example
    workflows. The examples' output directories are appended to
    ``self.temporaries`` so the test fixture can clean them up.
    """
    simple_wf_examples = workflow_local.WorkflowExamplesLocal()
    tr_wf_examples = workflow_transfer.WorkflowExamplesTransfer()
    srp_wf_examples = workflow_shared.WorkflowExamplesShared()
    # Register example output dirs for later cleanup by the fixture.
    self.temporaries += [simple_wf_examples.output_dir,
                         tr_wf_examples.output_dir,
                         srp_wf_examples.output_dir]
    workflows = [
        ("multiple", simple_wf_examples.example_multiple()),
        ("special_command", simple_wf_examples.example_special_command()),
        ("mutiple_transfer", tr_wf_examples.example_multiple()),
        ("special_command_transfer",
         tr_wf_examples.example_special_command()),
        ("special_transfer", tr_wf_examples.example_special_transfer()),
        ("mutiple_srp", srp_wf_examples.example_multiple()),
        ("special_command_srp", srp_wf_examples.example_special_command()),
    ]
    for workflow_name, workflow in workflows:
        print("Testing", workflow_name)
        # mkstemp returns (fd, path); close the fd right away because
        # Helper.serialize re-opens the file by path itself.
        fd, file_path = tempfile.mkstemp(prefix="json_",
                                         suffix=workflow_name + ".wf")
        os.close(fd)
        try:
            Helper.serialize(file_path, workflow)
            new_workflow = Helper.unserialize(file_path)
            self.assertTrue(new_workflow.attributs_equal(workflow),
                            "Serialization failed for workflow %s" %
                            workflow_name)
        finally:
            # Always remove the temp file, even if serialization or the
            # assertion fails; os.remove raises OSError, not IOError.
            try:
                os.remove(file_path)
            except OSError:
                pass
Example 2: test_serialization
# Required import: from soma_workflow.client import Helper
# Usage: from soma_workflow.client.Helper import unserialize
def test_serialization(self):
    """Round-trip each example workflow through Helper.serialize and
    Helper.unserialize, asserting that all workflow attributes survive.

    Exercises local, file-transfer and shared-resource-path example
    workflows. Serialized files are written to the system temporary
    directory and removed afterwards.
    """
    # Use the platform temp dir instead of a hard-coded "/tmp/".
    directory = tempfile.gettempdir()
    simple_wf_examples = workflow_local.WorkflowExamplesLocal()
    tr_wf_examples = workflow_transfer.WorkflowExamplesTransfer()
    srp_wf_examples = workflow_shared.WorkflowExamplesShared()
    workflows = [
        ("multiple", simple_wf_examples.example_multiple()),
        ("special_command", simple_wf_examples.example_special_command()),
        ("mutiple_transfer", tr_wf_examples.example_multiple()),
        ("special_command_transfer",
         tr_wf_examples.example_special_command()),
        ("special_transfer", tr_wf_examples.example_special_transfer()),
        ("mutiple_srp", srp_wf_examples.example_multiple()),
        ("special_command_srp", srp_wf_examples.example_special_command()),
    ]
    for workflow_name, workflow in workflows:
        print("Testing", workflow_name)
        file_path = os.path.join(directory,
                                 "json_" + workflow_name + ".wf")
        try:
            Helper.serialize(file_path, workflow)
            new_workflow = Helper.unserialize(file_path)
            self.assertTrue(new_workflow.attributs_equal(workflow),
                            "Serialization failed for workflow %s" %
                            workflow_name)
        finally:
            # Always remove the temp file, even if serialization or the
            # assertion fails; os.remove raises OSError, not IOError.
            try:
                os.remove(file_path)
            except OSError:
                pass
Example 3: repr
# Required import: from soma_workflow.client import Helper
# Usage: from soma_workflow.client.Helper import unserialize
# Master-side driver: build the MPI scheduler and engine, then either
# submit a new workflow from a file or restart an existing one, and
# poll until all jobs and workflows are finished.
logger.info("epd_to_deploy " + repr(options.epd_to_deploy))
logger.info("untar_directory " + repr(options.untar_directory))
sch = MPIScheduler(comm, interval=1,
                   nb_attempt_per_job=options.nb_attempt_per_job)
# Queue limits do not apply to an MPI allocation; disable them.
config.disable_queue_limits()
workflow_engine = ConfiguredWorkflowEngine(database_server,
                                           sch,
                                           config)
if options.workflow_file and os.path.exists(options.workflow_file):
    workflow_file = options.workflow_file
    logger.info(" ")
    logger.info("******* submission of workflow **********")
    logger.info("workflow file: " + repr(workflow_file))
    # Deserialize the workflow description and hand it to the engine.
    workflow = Helper.unserialize(workflow_file)
    workflow_engine.submit_workflow(workflow,
                                    expiration_date=None,
                                    name=None,
                                    queue=None)
if options.wf_id_to_restart is not None:
    workflow_id = options.wf_id_to_restart
    logger.info(" ")
    logger.info("******* restart workflow **********")
    logger.info("workflow id: " + repr(workflow_id))
    # Stop any leftover state before restarting the workflow.
    workflow_engine.stop_workflow(workflow_id)
    workflow_engine.restart_workflow(workflow_id, queue=None)
# Block until every job and workflow has completed.
while not workflow_engine.engine_loop.are_jobs_and_workflow_done():
    time.sleep(2)
for slave in range(1, comm.size):