本文整理匯總了Python中soma_workflow.client.Helper.unserialize方法的典型用法代碼示例。如果您正苦於以下問題:Python Helper.unserialize方法的具體用法?Python Helper.unserialize怎麽用?Python Helper.unserialize使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類soma_workflow.client.Helper
的用法示例。
在下文中一共展示了Helper.unserialize方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_serialization
# 需要導入模塊: from soma_workflow.client import Helper [as 別名]
# 或者: from soma_workflow.client.Helper import unserialize [as 別名]
def test_serialization(self):
    """Round-trip every example workflow through Helper.serialize /
    Helper.unserialize and assert the restored workflow equals the
    original (via attributs_equal).
    """
    local_examples = workflow_local.WorkflowExamplesLocal()
    transfer_examples = workflow_transfer.WorkflowExamplesTransfer()
    shared_examples = workflow_shared.WorkflowExamplesShared()
    # Register the example output directories so the test fixture
    # cleans them up afterwards.
    self.temporaries += [local_examples.output_dir,
                         transfer_examples.output_dir,
                         shared_examples.output_dir]
    workflows = [
        ("multiple", local_examples.example_multiple()),
        ("special_command", local_examples.example_special_command()),
        ("mutiple_transfer", transfer_examples.example_multiple()),
        ("special_command_transfer",
         transfer_examples.example_special_command()),
        ("special_transfer", transfer_examples.example_special_transfer()),
        ("mutiple_srp", shared_examples.example_multiple()),
        ("special_command_srp", shared_examples.example_special_command()),
    ]
    for name, workflow in workflows:
        print("Testing", name)
        # mkstemp returns (fd, path); close the descriptor right away —
        # Helper.serialize opens the file by path itself.
        fd, path = tempfile.mkstemp(prefix="json_", suffix=name + ".wf")
        os.close(fd)
        Helper.serialize(path, workflow)
        restored = Helper.unserialize(path)
        self.assertTrue(restored.attributs_equal(workflow),
                        "Serialization failed for workflow %s" % name)
        try:
            os.remove(path)
        except IOError:
            pass
示例2: test_serialization
# 需要導入模塊: from soma_workflow.client import Helper [as 別名]
# 或者: from soma_workflow.client.Helper import unserialize [as 別名]
def test_serialization(self):
    """Serialize each example workflow to a JSON file on disk and check
    that unserializing it yields a workflow equal to the original.
    """
    # Use the platform temporary directory instead of a hard-coded
    # "/tmp/" so the test also runs where that path does not exist
    # (e.g. Windows).
    directory = tempfile.gettempdir()
    simple_wf_examples = workflow_local.WorkflowExamplesLocal()
    tr_wf_examples = workflow_transfer.WorkflowExamplesTransfer()
    srp_wf_examples = workflow_shared.WorkflowExamplesShared()
    workflows = []
    workflows.append(("multiple", simple_wf_examples.example_multiple()))
    workflows.append(("special_command",
                      simple_wf_examples.example_special_command()))
    workflows.append(("mutiple_transfer",
                      tr_wf_examples.example_multiple()))
    workflows.append(("special_command_transfer",
                      tr_wf_examples.example_special_command()))
    workflows.append(("special_transfer",
                      tr_wf_examples.example_special_transfer()))
    workflows.append(("mutiple_srp", srp_wf_examples.example_multiple()))
    workflows.append(("special_command_srp",
                      srp_wf_examples.example_special_command()))
    for workflow_name, workflow in workflows:
        # Was a Python 2 print statement (a SyntaxError under Python 3);
        # use the print() function, consistent with the other tests.
        print("Testing", workflow_name)
        file_path = os.path.join(directory,
                                 "json_" + workflow_name + ".wf")
        Helper.serialize(file_path, workflow)
        new_workflow = Helper.unserialize(file_path)
        self.assertTrue(new_workflow.attributs_equal(workflow),
                        "Serialization failed for workflow %s" %
                        workflow_name)
        try:
            os.remove(file_path)
        except IOError:
            pass
示例3: repr
# 需要導入模塊: from soma_workflow.client import Helper [as 別名]
# 或者: from soma_workflow.client.Helper import unserialize [as 別名]
logger.info("epd_to_deploy " + repr(options.epd_to_deploy))
logger.info("untar_directory " + repr(options.untar_directory))
# One-second polling interval for the MPI scheduler; retry count comes
# from the command line.
sch = MPIScheduler(comm, interval=1,
                   nb_attempt_per_job=options.nb_attempt_per_job)
config.disable_queue_limits()
workflow_engine = ConfiguredWorkflowEngine(database_server,
                                           sch,
                                           config)
# Submit a new workflow when a serialized workflow file was given.
if options.workflow_file and os.path.exists(options.workflow_file):
    workflow_file = options.workflow_file
    logger.info(" ")
    logger.info("******* submission of workflow **********")
    logger.info("workflow file: " + repr(workflow_file))
    workflow = Helper.unserialize(workflow_file)
    workflow_engine.submit_workflow(workflow,
                                    expiration_date=None,
                                    name=None,
                                    queue=None)
# Restart an existing workflow by id. Use "is not None" rather than
# "!= None" (PEP 8; also immune to custom __eq__ implementations).
if options.wf_id_to_restart is not None:
    workflow_id = options.wf_id_to_restart
    logger.info(" ")
    logger.info("******* restart workflow **********")
    logger.info("workflow id: " + repr(workflow_id))
    # Stop first so a possibly-running workflow is in a clean state
    # before restarting.
    workflow_engine.stop_workflow(workflow_id)
    workflow_engine.restart_workflow(workflow_id, queue=None)
# Block until every job of every submitted workflow has finished.
while not workflow_engine.engine_loop.are_jobs_and_workflow_done():
    time.sleep(2)
for slave in range(1, comm.size):