本文整理汇总了Python中interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient.find_subjects方法的典型用法代码示例。如果您正苦于以下问题:Python ResourceRegistryServiceProcessClient.find_subjects方法的具体用法?Python ResourceRegistryServiceProcessClient.find_subjects怎么用?Python ResourceRegistryServiceProcessClient.find_subjects使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient的用法示例。
在下文中一共展示了ResourceRegistryServiceProcessClient.find_subjects方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: TestVisualizationServiceIntegration
# Required module import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_subjects [as alias]
#......... (part of the method body is omitted here) .........
# NOTE(review): truncated excerpt of a test method from TestVisualizationServiceIntegration.
# The enclosing `def`, the imports (IonObject, RT, PRED, USER_VISUALIZATION_QUEUE) and the
# fixture setup (self.workflowclient, self.rrclient, self.container) are not visible here,
# and the original indentation was lost in extraction — presumably the lines after the
# `for` statement below were indented under it; verify against the original source.
# and test to make sure the subscription to the queues is working correctly
assertions = self.assertTrue
# Build the workflow definition
workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Viz_Test_Workflow',description='A workflow to test collection of multiple data products in queues')
workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name
#-------------------------------------------------------------------------------------------------------------------------
#Add a transformation process definition for salinity
#-------------------------------------------------------------------------------------------------------------------------
ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False) #Don't persist the intermediate data product
configuration = {'stream_name' : 'salinity'}
workflow_step_obj.configuration = configuration
workflow_def_obj.workflow_steps.append(workflow_step_obj)
#Create it in the resource registry
workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)
# One workflow step was appended above, so exactly one hasDataProcessDefinition
# association is expected on the stored definition.
aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
assertions(len(aids) == 1 )
#The list of data product streams to monitor
data_product_stream_ids = list()
#Create the input data product
ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
data_product_stream_ids.append(ctd_stream_id)
#Create and start the workflow
workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=30)
# find_subjects walks the association backwards: given the output product (object),
# return the Workflow resource(s) pointing at it via hasOutputProduct.
# The trailing True appears to request ids only rather than full objects — TODO confirm.
workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
assertions(len(workflow_output_ids) == 1 )
#Walk the associations to find the appropriate output data streams to validate the messages
workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
assertions(len(workflow_dp_ids) == 1 )
# Each workflow data product is expected to carry exactly one stream; collect them
# so their messages can be validated later.
for dp_id in workflow_dp_ids:
stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
assertions(len(stream_ids) == 1 )
data_product_stream_ids.append(stream_ids[0])
# Now for each of the data_product_stream_ids create a queue and pipe their data to the queue
user_queue_name1 = USER_VISUALIZATION_QUEUE + '1'
user_queue_name2 = USER_VISUALIZATION_QUEUE + '2'
# use idempotency to create queues
xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
self.addCleanup(xq1.delete)
xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
self.addCleanup(xq2.delete)
# Purge any residue from prior runs so message counts start from zero.
xq1.purge()
xq2.purge()
# the create_subscription call takes a list of stream_ids so create temp ones
dp_stream_id1 = list()
dp_stream_id1.append(data_product_stream_ids[0])
dp_stream_id2 = list()
dp_stream_id2.append(data_product_stream_ids[1])
示例2: TestWorkflowManagementIntegration
# Required module import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_subjects [as alias]
#......... (part of the method body is omitted here) .........
# NOTE(review): truncated excerpt of a test method from TestWorkflowManagementIntegration.
# The enclosing `def`, the earlier lines defining `assertions`, `workflow_def_obj` and
# `workflow_data_product_name`, and the imports (IonObject, RT, PRED, log) are not visible
# here; the original indentation was also lost in extraction — the lines after the `for`
# statement below were presumably indented under it. Verify against the original source.
ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
workflow_step_obj = IonObject('DataProcessWorkflowStep',
data_process_definition_id=ctd_L2_salinity_dprocdef_id,
persist_process_output_data=False) #Don't persist the intermediate data product
workflow_def_obj.workflow_steps.append(workflow_step_obj)
#-------------------------------------------------------------------------------------------------------------------------
log.debug( "Adding a transformation process definition for salinity doubler")
#-------------------------------------------------------------------------------------------------------------------------
salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()
workflow_step_obj = IonObject('DataProcessWorkflowStep',
data_process_definition_id=salinity_doubler_dprocdef_id, )
workflow_def_obj.workflow_steps.append(workflow_step_obj)
log.debug( "Creating workflow def in the resource registry")
workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)
# Two steps (salinity + salinity doubler) were appended, so two
# hasDataProcessDefinition associations are expected.
aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
assertions(len(aids) == 2 )
#The list of data product streams to monitor
data_product_stream_ids = list()
log.debug( "Creating the input data product")
ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
data_product_stream_ids.append(ctd_stream_id)
log.debug( "Creating and starting the workflow")
workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id,
ctd_parsed_data_product_id,
persist_workflow_data_product=True, output_data_product_name=workflow_data_product_name, timeout=300)
# find_subjects walks the association backwards: given the output product (object),
# return the Workflow resource(s) pointing at it via hasOutputProduct.
workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
assertions(len(workflow_output_ids) == 1 )
log.debug( "persisting the output product")
#self.dataproductclient.activate_data_product_persistence(workflow_product_id)
# Persistence was requested via persist_workflow_data_product=True above, so the
# output product should already have exactly one associated Dataset.
dataset_ids,_ = self.rrclient.find_objects(workflow_product_id, PRED.hasDataset, RT.Dataset, True)
assertions(len(dataset_ids) == 1 )
dataset_id = dataset_ids[0]
log.debug( "Verifying the output data product name matches what was specified in the workflow definition")
workflow_product = self.rrclient.read(workflow_product_id)
assertions(workflow_product.name.startswith(workflow_data_product_name), 'Nope: %s != %s' % (workflow_product.name, workflow_data_product_name))
log.debug( "Walking the associations to find the appropriate output data streams to validate the messages")
# Two workflow steps -> two intermediate/output data products expected.
workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
assertions(len(workflow_dp_ids) == 2 )
# Each data product is expected to carry exactly one stream; collect them for monitoring.
for dp_id in workflow_dp_ids:
stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
assertions(len(stream_ids) == 1 )
data_product_stream_ids.append(stream_ids[0])
log.debug( "data_product_stream_ids: %s" % data_product_stream_ids)
log.debug( "Starting the output stream listener to monitor to collect messages")
results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)
log.debug( "results::: %s" % results)
log.debug( "Stopping the workflow processes")
self.workflowclient.terminate_data_process_workflow(workflow_id, False, timeout=250) # Should test true at some point
示例3: TestVisualizationServiceIntegration
# Required module import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_subjects [as alias]
#......... (part of the method body is omitted here) .........
# NOTE(review): truncated excerpt of a test method from TestVisualizationServiceIntegration.
# Near-identical to the excerpt in example 1, differing only in the literal queue names
# ('user_queue_1'/'user_queue_2' here vs. USER_VISUALIZATION_QUEUE + suffix there).
# The enclosing `def`, imports (IonObject, RT, PRED) and fixture setup are not visible,
# and the original indentation was lost in extraction — the lines after the `for`
# statement below were presumably indented under it; verify against the original source.
# and test to make sure the subscription to the queues is working correctly
assertions = self.assertTrue
# Build the workflow definition
workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Viz_Test_Workflow',description='A workflow to test collection of multiple data products in queues')
workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name
#-------------------------------------------------------------------------------------------------------------------------
#Add a transformation process definition for salinity
#-------------------------------------------------------------------------------------------------------------------------
ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False) #Don't persist the intermediate data product
configuration = {'stream_name' : 'salinity'}
workflow_step_obj.configuration = configuration
workflow_def_obj.workflow_steps.append(workflow_step_obj)
#Create it in the resource registry
workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)
# One workflow step was appended above, so exactly one hasDataProcessDefinition
# association is expected on the stored definition.
aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
assertions(len(aids) == 1 )
#The list of data product streams to monitor
data_product_stream_ids = list()
#Create the input data product
ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
data_product_stream_ids.append(ctd_stream_id)
#Create and start the workflow
workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=30)
# find_subjects walks the association backwards: given the output product (object),
# return the Workflow resource(s) pointing at it via hasOutputProduct.
workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
assertions(len(workflow_output_ids) == 1 )
#Walk the associations to find the appropriate output data streams to validate the messages
workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
assertions(len(workflow_dp_ids) == 1 )
# Each workflow data product is expected to carry exactly one stream; collect them.
for dp_id in workflow_dp_ids:
stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
assertions(len(stream_ids) == 1 )
data_product_stream_ids.append(stream_ids[0])
# Now for each of the data_product_stream_ids create a queue and pipe their data to the queue
user_queue_name1 = 'user_queue_1'
user_queue_name2 = 'user_queue_2'
# use idempotency to create queues
xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
self.addCleanup(xq1.delete)
xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
self.addCleanup(xq2.delete)
# Purge any residue from prior runs so message counts start from zero.
xq1.purge()
xq2.purge()
# the create_subscription call takes a list of stream_ids so create temp ones
dp_stream_id1 = list()
dp_stream_id1.append(data_product_stream_ids[0])
dp_stream_id2 = list()
dp_stream_id2.append(data_product_stream_ids[1])