This article collects typical usage examples of the Python method interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient.find_objects. If you are wondering what ResourceRegistryServiceProcessClient.find_objects does, how to call it, or want to see it used in real code, the curated examples below should help. You can also browse further usage examples of the containing class, interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient.
A total of 9 code examples of the ResourceRegistryServiceProcessClient.find_objects method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
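Before diving into the examples, here is a minimal, hedged sketch of the call pattern they all share: find_objects walks associations in the resource registry from a subject resource to its related objects and returns a tuple of matches plus the association records. The helper function name, the PRED import path, and the process/data product values are illustrative assumptions, not taken from any specific example below.

from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient
from pyon.public import PRED  # predicate constants; import path assumed for illustration


def find_streams_for_data_product(process, data_product_id):
    # 'process' is assumed to be a running ION process with access to a container node.
    rr_client = ResourceRegistryServiceProcessClient(process=process, node=process.container.node)

    # Walk hasStream associations from the data product to its streams, returning IDs only.
    stream_ids, assocs = rr_client.find_objects(subject=data_product_id,
                                                predicate=PRED.hasStream,
                                                id_only=True)
    return stream_ids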
Example 1: on_start
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_objects [as alias]
def on_start(self):
    rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
    pubsub_cli = PubsubManagementServiceProcessClient(process=self, node=self.container.node)

    # Get the stream(s)
    data_product_id = self.CFG.get_safe('dispatcher.data_product_id', '')
    stream_ids, _ = rr_cli.find_objects(subject=data_product_id, predicate=PRED.hasStream, id_only=True)
    log.info('Got Stream Ids: "%s"', stream_ids)
    assert stream_ids, 'No streams found for this data product!'

    exchange_name = 'dispatcher_%s' % str(os.getpid())
    subscription_id = pubsub_cli.create_subscription(
        name='SampleSubscription',
        exchange_name=exchange_name,
        stream_ids=stream_ids,
        description='Sample Subscription Description'
    )

    stream_defs = {}

    def message_received(message, stream_route, stream_id):
        granule = message
        stream_id = granule.stream_resource_id
        data_stream_id = granule.data_stream_id
        data_stream = granule.identifiables[data_stream_id]
        tstamp = get_datetime(data_stream.timestamp.value)
        records = granule.identifiables['record_count'].value
        log.info('Received a message from stream %s with time stamp %s and %d records' % (stream_id, tstamp, records))

        if stream_id not in stream_defs:
            stream_defs[stream_id] = pubsub_cli.find_stream_definition(stream_id, id_only=False).container
        stream_def = stream_defs.get(stream_id)

        sp = PointSupplementStreamParser(stream_definition=stream_def, stream_granule=granule)

        last_data = {}
        for field in sp.list_field_names():
            last_data[field] = sp.get_values(field)[-1]

        log.info('Last values in the message: %s' % str(last_data))

    subscriber = StreamSubscriber(process=self, exchange_name=exchange_name, callback=message_received)
    subscriber.start()

    pubsub_cli.activate_subscription(subscription_id)
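Example 1 resolves the streams behind a data product via the hasStream predicate. The same traversal works with other predicates, as the later examples show. Below is a hedged sketch, reusing rr_cli from the example above, of resolving the Dataset behind a data product before retrieving its data (the pattern Example 3 relies on); in_dp_id is a placeholder.

# Positional form: subject, predicate, object type, id_only (placeholder data product ID).
ds_ids, _ = rr_cli.find_objects(in_dp_id, PRED.hasDataset, RT.Dataset, True)
if not ds_ids:
    log.warn('No Dataset associated with data product %s', in_dp_id)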
Example 2: TestWorkflowManagementIntegration
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_objects [as alias]
class TestWorkflowManagementIntegration(VisualizationIntegrationTestHelper):

    def setUp(self):
        # Start container
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Instantiate a process to represent the test
        process = WorkflowServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient = PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient = DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Not integrated for CEI')
    def test_SA_transform_components(self):

        assertions = self.assertTrue

        # The list of data product streams to monitor
        data_product_stream_ids = list()

        # Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        ###
        ### Setup the first transformation
        ###

        # Salinity: Data Process Definition
        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()

        l2_salinity_all_data_process_id, ctd_l2_salinity_output_dp_id = self.create_transform_process(ctd_L2_salinity_dprocdef_id, ctd_parsed_data_product_id, 'salinity')

        # Get the stream id for the transform outputs
        stream_ids, _ = self.rrclient.find_objects(ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0)
        sal_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_stream_id)

        ###
        ### Setup the second transformation
        ###

        # Salinity Doubler: Data Process Definition
        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()

        salinity_double_data_process_id, salinity_doubler_output_dp_id = self.create_transform_process(salinity_doubler_dprocdef_id, ctd_l2_salinity_output_dp_id, 'salinity')

        stream_ids, _ = self.rrclient.find_objects(salinity_doubler_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0)
        sal_dbl_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_dbl_stream_id)

        # Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        # Stop the transform processes
        self.dataprocessclient.deactivate_data_process(salinity_double_data_process_id)
        self.dataprocessclient.deactivate_data_process(l2_salinity_all_data_process_id)

        # Validate the data from each of the messages along the way
        self.validate_messages(results)

    @attr('LOCOINT')
    @attr('SMOKE')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Not integrated for CEI')
    def test_transform_workflow(self):

        assertions = self.assertTrue

        log.debug("Building the workflow definition")

        workflow_def_obj = IonObject(RT.WorkflowDefinition,
                                     name='Salinity_Test_Workflow',
                                     description='tests a workflow of multiple transform data processes')

        workflow_data_product_name = 'TEST-Workflow_Output_Product'  # Set a specific output product name
#......... part of the code is omitted here .........
Example 3: VizTransformMatplotlibGraphs
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_objects [as alias]
class VizTransformMatplotlibGraphs(TransformStreamPublisher, TransformEventListener, TransformStreamListener):
    """
    This class is used for instantiating worker processes that have subscriptions to data streams and convert
    incoming data from CDM format to Matplotlib graphs
    """
    output_bindings = ['graph_image_param_dict']
    event_timer_interval = None

    def on_start(self):
        #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

        self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process=self)

        self.stream_info = self.CFG.get_safe('process.publish_streams', {})
        self.stream_names = self.stream_info.keys()
        self.stream_ids = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods = self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(start_time="now", interval=self._str_to_secs(self.event_timer_interval),
                                                                         event_origin=event_origin, event_subtype="")

        super(VizTransformMatplotlibGraphs, self).on_start()

    # When the transform is used as a data process
    def recv_packet(self, packet, in_stream_route, in_stream_id):
        # Check to see if the class instance was set up as an event-triggered transform. If yes, skip the packet
        if self.event_timer_interval:
            return

        log.info('Received packet')
        mpl_data_granule = VizTransformMatplotlibGraphsAlgorithm.execute(packet, params=self.get_stream_definition())
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(mpl_data_granule)

    def get_stream_definition(self):
        stream_id = self.stream_ids[0]
        stream_def = self.pubsub_management.read_stream_definition(stream_id=stream_id)
        return stream_def._id

    def process_event(self, msg, headers):
        return

    def interval_timer_callback(self, *args, **kwargs):
        # Find out the input data product to this process
        in_dp_id = self.CFG.get_safe('in_dp_id')

        print " >>>>>>>>>>>>>> IN DP ID from cfg : ", in_dp_id

        # Get the dataset_id associated with the data_product. Need it to do the data retrieval
        ds_ids, _ = self.rrclient.find_objects(in_dp_id, PRED.hasDataset, RT.Dataset, True)
        if ds_ids is None or not ds_ids:
            return None

        # Retrieve data for the specified time interval. Set up the query from the passed config first
        query = {}
        param_list_str = self.CFG.get_safe('parameters')
        if param_list_str:
            query['parameters'] = param_list_str.split(', ')
            # Append time if not present in list of parameters
            if not 'time' in query['parameters']:
                query['parameters'].append('time')

        query['start_time'] = query['end_time'] = ntplib.system_to_ntp_time(time.time())  # Now
        query['stride_time'] = 1
        if self.CFG.get_safe('graph_time_period'):
            query['start_time'] = query['end_time'] - self._str_to_secs(self.CFG.get_safe('graph_time_period'))

        #print " >>>>>>>>>>>>>> QUERY = ", query

        #retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0],{'start_time':start_time,'end_time':end_time})
        retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0], query=query)

        # Add extra parameters to query passed in config that are not needed by data retrieval
        if self.CFG.get_safe('resolution'):
            query['resolution'] = self.CFG.get_safe('resolution')

        # Send the granule through the Algorithm code to get the matplotlib graphs
#......... part of the code is omitted here .........
Example 4: TestVisualizationServiceIntegration
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_objects [as alias]
#......... part of the code is omitted here .........
workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Viz_Test_Workflow', description='A workflow to test collection of multiple data products in queues')

workflow_data_product_name = 'TEST-Workflow_Output_Product'  # Set a specific output product name

#-------------------------------------------------------------------------------------------------------------------------
# Add a transformation process definition for salinity
#-------------------------------------------------------------------------------------------------------------------------

ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False)  # Don't persist the intermediate data product
configuration = {'stream_name': 'salinity'}
workflow_step_obj.configuration = configuration
workflow_def_obj.workflow_steps.append(workflow_step_obj)

# Create it in the resource registry
workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
assertions(len(aids) == 1)

# The list of data product streams to monitor
data_product_stream_ids = list()

# Create the input data product
ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
data_product_stream_ids.append(ctd_stream_id)

# Create and start the workflow
workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=30)

workflow_output_ids, _ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
assertions(len(workflow_output_ids) == 1)

# Walk the associations to find the appropriate output data streams to validate the messages
workflow_dp_ids, _ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
assertions(len(workflow_dp_ids) == 1)

for dp_id in workflow_dp_ids:
    stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
    assertions(len(stream_ids) == 1)
    data_product_stream_ids.append(stream_ids[0])

# Now for each of the data_product_stream_ids create a queue and pipe their data to the queue
user_queue_name1 = USER_VISUALIZATION_QUEUE + '1'
user_queue_name2 = USER_VISUALIZATION_QUEUE + '2'

# Use idempotency to create queues
xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
self.addCleanup(xq1.delete)
xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
self.addCleanup(xq2.delete)

xq1.purge()
xq2.purge()

# The create_subscription call takes a list of stream_ids, so create temp ones
dp_stream_id1 = list()
dp_stream_id1.append(data_product_stream_ids[0])
dp_stream_id2 = list()
dp_stream_id2.append(data_product_stream_ids[1])

salinity_subscription_id1 = self.pubsubclient.create_subscription(stream_ids=dp_stream_id1,
                                                                  exchange_name=user_queue_name1, name="user visualization queue1")

salinity_subscription_id2 = self.pubsubclient.create_subscription(stream_ids=dp_stream_id2,
Example 5: TestVisualizationServiceIntegration
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_objects [as alias]
#......... part of the code is omitted here .........
workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Viz_Test_Workflow', description='A workflow to test collection of multiple data products in queues')

workflow_data_product_name = 'TEST-Workflow_Output_Product'  # Set a specific output product name

#-------------------------------------------------------------------------------------------------------------------------
# Add a transformation process definition for salinity
#-------------------------------------------------------------------------------------------------------------------------

ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False)  # Don't persist the intermediate data product
configuration = {'stream_name': 'salinity'}
workflow_step_obj.configuration = configuration
workflow_def_obj.workflow_steps.append(workflow_step_obj)

# Create it in the resource registry
workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
assertions(len(aids) == 1)

# The list of data product streams to monitor
data_product_stream_ids = list()

# Create the input data product
ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
data_product_stream_ids.append(ctd_stream_id)

# Create and start the workflow
workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=30)

workflow_output_ids, _ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
assertions(len(workflow_output_ids) == 1)

# Walk the associations to find the appropriate output data streams to validate the messages
workflow_dp_ids, _ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
assertions(len(workflow_dp_ids) == 1)

for dp_id in workflow_dp_ids:
    stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
    assertions(len(stream_ids) == 1)
    data_product_stream_ids.append(stream_ids[0])

# Now for each of the data_product_stream_ids create a queue and pipe their data to the queue
user_queue_name1 = 'user_queue_1'
user_queue_name2 = 'user_queue_2'

# Use idempotency to create queues
xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
self.addCleanup(xq1.delete)
xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
self.addCleanup(xq2.delete)

xq1.purge()
xq2.purge()

# The create_subscription call takes a list of stream_ids, so create temp ones
dp_stream_id1 = list()
dp_stream_id1.append(data_product_stream_ids[0])
dp_stream_id2 = list()
dp_stream_id2.append(data_product_stream_ids[1])

salinity_subscription_id1 = self.pubsubclient.create_subscription(stream_ids=dp_stream_id1,
                                                                  exchange_name=user_queue_name1, name="user visualization queue1")

salinity_subscription_id2 = self.pubsubclient.create_subscription(stream_ids=dp_stream_id2,
Example 6: TestGovernanceInt
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_objects [as alias]
#......... part of the code is omitted here .........
req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header)

requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
self.assertEqual(len(requests), 2)

requests = self.org_client.find_requests(org2_id, request_status='Open', headers=self.sa_user_header)
self.assertEqual(len(requests), 1)

requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
self.assertEqual(len(requests), 2)

requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.RoleRequest, headers=user_header)
self.assertEqual(len(requests), 1)

requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
self.assertEqual(len(requests), 1)

ia_list, _ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
self.assertEqual(len(ia_list), 0)

ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The first Instrument Agent')

with self.assertRaises(Unauthorized) as cm:
    self.ims_client.create_instrument_agent(ia_obj)
self.assertIn('instrument_management(create_instrument_agent) has been denied', cm.exception.message)

with self.assertRaises(Unauthorized) as cm:
    self.ims_client.create_instrument_agent(ia_obj, headers=user_header)
self.assertIn('instrument_management(create_instrument_agent) has been denied', cm.exception.message)

# Manager approves request
self.org_client.approve_request(org2_id, req_id, headers=self.sa_user_header)
requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
self.assertEqual(len(requests), 0)

# User accepts request
self.org_client.accept_request(org2_id, req_id, headers=user_header)

# Refresh headers with new role
user_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(user_id))
user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles}

self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent2', description='The second Instrument Agent')
self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

ia_list, _ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
self.assertEqual(len(ia_list), 2)

# First make an acquire resource request with a non-enrolled user.
with self.assertRaises(BadRequest) as cm:
    req_id = self.org_client.request_acquire_resource(org2_id, self.system_actor._id, ia_list[0]._id, headers=self.sa_user_header)
self.assertIn('A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)', cm.exception.message)

req_id = self.org_client.request_acquire_resource(org2_id, user_id, ia_list[0]._id, headers=user_header)

requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
self.assertEqual(len(requests), 3)

requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
self.assertEqual(len(requests), 3)

requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.ResourceRequest, headers=user_header)
self.assertEqual(len(requests), 1)

requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
self.assertEqual(len(requests), 1)
self.assertEqual(requests[0]._id, req_id)

# Manager approves Instrument request
self.org_client.approve_request(org2_id, req_id, headers=self.sa_user_header)
requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
self.assertEqual(len(requests), 0)

# User accepts request
self.org_client.accept_request(org2_id, req_id, headers=user_header)

# Check commitments
commitments, _ = self.rr_client.find_objects(ia_list[0]._id, PRED.hasCommitment, RT.ResourceCommitment)
self.assertEqual(len(commitments), 1)

commitments, _ = self.rr_client.find_objects(user_id, PRED.hasCommitment, RT.ResourceCommitment)
self.assertEqual(len(commitments), 1)

# Release the resource
self.org_client.release_resource(org2_id, user_id, ia_list[0]._id, headers=self.sa_user_header, timeout=15)  # TODO - Refactor release_resource

# Check commitments
commitments, _ = self.rr_client.find_objects(ia_list[0]._id, PRED.hasCommitment, RT.ResourceCommitment)
self.assertEqual(len(commitments), 0)

commitments, _ = self.rr_client.find_objects(user_id, PRED.hasCommitment, RT.ResourceCommitment)
self.assertEqual(len(commitments), 0)
Example 7: TestGovernanceInt
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_objects [as alias]
#......... part of the code is omitted here .........
users = self.org_client.find_enrolled_users(org2_id, headers=self.sa_user_header)
self.assertEqual(len(users), 1)

req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header)

requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
self.assertEqual(len(requests), 2)

requests = self.org_client.find_requests(org2_id, request_status='Open', headers=self.sa_user_header)
self.assertEqual(len(requests), 1)

requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
self.assertEqual(len(requests), 2)

requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.RoleRequest, headers=user_header)
self.assertEqual(len(requests), 1)

requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
self.assertEqual(len(requests), 1)

ia_list = self.ims_client.find_instrument_agents()
self.assertEqual(len(ia_list), 0)

ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The first Instrument Agent')

with self.assertRaises(Unauthorized) as cm:
    self.ims_client.create_instrument_agent(ia_obj)
self.assertIn('instrument_management(create_instrument_agent) has been denied', cm.exception.message)

with self.assertRaises(Unauthorized) as cm:
    self.ims_client.create_instrument_agent(ia_obj, headers=user_header)
self.assertIn('instrument_management(create_instrument_agent) has been denied', cm.exception.message)

# Manager approves request
self.org_client.approve_request(org2_id, req_id, headers=self.sa_user_header)
requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
self.assertEqual(len(requests), 0)

# User accepts request
self.org_client.accept_request(org2_id, req_id, headers=user_header)

# Refresh headers with new role
user_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(user_id))
user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles}

self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent2', description='The second Instrument Agent')
self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

ia_list = self.ims_client.find_instrument_agents()
self.assertEqual(len(ia_list), 2)

# First make an acquire resource request with a non-enrolled user.
with self.assertRaises(BadRequest) as cm:
    req_id = self.org_client.request_acquire_resource(org2_id, self.system_actor._id, ia_list[0]._id, headers=self.sa_user_header)
self.assertIn('A precondition for this request has not been satisfied: is_enrolled(org_id,user_id) == True', cm.exception.message)

req_id = self.org_client.request_acquire_resource(org2_id, user_id, ia_list[0]._id, headers=user_header)

requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
self.assertEqual(len(requests), 3)

requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
self.assertEqual(len(requests), 3)

requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.ResourceRequest, headers=user_header)
self.assertEqual(len(requests), 1)

requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
self.assertEqual(len(requests), 1)
self.assertEqual(requests[0]._id, req_id)

# Manager approves Instrument request
self.org_client.approve_request(org2_id, req_id, headers=self.sa_user_header)
requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
self.assertEqual(len(requests), 0)

# User accepts request
self.org_client.accept_request(org2_id, req_id, headers=user_header)

# Check commitments
commitments, _ = self.rr_client.find_objects(ia_list[0]._id, PRED.hasCommitment, RT.ResourceCommitment)
self.assertEqual(len(commitments), 1)

commitments, _ = self.rr_client.find_objects(user_id, PRED.hasCommitment, RT.ResourceCommitment)
self.assertEqual(len(commitments), 1)

# Release the resource
self.org_client.release_resource(org2_id, user_id, ia_list[0]._id, headers=self.sa_user_header)

# Check commitments
commitments, _ = self.rr_client.find_objects(ia_list[0]._id, PRED.hasCommitment, RT.ResourceCommitment)
self.assertEqual(len(commitments), 0)

commitments, _ = self.rr_client.find_objects(user_id, PRED.hasCommitment, RT.ResourceCommitment)
self.assertEqual(len(commitments), 0)
Example 8: len
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_objects [as alias]
for r in requests:
    log.info('User Request: ' + str(r))

log.info("User accepts request")
if req_id is not None:
    org_client.accept_request(org2_id, req_id, headers={'ion-actor-id': user._id, 'ion-actor-roles': user_header_roles})

requests = org_client.find_user_requests(user._id, org2_id, headers={'ion-actor-id': user._id, 'ion-actor-roles': user_header_roles})
log.info("User Requests count: %d" % len(requests))
for r in requests:
    log.info('User Request: ' + str(r))

if req_id is not None:
    commitments, _ = rr_client.find_objects(ia_list[0]._id, PRED.hasCommitment, RT.ResourceCommitment)
    log.info("Resource Commitments: %d" % len(commitments))
    for c in commitments:
        log.info('Commitments: ' + str(c))

    log.info("Release resource commitment")
    org_client.release_resource(org2_id, user._id, ia_list[0]._id, headers={'ion-actor-id': system_actor._id, 'ion-actor-roles': sa_header_roles})

    commitments, _ = rr_client.find_objects(ia_list[0]._id, PRED.hasCommitment, RT.ResourceCommitment)
    log.info("Resource Commitments: %d" % len(commitments))
    for c in commitments:
        log.info('Commitments: ' + str(c))
Example 9: VizTransformProcForGoogleDT
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import find_objects [as alias]
class VizTransformProcForGoogleDT(TransformDataProcess):
    """
    This class is used for instantiating worker processes that have subscriptions to data streams and convert
    incoming data from CDM format to JSON style Google DataTables
    """
    def on_start(self):
        super(VizTransformProcForGoogleDT, self).on_start()
        self.initDataTableFlag = True

        # Need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(node=self.container.node)

        # Extract the various parameters passed
        self.out_stream_id = self.CFG.get('process').get('publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        out_stream_pub_registrar = StreamPublisherRegistrar(process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        stream_def_resource = self.rr_cli.read(self.stream_def_id)
        self.stream_def = stream_def_resource.container
        self.realtime_flag = False
        if self.CFG.get("realtime_flag") == "True":
            self.realtime_flag = True
        else:
            self.data_product_id_token = self.CFG.get('data_product_id_token')

        # Extract the stream_id associated with the DP. Needed later
        stream_ids, _ = self.rr_cli.find_objects(self.data_product_id, PRED.hasStream, None, True)
        self.stream_id = stream_ids[0]

        self.dataDescription = []
        self.dataTableContent = []
        self.varTuple = []
        self.total_num_of_records_recvd = 0

    def process(self, packet):
        log.debug('(%s): Received Viz Data Packet' % (self.name))

        element_count_id = 0
        expected_range = []

        psd = PointSupplementStreamParser(stream_definition=self.stream_def, stream_granule=packet)
        vardict = {}
        arrLen = None
        for varname in psd.list_field_names():
            vardict[varname] = psd.get_values(varname)
            arrLen = len(vardict[varname])

        # If it's the first time, init the dataTable
        if self.initDataTableFlag:
            # Create data description from the variables in the message
            self.dataDescription = [('time', 'datetime', 'time')]

            # Split the data string to extract variable names
            for varname in psd.list_field_names():
                if varname == 'time':
                    continue
                self.dataDescription.append((varname, 'number', varname))

            self.initDataTableFlag = False

        # Add the records to the datatable
        for i in xrange(arrLen):
            varTuple = []

            for varname, _, _ in self.dataDescription:
                val = float(vardict[varname][i])
                if varname == 'time':
                    varTuple.append(datetime.fromtimestamp(val))
                else:
                    varTuple.append(val)

            # Append the tuples to the data table
            self.dataTableContent.append(varTuple)

            if self.realtime_flag:
                # Maintain a sliding window for realtime transform processes
                realtime_window_size = 100
                if len(self.dataTableContent) > realtime_window_size:
                    # Always pop the first element till window size is what we want
                    while len(self.dataTableContent) > realtime_window_size:
                        self.dataTableContent.pop(0)

        if not self.realtime_flag:
            # This is the historical view part. Make a note of how many records were received
            data_stream_id = self.stream_def.data_stream_id
            element_count_id = self.stream_def.identifiables[data_stream_id].element_count_id
#......... part of the code is omitted here .........