This page collects typical usage examples of the Python method interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient.read. If you are unsure what ResourceRegistryServiceProcessClient.read does or how to use it, the curated examples below should help; you can also explore the containing class, interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient, for more context.
Eleven code examples of ResourceRegistryServiceProcessClient.read are shown below, sorted by popularity by default.
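Before the individual examples, here is a minimal sketch of the pattern they all share: construct the process client against the calling process (and, in most examples, the container's messaging node), then call read() with a resource ID. This sketch is illustrative only; resource_id is a placeholder, it assumes it runs inside a pyon process (so self and self.container exist), and the NotFound import path follows the pyon conventions visible in the examples below.

    from pyon.core.exception import NotFound
    from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient

    # Bind the client to the calling process and the container's messaging node
    rr_client = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
    try:
        resource = rr_client.read(resource_id)  # resource_id: placeholder ID string
    except NotFound:
        resource = None  # no resource is registered under that ID

read() returns the stored resource as an IonObject, so the examples below typically go on to inspect attributes such as .name or .container, or to modify the object and write it back with update().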
Example 1: on_start
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
def on_start(self):
    self.query = self.CFG.get_safe('process.query', {})
    self.delivery_format = self.CFG.get_safe('process.delivery_format', {})
    self.datastore_name = self.CFG.get_safe('process.datastore_name', 'dm_datastore')
    definition_id = self.delivery_format.get('definition_id')
    rrsc = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
    definition = rrsc.read(definition_id)
    self.definition = definition.container
    self.fields = self.delivery_format.get('fields', None)
    self.view_name = self.CFG.get_safe('process.view_name', 'datasets/dataset_by_id')
    self.key_id = self.CFG.get_safe('process.key_id')
    self.stream_id = self.CFG.get_safe('process.publish_streams.output')
    if not self.stream_id:
        raise Inconsistent('The replay process requires a stream id. Invalid configuration!')
    self.data_stream_id = self.definition.data_stream_id
    self.encoding_id = self.definition.identifiables[self.data_stream_id].encoding_id
    self.element_type_id = self.definition.identifiables[self.data_stream_id].element_type_id
    self.element_count_id = self.definition.identifiables[self.data_stream_id].element_count_id
    self.data_record_id = self.definition.identifiables[self.element_type_id].data_record_id
    self.field_ids = self.definition.identifiables[self.data_record_id].field_ids
    self.domain_ids = self.definition.identifiables[self.data_record_id].domain_ids
    self.time_id = self.definition.identifiables[self.domain_ids[0]].temporal_coordinate_vector_id
Example 2: resolve_org_negotiation
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
def resolve_org_negotiation():
    try:
        payload = request.form['payload']
        json_params = json_loads(str(payload))
        ion_actor_id, expiry = get_governance_info_from_request('serviceRequest', json_params)
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)
        headers = build_message_headers(ion_actor_id, expiry)
        # Extract negotiation-specific data (convert from unicode just in case;
        # these values are machine generated and unicode-specific chars are unexpected)
        verb = str(json_params['verb'])
        originator = str(json_params['originator'])
        negotiation_id = str(json_params['negotiation_id'])
        reason = str(json_params.get('reason', ''))
        proposal_status = None
        if verb.lower() == "accept":
            proposal_status = ProposalStatusEnum.ACCEPTED
        elif verb.lower() == "reject":
            proposal_status = ProposalStatusEnum.REJECTED
        proposal_originator = None
        if originator.lower() == "consumer":
            proposal_originator = ProposalOriginatorEnum.CONSUMER
        elif originator.lower() == "provider":
            proposal_originator = ProposalOriginatorEnum.PROVIDER
        rr_client = ResourceRegistryServiceProcessClient(process=service_gateway_instance)
        negotiation = rr_client.read(negotiation_id, headers=headers)
        new_negotiation_sap = Negotiation.create_counter_proposal(negotiation, proposal_status, proposal_originator)
        org_client = OrgManagementServiceProcessClient(process=service_gateway_instance)
        resp = org_client.negotiate(new_negotiation_sap, headers=headers)
        # Update the reason if one was supplied
        if reason:
            # Reload the negotiation because it has changed
            negotiation = rr_client.read(negotiation_id, headers=headers)
            negotiation.reason = reason
            rr_client.update(negotiation)
        return gateway_json_response(resp)
    except Exception as e:
        return build_error_response(e)
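Example 2 also shows the registry's read-modify-update round trip: the negotiation is re-read after it changes, mutated locally, and written back. Distilled, with negotiation_id and headers standing in for the values built in the example, the pattern is just:

    # Read-modify-update round trip (negotiation_id and headers are placeholders from Example 2)
    negotiation = rr_client.read(negotiation_id, headers=headers)  # fetch the current revision
    negotiation.reason = reason                                    # mutate the local copy
    rr_client.update(negotiation)                                  # persist the change

Re-reading before the update matters here because negotiate() has already modified the stored object; updating a stale copy would conflict with the newer revision.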
Example 3: get_resource
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
def get_resource(resource_id):
    ret = None
    client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
    if resource_id != '':
        try:
            result = client.read(resource_id)
            if not result:
                raise NotFound("No resource found for id: %s " % resource_id)
            ret = simplejson.dumps(result, default=ion_object_encoder)
        except Exception, e:
            ret = "Error: %s" % e
Example 4: get_resource
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
def get_resource(resource_id):
    result = None
    client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
    if resource_id != '':
        try:
            # Database object IDs are not unicode
            result = client.read(convert_unicode(resource_id))
            if not result:
                raise NotFound("No resource found for id: %s " % resource_id)
            return json_response({GATEWAY_RESPONSE: result})
        except Exception, e:
            return build_error_response(e)
Example 5: get_resource
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
def get_resource(resource_id):
    try:
        client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
        # Validate requesting user and expiry, and add governance headers
        ion_actor_id, expiry = get_governance_info_from_request()
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)
        # Database object IDs are not unicode
        result = client.read(convert_unicode(resource_id))
        if not result:
            raise NotFound("No resource found for id: %s " % resource_id)
        return gateway_json_response(result)
    except Exception, e:
        return build_error_response(e)
Example 6: upload_data
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
def upload_data(dataproduct_id):
    upload_folder = FileSystem.get_url(FS.TEMP, 'uploads')
    try:
        rr_client = ResourceRegistryServiceProcessClient(process=service_gateway_instance)
        object_store = Container.instance.object_store
        try:
            rr_client.read(str(dataproduct_id))
        except BadRequest:
            raise BadRequest('Unknown DataProduct ID %s' % dataproduct_id)
        # required fields
        upload = request.files['file']  # <input type=file name="file">
        # determine filetype
        filetype = _check_magic(upload)
        upload.seek(0)  # return to beginning for save
        if upload and filetype is not None:
            # upload file; run filename through werkzeug.secure_filename
            filename = secure_filename(upload.filename)
            path = os.path.join(upload_folder, filename)
            upload_time = time.time()
            upload.save(path)
            # register upload
            file_upload_context = {
                # TODO add dataproduct_id
                'name': 'User uploaded file %s' % filename,
                'filename': filename,
                'filetype': filetype,
                'path': path,
                'upload_time': upload_time,
                'status': 'File uploaded to server'
            }
            fuc_id, _ = object_store.create_doc(file_upload_context)
            # client to process dispatch
            pd_client = ProcessDispatcherServiceClient()
            # create process definition
            process_definition = ProcessDefinition(
                name='upload_data_processor',
                executable={
                    'module': 'ion.processes.data.upload.upload_data_processing',
                    'class': 'UploadDataProcessing'
                }
            )
            process_definition_id = pd_client.create_process_definition(process_definition)
            # create process
            process_id = pd_client.create_process(process_definition_id)
            # schedule process
            config = DotDict()
            config.process.fuc_id = fuc_id
            config.process.dp_id = dataproduct_id
            pid = pd_client.schedule_process(process_definition_id, process_id=process_id, configuration=config)
            log.info('UploadDataProcessing process created %s' % pid)
            # response contains only the FileUploadContext ID and the determined filetype for UX display
            resp = {'fuc_id': fuc_id}
            return gateway_json_response(resp)
        raise BadRequest('Invalid Upload')
    except Exception as e:
        return build_error_response(e)
Example 7: TestSchedulerService
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
class TestSchedulerService(IonIntegrationTestCase):

    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3
        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

    def tearDown(self):
        pass

    def now_utc(self):
        return time.mktime(datetime.datetime.utcnow().timetuple())

    def test_create_interval_timer(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3
        event_origin = "Interval_Timer_233"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)
        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 10
        id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
                                                 end_time=self.interval_timer_end_time,
                                                 event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)
        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)
        # Wait until two events are published
        gevent.sleep((self.interval_timer_interval * 2) + 1)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts = math.floor(time_diff / self.interval_timer_interval)
        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)
        # Wait until after the next interval to verify that the timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)
        # Validate the timer was correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)
        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)
        # Validate the number of timer counts
        self.assertEqual(self.interval_timer_count, timer_counts,
                         "Invalid number of timeouts generated. Number of timeouts: %d Expected timeouts: %d Timer id: %s "
                         % (self.interval_timer_count, timer_counts, id))

    def test_system_restart(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3
        event_origin = "Interval_Timer_4444"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.on_restart_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)
        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 20
        id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
                                                 end_time=self.interval_timer_end_time,
                                                 event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        # ... remainder of this code omitted ...
Example 8: TestWorkflowManagementIntegration
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
        # ... earlier part of this code omitted ...
        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep',
                                      data_process_definition_id=salinity_doubler_dprocdef_id)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)
        log.debug("Creating workflow def in the resource registry")
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)
        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 2)
        # The list of data product streams to monitor
        data_product_stream_ids = list()
        log.debug("Creating the input data product")
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)
        log.debug("Creating and starting the workflow")
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id,
            persist_workflow_data_product=True, output_data_product_name=workflow_data_product_name, timeout=300)
        workflow_output_ids, _ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)
        log.debug("Persisting the output product")
        #self.dataproductclient.activate_data_product_persistence(workflow_product_id)
        dataset_ids, _ = self.rrclient.find_objects(workflow_product_id, PRED.hasDataset, RT.Dataset, True)
        assertions(len(dataset_ids) == 1)
        dataset_id = dataset_ids[0]
        log.debug("Verifying the output data product name matches what was specified in the workflow definition")
        workflow_product = self.rrclient.read(workflow_product_id)
        assertions(workflow_product.name.startswith(workflow_data_product_name),
                   'Nope: %s != %s' % (workflow_product.name, workflow_data_product_name))
        log.debug("Walking the associations to find the appropriate output data streams to validate the messages")
        workflow_dp_ids, _ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 2)
        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])
        log.debug("data_product_stream_ids: %s" % data_product_stream_ids)
        log.debug("Starting the output stream listener to collect messages")
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)
        log.debug("results::: %s" % results)
        log.debug("Stopping the workflow processes")
        self.workflowclient.terminate_data_process_workflow(workflow_id, False, timeout=250)  # Should test True at some point
        log.debug("Making sure the Workflow object was removed")
        objs, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(objs) == 0)
        log.debug("Validating the data from each of the messages along the way")
        self.validate_messages(results)
        log.debug("Checking to see if dataset id = %s was persisted, and that it can be retrieved..." % dataset_id)
        self.validate_data_ingest_retrieve(dataset_id)
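Example 8 leans on the registry's association queries as much as on read(). As a distilled sketch (PRED and RT are the object-model constants these tests already import; product_id is a placeholder):

    # Objects reachable from a subject: (subject) -[predicate]-> (object); final True means id_only
    dataset_ids, _ = self.rrclient.find_objects(product_id, PRED.hasDataset, RT.Dataset, True)
    # Subjects of a given type pointing at an object
    workflow_ids, _ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, product_id, True)

With id_only=True both calls return resource IDs, which are then handed to read() to fetch the full objects, exactly as the example does with workflow_product_id.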
Example 9: TestSchedulerService
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
class TestSchedulerService(IonIntegrationTestCase):

    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3
        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

    def tearDown(self):
        pass

    def now_utc(self):
        return time.mktime(datetime.datetime.utcnow().timetuple())

    def test_create_interval_timer(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3
        event_origin = "Interval_Timer_233"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 10
        id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
                                                 end_time=self.interval_timer_end_time,
                                                 event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)
        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)
        # Wait until two events are published
        gevent.sleep((self.interval_timer_interval * 2) + 1)
        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)
        # Wait until after the next interval to verify that the timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)
        # Validate the timer was correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)
        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)
        # Validate only 2 events are published
        self.assertEqual(self.interval_timer_count, 2)

    def test_system_restart(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3
        event_origin = "Interval_Timer_4444"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.on_restart_callback, origin=event_origin)
        sub.start()
        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 20
        id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
                                                 end_time=self.interval_timer_end_time,
                                                 event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)
        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)
        # ... remainder of this code omitted ...
Example 10: VizTransformProcForMatplotlibGraphs
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
class VizTransformProcForMatplotlibGraphs(TransformDataProcess):
    """
    This class is used for instantiating worker processes that have subscriptions to data streams and convert
    incoming data from CDM format to Matplotlib graphs
    """

    def on_start(self):
        super(VizTransformProcForMatplotlibGraphs, self).on_start()
        #assert len(self.streams)==1
        self.initDataFlag = True
        self.graph_data = {}  # Stores a dictionary of variables : [list of values]
        # Need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(node=self.container.node)
        # Extract the various parameters passed to the transform process
        self.out_stream_id = self.CFG.get('process').get('publish_streams').get('visualization_service_submit_stream_id')
        # Create a publisher on the output stream
        #stream_route = self.pubsub_cli.register_producer(stream_id=self.out_stream_id)
        out_stream_pub_registrar = StreamPublisherRegistrar(process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(stream_id=self.out_stream_id)
        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        self.stream_def = self.rr_cli.read(self.stream_def_id)
        # Start the thread responsible for keeping track of time and generating graphs
        # Mutex for ensuring proper concurrent communications between threads
        self.lock = RLock()
        self.rendering_proc = Greenlet(self.rendering_thread)
        self.rendering_proc.start()

    def process(self, packet):
        log.debug('(%s): Received Viz Data Packet' % self.name)
        #log.debug('(%s): - Processing: %s' % (self.name,packet))
        # Parse the incoming data
        psd = PointSupplementStreamParser(stream_definition=self.stream_def.container, stream_granule=packet)
        # Rearrange incoming data into an easy-to-parse dictionary
        vardict = {}
        arrLen = None
        for varname in psd.list_field_names():
            vardict[varname] = psd.get_values(varname)
            arrLen = len(vardict[varname])
        if self.initDataFlag:
            # Look at the incoming packet and store
            for varname in psd.list_field_names():
                self.lock.acquire()
                self.graph_data[varname] = []
                self.lock.release()
            self.initDataFlag = False
        # If code reached here, the graph data storage has been initialized. Just add values to the list
        with self.lock:
            for varname in psd.list_field_names():
                self.graph_data[varname].extend(vardict[varname])

    def rendering_thread(self):
        from copy import deepcopy
        # Service Client
        # init Matplotlib
        fig = Figure()
        ax = fig.add_subplot(111)
        canvas = FigureCanvas(fig)
        imgInMem = StringIO.StringIO()
        while True:
            # Sleep for a pre-decided interval. Should be specifiable in a YAML file
            gevent.sleep(20)
            # If there's no data, wait
            # The lock is used here to make sure the entire vector exists start to finish; this assures
            # that the data won't change while it is being copied
            working_set = None
            with self.lock:
                if len(self.graph_data) == 0:
                    continue
                else:
                    working_set = deepcopy(self.graph_data)
            # For the simple case of testing, plot all time-variant variables one at a time
            xAxisVar = 'time'
            xAxisFloatData = working_set[xAxisVar]
            for varName, varData in working_set.iteritems():
                if varName == 'time' or varName == 'height' or varName == 'longitude' or varName == 'latitude':
                    continue
                # ... remainder of this code omitted ...
Example 11: VizTransformProcForGoogleDT
# Required import: from interface.services.coi.iresource_registry_service import ResourceRegistryServiceProcessClient [as alias]
# Or: from interface.services.coi.iresource_registry_service.ResourceRegistryServiceProcessClient import read [as alias]
class VizTransformProcForGoogleDT(TransformDataProcess):
    """
    This class is used for instantiating worker processes that have subscriptions to data streams and convert
    incoming data from CDM format to JSON style Google DataTables
    """

    def on_start(self):
        super(VizTransformProcForGoogleDT, self).on_start()
        self.initDataTableFlag = True
        # Need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(node=self.container.node)
        # Extract the various parameters passed
        self.out_stream_id = self.CFG.get('process').get('publish_streams').get('visualization_service_submit_stream_id')
        # Create a publisher on the output stream
        out_stream_pub_registrar = StreamPublisherRegistrar(process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(stream_id=self.out_stream_id)
        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        stream_def_resource = self.rr_cli.read(self.stream_def_id)
        self.stream_def = stream_def_resource.container
        self.realtime_flag = False
        if self.CFG.get("realtime_flag") == "True":
            self.realtime_flag = True
        else:
            self.data_product_id_token = self.CFG.get('data_product_id_token')
        # Extract the stream_id associated with the DP. Needed later
        stream_ids, _ = self.rr_cli.find_objects(self.data_product_id, PRED.hasStream, None, True)
        self.stream_id = stream_ids[0]
        self.dataDescription = []
        self.dataTableContent = []
        self.varTuple = []
        self.total_num_of_records_recvd = 0

    def process(self, packet):
        log.debug('(%s): Received Viz Data Packet' % (self.name))
        element_count_id = 0
        expected_range = []
        psd = PointSupplementStreamParser(stream_definition=self.stream_def, stream_granule=packet)
        vardict = {}
        arrLen = None
        for varname in psd.list_field_names():
            vardict[varname] = psd.get_values(varname)
            arrLen = len(vardict[varname])
        # If it's the first time, init the dataTable
        if self.initDataTableFlag:
            # Create data description from the variables in the message
            self.dataDescription = [('time', 'datetime', 'time')]
            # Split the data string to extract variable names
            for varname in psd.list_field_names():
                if varname == 'time':
                    continue
                self.dataDescription.append((varname, 'number', varname))
            self.initDataTableFlag = False
        # Add the records to the datatable
        for i in xrange(arrLen):
            varTuple = []
            for varname, _, _ in self.dataDescription:
                val = float(vardict[varname][i])
                if varname == 'time':
                    varTuple.append(datetime.fromtimestamp(val))
                else:
                    varTuple.append(val)
            # Append the tuples to the data table
            self.dataTableContent.append(varTuple)
        if self.realtime_flag:
            # Maintain a sliding window for realtime transform processes
            realtime_window_size = 100
            if len(self.dataTableContent) > realtime_window_size:
                # Always pop the first element till the window size is what we want
                while len(self.dataTableContent) > realtime_window_size:
                    self.dataTableContent.pop(0)
        if not self.realtime_flag:
            # This is the historical view part. Make a note of how many records were received
            data_stream_id = self.stream_def.data_stream_id
            element_count_id = self.stream_def.identifiables[data_stream_id].element_count_id
            # ... remainder of this code omitted ...