本文整理汇总了Python中pyon.util.containers.DotDict.granule_counter方法的典型用法代码示例。如果您正苦于以下问题:Python DotDict.granule_counter方法的具体用法?Python DotDict.granule_counter怎么用?Python DotDict.granule_counter使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pyon.util.containers.DotDict
的用法示例。
在下文中一共展示了DotDict.granule_counter方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: load_data_process
# 需要导入模块: from pyon.util.containers import DotDict [as 别名]
# 或者: from pyon.util.containers.DotDict import granule_counter [as 别名]
def load_data_process(self, stream_id=""):
    """Fetch the data process registered for *stream_id*, cache it locally,
    wire up its stream mapping and publisher, and return its id in a list.

    Returns a single-element list containing the data process id.
    """
    dpms_client = DataProcessManagementServiceClient()
    details = DotDict(dpms_client.read_data_process_for_stream(stream_id) or {})
    process_id = details.dataprocess_id

    # metrics attribute: number of granules handled so far for this process
    details.granule_counter = 0
    self._dataprocesses[process_id] = details

    # register this process under its input stream id, if one is declared
    if 'in_stream_id' in details:
        self._streamid_map.setdefault(details['in_stream_id'], []).append(process_id)

    # todo: add transform worker id
    self.event_publisher.publish_event(origin=process_id, origin_type='DataProcess', status=DataProcessStatusType.NORMAL,
                                       description='data process loaded into transform worker')

    # create a publisher for the output stream
    self.create_publisher(process_id, details)
    return [process_id]
示例2: load_data_process
# 需要导入模块: from pyon.util.containers import DotDict [as 别名]
# 或者: from pyon.util.containers.DotDict import granule_counter [as 别名]
def load_data_process(self, stream_id=""):
    """Fetch every data process registered for *stream_id*, cache each one
    locally, wire up stream mappings and publishers, and return their ids.

    The management service returns a list of data-process info dicts; one
    id is collected per entry.
    """
    dpms_client = DataProcessManagementServiceClient()
    loaded_ids = []
    # the service returns a list of data process info dicts
    for raw_details in dpms_client.read_data_process_for_stream(stream_id):
        details = DotDict(raw_details or {})
        process_id = details.dataprocess_id

        # metrics attribute: number of granules handled so far for this process
        details.granule_counter = 0
        self._dataprocesses[process_id] = details
        log.debug('load_data_process dataprocess_id: %s', process_id)
        log.debug('load_data_process dataprocess_details: %s', details)

        # validate details
        # if no outstream info is available, log a warning but proceed — the
        # transform function may still publish an event
        if not details.out_stream_def or not details.output_param:
            log.warning('No output stream details provided for data process %s, will not publish a granule', process_id)

        # register this process under its input stream id, if one is declared
        if 'in_stream_id' in details:
            self._streamid_map.setdefault(details['in_stream_id'], []).append(process_id)

        # todo: add transform worker id
        self.event_publisher.publish_event(origin=process_id, origin_type='DataProcess', status=DataProcessStatusType.NORMAL,
                                           description='data process loaded into transform worker')

        # create a publisher for the output stream
        self.create_publisher(process_id, details)
        loaded_ids.append(process_id)
    return loaded_ids