This page collects typical usage examples of the Python method interface.services.dm.idata_retriever_service.DataRetrieverServiceClient.retrieve_last_data_points. If you have been wondering how to call DataRetrieverServiceClient.retrieve_last_data_points, or what it is useful for, the curated code examples below should help. You can also browse further usage examples for the containing class, interface.services.dm.idata_retriever_service.DataRetrieverServiceClient.
The following presents 5 code examples of DataRetrieverServiceClient.retrieve_last_data_points, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
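Before the full test classes, here is a minimal sketch of the call itself. Everything around the DataRetrieverServiceClient call is an assumption for illustration: it presumes a running pyon container (e.g. inside an IonIntegrationTestCase), dataset_id is a placeholder for a real Dataset resource id, and the RecordDictionaryTool import path is assumed rather than taken from the examples.

# Minimal usage sketch (assumptions noted above; not taken from the examples below).
from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient
from ion.services.dm.utility.granule import RecordDictionaryTool  # assumed import path

data_retriever = DataRetrieverServiceClient()
dataset_id = 'some_dataset_resource_id'  # placeholder: id of an already-persisted dataset

# retrieve_last_data_points returns a Granule holding the last n points of the dataset.
granule = data_retriever.retrieve_last_data_points(dataset_id, 5)
rdt = RecordDictionaryTool.load_from_granule(granule)
print rdt['time']  # Python 2 print statement, matching the examples below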
Example 1: TestDMEnd2End
# Required import: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as alias]
# Or: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import retrieve_last_data_points [as alias]
# ... portions of this code omitted ...
        for i in xrange(4):
            self.publish_hifi(stream_id, route, i)

    def start_ingestion(self, stream_id, dataset_id):
        '''
        Starts ingestion/persistence for a given dataset
        '''
        ingest_config_id = self.get_ingestion_config()
        self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id, dataset_id=dataset_id)

    def stop_ingestion(self, stream_id):
        ingest_config_id = self.get_ingestion_config()
        self.ingestion_management.unpersist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id)

    def validate_granule_subscription(self, msg, route, stream_id):
        '''
        Validation for granule format
        '''
        if msg == {}:
            return
        rdt = RecordDictionaryTool.load_from_granule(msg)
        log.info('%s', rdt.pretty_print())
        self.assertIsInstance(msg, Granule, 'Message is improperly formatted. (%s)' % type(msg))
        self.event.set()
    def wait_until_we_have_enough_granules(self, dataset_id='', data_size=40):
        '''
        Loops until there is a sufficient amount of data in the dataset
        '''
        done = False
        with gevent.Timeout(40):
            while not done:
                extents = self.dataset_management.dataset_extents(dataset_id, 'time')[0]
                granule = self.data_retriever.retrieve_last_data_points(dataset_id, 1)
                rdt = RecordDictionaryTool.load_from_granule(granule)
                if rdt['time'] and rdt['time'][0] != rdt._pdict.get_context('time').fill_value and extents >= data_size:
                    done = True
                else:
                    gevent.sleep(0.2)
    #--------------------------------------------------------------------------------
    # Test Methods
    #--------------------------------------------------------------------------------

    def test_dm_end_2_end(self):
        #--------------------------------------------------------------------------------
        # Set up a stream and have a mock instrument (producer) send data
        #--------------------------------------------------------------------------------
        self.event.clear()

        # Get a precompiled parameter dictionary with basic ctd fields
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        context_ids = self.dataset_management.read_parameter_contexts(pdict_id, id_only=True)

        # Add a field that supports binary data input.
        bin_context = ParameterContext('binary', param_type=ArrayType())
        context_ids.append(self.dataset_management.create_parameter_context('binary', bin_context.dump()))

        # Add another field that supports dictionary elements.
        rec_context = ParameterContext('records', param_type=RecordType())
        context_ids.append(self.dataset_management.create_parameter_context('records', rec_context.dump()))

        pdict_id = self.dataset_management.create_parameter_dictionary('replay_pdict', parameter_context_ids=context_ids, temporal_context='time')

        stream_definition = self.pubsub_management.create_stream_definition('ctd data', parameter_dictionary_id=pdict_id)
Example 2: TestDMEnd2End
# Required import: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as alias]
# Or: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import retrieve_last_data_points [as alias]
# ... portions of this code omitted ...
        '''
        ingest_config_id = self.get_ingestion_config()
        self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id, dataset_id=dataset_id)

    def stop_ingestion(self, stream_id):
        ingest_config_id = self.get_ingestion_config()
        self.ingestion_management.unpersist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id)

    def stop_all_ingestion(self):
        try:
            [self.stop_ingestion(sid) for sid in self.streams]
        except:
            pass

    def validate_granule_subscription(self, msg, route, stream_id):
        '''
        Validation for granule format
        '''
        if msg == {}:
            return
        rdt = RecordDictionaryTool.load_from_granule(msg)
        log.info('%s', rdt.pretty_print())
        self.assertIsInstance(msg, Granule, 'Message is improperly formatted. (%s)' % type(msg))
        self.event.set()

    def wait_until_we_have_enough_granules(self, dataset_id='', data_size=40):
        '''
        Loops until there is a sufficient amount of data in the dataset
        '''
        done = False
        with gevent.Timeout(40):
            while not done:
                extents = self.dataset_management.dataset_extents(dataset_id, 'time')[0]
                granule = self.data_retriever.retrieve_last_data_points(dataset_id, 1)
                rdt = RecordDictionaryTool.load_from_granule(granule)
                if rdt['time'] and rdt['time'][0] != rdt._pdict.get_context('time').fill_value and extents >= data_size:
                    done = True
                else:
                    gevent.sleep(0.2)

    #--------------------------------------------------------------------------------
    # Test Methods
    #--------------------------------------------------------------------------------

    @attr('SMOKE')
    def test_dm_end_2_end(self):
        #--------------------------------------------------------------------------------
        # Set up a stream and have a mock instrument (producer) send data
        #--------------------------------------------------------------------------------
        self.event.clear()

        # Get a precompiled parameter dictionary with basic ctd fields
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        context_ids = self.dataset_management.read_parameter_contexts(pdict_id, id_only=True)

        # Add a field that supports binary data input.
        bin_context = ParameterContext('binary', param_type=ArrayType())
        context_ids.append(self.dataset_management.create_parameter_context('binary', bin_context.dump()))

        # Add another field that supports dictionary elements.
        rec_context = ParameterContext('records', param_type=RecordType())
        context_ids.append(self.dataset_management.create_parameter_context('records', rec_context.dump()))

        pdict_id = self.dataset_management.create_parameter_dictionary('replay_pdict', parameter_context_ids=context_ids, temporal_context='time')
Example 3: ExhaustiveParameterTest
# Required import: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as alias]
# Or: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import retrieve_last_data_points [as alias]
class ExhaustiveParameterTest(IonIntegrationTestCase):
    def setUp(self):
        self.i = 0
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2params.yml')

        self.dataset_management = DatasetManagementServiceClient()
        self.pubsub_management = PubsubManagementServiceClient()
        self.data_product_management = DataProductManagementServiceClient()
        self.resource_registry = self.container.resource_registry
        self.data_retriever = DataRetrieverServiceClient()

        pdicts, _ = self.resource_registry.find_resources(restype='ParameterDictionary', id_only=False)
        self.dp_ids = []
        for pdict in pdicts:
            stream_def_id = self.pubsub_management.create_stream_definition(pdict.name, parameter_dictionary_id=pdict._id)
            dp_id = self.make_dp(stream_def_id)
            if dp_id: self.dp_ids.append(dp_id)

    def make_dp(self, stream_def_id):
        stream_def = self.resource_registry.read(stream_def_id)
        dp_obj = DataProduct(
            name=stream_def.name,
            description=stream_def.name,
            processing_level_code='Parsed_Canonical')

        data_product_id = self.data_product_management.create_data_product(dp_obj, stream_definition_id=stream_def_id)
        self.data_product_management.activate_data_product_persistence(data_product_id)
        return data_product_id

    def fill_values(self, ptype, size):
        if isinstance(ptype, ArrayType):
            return ['blah'] * size
        elif isinstance(ptype, QuantityType):
            return np.sin(np.arange(size, dtype=ptype.value_encoding) * 2 * np.pi / 3)
        elif isinstance(ptype, RecordType):
            return [{'record': 'ok'}] * size
        elif isinstance(ptype, ConstantRangeType):
            return (1, 1000)
        elif isinstance(ptype, ConstantType):
            return np.dtype(ptype.value_encoding).type(1)
        elif isinstance(ptype, CategoryType):
            return ptype.categories.keys()[0]
        else:
            return

    def wait_until_we_have_enough_granules(self, dataset_id='', data_size=40):
        '''
        Loops until there is a sufficient amount of data in the dataset
        '''
        done = False
        with gevent.Timeout(40):
            while not done:
                granule = self.data_retriever.retrieve_last_data_points(dataset_id, 1)
                rdt = RecordDictionaryTool.load_from_granule(granule)
                extents = self.dataset_management.dataset_extents(dataset_id, rdt._pdict.temporal_parameter_name)[0]
                if rdt[rdt._pdict.temporal_parameter_name] and rdt[rdt._pdict.temporal_parameter_name][0] != rdt._pdict.get_context(rdt._pdict.temporal_parameter_name).fill_value and extents >= data_size:
                    done = True
                else:
                    gevent.sleep(0.2)

    def write_to_data_product(self, data_product_id):
        dataset_ids, _ = self.resource_registry.find_objects(data_product_id, 'hasDataset', id_only=True)
        dataset_id = dataset_ids.pop()
        stream_ids, _ = self.resource_registry.find_objects(data_product_id, 'hasStream', id_only=True)
        stream_id = stream_ids.pop()
        stream_def_ids, _ = self.resource_registry.find_objects(stream_id, 'hasStreamDefinition', id_only=True)
        stream_def_id = stream_def_ids.pop()
        route = self.pubsub_management.read_stream_route(stream_id)
        rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        time_param = rdt._pdict.temporal_parameter_name
        if time_param is None:
            print '%s has no temporal parameter' % self.resource_registry.read(data_product_id).name
            return
        rdt[time_param] = np.arange(40)

        for field in rdt.fields:
            if field == rdt._pdict.temporal_parameter_name:
                continue
            rdt[field] = self.fill_values(rdt._pdict.get_context(field).param_type, 40)

        publisher = StandaloneStreamPublisher(stream_id, route)
        publisher.publish(rdt.to_granule())

        self.wait_until_we_have_enough_granules(dataset_id, 40)
        granule = self.data_retriever.retrieve(dataset_id)
        rdt_out = RecordDictionaryTool.load_from_granule(granule)

        bad = []
# ... portions of this code omitted ...
Example 4: TestDMEnd2End
# Required import: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as alias]
# Or: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import retrieve_last_data_points [as alias]
# ... portions of this code omitted ...
        )

    def stop_ingestion(self, stream_id):
        ingest_config_id = self.get_ingestion_config()
        self.ingestion_management.unpersist_data_stream(
            stream_id=stream_id, ingestion_configuration_id=ingest_config_id
        )

    def stop_all_ingestion(self):
        try:
            [self.stop_ingestion(sid) for sid in self.streams]
        except:
            pass

    def validate_granule_subscription(self, msg, route, stream_id):
        """
        Validation for granule format
        """
        if msg == {}:
            return
        rdt = RecordDictionaryTool.load_from_granule(msg)
        log.info("%s", rdt.pretty_print())
        self.assertIsInstance(msg, Granule, "Message is improperly formatted. (%s)" % type(msg))
        self.event.set()

    def wait_until_we_have_enough_granules(self, dataset_id="", data_size=40):
        """
        Loops until there is a sufficient amount of data in the dataset
        """
        done = False
        with gevent.Timeout(40):
            while not done:
                extents = self.dataset_management.dataset_extents(dataset_id, "time")[0]
                granule = self.data_retriever.retrieve_last_data_points(dataset_id, 1)
                rdt = RecordDictionaryTool.load_from_granule(granule)
                if rdt["time"] and rdt["time"][0] != rdt._pdict.get_context("time").fill_value and extents >= data_size:
                    done = True
                else:
                    gevent.sleep(0.2)

    # --------------------------------------------------------------------------------
    # Test Methods
    # --------------------------------------------------------------------------------

    @attr("SMOKE")
    def test_dm_end_2_end(self):
        # --------------------------------------------------------------------------------
        # Set up a stream and have a mock instrument (producer) send data
        # --------------------------------------------------------------------------------
        self.event.clear()

        # Get a precompiled parameter dictionary with basic ctd fields
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name("ctd_parsed_param_dict", id_only=True)
        context_ids = self.dataset_management.read_parameter_contexts(pdict_id, id_only=True)

        # Add a field that supports binary data input.
        bin_context = ParameterContext("binary", param_type=ArrayType())
        context_ids.append(self.dataset_management.create_parameter_context("binary", bin_context.dump()))

        # Add another field that supports dictionary elements.
        rec_context = ParameterContext("records", param_type=RecordType())
        context_ids.append(self.dataset_management.create_parameter_context("records", rec_context.dump()))

        pdict_id = self.dataset_management.create_parameter_dictionary(
            "replay_pdict", parameter_context_ids=context_ids, temporal_context="time"
        )
Example 5: TestDMEnd2End
# Required import: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as alias]
# Or: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import retrieve_last_data_points [as alias]
# ... portions of this code omitted ...
        stream_id, route = self.pubsub_management.create_stream('last_granule', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
        config_id = self.get_ingestion_config()
        dataset_id = self.create_dataset(pdict_id)
        self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id)

        #--------------------------------------------------------------------------------
        # Create the datastore first,
        #--------------------------------------------------------------------------------
        self.get_datastore(dataset_id)

        self.publish_hifi(stream_id, route, 0)
        self.publish_hifi(stream_id, route, 1)

        self.wait_until_we_have_enough_granules(dataset_id, 2)  # I just need two

        success = False
        def verifier():
            replay_granule = self.data_retriever.retrieve_last_granule(dataset_id)
            rdt = RecordDictionaryTool.load_from_granule(replay_granule)
            comp = rdt['time'] == np.arange(10) + 10
            if not isinstance(comp, bool):
                return comp.all()
            return False
        success = poll(verifier)
        self.assertTrue(success)

        success = False
        def verify_points():
            replay_granule = self.data_retriever.retrieve_last_data_points(dataset_id, 5)
            rdt = RecordDictionaryTool.load_from_granule(replay_granule)
            comp = rdt['time'] == np.arange(15, 20)
            if not isinstance(comp, bool):
                return comp.all()
            return False
        success = poll(verify_points)
        self.assertTrue(success)

    def test_replay_with_parameters(self):
        #--------------------------------------------------------------------------------
        # Create the configurations and the dataset
        #--------------------------------------------------------------------------------

        # Get a precompiled parameter dictionary with basic ctd fields
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        context_ids = self.dataset_management.read_parameter_contexts(pdict_id, id_only=True)

        # Add a field that supports binary data input.
        bin_context = ParameterContext('binary', param_type=ArrayType())
        context_ids.append(self.dataset_management.create_parameter_context('binary', bin_context.dump()))

        # Add another field that supports dictionary elements.
        rec_context = ParameterContext('records', param_type=RecordType())
        context_ids.append(self.dataset_management.create_parameter_context('records', rec_context.dump()))

        pdict_id = self.dataset_management.create_parameter_dictionary('replay_pdict', parameter_context_ids=context_ids, temporal_context='time')

        stream_def_id = self.pubsub_management.create_stream_definition('replay_stream', parameter_dictionary_id=pdict_id)