本文整理汇总了Python中interface.services.dm.idata_retriever_service.DataRetrieverServiceClient.retrieve_last_granule方法的典型用法代码示例。如果您正苦于以下问题:Python DataRetrieverServiceClient.retrieve_last_granule方法的具体用法?Python DataRetrieverServiceClient.retrieve_last_granule怎么用?Python DataRetrieverServiceClient.retrieve_last_granule使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类interface.services.dm.idata_retriever_service.DataRetrieverServiceClient的用法示例。
在下文中一共展示了DataRetrieverServiceClient.retrieve_last_granule方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: TestDMEnd2End
# 需要导入模块: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as 别名]
# 或者: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import retrieve_last_granule [as 别名]
#.........这里部分代码省略.........
'CTDL2SalinityTransformAlgorithm',
kwargs=dict(params=sal_stream_def_id))
rdt = RecordDictionaryTool.load_from_granule(granule)
for i in rdt['salinity']:
self.assertNotEquals(i,0)
def test_last_granule(self):
    """Verify retrieve_last_granule / retrieve_last_data_points return the newest data."""
    # ------------------------------------------------------------------
    # Wire up a stream definition, stream, ingestion config and dataset
    # ------------------------------------------------------------------
    pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
    stream_def_id = self.pubsub_management.create_stream_definition('ctd parsed', parameter_dictionary_id=pdict_id)
    stream_id, route = self.pubsub_management.create_stream('last_granule', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
    config_id = self.get_ingestion_config()
    dataset_id = self.create_dataset(pdict_id)
    self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id)

    # ------------------------------------------------------------------
    # Make sure the datastore exists before anything gets published
    # ------------------------------------------------------------------
    self.get_datastore(dataset_id)

    self.publish_hifi(stream_id, route, 0)
    self.publish_hifi(stream_id, route, 1)
    self.wait_until_we_have_enough_granules(dataset_id, 2)  # two granules suffice

    def check_last_granule():
        # The second published granule carries times 10..19
        granule = self.data_retriever.retrieve_last_granule(dataset_id)
        record_dict = RecordDictionaryTool.load_from_granule(granule)
        match = record_dict['time'] == np.arange(10) + 10
        if isinstance(match, bool):
            return False
        return match.all()

    self.assertTrue(poll(check_last_granule))

    def check_last_points():
        # The final five data points should span times 15..19
        granule = self.data_retriever.retrieve_last_data_points(dataset_id, 5)
        record_dict = RecordDictionaryTool.load_from_granule(granule)
        match = record_dict['time'] == np.arange(15, 20)
        if isinstance(match, bool):
            return False
        return match.all()

    self.assertTrue(poll(check_last_points))
def test_replay_with_parameters(self):
#--------------------------------------------------------------------------------
# Create the configurations and the dataset
#--------------------------------------------------------------------------------
# Get a precompiled parameter dictionary with basic ctd fields
示例2: VisualizationIntegrationTestHelper
# 需要导入模块: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as 别名]
# 或者: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import retrieve_last_granule [as 别名]
#.........这里部分代码省略.........
else:
#psd = PointSupplementStreamParser(stream_definition=SalinityTransform.outgoing_stream_def, stream_granule=message)
#log.info( psd.list_field_names())
# Test the handy info method for the names of fields in the stream def
#assertions('salinity' in psd.list_field_names())
# you have to know the name of the coverage in stream def
salinity = get_safe(rdt, 'salinity')
#salinity = psd.get_values('salinity')
log.info( 'salinity=' + str(numpy.nanmin(salinity)))
# Check to see if salinity has values
assertions(salinity != None)
assertions(isinstance(salinity, numpy.ndarray))
assertions(numpy.nanmin(salinity) > 0.0) # salinity should always be greater than 0
if first_salinity_values is None:
first_salinity_values = salinity.tolist()
else:
second_salinity_values = salinity.tolist()
assertions(len(first_salinity_values) == len(second_salinity_values))
for idx in range(0,len(first_salinity_values)):
assertions(first_salinity_values[idx]*2.0 == second_salinity_values[idx])
def validate_data_ingest_retrieve(self, dataset_id):
    """Validate that data was ingested into the dataset and can be retrieved.

    Checks the most recent granule for a 'salinity' field, then retrieves
    every granule for the dataset and asserts all salinity values are > 0.

    @param dataset_id  id of the dataset whose ingested data is verified
    """
    assertions = self.assertTrue
    self.data_retriever = DataRetrieverServiceClient(node=self.container.node)

    # Validate that data was ingested: the last granule must carry salinity.
    replay_granule = self.data_retriever.retrieve_last_granule(dataset_id)
    rdt = RecordDictionaryTool.load_from_granule(replay_granule)
    salinity = get_safe(rdt, 'salinity')
    # BUG FIX: was 'salinity != None' — comparing a numpy array with '!='
    # is elementwise, which makes the truth value ambiguous; identity test
    # is the correct None check.
    assertions(salinity is not None)

    # Retrieve all the granules from the database and check the values.
    replay_granule_all = self.data_retriever.retrieve(dataset_id)
    rdt = RecordDictionaryTool.load_from_granule(replay_granule_all)
    for k, v in rdt.iteritems():
        if k == 'salinity':
            for val in numpy.nditer(v):
                assertions(val > 0)  # salinity must always be positive
def create_salinity_data_process_definition(self):
    """Find or create the 'ctd_salinity' DataProcessDefinition.

    Looks up an existing definition first so repeated calls are idempotent;
    otherwise registers a new SalinityTransform process definition.

    @retval  an existing data process definition, if one was found
    """
    # Salinity: Data Process Definition
    # First look to see if it exists and if not, then create it
    dpd, _ = self.rrclient.find_resources(restype=RT.DataProcessDefinition, name='ctd_salinity')
    if len(dpd) > 0:
        return dpd[0]

    log.debug("Create data process definition SalinityTransform")
    dpd_obj = IonObject(RT.DataProcessDefinition,
                        name='ctd_salinity',
                        description='create a salinity data product',
                        module='ion.processes.data.transforms.ctd.ctd_L2_salinity',
                        class_name='SalinityTransform',
                        process_source='SalinityTransform source code here...')
    try:
        # BUG FIX: original read 'except Excpetion' (typo) — any failure here
        # would have raised NameError instead of reporting the real cause.
        ctd_L2_salinity_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
    except Exception as ex:
        self.fail("failed to create new SalinityTransform data process definition: %s" %ex)
    # NOTE(review): the visible source never returns ctd_L2_salinity_dprocdef_id
    # on the creation path — possibly truncated by the page scrape; confirm
    # against the original file before relying on the return value.
示例3: TestDMEnd2End
# 需要导入模块: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as 别名]
# 或者: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import retrieve_last_granule [as 别名]
#.........这里部分代码省略.........
# --------------------------------------------------------------------------------
# There is a race condition sometimes between the services and the process for
# the creation of the datastore and it's instance, this ensures the datastore
# exists before the process is even subscribing to data.
self.get_datastore(dataset_id)
self.publish_fake_data(stream_id)
self.wait_until_we_have_enough_granules(dataset_id, 2) # I just need two
replay_granule = self.data_retriever.retrieve(dataset_id, {"start_time": 0, "end_time": 6})
rdt = RecordDictionaryTool.load_from_granule(replay_granule)
comp = rdt["time"] == np.array([0, 1, 2, 3, 4, 5])
try:
log.info("Compared granule: %s", replay_granule.__dict__)
log.info("Granule tax: %s", replay_granule.taxonomy.__dict__)
except:
pass
self.assertTrue(comp.all())
def test_last_granule(self):
    """retrieve_last_granule should return only the most recently published granule."""
    # --------------------------------------------------------------------------------
    # Configuration: stream, ingestion config and dataset
    # --------------------------------------------------------------------------------
    stream_id = self.pubsub_management.create_stream()
    config_id = self.get_ingestion_config()
    dataset_id = self.create_dataset()
    self.ingestion_management.persist_data_stream(
        stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id
    )

    # --------------------------------------------------------------------------------
    # Create the datastore before anything is published
    # --------------------------------------------------------------------------------
    self.get_datastore(dataset_id)

    self.publish_hifi(stream_id, 0)
    self.publish_hifi(stream_id, 1)
    self.wait_until_we_have_enough_granules(dataset_id, 2)  # two granules suffice

    # The second granule carries times 10..19
    last_granule = self.data_retriever.retrieve_last_granule(dataset_id)
    record_dict = RecordDictionaryTool.load_from_granule(last_granule)
    expected_times = np.arange(10) + 10
    self.assertTrue((record_dict["time"] == expected_times).all())
def test_replay_with_parameters(self):
    """retrieve() with a parameter list should return only the requested fields."""
    # --------------------------------------------------------------------------------
    # Configuration and dataset
    # --------------------------------------------------------------------------------
    stream_id = self.pubsub_management.create_stream()
    config_id = self.get_ingestion_config()
    dataset_id = self.create_dataset()
    self.ingestion_management.persist_data_stream(
        stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id
    )

    # --------------------------------------------------------------------------------
    # Coerce the datastore into existence (beats race condition)
    # --------------------------------------------------------------------------------
    self.get_datastore(dataset_id)

    self.launch_producer(stream_id)
    self.wait_until_we_have_enough_granules(dataset_id, 4)

    query = {"start_time": 0, "end_time": 20, "parameters": ["time", "temp"]}
    granule = self.data_retriever.retrieve(dataset_id=dataset_id, query=query)
    record_dict = RecordDictionaryTool.load_from_granule(granule)

    time_matches = np.arange(20) == record_dict["time"]
    self.assertTrue(time_matches.all(), "%s" % record_dict.pretty_print())
    # Only the two requested parameters should come back
    self.assertEquals(set(record_dict.iterkeys()), set(["time", "temp"]))
def test_repersist_data(self):
    """Data published around an unpersist/persist cycle must all remain retrievable."""
    stream_id = self.pubsub_management.create_stream()
    config_id = self.get_ingestion_config()
    dataset_id = self.create_dataset()
    self.ingestion_management.persist_data_stream(
        stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id
    )
    self.get_datastore(dataset_id)

    # First batch while persistence is on
    self.publish_hifi(stream_id, 0)
    self.publish_hifi(stream_id, 1)
    self.wait_until_we_have_enough_granules(dataset_id, 2)

    # Toggle persistence off and back on
    self.ingestion_management.unpersist_data_stream(stream_id=stream_id, ingestion_configuration_id=config_id)
    self.ingestion_management.persist_data_stream(
        stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id
    )

    # Second batch after re-enabling persistence
    self.publish_hifi(stream_id, 2)
    self.publish_hifi(stream_id, 3)
    self.wait_until_we_have_enough_granules(dataset_id, 4)

    # All forty samples (times 0..39) should have been persisted
    granule = self.data_retriever.retrieve(dataset_id)
    record_dict = RecordDictionaryTool.load_from_granule(granule)
    matches = record_dict["time"] == np.arange(0, 40)
    self.assertTrue(matches.all(), "Uh-oh: %s" % record_dict["time"])