本文整理汇总了Python中ion.services.dm.utility.test.parameter_helper.ParameterHelper.fill_parsed_rdt方法的典型用法代码示例。如果您正苦于以下问题:Python ParameterHelper.fill_parsed_rdt方法的具体用法?Python ParameterHelper.fill_parsed_rdt怎么用?Python ParameterHelper.fill_parsed_rdt使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ion.services.dm.utility.test.parameter_helper.ParameterHelper
的用法示例。
在下文中一共展示了ParameterHelper.fill_parsed_rdt方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_coverage_transform
# 需要导入模块: from ion.services.dm.utility.test.parameter_helper import ParameterHelper [as 别名]
# 或者: from ion.services.dm.utility.test.parameter_helper.ParameterHelper import fill_parsed_rdt [as 别名]
def test_coverage_transform(self):
    """Round-trip a parsed-CTD granule through ingestion and replay.

    Publishes one filled record dictionary onto a persisted stream, waits
    for the coverage to register the data, then retrieves the dataset and
    verifies both the raw fields (time, temp) and the derived L1/L2
    products (conductivity_L1, temp_L1, pressure_L1, density, salinity).
    """
    param_helper = ParameterHelper(self.dataset_management, self.addCleanup)
    pdict_id = param_helper.create_parsed()

    # Stream definition and stream backed by the parsed parameter dictionary.
    stream_def_id = self.pubsub_management.create_stream_definition(
        'ctd parsed', parameter_dictionary_id=pdict_id)
    self.addCleanup(self.pubsub_management.delete_stream_definition, stream_def_id)
    stream_id, stream_route = self.pubsub_management.create_stream(
        'example',
        exchange_point=self.exchange_point_name,
        stream_definition_id=stream_def_id)
    self.addCleanup(self.pubsub_management.delete_stream, stream_id)

    # Persist the stream into a freshly created dataset.
    ingestion_config_id = self.get_ingestion_config()
    dataset_id = self.create_dataset(pdict_id)
    self.ingestion_management.persist_data_stream(
        stream_id=stream_id,
        ingestion_configuration_id=ingestion_config_id,
        dataset_id=dataset_id)
    self.addCleanup(self.ingestion_management.unpersist_data_stream,
                    stream_id, ingestion_config_id)

    # Publish a filled granule and block until the dataset reports it.
    publisher = StandaloneStreamPublisher(stream_id, stream_route)
    rdt = param_helper.get_rdt(stream_def_id)
    param_helper.fill_parsed_rdt(rdt)
    dataset_monitor = DatasetMonitor(dataset_id)
    self.addCleanup(dataset_monitor.stop)
    publisher.publish(rdt.to_granule())
    self.assertTrue(dataset_monitor.wait())

    # Replay the persisted data and compare against what was published.
    replay_granule = self.data_retriever.retrieve(dataset_id)
    rdt_out = RecordDictionaryTool.load_from_granule(replay_granule)
    np.testing.assert_array_almost_equal(rdt_out['time'], rdt['time'])
    np.testing.assert_array_almost_equal(rdt_out['temp'], rdt['temp'])
    np.testing.assert_allclose(rdt_out['conductivity_L1'], np.array([42.914]))
    np.testing.assert_allclose(rdt_out['temp_L1'], np.array([20.]))
    np.testing.assert_allclose(rdt_out['pressure_L1'], np.array([3.068]))
    np.testing.assert_allclose(rdt_out['density'],
                               np.array([1021.7144739593881], dtype='float32'))
    np.testing.assert_allclose(rdt_out['salinity'],
                               np.array([30.935132729668283], dtype='float32'))
示例2: TestTransformWorker
# 需要导入模块: from ion.services.dm.utility.test.parameter_helper import ParameterHelper [as 别名]
# 或者: from ion.services.dm.utility.test.parameter_helper.ParameterHelper import fill_parsed_rdt [as 别名]
class TestTransformWorker(IonIntegrationTestCase):
def setUp(self):
    """Start a capability container and wire up the service clients used
    by the transform-worker tests."""
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')

    # Stand-in process so process-scoped clients have an owning process.
    test_process = TransformWorkerTestProcess()

    node = self.container.node
    self.dataset_management_client = DatasetManagementServiceClient(node=node)
    self.pubsub_client = PubsubManagementServiceClient(node=node)
    self.dataproductclient = DataProductManagementServiceClient(node=node)
    self.dataprocessclient = DataProcessManagementServiceClient(node=node)
    self.processdispatchclient = ProcessDispatcherServiceClient(node=node)
    self.damsclient = DataAcquisitionManagementServiceClient(node=node)
    self.rrclient = ResourceRegistryServiceClient(node=node)
    self.imsclient = InstrumentManagementServiceProcessClient(node=node, process=test_process)

    self.time_dom, self.spatial_dom = time_series_domain()
    self.ph = ParameterHelper(self.dataset_management_client, self.addCleanup)
    # How long to wait on endpoints before giving up (seconds).
    self.wait_time = CFG.get_safe('endpoint.receive.timeout', 10)
def push_granule(self, data_product_id):
    """Publish one filled granule to *data_product_id* and block until the
    associated dataset confirms its arrival."""
    dataset_ids, _ = self.rrclient.find_objects(
        data_product_id, PRED.hasDataset, id_only=True)
    monitor = DatasetMonitor(dataset_ids[0])

    rdt = self.ph.rdt_for_data_product(data_product_id)
    self.ph.fill_parsed_rdt(rdt)
    self.ph.publish_rdt_to_data_product(data_product_id, rdt)

    assert monitor.wait()
    monitor.stop()
@attr('LOCOINT')
@unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
def test_transform_worker(self):
# test that a data process (type: data-product-in / data-product-out) can be defined and launched.
# verify that the output granule fields are correctly populated
# test that the input and output data products are linked to facilitate provenance
self.dp_list = []
self.data_process_objs = []
self._output_stream_ids = []
self.granule_verified = Event()
self.worker_assigned_event_verified = Event()
self.dp_created_event_verified = Event()
self.heartbeat_event_verified = Event()
self.parameter_dict_id = self.dataset_management_client.read_parameter_dictionary_by_name(name='ctd_parsed_param_dict', id_only=True)
# create the StreamDefinition
self.stream_def_id = self.pubsub_client.create_stream_definition(name='stream_def', parameter_dictionary_id=self.parameter_dict_id)
self.addCleanup(self.pubsub_client.delete_stream_definition, self.stream_def_id)
# create the DataProduct that is the input to the data processes
input_dp_obj = IonObject( RT.DataProduct, name='input_data_product', description='input test stream',
temporal_domain = self.time_dom.dump(), spatial_domain = self.spatial_dom.dump())
self.input_dp_id = self.dataproductclient.create_data_product(data_product=input_dp_obj, stream_definition_id=self.stream_def_id)
# retrieve the Stream for this data product
stream_ids, assoc_ids = self.rrclient.find_objects(self.input_dp_id, PRED.hasStream, RT.Stream, True)
self.stream_id = stream_ids[0]
self.start_event_listener()
# create the DPD, DataProcess and output DataProduct
dataprocessdef_id, dataprocess_id, dataproduct_id = self.create_data_process()
self.dp_list.append(dataprocess_id)
# validate the repository for data product algorithms persists the new resources NEW SA-1
# create_data_process call created one of each
dpd_ids, _ = self.rrclient.find_resources(restype=OT.DataProcessDefinition, id_only=False)
# there will be more than one because of the DPDs that represent the PFs in the data product above
self.assertTrue(dpd_ids is not None)
dp_ids, _ = self.rrclient.find_resources(restype=OT.DataProcess, id_only=False)
# only one DP because the PFs that are in the code dataproduct above are not activated yet.
self.assertEquals(len(dp_ids), 1)
# validate the name and version label NEW SA - 2
dataprocessdef_obj = self.dataprocessclient.read_data_process_definition(dataprocessdef_id)
self.assertEqual(dataprocessdef_obj.version_label, '1.0a')
self.assertEqual(dataprocessdef_obj.name, 'add_arrays')
# validate that the DPD has an attachment NEW SA - 21
attachment_ids, assoc_ids = self.rrclient.find_objects(dataprocessdef_id, PRED.hasAttachment, RT.Attachment, True)
self.assertEqual(len(attachment_ids), 1)
attachment_obj = self.rrclient.read_attachment(attachment_ids[0])
log.debug('attachment: %s', attachment_obj)
# validate that the data process resource has input and output data products associated
# L4-CI-SA-RQ-364 and NEW SA-3
#.........这里部分代码省略.........