This page collects typical usage examples of the Python method interface.services.dm.idata_retriever_service.DataRetrieverServiceClient.start_replay_agent. If you are wondering what DataRetrieverServiceClient.start_replay_agent does, how to call it, or what real-world usages look like, the hand-picked code examples below should help. You can also explore the class this method belongs to, interface.services.dm.idata_retriever_service.DataRetrieverServiceClient, for more usage examples.
Four code examples of DataRetrieverServiceClient.start_replay_agent are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
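Before the full test cases, here is a minimal sketch of the call sequence that every example below follows. It is distilled from those examples and is not a standalone script: the helper name replay_dataset is invented for illustration, and self.data_retriever, self.pubsub_management, self.exchange_point_name, ReplayClient, dataset_id and stream_definition all come from test fixtures whose setup is omitted on this page.

    # Sketch only: assumes this sits inside a test class like TestDMEnd2End below,
    # with the service clients already created in setUp.
    def replay_dataset(self, dataset_id, stream_definition):
        # Create the outbound stream the replay process will publish on
        replay_stream_id, replay_route = self.pubsub_management.create_stream(
            'replay_out',
            exchange_point=self.exchange_point_name,
            stream_definition_id=stream_definition)
        # 1) Define the replay: which dataset to read and which stream to publish on
        replay_id, process_id = self.data_retriever.define_replay(
            dataset_id=dataset_id, stream_id=replay_stream_id)
        # 2) Launch the replay agent process
        self.data_retriever.start_replay_agent(replay_id)
        # 3) Drive the agent through a client bound to the launched process
        replay_client = ReplayClient(process_id)
        assert replay_client.await_agent_ready(5), 'The replay agent never launched'
        replay_client.start_replay()
        # ... consume granules published on replay_stream_id here ...
        # 4) Shut the agent down when the consumer is done
        self.data_retriever.cancel_replay_agent(replay_id)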
Example 1: TestDMEnd2End
# Required module import: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as alias]
# Or: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import start_replay_agent [as alias]
#......... part of the code omitted here .........
        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to retrieve
        #--------------------------------------------------------------------------------
        replay_data = self.data_retriever.retrieve(dataset_id)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        self.assertTrue((rdt['time'][:10] == np.arange(10)).all(),'%s' % rdt['time'][:])
        self.assertTrue((rdt['binary'][:10] == np.array(['hi']*10, dtype='object')).all())
        #--------------------------------------------------------------------------------
        # Now to try the streamed approach
        #--------------------------------------------------------------------------------
        replay_stream_id, replay_route = self.pubsub_management.create_stream('replay_out', exchange_point=self.exchange_point_name, stream_definition_id=stream_definition)
        self.replay_id, process_id = self.data_retriever.define_replay(dataset_id=dataset_id, stream_id=replay_stream_id)
        log.info('Process ID: %s', process_id)
        replay_client = ReplayClient(process_id)
        #--------------------------------------------------------------------------------
        # Create the listening endpoint for the retriever to talk to
        #--------------------------------------------------------------------------------
        sub_id = self.pubsub_management.create_subscription(self.exchange_space_name, stream_ids=[replay_stream_id])
        self.addCleanup(self.pubsub_management.delete_subscription, sub_id)
        self.pubsub_management.activate_subscription(sub_id)
        self.addCleanup(self.pubsub_management.deactivate_subscription, sub_id)
        subscriber = StandaloneStreamSubscriber(self.exchange_space_name, self.validate_granule_subscription)
        subscriber.start()
        self.addCleanup(subscriber.stop)
        self.data_retriever.start_replay_agent(self.replay_id)
        self.assertTrue(replay_client.await_agent_ready(5), 'The process never launched')
        replay_client.start_replay()
        self.assertTrue(self.event.wait(10))
        self.data_retriever.cancel_replay_agent(self.replay_id)
        #--------------------------------------------------------------------------------
        # Test the slicing capabilities
        #--------------------------------------------------------------------------------
        granule = self.data_retriever.retrieve(dataset_id=dataset_id, query={'tdoa':slice(0,5)})
        rdt = RecordDictionaryTool.load_from_granule(granule)
        b = rdt['time'] == np.arange(5)
        self.assertTrue(b.all() if not isinstance(b,bool) else b)

    def test_coverage_transform(self):
        ph = ParameterHelper(self.dataset_management, self.addCleanup)
        pdict_id = ph.create_parsed()
        stream_def_id = self.pubsub_management.create_stream_definition('ctd parsed', parameter_dictionary_id=pdict_id)
        self.addCleanup(self.pubsub_management.delete_stream_definition, stream_def_id)
        stream_id, route = self.pubsub_management.create_stream('example', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
        self.addCleanup(self.pubsub_management.delete_stream, stream_id)
        ingestion_config_id = self.get_ingestion_config()
        dataset_id = self.create_dataset(pdict_id)
        self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingestion_config_id, dataset_id=dataset_id)
Example 2: TestDMEnd2End
# Required module import: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as alias]
# Or: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import start_replay_agent [as alias]
#......... part of the code omitted here .........
        # --------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to retrieve
        # --------------------------------------------------------------------------------
        replay_data = self.data_retriever.retrieve(dataset_id)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        self.assertTrue((rdt["time"][:10] == np.arange(10)).all(), "%s" % rdt["time"][:])
        self.assertTrue((rdt["binary"][:10] == np.array(["hi"] * 10, dtype="object")).all())
        # --------------------------------------------------------------------------------
        # Now to try the streamed approach
        # --------------------------------------------------------------------------------
        replay_stream_id, replay_route = self.pubsub_management.create_stream(
            "replay_out", exchange_point=self.exchange_point_name, stream_definition_id=stream_definition
        )
        self.replay_id, process_id = self.data_retriever.define_replay(
            dataset_id=dataset_id, stream_id=replay_stream_id
        )
        log.info("Process ID: %s", process_id)
        replay_client = ReplayClient(process_id)
        # --------------------------------------------------------------------------------
        # Create the listening endpoint for the retriever to talk to
        # --------------------------------------------------------------------------------
        xp = self.container.ex_manager.create_xp(self.exchange_point_name)
        subscriber = StandaloneStreamSubscriber(self.exchange_space_name, self.validate_granule_subscription)
        self.queue_buffer.append(self.exchange_space_name)
        subscriber.start()
        subscriber.xn.bind(replay_route.routing_key, xp)
        self.data_retriever.start_replay_agent(self.replay_id)
        self.assertTrue(replay_client.await_agent_ready(5), "The process never launched")
        replay_client.start_replay()
        self.assertTrue(self.event.wait(10))
        subscriber.stop()
        self.data_retriever.cancel_replay_agent(self.replay_id)
        # --------------------------------------------------------------------------------
        # Test the slicing capabilities
        # --------------------------------------------------------------------------------
        granule = self.data_retriever.retrieve(dataset_id=dataset_id, query={"tdoa": slice(0, 5)})
        rdt = RecordDictionaryTool.load_from_granule(granule)
        b = rdt["time"] == np.arange(5)
        self.assertTrue(b.all() if not isinstance(b, bool) else b)
        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)
@unittest.skip("Doesnt work")
@attr("LOCOINT")
@unittest.skipIf(os.getenv("CEI_LAUNCH_TEST", False), "Skip test while in CEI LAUNCH mode")
def test_replay_pause(self):
# Get a precompiled parameter dictionary with basic ctd fields
pdict_id = self.dataset_management.read_parameter_dictionary_by_name("ctd_parsed_param_dict", id_only=True)
context_ids = self.dataset_management.read_parameter_contexts(pdict_id, id_only=True)
# Add a field that supports binary data input.
bin_context = ParameterContext("binary", param_type=ArrayType())
context_ids.append(self.dataset_management.create_parameter_context("binary", bin_context.dump()))
# Add another field that supports dictionary elements.
Example 3: TestDMEnd2End
# Required module import: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as alias]
# Or: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import start_replay_agent [as alias]
#......... part of the code omitted here .........
        self.launch_producer(stream_id)
        self.wait_until_we_have_enough_granules(dataset_id,40)
        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to retrieve
        #--------------------------------------------------------------------------------
        replay_data = self.data_retriever.retrieve(dataset_id)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        self.assertTrue((rdt['time'][:10] == np.arange(10)).all(),'%s' % rdt['time'][:])
        self.assertTrue((rdt['binary'][:10] == np.array(['hi']*10, dtype='object')).all())
        #--------------------------------------------------------------------------------
        # Now to try the streamed approach
        #--------------------------------------------------------------------------------
        replay_stream_id, replay_route = self.pubsub_management.create_stream('replay_out', exchange_point=self.exchange_point_name, stream_definition_id=stream_definition)
        self.replay_id, process_id = self.data_retriever.define_replay(dataset_id=dataset_id, stream_id=replay_stream_id)
        log.info('Process ID: %s', process_id)
        replay_client = ReplayClient(process_id)
        #--------------------------------------------------------------------------------
        # Create the listening endpoint for the retriever to talk to
        #--------------------------------------------------------------------------------
        xp = self.container.ex_manager.create_xp(self.exchange_point_name)
        subscriber = StandaloneStreamSubscriber(self.exchange_space_name, self.validate_granule_subscription)
        self.queue_buffer.append(self.exchange_space_name)
        subscriber.start()
        subscriber.xn.bind(replay_route.routing_key, xp)
        self.data_retriever.start_replay_agent(self.replay_id)
        self.assertTrue(replay_client.await_agent_ready(5), 'The process never launched')
        replay_client.start_replay()
        self.assertTrue(self.event.wait(10))
        subscriber.stop()
        self.data_retriever.cancel_replay_agent(self.replay_id)
        #--------------------------------------------------------------------------------
        # Test the slicing capabilities
        #--------------------------------------------------------------------------------
        granule = self.data_retriever.retrieve(dataset_id=dataset_id, query={'tdoa':slice(0,5)})
        rdt = RecordDictionaryTool.load_from_granule(granule)
        b = rdt['time'] == np.arange(5)
        self.assertTrue(b.all() if not isinstance(b,bool) else b)
        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)

    def test_coverage_transform(self):
        ph = ParameterHelper(self.dataset_management, self.addCleanup)
        pdict_id = ph.create_parsed()
        stream_def_id = self.pubsub_management.create_stream_definition('ctd parsed', parameter_dictionary_id=pdict_id)
        self.addCleanup(self.pubsub_management.delete_stream_definition, stream_def_id)
        stream_id, route = self.pubsub_management.create_stream('example', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
        self.addCleanup(self.pubsub_management.delete_stream, stream_id)
        ingestion_config_id = self.get_ingestion_config()
Example 4: TestDMEnd2End
# Required module import: from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient [as alias]
# Or: from interface.services.dm.idata_retriever_service.DataRetrieverServiceClient import start_replay_agent [as alias]
#......... part of the code omitted here .........
        context_ids.append(self.dataset_management.create_parameter_context('binary', bin_context.dump()))
        # Add another field that supports dictionary elements.
        rec_context = ParameterContext('records', param_type=RecordType())
        context_ids.append(self.dataset_management.create_parameter_context('records', rec_context.dump()))
        pdict_id = self.dataset_management.create_parameter_dictionary('replay_pdict', parameter_context_ids=context_ids, temporal_context='time')
        stream_def_id = self.pubsub_management.create_stream_definition('replay_stream', parameter_dictionary_id=pdict_id)
        replay_stream, replay_route = self.pubsub_management.create_stream('replay', 'xp1', stream_definition_id=stream_def_id)
        dataset_id = self.create_dataset(pdict_id)
        scov = DatasetManagementService._get_coverage(dataset_id)
        bb = CoverageCraft(scov)
        bb.rdt['time'] = np.arange(100)
        bb.rdt['temp'] = np.random.random(100) + 30
        bb.sync_with_granule()
        DatasetManagementService._persist_coverage(dataset_id, bb.coverage) # This invalidates it for multi-host configurations
        # Set up the subscriber to verify the data
        subscriber = StandaloneStreamSubscriber(self.exchange_space_name, self.validate_granule_subscription)
        xp = self.container.ex_manager.create_xp('xp1')
        self.queue_buffer.append(self.exchange_space_name)
        subscriber.start()
        subscriber.xn.bind(replay_route.routing_key, xp)
        # Set up the replay agent and the client wrapper
        # 1) Define the Replay (dataset and stream to publish on)
        self.replay_id, process_id = self.data_retriever.define_replay(dataset_id=dataset_id, stream_id=replay_stream)
        # 2) Make a client to interact with the process (optionally provide it a process to bind with)
        replay_client = ReplayClient(process_id)
        # 3) Start the agent (launch the process)
        self.data_retriever.start_replay_agent(self.replay_id)
        # 4) Start replaying...
        replay_client.start_replay()
        # Wait till we get some granules
        self.assertTrue(self.event.wait(5))
        # We got granules, pause the replay, clear the queue and allow the process to finish consuming
        replay_client.pause_replay()
        gevent.sleep(1)
        subscriber.xn.purge()
        self.event.clear()
        # Make sure there are no remaining messages being consumed
        self.assertFalse(self.event.wait(1))
        # Resume the replay and wait until we start getting granules again
        replay_client.resume_replay()
        self.assertTrue(self.event.wait(5))
        # Stop the replay, clear the queues
        replay_client.stop_replay()
        gevent.sleep(1)
        subscriber.xn.purge()
        self.event.clear()
        # Make sure that it did indeed stop
        self.assertFalse(self.event.wait(1))
        subscriber.stop()

    @attr('SMOKE')