This article collects typical usage examples of the Python method interface.services.dm.iingestion_management_service.IngestionManagementServiceClient.unpersist_data_stream. If you are unsure how to call IngestionManagementServiceClient.unpersist_data_stream, the curated examples below should help; you can also explore the containing class, interface.services.dm.iingestion_management_service.IngestionManagementServiceClient, for further context.
The following presents 5 code examples of the IngestionManagementServiceClient.unpersist_data_stream method, sorted by popularity by default.
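Before the full test classes below, here is a minimal sketch of the usual pairing of persist_data_stream and unpersist_data_stream, assuming a running capability container with the DM services deployed. The three id strings are hypothetical placeholders; the test classes below instead obtain the ingestion configuration id through a get_ingestion_config() helper.
from interface.services.dm.iingestion_management_service import IngestionManagementServiceClient

ingestion_management = IngestionManagementServiceClient()
stream_id = 'stream_id'                # hypothetical: id of an existing stream
ingest_config_id = 'ingest_config_id'  # hypothetical: id of an ingestion configuration
dataset_id = 'dataset_id'              # hypothetical: id of the target dataset
# Start persisting granules published on the stream into the dataset.
ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id, dataset_id=dataset_id)
# ... publish some granules on the stream ...
# Stop persistence for the stream; data already written stays in the dataset.
ingestion_management.unpersist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id)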
Example 1: TestDMEnd2End
# Required import: from interface.services.dm.iingestion_management_service import IngestionManagementServiceClient [as alias]
# Or: from interface.services.dm.iingestion_management_service.IngestionManagementServiceClient import unpersist_data_stream [as alias]
# ... (part of the code is omitted here) ...
self.i += 1
return stream_id, route, stream_def_id, dataset_id
def publish_hifi(self,stream_id,stream_route,offset=0):
'''
Publish deterministic data
'''
pub = StandaloneStreamPublisher(stream_id, stream_route)
stream_def = self.pubsub_management.read_stream_definition(stream_id=stream_id)
stream_def_id = stream_def._id
rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
rdt['time'] = np.arange(10) + (offset * 10)
rdt['temp'] = np.arange(10) + (offset * 10)
pub.publish(rdt.to_granule())
def publish_fake_data(self,stream_id, route):
'''
Make four granules
'''
for i in xrange(4):
self.publish_hifi(stream_id,route,i)
def start_ingestion(self, stream_id, dataset_id):
'''
Starts ingestion/persistence for a given dataset
'''
ingest_config_id = self.get_ingestion_config()
self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id, dataset_id=dataset_id)
def stop_ingestion(self, stream_id):
ingest_config_id = self.get_ingestion_config()
self.ingestion_management.unpersist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id)
def validate_granule_subscription(self, msg, route, stream_id):
'''
Validation for granule format
'''
if msg == {}:
return
rdt = RecordDictionaryTool.load_from_granule(msg)
log.info('%s', rdt.pretty_print())
self.assertIsInstance(msg,Granule,'Message is improperly formatted. (%s)' % type(msg))
self.event.set()
def wait_until_we_have_enough_granules(self, dataset_id='',data_size=40):
'''
Loops until there is a sufficient amount of data in the dataset
'''
done = False
with gevent.Timeout(40):
while not done:
extents = self.dataset_management.dataset_extents(dataset_id, 'time')[0]
granule = self.data_retriever.retrieve_last_data_points(dataset_id, 1)
rdt = RecordDictionaryTool.load_from_granule(granule)
if rdt['time'] and rdt['time'][0] != rdt._pdict.get_context('time').fill_value and extents >= data_size:
done = True
else:
gevent.sleep(0.2)
#--------------------------------------------------------------------------------
# Test Methods
#--------------------------------------------------------------------------------
Example 2: TestDMEnd2End
# Required import: from interface.services.dm.iingestion_management_service import IngestionManagementServiceClient [as alias]
# Or: from interface.services.dm.iingestion_management_service.IngestionManagementServiceClient import unpersist_data_stream [as alias]
# ... (part of the code is omitted here) ...
self.i += 1
return stream_id, route, stream_def_id, dataset_id
def publish_hifi(self,stream_id,stream_route,offset=0):
'''
Publish deterministic data
'''
pub = StandaloneStreamPublisher(stream_id, stream_route)
stream_def = self.pubsub_management.read_stream_definition(stream_id=stream_id)
stream_def_id = stream_def._id
rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
rdt['time'] = np.arange(10) + (offset * 10)
rdt['temp'] = np.arange(10) + (offset * 10)
pub.publish(rdt.to_granule())
def publish_fake_data(self,stream_id, route):
'''
Make four granules
'''
for i in xrange(4):
self.publish_hifi(stream_id,route,i)
def start_ingestion(self, stream_id, dataset_id):
'''
Starts ingestion/persistence for a given dataset
'''
ingest_config_id = self.get_ingestion_config()
self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id, dataset_id=dataset_id)
def stop_ingestion(self, stream_id):
ingest_config_id = self.get_ingestion_config()
self.ingestion_management.unpersist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingest_config_id)
def stop_all_ingestion(self):
try:
[self.stop_ingestion(sid) for sid in self.streams]
except:
pass
def validate_granule_subscription(self, msg, route, stream_id):
'''
Validation for granule format
'''
if msg == {}:
return
rdt = RecordDictionaryTool.load_from_granule(msg)
log.info('%s', rdt.pretty_print())
self.assertIsInstance(msg,Granule,'Message is improperly formatted. (%s)' % type(msg))
self.event.set()
def wait_until_we_have_enough_granules(self, dataset_id='',data_size=40):
'''
Loops until there is a sufficient amount of data in the dataset
'''
done = False
with gevent.Timeout(40):
while not done:
extents = self.dataset_management.dataset_extents(dataset_id, 'time')[0]
granule = self.data_retriever.retrieve_last_data_points(dataset_id, 1)
rdt = RecordDictionaryTool.load_from_granule(granule)
if rdt['time'] and rdt['time'][0] != rdt._pdict.get_context('time').fill_value and extents >= data_size:
done = True
else:
gevent.sleep(0.2)
Example 3: TestDMEnd2End
# Required import: from interface.services.dm.iingestion_management_service import IngestionManagementServiceClient [as alias]
# Or: from interface.services.dm.iingestion_management_service.IngestionManagementServiceClient import unpersist_data_stream [as alias]
# ... (part of the code is omitted here) ...
success = False
def verify_points():
replay_granule = self.data_retriever.retrieve_last_data_points(dataset_id,5)
rdt = RecordDictionaryTool.load_from_granule(replay_granule)
comp = rdt['time'] == np.arange(15,20)
if not isinstance(comp,bool):
return comp.all()
return False
success = poll(verify_points)
self.assertTrue(success)
def test_replay_with_parameters(self):
#--------------------------------------------------------------------------------
# Create the configurations and the dataset
#--------------------------------------------------------------------------------
# Get a precompiled parameter dictionary with basic ctd fields
pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict',id_only=True)
context_ids = self.dataset_management.read_parameter_contexts(pdict_id, id_only=True)
# Add a field that supports binary data input.
bin_context = ParameterContext('binary', param_type=ArrayType())
context_ids.append(self.dataset_management.create_parameter_context('binary', bin_context.dump()))
# Add another field that supports dictionary elements.
rec_context = ParameterContext('records', param_type=RecordType())
context_ids.append(self.dataset_management.create_parameter_context('records', rec_context.dump()))
pdict_id = self.dataset_management.create_parameter_dictionary('replay_pdict', parameter_context_ids=context_ids, temporal_context='time')
stream_def_id = self.pubsub_management.create_stream_definition('replay_stream', parameter_dictionary_id=pdict_id)
stream_id, route = self.pubsub_management.create_stream('replay_with_params', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
config_id = self.get_ingestion_config()
dataset_id = self.create_dataset(pdict_id)
self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id)
#--------------------------------------------------------------------------------
# Coerce the datastore into existence (beats race condition)
#--------------------------------------------------------------------------------
self.get_datastore(dataset_id)
self.launch_producer(stream_id)
self.wait_until_we_have_enough_granules(dataset_id,4)
query = {
'start_time': 0,
'end_time': 20,
'stride_time' : 2,
'parameters': ['time','temp']
}
retrieved_data = self.data_retriever.retrieve(dataset_id=dataset_id,query=query)
rdt = RecordDictionaryTool.load_from_granule(retrieved_data)
comp = np.arange(0,20,2) == rdt['time']
self.assertTrue(comp.all(),'%s' % rdt.pretty_print())
self.assertEquals(set(rdt.iterkeys()), set(['time','temp']))
extents = self.dataset_management.dataset_extents(dataset_id=dataset_id, parameters=['time','temp'])
self.assertTrue(extents['time']>=20)
self.assertTrue(extents['temp']>=20)
def test_repersist_data(self):
pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict',id_only=True)
stream_def_id = self.pubsub_management.create_stream_definition(name='ctd', parameter_dictionary_id=pdict_id)
stream_id, route = self.pubsub_management.create_stream(name='repersist', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
config_id = self.get_ingestion_config()
dataset_id = self.create_dataset(pdict_id)
self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id)
self.get_datastore(dataset_id)
self.publish_hifi(stream_id,route,0)
self.publish_hifi(stream_id,route,1)
self.wait_until_we_have_enough_granules(dataset_id,2)
self.ingestion_management.unpersist_data_stream(stream_id=stream_id,ingestion_configuration_id=config_id)
self.ingestion_management.persist_data_stream(stream_id=stream_id,ingestion_configuration_id=config_id,dataset_id=dataset_id)
self.publish_hifi(stream_id,route,2)
self.publish_hifi(stream_id,route,3)
self.wait_until_we_have_enough_granules(dataset_id,4)
success = False
with gevent.timeout.Timeout(5):
while not success:
replay_granule = self.data_retriever.retrieve(dataset_id)
rdt = RecordDictionaryTool.load_from_granule(replay_granule)
comp = rdt['time'] == np.arange(0,40)
if not isinstance(comp,bool):
success = comp.all()
gevent.sleep(1)
self.assertTrue(success)
Example 4: TestDMEnd2End
# Required import: from interface.services.dm.iingestion_management_service import IngestionManagementServiceClient [as alias]
# Or: from interface.services.dm.iingestion_management_service.IngestionManagementServiceClient import unpersist_data_stream [as alias]
# ... (part of the code is omitted here) ...
def publish_hifi(self, stream_id, stream_route, offset=0):
"""
Publish deterministic data
"""
pub = StandaloneStreamPublisher(stream_id, stream_route)
stream_def = self.pubsub_management.read_stream_definition(stream_id=stream_id)
stream_def_id = stream_def._id
rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
rdt["time"] = np.arange(10) + (offset * 10)
rdt["temp"] = np.arange(10) + (offset * 10)
pub.publish(rdt.to_granule())
def publish_fake_data(self, stream_id, route):
"""
Make four granules
"""
for i in xrange(4):
self.publish_hifi(stream_id, route, i)
def start_ingestion(self, stream_id, dataset_id):
"""
Starts ingestion/persistence for a given dataset
"""
ingest_config_id = self.get_ingestion_config()
self.ingestion_management.persist_data_stream(
stream_id=stream_id, ingestion_configuration_id=ingest_config_id, dataset_id=dataset_id
)
def stop_ingestion(self, stream_id):
ingest_config_id = self.get_ingestion_config()
self.ingestion_management.unpersist_data_stream(
stream_id=stream_id, ingestion_configuration_id=ingest_config_id
)
def stop_all_ingestion(self):
try:
[self.stop_ingestion(sid) for sid in self.streams]
except:
pass
def validate_granule_subscription(self, msg, route, stream_id):
"""
Validation for granule format
"""
if msg == {}:
return
rdt = RecordDictionaryTool.load_from_granule(msg)
log.info("%s", rdt.pretty_print())
self.assertIsInstance(msg, Granule, "Message is improperly formatted. (%s)" % type(msg))
self.event.set()
def wait_until_we_have_enough_granules(self, dataset_id="", data_size=40):
"""
Loops until there is a sufficient amount of data in the dataset
"""
done = False
with gevent.Timeout(40):
while not done:
extents = self.dataset_management.dataset_extents(dataset_id, "time")[0]
granule = self.data_retriever.retrieve_last_data_points(dataset_id, 1)
rdt = RecordDictionaryTool.load_from_granule(granule)
if rdt["time"] and rdt["time"][0] != rdt._pdict.get_context("time").fill_value and extents >= data_size:
done = True
Example 5: TestDMEnd2End
# Required import: from interface.services.dm.iingestion_management_service import IngestionManagementServiceClient [as alias]
# Or: from interface.services.dm.iingestion_management_service.IngestionManagementServiceClient import unpersist_data_stream [as alias]
# ... (part of the code is omitted here) ...
# --------------------------------------------------------------------------------
# There is sometimes a race condition between the services and the process over
# the creation of the datastore and its instance; this ensures the datastore
# exists before the process even subscribes to data.
self.get_datastore(dataset_id)
self.publish_fake_data(stream_id)
self.wait_until_we_have_enough_granules(dataset_id, 2) # I just need two
replay_granule = self.data_retriever.retrieve(dataset_id, {"start_time": 0, "end_time": 6})
rdt = RecordDictionaryTool.load_from_granule(replay_granule)
comp = rdt["time"] == np.array([0, 1, 2, 3, 4, 5])
try:
log.info("Compared granule: %s", replay_granule.__dict__)
log.info("Granule tax: %s", replay_granule.taxonomy.__dict__)
except:
pass
self.assertTrue(comp.all())
def test_last_granule(self):
# --------------------------------------------------------------------------------
# Create the necessary configurations for the test
# --------------------------------------------------------------------------------
stream_id = self.pubsub_management.create_stream()
config_id = self.get_ingestion_config()
dataset_id = self.create_dataset()
self.ingestion_management.persist_data_stream(
stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id
)
# --------------------------------------------------------------------------------
# Create the datastore first,
# --------------------------------------------------------------------------------
self.get_datastore(dataset_id)
self.publish_hifi(stream_id, 0)
self.publish_hifi(stream_id, 1)
self.wait_until_we_have_enough_granules(dataset_id, 2) # I just need two
replay_granule = self.data_retriever.retrieve_last_granule(dataset_id)
rdt = RecordDictionaryTool.load_from_granule(replay_granule)
comp = rdt["time"] == np.arange(10) + 10
self.assertTrue(comp.all())
def test_replay_with_parameters(self):
# --------------------------------------------------------------------------------
# Create the configurations and the dataset
# --------------------------------------------------------------------------------
stream_id = self.pubsub_management.create_stream()
config_id = self.get_ingestion_config()
dataset_id = self.create_dataset()
self.ingestion_management.persist_data_stream(
stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id
)
# --------------------------------------------------------------------------------
# Coerce the datastore into existence (beats race condition)
# --------------------------------------------------------------------------------
self.get_datastore(dataset_id)
self.launch_producer(stream_id)
self.wait_until_we_have_enough_granules(dataset_id, 4)
query = {"start_time": 0, "end_time": 20, "parameters": ["time", "temp"]}
retrieved_data = self.data_retriever.retrieve(dataset_id=dataset_id, query=query)
rdt = RecordDictionaryTool.load_from_granule(retrieved_data)
comp = np.arange(20) == rdt["time"]
self.assertTrue(comp.all(), "%s" % rdt.pretty_print())
self.assertEquals(set(rdt.iterkeys()), set(["time", "temp"]))
def test_repersist_data(self):
stream_id = self.pubsub_management.create_stream()
config_id = self.get_ingestion_config()
dataset_id = self.create_dataset()
self.ingestion_management.persist_data_stream(
stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id
)
self.get_datastore(dataset_id)
self.publish_hifi(stream_id, 0)
self.publish_hifi(stream_id, 1)
self.wait_until_we_have_enough_granules(dataset_id, 2)
self.ingestion_management.unpersist_data_stream(stream_id=stream_id, ingestion_configuration_id=config_id)
self.ingestion_management.persist_data_stream(
stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id
)
self.publish_hifi(stream_id, 2)
self.publish_hifi(stream_id, 3)
self.wait_until_we_have_enough_granules(dataset_id, 4)
retrieved_granule = self.data_retriever.retrieve(dataset_id)
rdt = RecordDictionaryTool.load_from_granule(retrieved_granule)
comp = rdt["time"] == np.arange(0, 40)
self.assertTrue(comp.all(), "Uh-oh: %s" % rdt["time"])