This page collects and summarizes typical usage examples of the Python class interface.services.dm.iingestion_management_service.IngestionManagementServiceClient. If you have been wondering what IngestionManagementServiceClient is for or how to use it, the curated class examples below should help.
The following shows 15 code examples of the IngestionManagementServiceClient class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
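Before the examples, here is a minimal orientation sketch of the lifecycle most of them follow, using only client calls that appear in the examples below (create_ingestion_configuration, activate_ingestion_configuration, list_ingestion_configurations); the argument values are illustrative placeholders, not defaults taken from the service:

from interface.services.dm.iingestion_management_service import IngestionManagementServiceClient

ingestion_management = IngestionManagementServiceClient()

# Register a configuration for an exchange point, then activate it so workers start consuming.
# The storage dicts and worker count are placeholders.
ingestion_id = ingestion_management.create_ingestion_configuration(
    exchange_point_id='science_data',
    couch_storage={},
    hdf_storage={},
    number_of_workers=2,
    default_policy={})
ingestion_management.activate_ingestion_configuration(ingestion_id)

# Configurations registered this way are what the cleanup helpers in Examples 1 and 2 iterate over.
ingestion_config_ids = ingestion_management.list_ingestion_configurations(id_only=True)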
Example 1: clean_subscriptions
def clean_subscriptions():
    # Remove every subscription attached to an ingestion configuration,
    # deactivating and deleting each one along with its association.
    ingestion_management = IngestionManagementServiceClient()
    pubsub = PubsubManagementServiceClient()
    rr = ResourceRegistryServiceClient()
    ingestion_config_ids = ingestion_management.list_ingestion_configurations(id_only=True)
    for ic in ingestion_config_ids:
        subscription_ids, assocs = rr.find_objects(subject=ic, predicate=PRED.hasSubscription, id_only=True)
        for subscription_id, assoc in zip(subscription_ids, assocs):
            rr.delete_association(assoc)
            try:
                pubsub.deactivate_subscription(subscription_id)
            except:
                log.exception("Unable to deactivate subscription: %s", subscription_id)
            pubsub.delete_subscription(subscription_id)
Example 2: clean_subscriptions
def clean_subscriptions():
    # Variant of the cleanup above that walks the association objects directly;
    # assoc.o is the subscription id on the object side of the association.
    ingestion_management = IngestionManagementServiceClient()
    pubsub = PubsubManagementServiceClient()
    rr = ResourceRegistryServiceClient()
    ingestion_config_ids = ingestion_management.list_ingestion_configurations(id_only=True)
    for ic in ingestion_config_ids:
        assocs = rr.find_associations(subject=ic, predicate=PRED.hasSubscription, id_only=False)
        for assoc in assocs:
            rr.delete_association(assoc)
            try:
                pubsub.deactivate_subscription(assoc.o)
            except:
                pass
            pubsub.delete_subscription(assoc.o)
Example 3: on_start
def on_start(self):
    super(IngestionLauncher, self).on_start()

    exchange_point = self.CFG.get("process", {}).get("exchange_point", "science_data")
    couch_storage = self.CFG.get("process", {}).get("couch_storage", {})
    couch_storage = CouchStorage(**couch_storage)
    hdf_storage = self.CFG.get("process", {}).get("hdf_storage", {})
    number_of_workers = self.CFG.get("process", {}).get("number_of_workers", 2)

    ingestion_management_service = IngestionManagementServiceClient(node=self.container.node)
    ingestion_id = ingestion_management_service.create_ingestion_configuration(
        exchange_point_id=exchange_point,
        couch_storage=couch_storage,
        hdf_storage=hdf_storage,
        number_of_workers=number_of_workers,
        default_policy={},
    )
    ingestion_management_service.activate_ingestion_configuration(ingestion_id)
Example 4: on_start
def on_start(self):
    super(IngestionLauncher, self).on_start()

    exchange_point = self.CFG.get_safe('ingestion.exchange_point', 'science_data')
    couch_opts = self.CFG.get_safe('ingestion.couch_storage', {})
    couch_storage = CouchStorage(**couch_opts)
    hdf_opts = self.CFG.get_safe('ingestion.hdf_storage', {})
    hdf_storage = HdfStorage(**hdf_opts)
    number_of_workers = self.CFG.get_safe('ingestion.number_of_workers', 2)

    ingestion_management_service = IngestionManagementServiceClient(node=self.container.node)
    ingestion_id = ingestion_management_service.create_ingestion_configuration(
        exchange_point_id=exchange_point,
        couch_storage=couch_storage,
        hdf_storage=hdf_storage,
        number_of_workers=number_of_workers
    )
    ingestion_management_service.activate_ingestion_configuration(ingestion_id)
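Examples 3 and 4 are two variants of the same IngestionLauncher.on_start hook; they differ only in how the configuration is read (nested CFG.get calls versus CFG.get_safe with dotted keys) and in whether an HdfStorage object is built. As a hedged sketch of how such a launcher could be spawned with the 'ingestion.*' keys that Example 4 expects, reusing the container.spawn_process call shape from Example 13 (the process name and module path below are assumptions, not taken from the examples):

config = DotDict()
config.ingestion.exchange_point = 'science_data'
config.ingestion.couch_storage = {}
config.ingestion.hdf_storage = {}
config.ingestion.number_of_workers = 2

# Hypothetical name/module/class triplet for the launcher process.
self.container.spawn_process('ingestion_launcher',
                             'ion.processes.data.ingestion.ingestion_launcher',
                             'IngestionLauncher',
                             config)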
Example 5: DatasetManagementIntTest
class DatasetManagementIntTest(IonIntegrationTestCase):
    def setUp(self):
        import couchdb
        super(DatasetManagementIntTest, self).setUp()
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2dm.yml')

        self.db = self.container.datastore_manager.get_datastore('scidata', DataStore.DS_PROFILE.SCIDATA)
        self.db_raw = self.db.server

        self.dataset_management_client = DatasetManagementServiceClient(node=self.container.node)
        self.ingestion_client = IngestionManagementServiceClient(node=self.container.node)

    def _random_data(self, entropy):
        random_pressures = [(random.random() * 100) for i in xrange(entropy)]
        random_salinity = [(random.random() * 28) for i in xrange(entropy)]
        random_temperature = [(random.random() * 10) + 32 for i in xrange(entropy)]
        random_times = [random.randrange(1328205227, 1328896395) for i in xrange(entropy)]
        random_lat = [(random.random() * 10) + 30 for i in xrange(entropy)]
        random_lon = [(random.random() * 10) + 70 for i in xrange(entropy)]
        return [random_pressures, random_salinity, random_temperature, random_times, random_lat, random_lon]

    def _generate_point(self, entropy=5):
        random_values = self._random_data(entropy)
        point = ctd_stream_packet(stream_id='test_data', p=random_values[0], c=random_values[1], t=random_values[2],
                                  time=random_values[3], lat=random_values[4], lon=random_values[5], create_hdf=False)
        return point

    def test_get_dataset_bounds(self):
        for i in xrange(3):
            point = self._generate_point()
            self.db.create(point)

        dataset_id = self.dataset_management_client.create_dataset(stream_id='test_data', datastore_name='scidata')
        bounds = self.dataset_management_client.get_dataset_bounds(dataset_id=dataset_id)

        self.assertTrue(bounds['latitude_bounds'][0] > 30.0)
        self.assertTrue(bounds['latitude_bounds'][1] < 40.0)
        self.assertTrue(bounds['longitude_bounds'][0] > 70.0)
        self.assertTrue(bounds['longitude_bounds'][1] < 80.0)

        self.dataset_management_client.delete_dataset(dataset_id)

    @unittest.skip('not ready yet')
    def test_dataset_ingestion(self):
        couch_storage = {'server': 'localhost', 'database': 'scidata'}
        ingestion_configuration_id = self.ingestion_client.create_ingestion_configuration(
            exchange_point_id='science_data',
            couch_storage=couch_storage,
            hdf_storage={},
            number_of_workers=4,
            default_policy={})
Example 6: setUp
def setUp(self):
    self._start_container()
    self.container.start_rel_from_url("res/deploy/r2deploy.yml")
    self.ingestion_management = IngestionManagementServiceClient()
    self.resource_registry = ResourceRegistryServiceClient()
    self.pubsub_management = PubsubManagementServiceClient()

    self.ingest_name = "basic"
    self.exchange = "testdata"
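The fixture above only wires up the clients and names; a test built on it would typically turn ingest_name and exchange into an ingestion configuration. A minimal sketch, assuming the same create_ingestion_configuration(name=..., exchange_point_id=..., queues=[...]) call shape used in Example 13 (the helper name and queue name are illustrative):

def create_ingest_config(self):
    # Illustrative helper: one science_granule queue registered under the
    # names prepared in setUp ("basic" on the "testdata" exchange point).
    ingest_queue = IngestionQueue(name='science_granule_ingestion', type='science_granule')
    return self.ingestion_management.create_ingestion_configuration(
        name=self.ingest_name,
        exchange_point_id=self.exchange,
        queues=[ingest_queue])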
Example 7: __init__
def __init__(self):
    self.ingestion_management = IngestionManagementServiceClient()
    self.resource_registry = ResourceRegistryServiceClient()
    self.data_product_management = DataProductManagementServiceClient()
    self.dataset_management = DatasetManagementServiceClient()

    self._paused_streams = []
    self._w_covs = {}
    self._ro_covs = {}
    self._context_managed = False
Example 8: setUp
def setUp(self):
    self.datastore_name = 'datasets'
    self.exchange_point = 'science_data'
    self.exchange_space = 'science_granule_ingestion'
    self.queue_name = self.exchange_space

    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2dm.yml')

    self.ingestion_management = IngestionManagementServiceClient()
    self.pubsub = PubsubManagementServiceClient()
Example 9: setUp
def setUp(self):
    # Start container
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')

    self.dpsc_cli = DataProductManagementServiceClient(node=self.container.node)
    self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
    self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
    self.pubsubcli = PubsubManagementServiceClient(node=self.container.node)
    self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
    self.process_dispatcher = ProcessDispatcherServiceClient()
    self.dataset_management = DatasetManagementServiceClient()
    self.unsc = UserNotificationServiceClient()
    self.data_retriever = DataRetrieverServiceClient()

    #------------------------------------------
    # Create the environment
    #------------------------------------------
    datastore_name = CACHE_DATASTORE_NAME
    self.db = self.container.datastore_manager.get_datastore(datastore_name)
    self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

    self.process_definitions = {}
    ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
    ingestion_worker_definition.executable = {
        'module': 'ion.processes.data.ingestion.science_granule_ingestion_worker',
        'class': 'ScienceGranuleIngestionWorker'
    }
    process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
    self.process_definitions['ingestion_worker'] = process_definition_id

    self.pids = []
    self.exchange_points = []
    self.exchange_names = []

    #------------------------------------------------------------------------------------------------
    # First launch the ingestors
    #------------------------------------------------------------------------------------------------
    self.exchange_space = 'science_granule_ingestion'
    self.exchange_point = 'science_data'
    config = DotDict()
    config.process.datastore_name = 'datasets'
    config.process.queue_name = self.exchange_space

    self.exchange_names.append(self.exchange_space)
    self.exchange_points.append(self.exchange_point)

    pid = self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'], configuration=config)
    log.debug("the ingestion worker process id: %s", pid)
    self.pids.append(pid)

    self.addCleanup(self.cleaning_up)
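The fixture registers self.cleaning_up as a cleanup hook, but the helper itself is not part of this example. A minimal sketch of what it could do, assuming the process dispatcher's cancel_process call is available for the worker pids collected above:

def cleaning_up(self):
    # Illustrative cleanup: stop every scheduled ingestion worker and reset the bookkeeping lists.
    for pid in self.pids:
        log.debug("cancelling ingestion worker process: %s", pid)
        self.process_dispatcher.cancel_process(pid)
    self.pids = []
    self.exchange_points = []
    self.exchange_names = []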
Example 10: setUp
def setUp(self):
    import couchdb
    super(DatasetManagementIntTest, self).setUp()
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2dm.yml')

    self.db = self.container.datastore_manager.get_datastore('scidata', DataStore.DS_PROFILE.SCIDATA)
    self.db_raw = self.db.server

    self.dataset_management_client = DatasetManagementServiceClient(node=self.container.node)
    self.ingestion_client = IngestionManagementServiceClient(node=self.container.node)
Example 11: setUp
def setUp(self):
    self._start_container()

    config = DotDict()
    config.bootstrap.processes.ingestion.module = 'ion.processes.data.ingestion.ingestion_worker_a'
    config.bootstrap.processes.replay.module = 'ion.processes.data.replay.replay_process_a'

    self.container.start_rel_from_url('res/deploy/r2dm.yml', config)

    self.datastore_name = 'test_datasets'
    self.pubsub_management = PubsubManagementServiceClient()
    self.ingestion_management = IngestionManagementServiceClient()
    self.dataset_management = DatasetManagementServiceClient()
    self.process_dispatcher = ProcessDispatcherServiceClient()
    self.data_retriever = DataRetrieverServiceClient()
Example 12: setUp
def setUp(self):  # Love the non pep-8 convention
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')

    self.process_dispatcher = ProcessDispatcherServiceClient()
    self.pubsub_management = PubsubManagementServiceClient()
    self.resource_registry = ResourceRegistryServiceClient()
    self.dataset_management = DatasetManagementServiceClient()
    self.ingestion_management = IngestionManagementServiceClient()
    self.data_retriever = DataRetrieverServiceClient()

    self.event = Event()
    self.exchange_space_name = 'test_granules'
    self.exchange_point_name = 'science_data'
    self.i = 0
    self.cci = 0
Example 13: ScienceGranuleIngestionIntTest
class ScienceGranuleIngestionIntTest(IonIntegrationTestCase):
    def setUp(self):
        self.datastore_name = 'datasets'
        self.exchange_point = 'science_data'
        self.exchange_space = 'science_granule_ingestion'
        self.queue_name = self.exchange_space

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2dm.yml')

        self.ingestion_management = IngestionManagementServiceClient()
        self.pubsub = PubsubManagementServiceClient()

    def build_granule(self):
        return ScienceGranuleIngestionWorkerUnitTest.build_granule()

    def launch_worker(self):
        cfg = DotDict()
        cfg.process.datastore_name = self.datastore_name
        cfg.process.queue_name = self.queue_name
        # @todo: replace with CEI friendly calls
        pid = self.container.spawn_process('ingest_worker',
                                           'ion.processes.data.ingestion.science_granule_ingestion_worker',
                                           'ScienceGranuleIngestionWorker',
                                           cfg)
        return pid

    def create_ingestion_config(self):
        ingest_queue = IngestionQueue(name=self.exchange_space, type='science_granule')
        config_id = self.ingestion_management.create_ingestion_configuration(name='standard_ingest',
                                                                             exchange_point_id=self.exchange_point,
                                                                             queues=[ingest_queue])
        return config_id

    def create_stream(self):
        stream_id = self.pubsub.create_stream()
        return stream_id

    def poll(self, evaluation_callback, *args, **kwargs):
        # Re-evaluate the callback for up to five seconds before giving up.
        now = time.time()
        cutoff = now + 5
        done = False
        while not done:
            if evaluation_callback(*args, **kwargs):
                done = True
            elif now >= cutoff:
                raise Timeout('No results found within the allotted time')
            now = time.time()
        return True
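For reference, a small usage sketch of the poll helper above; the test method, callback, and results list are illustrative stand-ins for whatever asynchronous outcome a test is waiting on:

def test_poll_usage(self):
    results = []

    def got_results():
        # Re-evaluated by poll() until it returns True or roughly five seconds elapse.
        return len(results) > 0

    # Something asynchronous (e.g. an ingestion worker) is expected to append to
    # results here; poll() blocks until the callback succeeds or raises Timeout.
    self.assertTrue(self.poll(got_results))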
Example 14: setUp
def setUp(self):
    # Start container
    self._start_container()
    self.container.start_rel_from_url("res/deploy/r2deploy.yml")
    print "started services"

    # Now create clients to the services under test
    self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
    self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
    self.pubsubclient = PubsubManagementServiceClient(node=self.container.node)
    self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
    self.imsclient = InstrumentManagementServiceClient(node=self.container.node)
    self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
    self.dataprocessclient = DataProcessManagementServiceClient(node=self.container.node)
    self.datasetclient = DatasetManagementServiceClient(node=self.container.node)
    self.omsclient = ObservatoryManagementServiceClient(node=self.container.node)
Example 15: setUp
def setUp(self):  # Love the non pep-8 convention
    self._start_container()
    self.container.start_rel_from_url("res/deploy/r2deploy.yml")

    self.process_dispatcher = ProcessDispatcherServiceClient()
    self.pubsub_management = PubsubManagementServiceClient()
    self.resource_registry = ResourceRegistryServiceClient()
    self.dataset_management = DatasetManagementServiceClient()
    self.ingestion_management = IngestionManagementServiceClient()
    self.data_retriever = DataRetrieverServiceClient()

    self.pids = []
    self.event = Event()
    self.exchange_space_name = "test_granules"
    self.exchange_point_name = "science_data"
    self.i = 0

    self.purge_queues()
    self.queue_buffer = []
    self.streams = []
    self.addCleanup(self.stop_all_ingestion)