This article collects typical usage examples of the Python class pyon.ion.event.EventSubscriber. If you have been wondering what exactly EventSubscriber does, how to use it, or what working examples look like, the hand-picked class examples below should help.
Fifteen EventSubscriber code examples are shown below, sorted by popularity by default.
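Before diving into the examples, here is a minimal sketch of the basic pattern they all share: construct an EventSubscriber with an event type and a callback, start it, publish a matching event through an EventPublisher, and stop the subscriber when done. The event type is one used by the examples below; the origin and description values are arbitrary placeholders.

from pyon.ion.event import EventSubscriber, EventPublisher

def print_event(event, *args, **kwargs):
    # Invoked for every event matching the subscription
    print event.origin, event.description

sub = EventSubscriber(event_type="ResourceModifiedEvent", callback=print_event)
sub.start()

pub = EventPublisher(event_type="ResourceModifiedEvent")
pub.publish_event(origin="some_resource_id", description="hello")  # placeholder values

sub.stop()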
Example 1: test_pub_on_different_subtypes

def test_pub_on_different_subtypes(self):
    ar = event.AsyncResult()
    gq = queue.Queue()
    self.count = 0

    def cb(event, *args, **kwargs):
        self.count += 1
        gq.put(event)
        if event.description == "end":
            ar.set()

    sub = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1", callback=cb)
    sub.start()

    pub1 = EventPublisher(event_type="ResourceModifiedEvent")
    pub2 = EventPublisher(event_type="ContainerLifecycleEvent")

    pub1.publish_event(origin="two", sub_type="st2", description="2")
    pub2.publish_event(origin="three", sub_type="st1", description="3")
    pub1.publish_event(origin="one", sub_type="st1", description="1")
    pub1.publish_event(origin="four", sub_type="st1", description="end")

    ar.get(timeout=5)
    sub.stop()

    res = []
    for x in xrange(self.count):
        res.append(gq.get(timeout=5))
    self.assertEquals(len(res), 2)
    self.assertEquals(res[0].description, "1")
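Only two of the four published events reach the subscriber: the "2" event matches the event type but carries sub_type "st2", and the "3" event carries the right sub_type but the wrong event type (ContainerLifecycleEvent). A subscriber therefore filters on event_type and sub_type together, which is what the final two assertions verify.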
Example 2: process_execution

def process_execution(self, temp_vector, qc_params, bad_times):
    interval_key = uuid4().hex
    data_product_id = self.make_large_dataset(temp_vector)
    async_queue = Queue()

    def cb(event, *args, **kwargs):
        if '_'.join(event.qc_parameter.split('_')[1:]) not in qc_params:
            # Ignore QC parameters this run doesn't care about
            return
        times = event.temporal_values
        self.assertEquals(len(times), bad_times)
        async_queue.put(1)

    es = EventSubscriber(event_type=OT.ParameterQCEvent, origin=data_product_id, callback=cb, auto_delete=True)
    es.start()
    self.addCleanup(es.stop)

    config = DotDict()
    config.process.interval_key = interval_key
    config.process.qc_params = qc_params
    self.sync_launch(config)

    # Now that the process is started, throw a timer event at it
    ep = EventPublisher(event_type='TimerEvent')
    ep.publish_event(origin=interval_key)

    try:
        async_queue.get(timeout=120)
    except Empty:
        raise AssertionError('QC was not flagged in time')
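Two subscriptions are at work here: the test subscribes to ParameterQCEvent with origin=data_product_id so it only sees QC flags for its own data product, while the launched process (presumably subscribed to TimerEvent on the interval_key origin) is triggered by the single publish_event call. Using a freshly generated uuid4 hex as the origin keeps concurrent test runs from triggering each other's processes.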
Example 3: process_execution

def process_execution(self, temp_vector, qc_params, bad_times):
    interval_key = uuid4().hex
    data_product_id = self.make_large_dataset(temp_vector)
    async_queue = Queue()

    def cb(event, *args, **kwargs):
        times = event.temporal_values
        self.assertEquals(len(times), bad_times)
        async_queue.put(1)

    es = EventSubscriber(event_type=OT.ParameterQCEvent, origin=data_product_id, callback=cb, auto_delete=True)
    es.start()
    self.addCleanup(es.stop)

    config = DotDict()
    config.process.interval_key = interval_key
    config.process.qc_params = qc_params
    # Schedule the process; it should now be up and running
    self.process_dispatcher.schedule_process(self.process_definition_id, process_id=self.process_id, configuration=config)
    self.sync_launch(config)

    # Now that the process is started, throw a timer event at it
    ep = EventPublisher(event_type='TimerEvent')
    ep.publish_event(origin=interval_key)

    try:
        for i in xrange(24):
            async_queue.get(timeout=10)
    except Empty:
        raise AssertionError('QC was not flagged in time: %d' % i)
Example 4: test_qc_interval_integration

def test_qc_interval_integration(self):
    # 1. Make a dataset that has only one discrete QC violation
    # 2. Launch the process
    # 3. Set up the scheduler to run it a few times
    # 4. Get the events and verify the data

    #--------------------------------------------------------------------------------
    # Make a dataset that has only one discrete QC violation
    #--------------------------------------------------------------------------------
    dp_id, dataset_id, stream_def_id = self.make_data_product()
    ph = ParameterHelper(self.dataset_management, self.addCleanup)
    monitor = DatasetMonitor(dataset_id)
    self.addCleanup(monitor.stop)
    for rdt in self.populate_vectors(stream_def_id, 1, lambda x: [41] + [39] * (x - 1)):
        ph.publish_rdt_to_data_product(dp_id, rdt)
    self.assertTrue(monitor.event.wait(10))  # Give it 10 seconds to populate

    #--------------------------------------------------------------------------------
    # Launch the process
    #--------------------------------------------------------------------------------
    interval_key = uuid4().hex
    config = DotDict()
    config.process.interval_key = interval_key
    config.process.qc_params = ['glblrng_qc']  # The other QC parameters are covered by other tests
    self.sync_launch(config)

    async_queue = Queue()

    def callback(event, *args, **kwargs):
        times = event.temporal_values
        self.assertEquals(len(times), 1)
        async_queue.put(1)

    es = EventSubscriber(event_type=OT.ParameterQCEvent, origin=dp_id, callback=callback, auto_delete=True)
    es.start()
    self.addCleanup(es.stop)

    #--------------------------------------------------------------------------------
    # Set up the scheduler
    #--------------------------------------------------------------------------------
    timer_id = self.scheduler_service.create_interval_timer(start_time=time.time(),
                                                            end_time=time.time() + 13,
                                                            interval=5,
                                                            event_origin=interval_key)

    #--------------------------------------------------------------------------------
    # Get the events and verify them
    #--------------------------------------------------------------------------------
    try:
        for i in xrange(2):
            async_queue.get(timeout=10)
    except Empty:
        raise AssertionError('QC Events not raised')
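With a 13-second window and a 5-second interval, the timer should fire at least twice on the interval_key origin, so the test drains two ParameterQCEvent notifications from the queue; each one must report exactly one flagged timestamp, matching the single discrete violation (the lone 41 among 39s) planted in the dataset.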
Example 5: test_derived_data_product

def test_derived_data_product(self):
    pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
    ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='ctd parsed', parameter_dictionary_id=pdict_id)
    self.addCleanup(self.pubsubcli.delete_stream_definition, ctd_stream_def_id)

    tdom, sdom = time_series_domain()
    dp = DataProduct(name='Instrument DP', temporal_domain=tdom.dump(), spatial_domain=sdom.dump())
    dp_id = self.dpsc_cli.create_data_product(dp, stream_definition_id=ctd_stream_def_id)
    self.addCleanup(self.dpsc_cli.force_delete_data_product, dp_id)

    self.dpsc_cli.activate_data_product_persistence(dp_id)
    self.addCleanup(self.dpsc_cli.suspend_data_product_persistence, dp_id)

    dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
    if not dataset_ids:
        raise NotFound("Data Product %s dataset does not exist" % str(dp_id))
    dataset_id = dataset_ids[0]

    # Make the derived data product
    simple_stream_def_id = self.pubsubcli.create_stream_definition(name='TEMPWAT stream def', parameter_dictionary_id=pdict_id, available_fields=['time', 'temp'])
    tempwat_dp = DataProduct(name='TEMPWAT')
    tempwat_dp_id = self.dpsc_cli.create_data_product(tempwat_dp, stream_definition_id=simple_stream_def_id, parent_data_product_id=dp_id)
    self.addCleanup(self.dpsc_cli.delete_data_product, tempwat_dp_id)
    self.dpsc_cli.activate_data_product_persistence(tempwat_dp_id)
    self.addCleanup(self.dpsc_cli.suspend_data_product_persistence, tempwat_dp_id)

    # Check that the streams associated with the data product are persisted
    stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, RT.Stream, True)
    for stream_id in stream_ids:
        self.assertTrue(self.ingestclient.is_persisted(stream_id))
    stream_id = stream_ids[0]

    route = self.pubsubcli.read_stream_route(stream_id=stream_id)
    rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
    rdt['time'] = np.arange(20)
    rdt['temp'] = np.arange(20)
    rdt['pressure'] = np.arange(20)

    publisher = StandaloneStreamPublisher(stream_id, route)

    dataset_modified = Event()
    def cb(*args, **kwargs):
        dataset_modified.set()
    es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id, auto_delete=True)
    es.start()
    self.addCleanup(es.stop)

    publisher.publish(rdt.to_granule())
    self.assertTrue(dataset_modified.wait(30))

    tempwat_dataset_ids, _ = self.rrclient.find_objects(tempwat_dp_id, PRED.hasDataset, id_only=True)
    tempwat_dataset_id = tempwat_dataset_ids[0]
    granule = self.data_retriever.retrieve(tempwat_dataset_id, delivery_format=simple_stream_def_id)
    rdt = RecordDictionaryTool.load_from_granule(granule)
    np.testing.assert_array_equal(rdt['time'], np.arange(20))
    self.assertEquals(set(rdt.fields), set(['time', 'temp']))

Author: MauriceManning, Project: coi-services, Lines: 59, Source: test_data_product_management_service_integration.py
Example 6: test_event_subscriber_auto_delete

def test_event_subscriber_auto_delete(self):
    mocknode = Mock()
    ev = EventSubscriber(event_type="ProcessLifecycleEvent", callback=lambda *a, **kw: None, auto_delete=sentinel.auto_delete, node=mocknode)
    self.assertEquals(ev._auto_delete, sentinel.auto_delete)

    # We don't want to have to patch out everything here, so call initialize
    # directly, which calls create_channel for us
    ev._setup_listener = Mock()
    ev.initialize(sentinel.binding)
    self.assertEquals(ev._chan.queue_auto_delete, sentinel.auto_delete)
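As the assertion on ev._chan.queue_auto_delete suggests, the auto_delete flag is handed down to the subscriber's underlying channel and, from there, to the broker queue; with standard AMQP semantics an auto-delete queue is removed once its last consumer disconnects, which is presumably why the short-lived test subscribers in the other examples pass auto_delete=True.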
Example 7: start

def start(self):
    log.debug("GovernanceController starting ...")
    self._CFG = CFG
    self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)
    log.info("GovernanceInterceptor enabled: %s" % str(self.enabled))

    self.policy_event_subscriber = None

    # Containers default to no Org boundary and the ION root Org
    self._is_container_org_boundary = CFG.get_safe('container.org_boundary', False)
    self._container_org_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))
    self._container_org_id = None
    self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')
    self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

    self.system_actor_id = None
    self.system_actor_user_header = None

    if self.enabled:
        config = CFG.get_safe('interceptor.interceptors.governance.config')
        self.initialize_from_config(config)
        self.policy_event_subscriber = EventSubscriber(event_type=OT.PolicyEvent, callback=self.policy_event_callback)
        self.policy_event_subscriber.start()
        self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=self.container)
        self.policy_client = PolicyManagementServiceProcessClient(node=self.container.node, process=self.container)
Example 8: start

def start(self):
    log.debug("GovernanceController starting ...")
    self._CFG = CFG
    self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)
    if not self.enabled:
        log.warn("GovernanceInterceptor disabled by configuration")

    self.policy_event_subscriber = None

    # Containers default to no Org boundary and the ION root Org
    self._is_container_org_boundary = CFG.get_safe('container.org_boundary', False)
    self._container_org_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))
    self._container_org_id = None
    self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')
    self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

    self.system_actor_id = None
    self.system_actor_user_header = None

    self.rr_client = ResourceRegistryServiceProcessClient(process=self.container)
    self.policy_client = PolicyManagementServiceProcessClient(process=self.container)

    if self.enabled:
        config = CFG.get_safe('interceptor.interceptors.governance.config')
        self.initialize_from_config(config)
        self.policy_event_subscriber = EventSubscriber(event_type=OT.PolicyEvent, callback=self.policy_event_callback)
        self.policy_event_subscriber.start()
        self._policy_snapshot = self._get_policy_snapshot()
        self._log_policy_update("start_governance_ctrl", message="Container start")
Example 9: start

def start(self):
    # Create the result publisher and the request listener
    self.sender = EventPublisher(event_type="ContainerManagementResult")
    self.receiver = EventSubscriber(event_type="ContainerManagementRequest", callback=self._receive_event)
    with self.lock:
        self.running = True
        self.receiver.start()
    log.info('ready for container management requests')
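This pairs an EventSubscriber on ContainerManagementRequest with an EventPublisher for ContainerManagementResult, giving a simple request/response protocol over the event exchange; presumably the lock and the running flag let _receive_event (not shown) reject requests that arrive while the component is shutting down.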
Example 10: test_global_range_test

def test_global_range_test(self):
    TestQCFunctions.test_global_range_test(self)
    flagged = Event()

    def cb(event, *args, **kwargs):
        times = event.temporal_values
        self.assertEquals(times, [0.0, 7.0])
        flagged.set()

    event_subscriber = EventSubscriber(event_type=OT.ParameterQCEvent, origin=self.dataset_id, callback=cb, auto_delete=True)
    event_subscriber.start()
    self.addCleanup(event_subscriber.stop)

    self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)
    self.dataset_monitor.event.wait(10)
    rdt = RecordDictionaryTool.load_from_granule(self.data_retriever.retrieve(self.dataset_id))
    np.testing.assert_array_almost_equal(rdt['tempwat_glblrng_qc'], [0, 1, 1, 1, 1, 1, 1, 0])
    self.assertTrue(flagged.wait(10))
Example 11: on_start

def on_start(self):
    '''
    Process initialization
    '''
    self._thread = self._process.thread_manager.spawn(self.thread_loop)
    self._event_subscriber = EventSubscriber(event_type=OT.ResetQCEvent, callback=self.receive_event, auto_delete=True)  # TODO: correct event types
    self._event_subscriber.start()
    self.timeout = self.CFG.get_safe('endpoint.receive.timeout', 10)
    self.resource_registry = self.container.resource_registry
    self.event_queue = Queue()
Example 12: on_start

def on_start(self):
    TransformDataProcess.on_start(self)
    self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
    self.stored_values = StoredValueManager(self.container)
    self.input_data_product_ids = self.CFG.get_safe('process.input_products', [])
    self.output_data_product_ids = self.CFG.get_safe('process.output_products', [])
    self.lookup_docs = self.CFG.get_safe('process.lookup_docs', [])
    self.new_lookups = Queue()
    self.lookup_monitor = EventSubscriber(event_type=OT.ExternalReferencesUpdatedEvent, callback=self._add_lookups, auto_delete=True)
    self.lookup_monitor.start()
Example 13: make_large_dataset

def make_large_dataset(self, temp_vector):
    monitor_queue = Queue()
    # Make 27 hours of data
    ph = ParameterHelper(self.dataset_management, self.addCleanup)
    data_product_id, dataset_id, stream_def_id = self.make_data_product()
    es = EventSubscriber(event_type=OT.DatasetModified, origin=dataset_id, auto_delete=True,
                         callback=lambda *args, **kwargs: monitor_queue.put(1))
    es.start()
    self.addCleanup(es.stop)
    for rdt in self.populate_vectors(stream_def_id, 3, temp_vector):
        ph.publish_rdt_to_data_product(data_product_id, rdt)
    try:
        for i in xrange(3):
            monitor_queue.get(timeout=10)
    except Empty:
        raise AssertionError('Failed to populate dataset in time')
    return data_product_id
Example 14: on_start

def on_start(self):
    # Persister thread
    self._persist_greenlet = spawn(self._persister_loop, self.persist_interval)
    log.debug('EventPersister persist greenlet started in "%s" (interval %s)', self.__class__.__name__, self.persist_interval)

    # Event subscription
    self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                     callback=self._on_event,
                                     queue_name="event_persister")
    self.event_sub.start()
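Unlike the filtered subscribers above, this one uses pattern=EventSubscriber.ALL_EVENTS to receive every event in the system, and names its queue explicitly ("event_persister") rather than taking an anonymous one; a fixed queue name lets the persister keep accumulating events across restarts and, presumably, share the load if several persister instances consume from the same queue.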
Example 15: check_localrange

def check_localrange(self):
    log.info('check_localrange')
    TestQCFunctions.check_localrange(self)
    self.init_check()

    flagged = Event()

    def cb(event, *args, **kwargs):
        times = event.temporal_values
        if event.qc_parameter != 'tempwat_loclrng_qc':
            return
        np.testing.assert_array_equal(times, np.array([3580144708.7555027, 3580144709.7555027, 3580144710.7555027, 3580144711.7555027, 3580144712.7555027]))
        flagged.set()

    event_subscriber = EventSubscriber(event_type=OT.ParameterQCEvent, origin=self.dp_id, callback=cb, auto_delete=True)
    event_subscriber.start()
    self.addCleanup(event_subscriber.stop)

    self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)
    self.assertTrue(self.dataset_monitor.wait())
    rdt = RecordDictionaryTool.load_from_granule(self.data_retriever.retrieve(self.dataset_id))
    np.testing.assert_array_almost_equal(rdt['tempwat_loclrng_qc'], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    self.assertTrue(flagged.wait(10))