本文整理汇总了Python中pyon.ion.event.EventSubscriber.start方法的典型用法代码示例。如果您正苦于以下问题:Python EventSubscriber.start方法的具体用法?Python EventSubscriber.start怎么用?Python EventSubscriber.start使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pyon.ion.event.EventSubscriber
的用法示例。
在下文中一共展示了EventSubscriber.start方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process_execution
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
def process_execution(self, temp_vector, qc_params, bad_times):
    '''
    Builds a large dataset, launches the QC process and verifies that a
    ParameterQCEvent flagging exactly ``bad_times`` temporal values is
    published for each QC parameter of interest.
    '''
    timer_origin = uuid4().hex
    product_id = self.make_large_dataset(temp_vector)
    flag_queue = Queue()

    def on_qc_event(event, *args, **kwargs):
        # Strip the instrument prefix from the QC parameter name and only
        # react to the parameters this run cares about.
        suffix = '_'.join(event.qc_parameter.split('_')[1:])
        if suffix in qc_params:
            self.assertEquals(len(event.temporal_values), bad_times)
            flag_queue.put(1)

    qc_events = EventSubscriber(event_type=OT.ParameterQCEvent, origin=product_id, callback=on_qc_event, auto_delete=True)
    qc_events.start()
    self.addCleanup(qc_events.stop)

    launch_config = DotDict()
    launch_config.process.interval_key = timer_origin
    launch_config.process.qc_params = qc_params
    self.sync_launch(launch_config)

    # The QC process is up; poke it with a timer event so it evaluates QC.
    EventPublisher(event_type='TimerEvent').publish_event(origin=timer_origin)
    try:
        flag_queue.get(timeout=120)
    except Empty:
        raise AssertionError('QC was not flagged in time')
示例2: test_pub_on_different_subtypes
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
def test_pub_on_different_subtypes(self):
    '''
    Publications on differing sub_types must only reach a subscriber whose
    sub_type filter ("st1") matches — two of the four publishes here.
    '''
    done = event.AsyncResult()
    received = queue.Queue()
    self.count = 0

    def collect(event, *args, **kwargs):
        self.count += 1
        received.put(event)
        if event.description == "end":
            done.set()

    sub = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1", callback=collect)
    sub.start()

    pub1 = EventPublisher(event_type="ResourceModifiedEvent")
    pub2 = EventPublisher(event_type="ContainerLifecycleEvent")
    # Wrong sub_type, wrong event type, then two matching publishes.
    pub1.publish_event(origin="two", sub_type="st2", description="2")
    pub2.publish_event(origin="three", sub_type="st1", description="3")
    pub1.publish_event(origin="one", sub_type="st1", description="1")
    pub1.publish_event(origin="four", sub_type="st1", description="end")

    done.get(timeout=5)
    sub.stop()

    delivered = [received.get(timeout=5) for _ in xrange(self.count)]
    self.assertEquals(len(delivered), 2)
    self.assertEquals(delivered[0].description, "1")
示例3: process_execution
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
def process_execution(self, temp_vector, qc_params, bad_times):
    '''
    Builds a large dataset, launches the QC process and verifies that each
    of the expected ParameterQCEvents flags exactly ``bad_times`` values.
    '''
    interval_key = uuid4().hex
    data_product_id = self.make_large_dataset(temp_vector)
    async_queue = Queue()

    def cb(event, *args, **kwargs):
        times = event.temporal_values
        self.assertEquals(len(times), bad_times)
        async_queue.put(1)

    es = EventSubscriber(event_type=OT.ParameterQCEvent, origin=data_product_id, callback=cb, auto_delete=True)
    es.start()
    self.addCleanup(es.stop)

    config = DotDict()
    config.process.interval_key = interval_key
    config.process.qc_params = qc_params
    # NOTE(review): the process appears to be launched twice (schedule_process
    # followed by sync_launch) -- confirm whether both calls are required.
    self.process_dispatcher.schedule_process(self.process_definition_id, process_id=self.process_id, configuration=config)
    self.sync_launch(config)

    # So now the process is started, time to throw an event at it
    ep = EventPublisher(event_type='TimerEvent')
    ep.publish_event(origin=interval_key)
    i = 0
    try:
        for i in xrange(24):
            async_queue.get(timeout=10)
    except Empty:
        # BUG FIX: the message was raised as AssertionError('...%d', i), which
        # produces an unformatted tuple; interpolate the iteration count.
        raise AssertionError('QC was not flagged in time: %d' % i)
示例4: test_qc_interval_integration
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
def test_qc_interval_integration(self):
    '''
    End-to-end QC interval test: builds a dataset with a single discrete QC
    violation, launches the QC process, drives it from an interval timer and
    verifies the resulting ParameterQCEvents.
    '''
    # 1 need to make a dataset that only has one discrete qc violation
    # 2 Launch the process
    # 3 Setup the scheduler to run it say three times
    # 4 Get the Events and verify the data

    #--------------------------------------------------------------------------------
    # Make a dataset that has only one discrete qc violation
    #--------------------------------------------------------------------------------
    dp_id, dataset_id, stream_def_id = self.make_data_product()
    ph = ParameterHelper(self.dataset_management, self.addCleanup)
    monitor = DatasetMonitor(dataset_id)
    self.addCleanup(monitor.stop)
    # The first value (41) is the single out-of-range sample; the rest (39) pass.
    for rdt in self.populate_vectors(stream_def_id, 1, lambda x : [41] + [39] * (x-1)):
        ph.publish_rdt_to_data_product(dp_id, rdt)
    self.assertTrue(monitor.event.wait(10)) # Give it 10 seconds to populate

    #--------------------------------------------------------------------------------
    # Launch the process
    #--------------------------------------------------------------------------------
    interval_key = uuid4().hex
    config = DotDict()
    config.process.interval_key = interval_key
    config.process.qc_params = ['glblrng_qc'] # The others are tested in other tests for completeness
    self.sync_launch(config)

    async_queue = Queue()
    def callback(event, *args, **kwargs):
        # Exactly one temporal value should be flagged per QC evaluation.
        times = event.temporal_values
        self.assertEquals(len(times), 1)
        async_queue.put(1)
    es = EventSubscriber(event_type=OT.ParameterQCEvent, origin=dp_id, callback=callback, auto_delete=True)
    es.start()
    self.addCleanup(es.stop)

    #--------------------------------------------------------------------------------
    # Setup the scheduler
    #--------------------------------------------------------------------------------
    # Fire the timer every 5s for ~13s so the QC process runs two-to-three times.
    timer_id = self.scheduler_service.create_interval_timer(start_time=time.time(),
                                                            end_time=time.time()+13,
                                                            interval=5,
                                                            event_origin=interval_key)

    #--------------------------------------------------------------------------------
    # Get the events and verify them
    #--------------------------------------------------------------------------------
    try:
        for i in xrange(2):
            async_queue.get(timeout=10)
    except Empty:
        raise AssertionError('QC Events not raised')
示例5: test_derived_data_product
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
def test_derived_data_product(self):
    '''
    Creates a parent CTD data product plus a derived TEMPWAT product with a
    narrowed stream definition, publishes one granule to the parent and
    verifies the derived dataset exposes only the time/temp fields.
    '''
    pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
    ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='ctd parsed', parameter_dictionary_id=pdict_id)
    self.addCleanup(self.pubsubcli.delete_stream_definition, ctd_stream_def_id)

    tdom, sdom = time_series_domain()
    dp = DataProduct(name='Instrument DP', temporal_domain=tdom.dump(), spatial_domain=sdom.dump())
    dp_id = self.dpsc_cli.create_data_product(dp, stream_definition_id=ctd_stream_def_id)
    self.addCleanup(self.dpsc_cli.force_delete_data_product, dp_id)

    self.dpsc_cli.activate_data_product_persistence(dp_id)
    self.addCleanup(self.dpsc_cli.suspend_data_product_persistence, dp_id)

    dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
    if not dataset_ids:
        raise NotFound("Data Product %s dataset does not exist" % str(dp_id))
    dataset_id = dataset_ids[0]

    # Make the derived data product (narrowed to just time + temp)
    simple_stream_def_id = self.pubsubcli.create_stream_definition(name='TEMPWAT stream def', parameter_dictionary_id=pdict_id, available_fields=['time','temp'])
    tempwat_dp = DataProduct(name='TEMPWAT')
    tempwat_dp_id = self.dpsc_cli.create_data_product(tempwat_dp, stream_definition_id=simple_stream_def_id, parent_data_product_id=dp_id)
    self.addCleanup(self.dpsc_cli.delete_data_product, tempwat_dp_id)
    self.dpsc_cli.activate_data_product_persistence(tempwat_dp_id)
    self.addCleanup(self.dpsc_cli.suspend_data_product_persistence, tempwat_dp_id)

    # Check that the streams associated with the data product are persisted
    stream_ids, _ = self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
    for stream_id in stream_ids:
        self.assertTrue(self.ingestclient.is_persisted(stream_id))
    stream_id = stream_ids[0]
    route = self.pubsubcli.read_stream_route(stream_id=stream_id)

    # Publish one granule with time/temp/pressure on the parent stream.
    rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
    rdt['time'] = np.arange(20)
    rdt['temp'] = np.arange(20)
    rdt['pressure'] = np.arange(20)
    publisher = StandaloneStreamPublisher(stream_id,route)

    # Wait (via DatasetModified) until the parent dataset has ingested it.
    dataset_modified = Event()
    def cb(*args, **kwargs):
        dataset_modified.set()
    es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id, auto_delete=True)
    es.start()
    self.addCleanup(es.stop)

    publisher.publish(rdt.to_granule())
    self.assertTrue(dataset_modified.wait(30))

    # The derived dataset must contain only the narrowed field set.
    tempwat_dataset_ids, _ = self.rrclient.find_objects(tempwat_dp_id, PRED.hasDataset, id_only=True)
    tempwat_dataset_id = tempwat_dataset_ids[0]
    granule = self.data_retriever.retrieve(tempwat_dataset_id, delivery_format=simple_stream_def_id)
    rdt = RecordDictionaryTool.load_from_granule(granule)
    np.testing.assert_array_equal(rdt['time'], np.arange(20))
    self.assertEquals(set(rdt.fields), set(['time','temp']))
开发者ID:MauriceManning,项目名称:coi-services,代码行数:61,代码来源:test_data_product_management_service_integration.py
示例6: test_global_range_test
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
def test_global_range_test(self):
    '''
    Runs the base global-range QC check, then verifies both the persisted QC
    flags and the published ParameterQCEvent.
    '''
    TestQCFunctions.test_global_range_test(self)
    flagged = Event()

    def on_qc_flag(event, *args, **kwargs):
        # The two out-of-range samples sit at times 0.0 and 7.0.
        self.assertEquals(event.temporal_values, [0.0, 7.0])
        flagged.set()

    sub = EventSubscriber(event_type=OT.ParameterQCEvent, origin=self.dataset_id, callback=on_qc_flag, auto_delete=True)
    sub.start()
    self.addCleanup(sub.stop)

    self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)
    self.dataset_monitor.event.wait(10)

    granule = self.data_retriever.retrieve(self.dataset_id)
    rdt = RecordDictionaryTool.load_from_granule(granule)
    np.testing.assert_array_almost_equal(rdt['tempwat_glblrng_qc'], [0, 1, 1, 1, 1, 1, 1, 0])
    self.assertTrue(flagged.wait(10))
示例7: make_large_dataset
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
def make_large_dataset(self, temp_vector):
    '''
    Publishes 27 hours of data (three batches) onto a fresh data product and
    waits for each batch to be ingested. Returns the data product id.
    '''
    ingested = Queue()
    ph = ParameterHelper(self.dataset_management, self.addCleanup)
    data_product_id, dataset_id, stream_def_id = self.make_data_product()

    # Count one DatasetModified event per ingested batch.
    es = EventSubscriber(event_type=OT.DatasetModified, origin=dataset_id, auto_delete=True, callback=lambda *args, **kwargs: ingested.put(1))
    es.start()
    self.addCleanup(es.stop)

    for rdt in self.populate_vectors(stream_def_id, 3, temp_vector):
        ph.publish_rdt_to_data_product(data_product_id, rdt)
    try:
        for _ in xrange(3):
            ingested.get(timeout=10)
    except Empty:
        raise AssertionError('Failed to populate dataset in time')
    return data_product_id
示例8: check_localrange
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
def check_localrange(self):
    '''
    Runs the base local-range QC check and verifies both the persisted
    tempwat_loclrng_qc flags and the published ParameterQCEvent.
    '''
    log.info('check_localrange')
    TestQCFunctions.check_localrange(self)
    self.init_check()
    flagged = Event()

    expected_times = np.array([3580144708.7555027, 3580144709.7555027, 3580144710.7555027, 3580144711.7555027, 3580144712.7555027])

    def on_qc_flag(event, *args, **kwargs):
        # Only the local-range QC parameter is of interest here.
        if event.qc_parameter != 'tempwat_loclrng_qc':
            return
        np.testing.assert_array_equal(event.temporal_values, expected_times)
        flagged.set()

    sub = EventSubscriber(event_type=OT.ParameterQCEvent, origin=self.dp_id, callback=on_qc_flag, auto_delete=True)
    sub.start()
    self.addCleanup(sub.stop)

    self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)
    self.assertTrue(self.dataset_monitor.wait())
    rdt = RecordDictionaryTool.load_from_granule(self.data_retriever.retrieve(self.dataset_id))
    np.testing.assert_array_almost_equal(rdt['tempwat_loclrng_qc'], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    self.assertTrue(flagged.wait(10))
示例9: QCProcessor
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
class QCProcessor(SimpleProcess):
def __init__(self):
    '''Sets up the shutdown signal and a default thread-join timeout.'''
    self.timeout = 10       # seconds to wait for the worker thread on suspend()
    self.event = Event()    # Synchronizes the thread: set => shut down
def on_start(self):
    '''
    Process initialization: spawns the worker thread and subscribes to
    ResetQCEvents, which are queued for the worker to process.
    '''
    # Worker thread runs the QC / event loops until suspend() sets self.event.
    self._thread = self._process.thread_manager.spawn(self.thread_loop)
    self._event_subscriber = EventSubscriber(event_type=OT.ResetQCEvent, callback=self.receive_event, auto_delete=True) # TODO Correct event types
    self._event_subscriber.start()
    self.timeout = self.CFG.get_safe('endpoint.receive.timeout', 10)
    self.resource_registry = self.container.resource_registry
    # Queue handing events from the subscriber callback to the worker thread.
    self.event_queue = Queue()
def on_quit(self):
    '''
    Stop and cleanup the thread
    '''
    self._event_subscriber.stop()
    # suspend() sets the shutdown event and joins the worker thread.
    self.suspend()
def receive_event(self, event, *args, **kwargs):
    '''Subscriber callback: enqueue the event for the worker thread.'''
    # Was log.error -- this is routine flow, not an error condition;
    # logging it at error level looks like a debugging leftover.
    log.debug("Adding event to the event queue")
    self.event_queue.put(event)
def thread_loop(self):
    '''
    Asynchronous event-loop: runs QC evaluation and event processing roughly
    once a second until ``self.event`` is set by suspend().
    '''
    threading.current_thread().name = '%s-qc-processor' % self.id
    while not self.event.wait(1):
        try:
            self.qc_processing_loop()
        except Exception:
            # Narrowed from a bare ``except:`` so thread/greenlet kill
            # exceptions can propagate and actually stop the loop.
            log.error("Error in QC Processing Loop", exc_info=True)
        try:
            self.event_processing_loop()
        except Exception:
            log.error("Error in QC Event Loop", exc_info=True)
def qc_processing_loop(self):
    '''
    Iterates through available data products and evaluates QC
    '''
    data_products, _ = self.container.resource_registry.find_resources(restype=RT.DataProduct, id_only=False)
    for data_product in data_products:
        # Get the reference designator
        try:
            rd = self.get_reference_designator(data_product._id)
        except BadRequest:
            # No reference designator => nothing to QC for this product.
            continue
        parameters = self.get_parameters(data_product)
        # Create a mapping of inputs to QC
        qc_mapping = {}
        # Creates a dictionary { ooi_short_name (sans data level) : parameter_name }
        for p in parameters:
            if p.ooi_short_name:
                sname = p.ooi_short_name
                # Strip a trailing data-level suffix such as "_L1"/"_L2".
                g = re.match(r'([a-zA-Z-_]+)(_L[0-9])', sname)
                if g:
                    sname = g.groups()[0]
                qc_mapping[sname] = p.name
        for p in parameters:
            # for each parameter, if the name ends in _qc run the qc
            if p.name.endswith('_qc'):
                self.run_qc(data_product, rd, p, qc_mapping)
            # Break early if we can (shutdown was requested)
            if self.event.is_set():
                break
def event_processing_loop(self):
    '''
    Processes the events in the event queue
    '''
    # NOTE(review): logged at error level but this is routine flow -- confirm.
    log.error("Processing event queue")
    # Assumes self.event_queue is a gevent-style queue: iteration stops when
    # the StopIteration sentinel is dequeued, so this drains everything
    # queued so far -- TODO confirm the queue implementation.
    self.event_queue.put(StopIteration)
    for event in self.event_queue:
        log.error("My event's reference designator: %s", event.origin)
def suspend(self):
    '''
    Stops the event loop
    '''
    # Makes thread_loop's wait() return True so the loop exits.
    self.event.set()
    self._thread.join(self.timeout)
    log.info("QC Thread Suspended")
def get_reference_designator(self, data_product_id=''):
'''
Returns the reference designator for a data product if it has one
'''
# First try to get the parent data product
data_product_ids, _ = self.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasDataProductParent, id_only=True)
if data_product_ids:
#.........这里部分代码省略.........
示例10: EOIRegistrationProcess
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
class EOIRegistrationProcess(SimpleProcess):
    '''
    Listens for DataSource / ExternalDataProvider resource events and
    registers them with the external EOI importer service (harvester
    creation) when that service is available.
    '''

    def on_start(self):
        # Fire _register_data_source whenever a DataSource resource changes.
        self.data_source_subscriber = EventSubscriber(event_type=OT.ResourceModifiedEvent,
                                                      origin_type=RT.DataSource,
                                                      callback=self._register_data_source)
        # Fire _register_provider whenever an ExternalDataProvider changes.
        self.provider_subscriber = EventSubscriber(event_type=OT.ResourceModifiedEvent,
                                                   origin_type=RT.ExternalDataProvider,
                                                   callback=self._register_provider)
        self.data_source_subscriber.start()
        self.provider_subscriber.start()

        self.rr = self.container.resource_registry

        self.using_eoi_services = CFG.get_safe('eoi.meta.use_eoi_services', False)
        self.server = CFG.get_safe('eoi.importer_service.server', "localhost")+":"+str(CFG.get_safe('eoi.importer_service.port', 8844))

        log.info("Using geoservices="+str(self.using_eoi_services))
        if not self.using_eoi_services:
            log.warn("not using geoservices...")

        self.importer_service_available = self.check_for_importer_service()
        if not self.importer_service_available:
            log.warn("not using importer service...")

    def check_for_importer_service(self):
        '''
        only gets run on start, used to identify if importer service is available
        '''
        try:
            r = requests.get(self.server+'/service=alive&name=ooi&id=ooi')
            log.info("importer service available, status code: %s", str(r.status_code))
            # alive service returned ok only on HTTP 200
            return r.status_code == 200
        except Exception as e:
            # service is really not available
            log.warn("importer service is really not available...%s", e)
            return False

    def _register_data_source(self, event, *args, **kwargs):
        '''
        used to create a harvester
        '''
        if self.importer_service_available:
            obj = self.rr.read(event.origin)
            data_fields = []
            for attrname, value in vars(obj).iteritems():
                # generate the param list to pass to importer service using field names
                # BUG FIX: was `attrname is not "contact"` -- identity comparison
                # against a string literal; use equality instead.
                if attrname != "contact":
                    f = attrname.replace("_", "")+"="+str(obj[attrname])
                    data_fields.append(f)
            param_list = '&'.join(data_fields)
            request_string = self.server+'/service='+CREATE_HARVESTER+"&"+param_list
            r = requests.get(request_string)

    def _register_provider(self, event, *args, **kwargs):
        '''Placeholder: provider registration is not implemented yet.'''
        if self.importer_service_available:
            #print "provider id:", event.origin
            pass

    def on_quit(self):
        # Tear down both subscriptions.
        self.data_source_subscriber.stop()
        self.provider_subscriber.stop()
示例11: test_pub_on_different_subsubtypes
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
def test_pub_on_different_subsubtypes(self):
    '''
    Verifies wildcard matching on dotted sub_types: "st1.*" matches any st1
    sub-subtype, "*.a" matches any ".a" suffix, and a plain "st1" matches
    only exactly "st1".
    '''
    # One result slot (async result, delivery queue, counter) per subscriber.
    res_list = [DotDict(ar=event.AsyncResult(), gq=queue.Queue(), count=0) for i in xrange(4)]
    def cb_gen(num):
        # Build a callback bound to result slot ``num``.
        def cb(event, *args, **kwargs):
            res_list[num].count += 1
            res_list[num].gq.put(event)
            if event.description == "end":
                res_list[num].ar.set()
        return cb
    sub0 = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1.*", callback=cb_gen(0))
    sub0.start()
    sub1 = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1.a", callback=cb_gen(1))
    sub1.start()
    sub2 = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="*.a", callback=cb_gen(2))
    sub2.start()
    sub3 = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1", callback=cb_gen(3))
    sub3.start()
    pub1 = EventPublisher(event_type="ResourceModifiedEvent")
    pub1.publish_event(origin="one", sub_type="st1.a", description="1")
    pub1.publish_event(origin="two", sub_type="st1", description="2")
    pub1.publish_event(origin="three", sub_type="st1.b", description="3")
    pub1.publish_event(origin="four", sub_type="st2.a", description="4")
    pub1.publish_event(origin="five", sub_type="st2", description="5")
    pub1.publish_event(origin="six", sub_type="a", description="6")
    pub1.publish_event(origin="seven", sub_type="", description="7")
    pub1.publish_event(origin="end", sub_type="st1.a", description="end")
    pub1.publish_event(origin="end", sub_type="st1", description="end")
    # Wait for the first three subscribers to see their "end" event.
    [res_list[i].ar.get(timeout=5) for i in xrange(3)]
    sub0.stop()
    sub1.stop()
    sub2.stop()
    sub3.stop()
    # Drain each subscriber's queue and check deliveries and ordering.
    for i in xrange(4):
        res_list[i].res = []
        for x in xrange(res_list[i].count):
            res_list[i].res.append(res_list[i].gq.get(timeout=5))
    self.assertEquals(len(res_list[0].res), 3)
    self.assertEquals(res_list[0].res[0].description, "1")
    self.assertEquals(len(res_list[1].res), 2)
    self.assertEquals(res_list[1].res[0].description, "1")
    self.assertEquals(len(res_list[2].res), 3)
    self.assertEquals(res_list[2].res[0].description, "1")
    self.assertEquals(len(res_list[3].res), 2)
    self.assertEquals(res_list[3].res[0].description, "2")
示例12: ContainerManager
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
class ContainerManager(object):
    '''
    Listens for ContainerManagementRequest events, dispatches matching
    actions to registered handlers, and publishes a
    ContainerManagementResult event with the outcome of each action.
    '''
    def __init__(self, container, handlers=DEFAULT_HANDLERS):
        self.container = container
        self.running = False
        # make sure start() completes before an event is handled,
        # and any event is either handled before stop() begins,
        # or the handler begins after stop() completes and the event is dropped
        self.lock = Lock()
        # Copy so add_handler() does not mutate the shared default list.
        self.handlers = handlers[:]

    def start(self):
        # Install the container tracer (could be its own component)
        self.container_tracer = ContainerTracer()
        self.container_tracer.start_tracing()
        self.container.tracer = CallTracer
        self.container.tracer.configure(CFG.get_safe("container.tracer", {}))

        ## create queue listener and publisher
        self.sender = EventPublisher(event_type="ContainerManagementResult")
        self.receiver = EventSubscriber(event_type="ContainerManagementRequest", callback=self._receive_event)
        # Flip running and start receiving under the lock so no request can
        # be handled before start() has fully completed.
        with self.lock:
            self.running = True
            self.receiver.start()
            log.info('ready for container management requests')

    def stop(self):
        log.debug('container management stopping')
        with self.lock:
            self.receiver.stop()
            self.sender.close()
            self.running = False
        log.debug('container management stopped')
        self.container_tracer.stop_tracing()

    def add_handler(self, handler):
        # Register an additional request handler at runtime.
        self.handlers.append(handler)

    def _get_handlers(self, action):
        # Return every registered handler able to service this action.
        out = []
        for handler in self.handlers:
            if handler.can_handle_request(action):
                out.append(handler)
        return out

    def _receive_event(self, event, headers):
        # Hold the lock so handling cannot interleave with start()/stop().
        with self.lock:
            if not isinstance(event, ContainerManagementRequest):
                log.trace('ignoring wrong type event: %r', event)
                return
            if not self.running:
                log.warn('ignoring admin message received after shutdown: %s', event.action)
                return
            predicate = ContainerSelector.from_object(event.predicate)
            if predicate.should_handle(self.container):
                log.trace('handling admin message: %s', event.action)
                self._perform_action(event.action)
            else:
                log.trace('ignoring admin action: %s', event.action)
                if SEND_RESULT_IF_NOT_SELECTED:
                    self.sender.publish_event(origin=self.container.id, action=event.action, outcome='not selected')
                    log.debug('received action: %s, outcome: not selected', event.action)

    def _perform_action(self, action):
        handlers = self._get_handlers(action)
        if not handlers:
            log.info('action accepted but no handlers found: %s', action)
            result = 'unhandled'
            self.sender.publish_event(origin=self.container.id, action=action, outcome=str(result))
            log.debug('received action: %s, outcome: %s', action, result)
        else:
            # Each handler gets its own result event, even when one fails.
            for handler in handlers:
                try:
                    result = handler.handle_request(action) or "completed"
                except Exception,e:
                    log.error("handler %r failed to perform action: %s", handler, action, exc_info=True)
                    result = e
                self.sender.publish_event(origin=self.container.id, action=action, outcome=str(result))
                log.debug('performed action: %s, outcome: %s', action, result)
示例13: GovernanceController
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
class GovernanceController(object):
"""
This is a singleton object which handles governance functionality in the container.
"""
def __init__(self, container):
    '''Initializes the controller in a disabled state; start() enables it.'''
    log.debug('GovernanceController.__init__()')
    self.container = container
    self.enabled = False

    # Interceptor bookkeeping
    self.interceptor_by_name_dict = {}
    self.interceptor_order = []

    # Collaborators created by initialize_from_config()
    self.policy_decision_point_manager = None
    self.governance_dispatcher = None

    # Holds a list per service operation of policy methods to check before
    # the op in a process is allowed to be called
    self._service_op_preconditions = {}

    # Org boundary state
    self._is_container_org_boundary = False
    self._container_org_name = None
    self._container_org_id = None

    # For policy debugging purposes. Keeps a list of most recent policy
    # updates for later readout
    self._policy_update_log = []
    self._policy_snapshot = None
def start(self):
    '''Enables governance (when configured) and subscribes to policy events.'''
    log.debug("GovernanceController starting ...")
    self._CFG = CFG

    self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)
    log.info("GovernanceInterceptor enabled: %s" % str(self.enabled))
    self.policy_event_subscriber = None

    # containers default to not Org Boundary and ION Root Org
    self._is_container_org_boundary = CFG.get_safe('container.org_boundary',False)
    self._container_org_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))
    self._container_org_id = None
    self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')
    self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

    self.system_actor_id = None
    self.system_actor_user_header = None

    if self.enabled:
        config = CFG.get_safe('interceptor.interceptors.governance.config')
        self.initialize_from_config(config)
        # React to policy changes published anywhere in the system.
        self.policy_event_subscriber = EventSubscriber(event_type=OT.PolicyEvent, callback=self.policy_event_callback)
        self.policy_event_subscriber.start()
        self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=self.container)
        self.policy_client = PolicyManagementServiceProcessClient(node=self.container.node, process=self.container)
        # Take an initial snapshot for the policy debugging log.
        # NOTE(review): placed inside the enabled branch since it relies on
        # the clients created above -- confirm against the original layout.
        self._policy_snapshot = self._get_policy_snapshot()
        self._log_policy_update("start_governance_ctrl", message="Container start")
def initialize_from_config(self, config):
    '''
    Builds the governance dispatcher and policy decision point manager, and
    instantiates every configured governance interceptor.
    '''
    self.governance_dispatcher = GovernanceDispatcher()
    self.policy_decision_point_manager = PolicyDecisionPointManager(self)

    if 'interceptor_order' in config:
        self.interceptor_order = config['interceptor_order']

    if 'governance_interceptors' in config:
        gov_ints = config['governance_interceptors']
        for name in gov_ints:
            # Resolve the dotted class path and instantiate the interceptor.
            modpath, classname = gov_ints[name]["class"].rsplit('.', 1)
            module = __import__(modpath, fromlist=[classname])
            # Keep instances by name for possible re-use
            self.interceptor_by_name_dict[name] = getattr(module, classname)()
def stop(self):
    '''Stops governance; tears down the policy event subscription if active.'''
    log.debug("GovernanceController stopping ...")
    # Subscriber only exists when governance was enabled in start().
    if self.policy_event_subscriber is not None:
        self.policy_event_subscriber.stop()
@property
def is_container_org_boundary(self):
    # True when this container enforces an Org boundary (set from CFG in start()).
    return self._is_container_org_boundary
#.........这里部分代码省略.........
示例14: EventPersister
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
class EventPersister(StandaloneProcess):
def on_init(self):
    '''Reads persister configuration and prepares queues, counters and plugins.'''
    # Time in between event persists
    self.persist_interval = float(self.CFG.get_safe("process.event_persister.persist_interval", 1.0))

    self.persist_blacklist = self.CFG.get_safe("process.event_persister.persist_blacklist", {})
    # Simple blacklist entries have exactly one key: 'event_type'.
    self._event_type_blacklist = [entry['event_type'] for entry in self.persist_blacklist if entry.get('event_type', None) and len(entry) == 1]
    # Everything else is a "complex" expression -- not supported yet.
    self._complex_blacklist = [entry for entry in self.persist_blacklist if not (entry.get('event_type', None) and len(entry) == 1)]
    if self._complex_blacklist:
        log.warn("EventPersister does not yet support complex blacklist expressions: %s", self._complex_blacklist)

    # Holds received events FIFO in syncronized queue
    self.event_queue = Queue()

    # Temporarily holds list of events to persist while datastore operation are not yet completed
    # This is where events to persist will remain if datastore operation fails occasionally.
    self.events_to_persist = None

    # Number of unsuccessful attempts to persist in a row
    self.failure_count = 0

    # bookkeeping for greenlet
    self._persist_greenlet = None
    self._terminate_persist = Event()  # when set, exits the persister greenlet

    # The event subscriber
    self.event_sub = None

    # Registered event process plugins
    self.process_plugins = {}
    for plugin_name, plugin_cls, plugin_args in PROCESS_PLUGINS:
        try:
            plugin = named_any(plugin_cls)(**plugin_args)
            self.process_plugins[plugin_name] = plugin
            log.info("Loaded event processing plugin %s (%s)", plugin_name, plugin_cls)
        except Exception as ex:
            log.error("Cannot instantiate event processing plugin %s (%s): %s", plugin_name, plugin_cls, ex)
def on_start(self):
    '''Starts the persister greenlet, then subscribes to all events.'''
    # Persister thread
    self._persist_greenlet = spawn(self._persister_loop, self.persist_interval)
    log.debug('EventPersister persist greenlet started in "%s" (interval %s)', self.__class__.__name__, self.persist_interval)

    # Event subscription -- a catch-all subscriber feeding self.event_queue.
    self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                     callback=self._on_event,
                                     queue_name="event_persister")
    self.event_sub.start()
def on_quit(self):
    '''Stops the subscriber, signals the persister greenlet and waits for it.'''
    # Stop event subscriber
    self.event_sub.stop()

    # tell the trigger greenlet we're done
    self._terminate_persist.set()

    # wait on the greenlets to finish cleanly
    self._persist_greenlet.join(timeout=5)
def _on_event(self, event, *args, **kwargs):
    # Subscriber callback: enqueue the event for the persister greenlet.
    self.event_queue.put(event)
def _in_blacklist(self, event):
if event.type_ in self._event_type_blacklist:
return True
if event.base_types:
for base_type in event.base_types:
if base_type in self._event_type_blacklist:
return True
# TODO: Complex event blacklist
return False
def _persister_loop(self, persist_interval):
log.debug('Starting event persister thread with persist_interval=%s', persist_interval)
# Event.wait returns False on timeout (and True when set in on_quit), so we use this to both exit cleanly and do our timeout in a loop
while not self._terminate_persist.wait(timeout=persist_interval):
try:
# leftover events_to_persist indicate previous attempt did not succeed
if self.events_to_persist and self.failure_count > 2:
bad_events = []
log.warn("Attempting to persist %s events individually" % (len(self.events_to_persist)))
for event in self.events_to_persist:
try:
self.container.event_repository.put_event(event)
except Exception:
bad_events.append(event)
if len(self.events_to_persist) != len(bad_events):
log.warn("Succeeded to persist some of the events - rest must be bad")
self._log_events(bad_events)
elif bad_events:
log.error("Discarding %s events after %s attempts!!" % (len(bad_events), self.failure_count))
self._log_events(bad_events)
self.events_to_persist = None
#.........这里部分代码省略.........
示例15: GovernanceController
# 需要导入模块: from pyon.ion.event import EventSubscriber [as 别名]
# 或者: from pyon.ion.event.EventSubscriber import start [as 别名]
class GovernanceController(object):
"""
This is a singleton object which handles governance functionality in the container.
"""
def __init__(self, container):
    '''Initializes the controller in a disabled state; start() enables it.'''
    log.debug('GovernanceController.__init__()')
    self.container = container
    self.enabled = False

    # Interceptor bookkeeping
    self.interceptor_by_name_dict = {}
    self.interceptor_order = []

    # Collaborators created by initialize_from_config()
    self.policy_decision_point_manager = None
    self.governance_dispatcher = None

    # Holds a list per service operation of policy methods to check before
    # the op in a process is allowed to be called
    self._service_op_preconditions = {}

    # Org boundary state
    self._is_container_org_boundary = False
    self._container_org_name = None
    self._container_org_id = None
def start(self):
    '''Enables governance (when configured) and subscribes to policy events.'''
    log.debug("GovernanceController starting ...")
    self._CFG = CFG

    self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)
    log.info("GovernanceInterceptor enabled: %s" % str(self.enabled))
    self.policy_event_subscriber = None

    # containers default to not Org Boundary and ION Root Org
    self._is_container_org_boundary = CFG.get_safe('container.org_boundary',False)
    self._container_org_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))
    self._container_org_id = None
    self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')
    self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

    self.system_actor_id = None
    self.system_actor_user_header = None

    if self.enabled:
        config = CFG.get_safe('interceptor.interceptors.governance.config')
        self.initialize_from_config(config)
        # React to policy changes published anywhere in the system.
        self.policy_event_subscriber = EventSubscriber(event_type=OT.PolicyEvent, callback=self.policy_event_callback)
        self.policy_event_subscriber.start()
        self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=self.container)
        self.policy_client = PolicyManagementServiceProcessClient(node=self.container.node, process=self.container)
def initialize_from_config(self, config):
    '''
    Builds the governance dispatcher and policy decision point manager, and
    instantiates every configured governance interceptor.
    '''
    self.governance_dispatcher = GovernanceDispatcher()
    self.policy_decision_point_manager = PolicyDecisionPointManager(self)

    if 'interceptor_order' in config:
        self.interceptor_order = config['interceptor_order']

    if 'governance_interceptors' in config:
        gov_ints = config['governance_interceptors']
        for name in gov_ints:
            # Resolve the dotted class path and instantiate the interceptor.
            modpath, classname = gov_ints[name]["class"].rsplit('.', 1)
            module = __import__(modpath, fromlist=[classname])
            # Keep instances by name for possible re-use
            self.interceptor_by_name_dict[name] = getattr(module, classname)()
def stop(self):
    '''Stops governance; tears down the policy event subscription if active.'''
    log.debug("GovernanceController stopping ...")
    # Subscriber only exists when governance was enabled in start().
    if self.policy_event_subscriber is not None:
        self.policy_event_subscriber.stop()
@property
def is_container_org_boundary(self):
    # True when this container enforces an Org boundary (set from CFG in start()).
    return self._is_container_org_boundary

@property
def container_org_name(self):
    # Name of the Org this container belongs to.
    return self._container_org_name

@property
def system_root_org_name(self):
    # Name of the system root Org (defaults to 'ION' in start()).
    return self._system_root_org_name
#.........这里部分代码省略.........