This article collects and summarizes typical usage examples of the Python method pyon.ion.event.EventSubscriber.stop. If you are wondering what EventSubscriber.stop does, how to use it, or where to find examples of it, the curated code samples below should help. You can also explore further usage examples of the containing class, pyon.ion.event.EventSubscriber.
The following presents 10 code examples of the EventSubscriber.stop method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
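Before the individual examples, here is a minimal sketch of the lifecycle that every example below shares: construct an EventSubscriber with an event type and a callback, call start() to begin listening, and call stop() to tear the subscription down. The sketch is not taken from any example on this page; it assumes a running pyon container, and the gevent import, event type, and field values are illustrative assumptions only.

from gevent.event import AsyncResult   # assumption: pyon runs on gevent
from pyon.ion.event import EventSubscriber, EventPublisher

received = AsyncResult()

def on_event(event, *args, **kwargs):
    # The callback is handed the event object plus transport headers.
    received.set(event)

sub = EventSubscriber(event_type="ResourceModifiedEvent", callback=on_event)
sub.start()                               # begin consuming matching events
try:
    EventPublisher(event_type="ResourceModifiedEvent").publish_event(origin="demo", description="hello")
    assert received.get(timeout=5).description == "hello"
finally:
    sub.stop()                            # always stop the subscriber when done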

Example 1: test_pub_on_different_subtypes
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
def test_pub_on_different_subtypes(self):
    ar = event.AsyncResult()
    gq = queue.Queue()
    self.count = 0

    def cb(event, *args, **kwargs):
        self.count += 1
        gq.put(event)
        if event.description == "end":
            ar.set()

    sub = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1", callback=cb)
    sub.start()

    pub1 = EventPublisher(event_type="ResourceModifiedEvent")
    pub2 = EventPublisher(event_type="ContainerLifecycleEvent")

    pub1.publish_event(origin="two", sub_type="st2", description="2")
    pub2.publish_event(origin="three", sub_type="st1", description="3")
    pub1.publish_event(origin="one", sub_type="st1", description="1")
    pub1.publish_event(origin="four", sub_type="st1", description="end")

    ar.get(timeout=5)
    sub.stop()

    res = []
    for x in xrange(self.count):
        res.append(gq.get(timeout=5))

    self.assertEquals(len(res), 2)
    self.assertEquals(res[0].description, "1")
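Note on Example 1: the subscriber filters on both event_type ("ResourceModifiedEvent") and sub_type ("st1"), so only the publishes with descriptions "1" and "end" reach the callback; the sub_type="st2" publish and the ContainerLifecycleEvent publish are filtered out, which is why the test expects exactly two received events with "1" first.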

Example 2: QCProcessor
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
class QCProcessor(SimpleProcess):
    def __init__(self):
        self.event = Event()  # Synchronizes the thread
        self.timeout = 10

    def on_start(self):
        '''
        Process initialization
        '''
        self._thread = self._process.thread_manager.spawn(self.thread_loop)
        self._event_subscriber = EventSubscriber(event_type=OT.ResetQCEvent, callback=self.receive_event, auto_delete=True)  # TODO Correct event types
        self._event_subscriber.start()
        self.timeout = self.CFG.get_safe('endpoint.receive.timeout', 10)
        self.resource_registry = self.container.resource_registry
        self.event_queue = Queue()

    def on_quit(self):
        '''
        Stop and cleanup the thread
        '''
        self._event_subscriber.stop()
        self.suspend()

    def receive_event(self, event, *args, **kwargs):
        log.error("Adding event to the event queue")
        self.event_queue.put(event)

    def thread_loop(self):
        '''
        Asynchronous event-loop
        '''
        threading.current_thread().name = '%s-qc-processor' % self.id
        while not self.event.wait(1):
            try:
                self.qc_processing_loop()
            except:
                log.error("Error in QC Processing Loop", exc_info=True)
            try:
                self.event_processing_loop()
            except:
                log.error("Error in QC Event Loop", exc_info=True)

    def qc_processing_loop(self):
        '''
        Iterates through available data products and evaluates QC
        '''
        data_products, _ = self.container.resource_registry.find_resources(restype=RT.DataProduct, id_only=False)
        for data_product in data_products:
            # Get the reference designator
            try:
                rd = self.get_reference_designator(data_product._id)
            except BadRequest:
                continue
            parameters = self.get_parameters(data_product)

            # Create a mapping of inputs to QC
            qc_mapping = {}
            # Creates a dictionary { data_product_name : parameter_name }
            for p in parameters:
                if p.ooi_short_name:
                    sname = p.ooi_short_name
                    g = re.match(r'([a-zA-Z-_]+)(_L[0-9])', sname)
                    if g:
                        sname = g.groups()[0]
                    qc_mapping[sname] = p.name

            for p in parameters:
                # for each parameter, if the name ends in _qc run the qc
                if p.name.endswith('_qc'):
                    self.run_qc(data_product, rd, p, qc_mapping)

            # Break early if we can
            if self.event.is_set():
                break

    def event_processing_loop(self):
        '''
        Processes the events in the event queue
        '''
        log.error("Processing event queue")
        self.event_queue.put(StopIteration)
        for event in self.event_queue:
            log.error("My event's reference designator: %s", event.origin)

    def suspend(self):
        '''
        Stops the event loop
        '''
        self.event.set()
        self._thread.join(self.timeout)
        log.info("QC Thread Suspended")

    def get_reference_designator(self, data_product_id=''):
        '''
        Returns the reference designator for a data product if it has one
        '''
        # First try to get the parent data product
        data_product_ids, _ = self.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasDataProductParent, id_only=True)
        if data_product_ids:
#......... part of the code is omitted here .........

Example 3: ContainerManager
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
class ContainerManager(object):
    def __init__(self, container, handlers=DEFAULT_HANDLERS):
        self.container = container
        self.running = False
        # make sure start() completes before an event is handled,
        # and any event is either handled before stop() begins,
        # or the handler begins after stop() completes and the event is dropped
        self.lock = Lock()
        self.handlers = handlers[:]

    def start(self):
        # Install the container tracer (could be its own
        self.container_tracer = ContainerTracer()
        self.container_tracer.start_tracing()
        self.container.tracer = CallTracer
        self.container.tracer.configure(CFG.get_safe("container.tracer", {}))

        ## create queue listener and publisher
        self.sender = EventPublisher(event_type="ContainerManagementResult")
        self.receiver = EventSubscriber(event_type="ContainerManagementRequest", callback=self._receive_event)
        with self.lock:
            self.running = True
            self.receiver.start()
        log.info('ready for container management requests')

    def stop(self):
        log.debug('container management stopping')
        with self.lock:
            self.receiver.stop()
            self.sender.close()
            self.running = False
        log.debug('container management stopped')
        self.container_tracer.stop_tracing()

    def add_handler(self, handler):
        self.handlers.append(handler)

    def _get_handlers(self, action):
        out = []
        for handler in self.handlers:
            if handler.can_handle_request(action):
                out.append(handler)
        return out

    def _receive_event(self, event, headers):
        with self.lock:
            if not isinstance(event, ContainerManagementRequest):
                log.trace('ignoring wrong type event: %r', event)
                return
            if not self.running:
                log.warn('ignoring admin message received after shutdown: %s', event.action)
                return
            predicate = ContainerSelector.from_object(event.predicate)
            if predicate.should_handle(self.container):
                log.trace('handling admin message: %s', event.action)
                self._perform_action(event.action)
            else:
                log.trace('ignoring admin action: %s', event.action)
                if SEND_RESULT_IF_NOT_SELECTED:
                    self.sender.publish_event(origin=self.container.id, action=event.action, outcome='not selected')
                    log.debug('received action: %s, outcome: not selected', event.action)

    def _perform_action(self, action):
        handlers = self._get_handlers(action)
        if not handlers:
            log.info('action accepted but no handlers found: %s', action)
            result = 'unhandled'
            self.sender.publish_event(origin=self.container.id, action=action, outcome=str(result))
            log.debug('received action: %s, outcome: %s', action, result)
        else:
            for handler in handlers:
                try:
                    result = handler.handle_request(action) or "completed"
                except Exception, e:
                    log.error("handler %r failed to perform action: %s", handler, action, exc_info=True)
                    result = e
                self.sender.publish_event(origin=self.container.id, action=action, outcome=str(result))
                log.debug('performed action: %s, outcome: %s', action, result)

Example 4: EventPersister
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
class EventPersister(StandaloneProcess):
    def on_init(self):
        # Time in between event persists
        self.persist_interval = float(self.CFG.get_safe("process.event_persister.persist_interval", 1.0))

        self.persist_blacklist = self.CFG.get_safe("process.event_persister.persist_blacklist", {})

        self._event_type_blacklist = [entry['event_type'] for entry in self.persist_blacklist if entry.get('event_type', None) and len(entry) == 1]
        self._complex_blacklist = [entry for entry in self.persist_blacklist if not (entry.get('event_type', None) and len(entry) == 1)]
        if self._complex_blacklist:
            log.warn("EventPersister does not yet support complex blacklist expressions: %s", self._complex_blacklist)

        # Holds received events FIFO in a synchronized queue
        self.event_queue = Queue()

        # Temporarily holds the list of events to persist while the datastore operation is not yet complete
        # This is where events to persist remain if the datastore operation occasionally fails.
        self.events_to_persist = None

        # Number of unsuccessful attempts to persist in a row
        self.failure_count = 0

        # bookkeeping for greenlet
        self._persist_greenlet = None
        self._terminate_persist = Event()  # when set, exits the persister greenlet

        # The event subscriber
        self.event_sub = None

        # Registered event process plugins
        self.process_plugins = {}
        for plugin_name, plugin_cls, plugin_args in PROCESS_PLUGINS:
            try:
                plugin = named_any(plugin_cls)(**plugin_args)
                self.process_plugins[plugin_name] = plugin
                log.info("Loaded event processing plugin %s (%s)", plugin_name, plugin_cls)
            except Exception as ex:
                log.error("Cannot instantiate event processing plugin %s (%s): %s", plugin_name, plugin_cls, ex)

    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._persister_loop, self.persist_interval)
        log.debug('EventPersister persist greenlet started in "%s" (interval %s)', self.__class__.__name__, self.persist_interval)

        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                         callback=self._on_event,
                                         queue_name="event_persister")
        self.event_sub.start()

    def on_quit(self):
        # Stop event subscriber
        self.event_sub.stop()

        # tell the trigger greenlet we're done
        self._terminate_persist.set()

        # wait on the greenlets to finish cleanly
        self._persist_greenlet.join(timeout=5)

    def _on_event(self, event, *args, **kwargs):
        self.event_queue.put(event)

    def _in_blacklist(self, event):
        if event.type_ in self._event_type_blacklist:
            return True
        if event.base_types:
            for base_type in event.base_types:
                if base_type in self._event_type_blacklist:
                    return True
        # TODO: Complex event blacklist
        return False

    def _persister_loop(self, persist_interval):
        log.debug('Starting event persister thread with persist_interval=%s', persist_interval)

        # Event.wait returns False on timeout (and True when set in on_quit), so we use this to both exit cleanly and do our timeout in a loop
        while not self._terminate_persist.wait(timeout=persist_interval):
            try:
                # leftover events_to_persist indicate previous attempt did not succeed
                if self.events_to_persist and self.failure_count > 2:
                    bad_events = []
                    log.warn("Attempting to persist %s events individually" % (len(self.events_to_persist)))
                    for event in self.events_to_persist:
                        try:
                            self.container.event_repository.put_event(event)
                        except Exception:
                            bad_events.append(event)

                    if len(self.events_to_persist) != len(bad_events):
                        log.warn("Succeeded to persist some of the events - rest must be bad")
                        self._log_events(bad_events)
                    elif bad_events:
                        log.error("Discarding %s events after %s attempts!!" % (len(bad_events), self.failure_count))
                        self._log_events(bad_events)

                    self.events_to_persist = None
#......... part of the code is omitted here .........

Example 5: GovernanceController
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
class GovernanceController(object):
    """
    This is a singleton object which handles governance functionality in the container.
    """

    def __init__(self, container):
        log.debug('GovernanceController.__init__()')
        self.container = container
        self.enabled = False
        self.interceptor_by_name_dict = dict()
        self.interceptor_order = []
        self.policy_decision_point_manager = None
        self.governance_dispatcher = None

        # Holds a list per service operation of policy methods to check before the op in a process is allowed to be called
        self._service_op_preconditions = dict()

        self._is_container_org_boundary = False
        self._container_org_name = None
        self._container_org_id = None

    def start(self):
        log.debug("GovernanceController starting ...")

        self._CFG = CFG

        self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)
        log.info("GovernanceInterceptor enabled: %s" % str(self.enabled))

        self.policy_event_subscriber = None

        # Containers default to not Org Boundary and ION Root Org
        self._is_container_org_boundary = CFG.get_safe('container.org_boundary', False)
        self._container_org_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))
        self._container_org_id = None
        self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')
        self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

        self.system_actor_id = None
        self.system_actor_user_header = None

        if self.enabled:
            config = CFG.get_safe('interceptor.interceptors.governance.config')
            self.initialize_from_config(config)

            self.policy_event_subscriber = EventSubscriber(event_type=OT.PolicyEvent, callback=self.policy_event_callback)
            self.policy_event_subscriber.start()

            self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=self.container)
            self.policy_client = PolicyManagementServiceProcessClient(node=self.container.node, process=self.container)

    def initialize_from_config(self, config):
        self.governance_dispatcher = GovernanceDispatcher()
        self.policy_decision_point_manager = PolicyDecisionPointManager(self)

        if 'interceptor_order' in config:
            self.interceptor_order = config['interceptor_order']

        if 'governance_interceptors' in config:
            gov_ints = config['governance_interceptors']
            for name in gov_ints:
                interceptor_def = gov_ints[name]

                # Instantiate and put in by_name array
                parts = interceptor_def["class"].split('.')
                modpath = ".".join(parts[:-1])
                classname = parts[-1]
                module = __import__(modpath, fromlist=[classname])
                classobj = getattr(module, classname)
                classinst = classobj()

                # Put in by_name_dict for possible re-use
                self.interceptor_by_name_dict[name] = classinst

    def stop(self):
        log.debug("GovernanceController stopping ...")

        if self.policy_event_subscriber is not None:
            self.policy_event_subscriber.stop()

    @property
    def is_container_org_boundary(self):
        return self._is_container_org_boundary

    @property
    def container_org_name(self):
        return self._container_org_name

    @property
    def system_root_org_name(self):
        return self._system_root_org_name
#......... part of the code is omitted here .........

Example 6: GovernanceController
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
class GovernanceController(object):
    """
    This is a singleton object which handles governance functionality in the container.
    Registers event callback for PolicyEvent to update local policies on change.
    """

    def __init__(self, container):
        log.debug('GovernanceController.__init__()')
        self.container = container
        self.enabled = False
        self.interceptor_by_name_dict = {}
        self.interceptor_order = []
        self.policy_decision_point_manager = None
        self.governance_dispatcher = None

        # Holds a list per service operation of policy methods to be called before operation is invoked
        self._service_op_preconditions = {}
        # Holds a list per process operation of policy methods to be called before operation is invoked
        self._process_op_preconditions = {}

        self._is_container_org_boundary = False
        self._container_org_name = None
        self._container_org_id = None

        # For policy debugging purposes. Keeps a list of most recent policy updates for later readout
        self._policy_update_log = []
        self._policy_snapshot = None

    def start(self):
        log.debug("GovernanceController starting ...")

        self._CFG = CFG

        self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)
        if not self.enabled:
            log.warn("GovernanceInterceptor disabled by configuration")
        self.policy_event_subscriber = None

        # Containers default to not Org Boundary and ION Root Org
        self._is_container_org_boundary = CFG.get_safe('container.org_boundary', False)
        self._container_org_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))
        self._container_org_id = None
        self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')
        self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

        self.system_actor_id = None
        self.system_actor_user_header = None

        self.rr_client = ResourceRegistryServiceProcessClient(process=self.container)
        self.policy_client = PolicyManagementServiceProcessClient(process=self.container)

        if self.enabled:
            config = CFG.get_safe('interceptor.interceptors.governance.config')
            self.initialize_from_config(config)

            self.policy_event_subscriber = EventSubscriber(event_type=OT.PolicyEvent, callback=self.policy_event_callback)
            self.policy_event_subscriber.start()

            self._policy_snapshot = self._get_policy_snapshot()
            self._log_policy_update("start_governance_ctrl", message="Container start")

    def initialize_from_config(self, config):
        self.governance_dispatcher = GovernanceDispatcher()
        self.policy_decision_point_manager = PolicyDecisionPointManager(self)

        self.interceptor_order = config.get('interceptor_order', None) or []
        gov_ints = config.get('governance_interceptors', None) or {}
        for name in gov_ints:
            interceptor_def = gov_ints[name]
            classobj = named_any(interceptor_def["class"])
            classinst = classobj()
            self.interceptor_by_name_dict[name] = classinst

    def _ensure_system_actor(self):
        """Make sure we have a handle for the system actor"""
        if self.system_actor_id is None:
            system_actor = get_system_actor()
            if system_actor is not None:
                self.system_actor_id = system_actor._id
                self.system_actor_user_header = get_system_actor_header(system_actor)

    def stop(self):
        log.debug("GovernanceController stopping ...")

        if self.policy_event_subscriber is not None:
            self.policy_event_subscriber.stop()

    @property
    def is_container_org_boundary(self):
        return self._is_container_org_boundary

    @property
    def container_org_name(self):
        return self._container_org_name

    @property
    def system_root_org_name(self):
        return self._system_root_org_name

    @property
#......... part of the code is omitted here .........

Example 7: EOIRegistrationProcess
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
class EOIRegistrationProcess(SimpleProcess):
    def on_start(self):
        self.data_source_subscriber = EventSubscriber(event_type=OT.ResourceModifiedEvent,
                                                      origin_type=RT.DataSource,
                                                      callback=self._register_data_source)
        self.provider_subscriber = EventSubscriber(event_type=OT.ResourceModifiedEvent,
                                                   origin_type=RT.ExternalDataProvider,
                                                   callback=self._register_provider)
        self.data_source_subscriber.start()
        self.provider_subscriber.start()

        self.rr = self.container.resource_registry

        self.using_eoi_services = CFG.get_safe('eoi.meta.use_eoi_services', False)
        self.server = CFG.get_safe('eoi.importer_service.server', "localhost") + ":" + str(CFG.get_safe('eoi.importer_service.port', 8844))

        log.info("Using geoservices=" + str(self.using_eoi_services))
        if not self.using_eoi_services:
            log.warn("not using geoservices...")

        self.importer_service_available = self.check_for_importer_service()
        if not self.importer_service_available:
            log.warn("not using importer service...")

    def check_for_importer_service(self):
        '''
        only gets run on start, used to identify if importer service is available
        '''
        try:
            r = requests.get(self.server + '/service=alive&name=ooi&id=ooi')
            log.info("importer service available, status code: %s", str(r.status_code))
            # alive service returned ok
            if r.status_code == 200:
                return True
            else:
                return False
        except Exception as e:
            # SERVICE IS REALLY NOT AVAILABLE
            log.warn("importer service is really not available...%s", e)
            return False

    def _register_data_source(self, event, *args, **kwargs):
        '''
        used to create a harvester
        '''
        if self.importer_service_available:
            obj = self.rr.read(event.origin)
            data_fields = []
            for attrname, value in vars(obj).iteritems():
                # generate the param list to pass to importer service using field names
                if attrname != "contact":
                    f = attrname.replace("_", "") + "=" + str(obj[attrname])
                    data_fields.append(f)

            param_list = '&'.join(data_fields)

            request_string = self.server + '/service=' + CREATE_HARVESTER + "&" + param_list
            r = requests.get(request_string)

    def _register_provider(self, event, *args, **kwargs):
        if self.importer_service_available:
            # print "provider id:", event.origin
            pass

    def on_quit(self):
        self.data_source_subscriber.stop()
        self.provider_subscriber.stop()

Example 8: GovernanceController
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
class GovernanceController(object):
    """
    This is a singleton object which handles governance functionality in the container.
    """

    def __init__(self, container):
        log.debug('GovernanceController.__init__()')
        self.container = container
        self.enabled = False
        self.interceptor_by_name_dict = dict()
        self.interceptor_order = []
        self.policy_decision_point_manager = None
        self.governance_dispatcher = None

        # Holds a list per service operation of policy methods to check before the op in a process is allowed to be called
        self._service_op_preconditions = dict()

        self._is_container_org_boundary = False
        self._container_org_name = None
        self._container_org_id = None

        # For policy debugging purposes. Keeps a list of most recent policy updates for later readout
        self._policy_update_log = []
        self._policy_snapshot = None

    def start(self):
        log.debug("GovernanceController starting ...")

        self._CFG = CFG

        self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)
        log.info("GovernanceInterceptor enabled: %s" % str(self.enabled))

        self.policy_event_subscriber = None

        # Containers default to not Org Boundary and ION Root Org
        self._is_container_org_boundary = CFG.get_safe('container.org_boundary', False)
        self._container_org_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))
        self._container_org_id = None
        self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')
        self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

        self.system_actor_id = None
        self.system_actor_user_header = None

        if self.enabled:
            config = CFG.get_safe('interceptor.interceptors.governance.config')
            self.initialize_from_config(config)

            self.policy_event_subscriber = EventSubscriber(event_type=OT.PolicyEvent, callback=self.policy_event_callback)
            self.policy_event_subscriber.start()

            self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=self.container)
            self.policy_client = PolicyManagementServiceProcessClient(node=self.container.node, process=self.container)

            self._policy_snapshot = self._get_policy_snapshot()
            self._log_policy_update("start_governance_ctrl", message="Container start")

    def initialize_from_config(self, config):
        self.governance_dispatcher = GovernanceDispatcher()
        self.policy_decision_point_manager = PolicyDecisionPointManager(self)

        if 'interceptor_order' in config:
            self.interceptor_order = config['interceptor_order']

        if 'governance_interceptors' in config:
            gov_ints = config['governance_interceptors']
            for name in gov_ints:
                interceptor_def = gov_ints[name]

                # Instantiate and put in by_name array
                parts = interceptor_def["class"].split('.')
                modpath = ".".join(parts[:-1])
                classname = parts[-1]
                module = __import__(modpath, fromlist=[classname])
                classobj = getattr(module, classname)
                classinst = classobj()

                # Put in by_name_dict for possible re-use
                self.interceptor_by_name_dict[name] = classinst

    def stop(self):
        log.debug("GovernanceController stopping ...")

        if self.policy_event_subscriber is not None:
            self.policy_event_subscriber.stop()

    @property
    def is_container_org_boundary(self):
        return self._is_container_org_boundary
#......... part of the code is omitted here .........

Example 9: test_pub_on_different_subsubtypes
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
def test_pub_on_different_subsubtypes(self):
    res_list = [DotDict(ar=event.AsyncResult(), gq=queue.Queue(), count=0) for i in xrange(4)]

    def cb_gen(num):
        def cb(event, *args, **kwargs):
            res_list[num].count += 1
            res_list[num].gq.put(event)
            if event.description == "end":
                res_list[num].ar.set()
        return cb

    sub0 = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1.*", callback=cb_gen(0))
    sub0.start()
    sub1 = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1.a", callback=cb_gen(1))
    sub1.start()
    sub2 = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="*.a", callback=cb_gen(2))
    sub2.start()
    sub3 = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1", callback=cb_gen(3))
    sub3.start()

    pub1 = EventPublisher(event_type="ResourceModifiedEvent")
    pub1.publish_event(origin="one", sub_type="st1.a", description="1")
    pub1.publish_event(origin="two", sub_type="st1", description="2")
    pub1.publish_event(origin="three", sub_type="st1.b", description="3")
    pub1.publish_event(origin="four", sub_type="st2.a", description="4")
    pub1.publish_event(origin="five", sub_type="st2", description="5")
    pub1.publish_event(origin="six", sub_type="a", description="6")
    pub1.publish_event(origin="seven", sub_type="", description="7")
    pub1.publish_event(origin="end", sub_type="st1.a", description="end")
    pub1.publish_event(origin="end", sub_type="st1", description="end")

    [res_list[i].ar.get(timeout=5) for i in xrange(3)]

    sub0.stop()
    sub1.stop()
    sub2.stop()
    sub3.stop()

    for i in xrange(4):
        res_list[i].res = []
        for x in xrange(res_list[i].count):
            res_list[i].res.append(res_list[i].gq.get(timeout=5))

    self.assertEquals(len(res_list[0].res), 3)
    self.assertEquals(res_list[0].res[0].description, "1")

    self.assertEquals(len(res_list[1].res), 2)
    self.assertEquals(res_list[1].res[0].description, "1")

    self.assertEquals(len(res_list[2].res), 3)
    self.assertEquals(res_list[2].res[0].description, "1")

    self.assertEquals(len(res_list[3].res), 2)
    self.assertEquals(res_list[3].res[0].description, "2")
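Note on Example 9: the assertions indicate AMQP-style topic matching on dot-separated sub_type tokens, where "*" matches exactly one token. "st1.*" sees "st1.a" and "st1.b" but not plain "st1", "*.a" sees "st1.a" and "st2.a" but not a bare "a", and the exact subscription "st1" sees only events whose sub_type is exactly "st1".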

Example 10: TransformPrime
# Required module import: from pyon.ion.event import EventSubscriber [as alias]
# Or: from pyon.ion.event.EventSubscriber import stop [as alias]
class TransformPrime(TransformDataProcess):
    binding = ['output']
    '''
    Transforms which have an incoming stream and an outgoing stream.

    Parameters:
      process.stream_id      Outgoing stream identifier.
      process.exchange_point Route's exchange point.
      process.routing_key    Route's routing key.
      process.queue_name     Name of the queue to listen on.
      process.routes         streams,actor for each route {(stream_input_id, stream_output_id):actor}

    Either the stream_id or both the exchange_point and routing_key need to be provided.
    '''
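    # Illustrative note (not part of the original source): judging from recv_packet and
    # _load_actor below, a routes configuration might look like
    #   process.routes = {('stream_in_id', 'stream_out_id'):
    #                     {'module': 'mypackage.my_actor', 'class': 'MyActor'}}
    # where a None actor falls back to the default transform execution.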

    def on_start(self):
        TransformDataProcess.on_start(self)
        self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
        self.stored_values = StoredValueManager(self.container)
        self.input_data_product_ids = self.CFG.get_safe('process.input_products', [])
        self.output_data_product_ids = self.CFG.get_safe('process.output_products', [])
        self.lookup_docs = self.CFG.get_safe('process.lookup_docs', [])
        self.new_lookups = Queue()
        self.lookup_monitor = EventSubscriber(event_type=OT.ExternalReferencesUpdatedEvent, callback=self._add_lookups, auto_delete=True)
        self.lookup_monitor.start()

    def on_quit(self):
        self.lookup_monitor.stop()
        TransformDataProcess.on_quit(self)

    def _add_lookups(self, event, *args, **kwargs):
        if event.origin in self.input_data_product_ids + self.output_data_product_ids:
            if isinstance(event.reference_keys, list):
                self.new_lookups.put(event.reference_keys)

    @memoize_lru(100)
    def read_stream_def(self, stream_id):
        return self.pubsub_management.read_stream_definition(stream_id=stream_id)

    def recv_packet(self, msg, stream_route, stream_id):
        process_routes = self.CFG.get_safe('process.routes', {})
        for stream_in_id, routes in process_routes.iteritems():
            if stream_id == stream_in_id:
                for stream_out_id, actor in routes.iteritems():
                    if actor is None:
                        rdt_out = self._execute_transform(msg, (stream_in_id, stream_out_id))
                        self.publish(rdt_out.to_granule(), stream_out_id)
                    else:
                        outgoing = self._execute_actor(msg, actor, (stream_in_id, stream_out_id))
                        self.publish(outgoing, stream_out_id)

    def publish(self, msg, stream_out_id):
        publisher = getattr(self, stream_out_id)
        publisher.publish(msg)

    def _load_actor(self, actor):
        '''
        Returns callable execute method if it exists, otherwise it raises a BadRequest
        '''
        try:
            module = __import__(actor['module'], fromlist=[''])
        except ImportError:
            log.exception('Actor could not be loaded')
            raise
        try:
            cls = getattr(module, actor['class'])
        except AttributeError:
            log.exception('Module %s does not have class %s', repr(module), actor['class'])
            raise
        try:
            execute = getattr(cls, 'execute')
        except AttributeError:
            log.exception('Actor class does not contain execute method')
            raise
        return execute

    def _execute_actor(self, msg, actor, streams):
        stream_in_id, stream_out_id = streams
        stream_def_out = self.read_stream_def(stream_out_id)
        params = self.CFG.get_safe('process.params', {})
        config = self.CFG.get_safe('process')
        # do the stuff with the actor
        params['stream_def'] = stream_def_out._id
        executor = self._load_actor(actor)
        try:
            rdt_out = executor(msg, None, config, params, None)
        except:
            log.exception('Error running actor for %s', self.id)
            raise
        return rdt_out

    def _merge_pdicts(self, pdict1, pdict2):
        incoming_pdict = ParameterDictionary.load(pdict1)
        outgoing_pdict = ParameterDictionary.load(pdict2)

        merged_pdict = ParameterDictionary()
        for k, v in incoming_pdict.iteritems():
            ordinal, v = v
            if k not in merged_pdict:
#......... part of the code is omitted here .........