This article collects and organizes typical usage examples of the EventPublisher.close method from the Python module pyon.event.event. If you are wondering what EventPublisher.close does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also find further usage examples for the containing class, pyon.event.event.EventPublisher.

Listed below are 8 code examples of the EventPublisher.close method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
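
All eight examples follow the same lifecycle: an EventPublisher is created when the process starts, publish_event is called while the process runs, and close() is called at shutdown to release the underlying messaging endpoint. The minimal sketch below (not one of the indexed examples) shows that pattern in isolation; it assumes a running pyon container and messaging environment, and the event type "ResourceEvent" and the origin value are placeholders chosen purely for illustration.

# Minimal standalone sketch of the create/publish/close pattern (illustrative only)
from pyon.event.event import EventPublisher

publisher = EventPublisher(event_type="ResourceEvent")  # placeholder event type
try:
    # keyword arguments such as origin become fields of the published event
    publisher.publish_event(origin="example_origin")
finally:
    # release the underlying messaging endpoint once publishing is done
    publisher.close()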

Example 1: SystemManagementService
# Required import: from pyon.event.event import EventPublisher [as alias]
# Or: from pyon.event.event.EventPublisher import close [as alias]
class SystemManagementService(BaseSystemManagementService):
    """ container management requests are handled by the event listener
        ion.processes.event.container_manager.ContainerManager
        which must be running on each container.
    """

    def on_start(self, *a, **b):
        super(SystemManagementService, self).on_start(*a, **b)
        self.sender = EventPublisher()

    def on_quit(self, *a, **b):
        self.sender.close()

    def perform_action(self, predicate, action):
        userid = None  # get from context
        self.sender.publish_event(event_type=OT.ContainerManagementRequest, origin=userid, predicate=predicate, action=action)

    def set_log_level(self, logger='', level='', recursive=False):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.ChangeLogLevel, logger=logger, level=level, recursive=recursive))

    def reset_policy_cache(self, headers=None, timeout=None):
        """Clears and reloads the policy caches in all of the containers.
        @throws BadRequest None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.ResetPolicyCache))

    def trigger_garbage_collection(self):
        """Triggers a garbage collection in all containers
        @throws BadRequest None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.TriggerGarbageCollection))

Example 2: SystemManagementService
# Required import: from pyon.event.event import EventPublisher [as alias]
# Or: from pyon.event.event.EventPublisher import close [as alias]
class SystemManagementService(BaseSystemManagementService):
    """ container management requests are handled by the event listener
        ion.processes.event.container_manager.ContainerManager
        which must be running on each container.
    """

    def on_start(self, *a, **b):
        super(SystemManagementService, self).on_start(*a, **b)
        self.sender = EventPublisher()

    def on_quit(self, *a, **b):
        self.sender.close()

    def perform_action(self, predicate, action):
        userid = None  # get from context
        self.sender.publish_event(event_type=OT.ContainerManagementRequest, origin=userid, predicate=predicate, action=action)

    def set_log_level(self, logger='', level='', recursive=False):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.ChangeLogLevel, logger=logger, level=level, recursive=recursive))

    def reset_policy_cache(self, headers=None, timeout=None):
        """Clears and reloads the policy caches in all of the containers.
        @throws BadRequest None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.ResetPolicyCache))

    def trigger_garbage_collection(self):
        """Triggers a garbage collection in all containers
        @throws BadRequest None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.TriggerGarbageCollection))

    def trigger_container_snapshot(self, snapshot_id='', include_snapshots=None, exclude_snapshots=None,
                                   take_at_time='', clear_all=False, persist_snapshot=True, snapshot_kwargs=None):
        if not snapshot_id:
            snapshot_id = get_ion_ts()
        if not snapshot_kwargs:
            snapshot_kwargs = {}

        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.TriggerContainerSnapshot,
                                                               snapshot_id=snapshot_id,
                                                               include_snapshots=include_snapshots,
                                                               exclude_snapshots=exclude_snapshots,
                                                               take_at_time=take_at_time,
                                                               clear_all=clear_all,
                                                               persist_snapshot=persist_snapshot,
                                                               snapshot_kwargs=snapshot_kwargs))

        log.info("Event to trigger container snapshots sent. snapshot_id=%s" % snapshot_id)

    def start_gevent_block(self, alarm_mode=False):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.StartGeventBlock, alarm_mode=alarm_mode))

    def stop_gevent_block(self):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.StopGeventBlock))

    def prepare_system_shutdown(self, mode=''):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.PrepareSystemShutdown, mode=mode))

Example 3: TransformEventPublisher
# Required import: from pyon.event.event import EventPublisher [as alias]
# Or: from pyon.event.event.EventPublisher import close [as alias]
class TransformEventPublisher(TransformEventProcess):
    def on_start(self):
        event_type = self.CFG.get_safe('process.event_type', '')
        self.publisher = EventPublisher(event_type=event_type)

    def publish_event(self, *args, **kwargs):
        raise NotImplementedError('Method publish_event not implemented')

    def on_quit(self):
        self.publisher.close()
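
TransformEventPublisher is effectively an abstract base: on_start builds the EventPublisher from the process configuration, on_quit closes it, and publish_event is left for subclasses to implement. The following hypothetical subclass (not taken from the source) sketches how that hook might delegate to the publisher created in on_start; the class name and the choice of origin are assumptions made for illustration.

# Hypothetical subclass, shown only to illustrate the publish_event hook;
# it assumes the process configuration supplies 'process.event_type'.
class ExampleEventForwarder(TransformEventPublisher):
    def publish_event(self, *args, **kwargs):
        # default the origin to this process id and forward keyword arguments as event fields
        kwargs.setdefault('origin', self.id)
        self.publisher.publish_event(**kwargs)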

Example 4: SystemManagementService
# Required import: from pyon.event.event import EventPublisher [as alias]
# Or: from pyon.event.event.EventPublisher import close [as alias]
class SystemManagementService(BaseSystemManagementService):
    """ container management requests are handled by the event listener
        ion.processes.event.container_manager.ContainerManager
        which must be running on each container.
    """

    def on_start(self, *a, **b):
        super(SystemManagementService, self).on_start(*a, **b)
        self.sender = EventPublisher()

    def on_quit(self, *a, **b):
        self.sender.close()

    def perform_action(self, predicate, action):
        userid = None  # get from context
        self.sender.publish_event(event_type="ContainerManagementRequest", origin=userid, predicate=predicate, action=action)

    def set_log_level(self, logger='', level='', recursive=False):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject('ChangeLogLevel', logger=logger, level=level, recursive=recursive))

Example 5: SchedulerService
# Required import: from pyon.event.event import EventPublisher [as alias]
# Or: from pyon.event.event.EventPublisher import close [as alias]
class SchedulerService(BaseSchedulerService):

    def __init__(self, *args, **kwargs):
        BaseSchedulerService.__init__(self, *args, **kwargs)
        self.schedule_entries = {}
        self._no_reschedule = False

    def on_start(self):
        if CFG.get_safe("process.start_mode") == "RESTART":
            self.on_system_restart()
        self.pub = EventPublisher(event_type="ResourceEvent")

    def on_quit(self):
        self.pub.close()
        # throw killswitch on future reschedules
        self._no_reschedule = True
        # terminate any pending spawns
        self._stop_pending_timers()

    def __notify(self, task, id, index):
        log.debug("SchedulerService:__notify: - " + task.event_origin + " - Time: " + str(self.__now()) + " - ID: " + id + " -Index:" + str(index))
        self.pub.publish_event(origin=task.event_origin)

    def __now(self):
        return datetime.utcnow()

    def __now_posix(self, now):
        return time.mktime(now.timetuple())

    def _expire_callback(self, id, index):
        task = self.__get_entry(id)
        self.__notify(task, id, index)
        if not self.__reschedule(id, index):
            self.__delete(id, index)

    def __calculate_next_interval(self, task, current_time):
        if task.start_time < current_time:
            next_interval = task.start_time
            while next_interval < current_time:
                next_interval = next_interval + task.interval
            return (next_interval - current_time)
        else:
            return (task.start_time - current_time) + task.interval

    def __get_expire_time(self, task):
        now = self.__now()
        now_posix = self.__now_posix(now)
        expires_in = []
        if type(task) == TimeOfDayTimer:
            for time_of_day in task.times_of_day:
                expire_time = datetime(now.year, now.month, now.day, time_of_day['hour'], time_of_day['minute'], time_of_day['second'])
                expires_in.append(ceil((expire_time - now).total_seconds()))
        elif type(task) == IntervalTimer and (task.end_time == -1 or ((now_posix + task.interval) <= task.end_time)):
            expires_in = [(self.__calculate_next_interval(task, now_posix))]
        return expires_in

    def __get_reschedule_expire_time(self, task, index):
        expires_in = False
        now = self.__now()
        now_posix = self.__now_posix(now)
        if type(task) == TimeOfDayTimer:
            if task.expires > now_posix:
                time_of_day = task.times_of_day[index]
                tomorrow = now + timedelta(days=1)
                expire_time = datetime(tomorrow.year, tomorrow.month, tomorrow.day, time_of_day['hour'], time_of_day['minute'], time_of_day['second'])
                expires_in = (ceil((expire_time - now).total_seconds()))
            else:
                expires_in = False
        elif type(task) == IntervalTimer and (task.end_time == -1 or ((now_posix + task.interval) <= task.end_time)):
            if task.start_time <= now_posix:
                expires_in = (task.interval)
            else:
                expires_in = ((task.start_time - now_posix) + task.interval)
        return expires_in

    def __validate_expire_times(self, expire_times):
        for index, expire_time in enumerate(expire_times):
            if expire_time < 0:
                return False
        return True

    def __schedule(self, scheduler_entry, id=False):
        # if "id" is set, the scheduler_entry is already in the Resource Registry; this can occur during a system restart
        spawns = []
        task = scheduler_entry.entry
        expire_times = self.__get_expire_time(task)
        if not self.__validate_expire_times(expire_times):
            log.error("SchedulerService:__schedule: scheduling: expire time is less than zero: ")
            return False
        if not id:
            id, _ = self.clients.resource_registry.create(scheduler_entry)
        self.__create_entry(task, spawns, id)
        for index, expire_time in enumerate(expire_times):
            log.debug("SchedulerService:__schedule: scheduling: - " + task.event_origin + " - Now: " + str(self.__now()) +
                      " - Expire: " + str(expire_time) + " - ID: " + id + " - Index:" + str(index))
#......... the rest of the code is omitted here .........

Example 6: UploadCalibrationProcessing
# Required import: from pyon.event.event import EventPublisher [as alias]
# Or: from pyon.event.event.EventPublisher import close [as alias]
class UploadCalibrationProcessing(ImmediateProcess):
    """
    Upload Calibration Processing Process

    This process provides ION clients and operators with the capability to process uploaded calibration
    coefficients to calibrate data products.

    The parameters this process accepts as configuration are:
    - fuc_id: The FileUploadContext identifier, required, stores where the file was written
    """

    def on_start(self):
        ImmediateProcess.on_start(self)
        # necessary arguments, passed in via configuration kwarg to schedule_process; the 'process' namespace avoids collisions
        fuc_id = self.CFG.get_safe('process.fuc_id', None)  # FileUploadContext ID

        # Clients
        self.object_store = self.container.object_store
        self.resource_registry = self.container.resource_registry
        self.event_publisher = EventPublisher(OT.ResetQCEvent)
        self.data_product_management = DataProductManagementServiceProcessClient(process=self)
        self.create_map()

        # run process
        if fuc_id:
            self.process(fuc_id)

        # cleanup
        self.event_publisher.close()

    def process(self, fuc_id):
        # get the Object (dict) containing details of the uploaded file
        fuc = self.object_store.read(fuc_id)
        if fuc['filetype'] == 'ZIP':
            raise BadRequest("ZIP format not determined by project scientists yet (2014-04-21)")
            #self.process_zip(fuc)
        else:
            self.process_csv(fuc)

    def create_map(self):
        '''
        Creates a map from property numbers to datasets
        '''
        self.property_map = {}

        for instrument_device in self.resource_registry.find_resources(restype=RT.InstrumentDevice)[0]:
            if instrument_device.ooi_property_number:
                self.property_map[instrument_device.ooi_property_number] = self.data_products_for_device(instrument_device)

    def data_products_for_device(self, device):
        data_products, _ = self.resource_registry.find_objects(device, PRED.hasOutputProduct, id_only=True)
        return data_products

    def dataset_for_data_product(self, data_product):
        datasets, _ = self.resource_registry.find_objects(data_product, PRED.hasDataset, id_only=True)
        return datasets[0]

    def do_something_with_the_update(self, updates):
        for property_no, calibration_update in updates.iteritems():
            # Check to see if we even have an instrument with this property number
            if property_no not in self.property_map:
                continue
            # Get the data product listings for this instrument
            data_products = self.property_map[property_no]
            # Go through each data product and update the data IF
            # - There is a set of parameters that match those in the calibration
            for data_product in data_products:
                self.update_data_product(data_product, calibration_update)

    def update_data_product(self, data_product, calibration_update):
        parameters = [p.name for p in self.data_product_management.get_data_product_parameters(data_product)]

        dataset_updates = []
        for cal_name in calibration_update.iterkeys():
            if cal_name in parameters:
                dataset_id = self.dataset_for_data_product(data_product)
                dataset_updates.append(dataset_id)

        for dataset in dataset_updates:
            self.apply_to_dataset(dataset, calibration_update)

    def apply_to_dataset(self, dataset, calibration_update):
        cov = DatasetManagementService._get_coverage(dataset, mode='r+')
        try:
            self.set_sparse_values(cov, calibration_update)
            self.publish_calibration_event(dataset, calibration_update.keys())
        finally:
            cov.close()

    def set_sparse_values(self, cov, calibration_update):
#......... the rest of the code is omitted here .........

Example 7: TransformWorker
# Required import: from pyon.event.event import EventPublisher [as alias]
# Or: from pyon.event.event.EventPublisher import close [as alias]
class TransformWorker(TransformStreamListener):
    CACHE_LIMIT = CFG.get_safe('container.ingestion_cache', 5)

    # Status publishes after a set of granules has been processed
    STATUS_INTERVAL = 100

    def __init__(self, *args, **kwargs):
        super(TransformWorker, self).__init__(*args, **kwargs)

        # the set of data processes hosted by this worker
        self._dataprocesses = {}
        self._streamid_map = {}
        self._publisher_map = {}
        self._transforms = {}

    def on_start(self):  # pragma no cover
        #super(TransformWorker,self).on_start()
        #--------------------------------------------------------------------------------
        # Explicit on_start
        #--------------------------------------------------------------------------------
        # Skip TransformStreamListener and go to StreamProcess to avoid the subscriber being created
        # We want explicit management of the thread and subscriber object for ingestion
        #todo: check how to manage multi queue subscription (transform scenario 3)
        TransformStreamProcess.on_start(self)

        #todo: can the subscription be changed or updated when new dataprocesses are added?
        self.queue_name = self.CFG.get_safe('process.queue_name', self.id)
        self.subscriber = StreamSubscriber(process=self, exchange_name=self.queue_name, callback=self.receive_callback)
        self.thread_lock = RLock()

        self._rpc_server = self.container.proc_manager._create_listening_endpoint(from_name=self.id, process=self)
        self.add_endpoint(self._rpc_server)

        self.start_listener()

        #todo: determine and publish appropriate set of status events
        self.event_publisher = EventPublisher(OT.DataProcessStatusEvent)

        url = 'http://sddevrepo.oceanobservatories.org/releases/ion_example-0.1-py2.7.egg'
        filepath = self.download_egg(url)
        print filepath
        import pkg_resources
        pkg_resources.working_set.add_entry('ion_example-0.1-py2.7.egg')
        from ion_example.add_arrays import add_arrays

    def on_quit(self):  # pragma no cover
        self.event_publisher.close()

        if self.subscriber_thread:
            self.stop_listener()

        super(TransformWorker, self).on_quit()

    def start_listener(self):
        # We use a lock here to prevent possible race conditions from starting multiple listeners and coverage clobbering
        with self.thread_lock:
            self.subscriber_thread = self._process.thread_manager.spawn(self.subscriber.listen, thread_name='%s-subscriber' % self.id)

    def stop_listener(self):
        # Avoid race conditions with coverage operations (Don't start a listener at the same time as closing one)
        with self.thread_lock:
            self.subscriber.close()
            self.subscriber_thread.join(timeout=10)
            self.subscriber_thread = None

    @handle_stream_exception()
    def recv_packet(self, msg, stream_route, stream_id):
        ''' receive packet for ingestion '''
        log.debug('received granule for stream %s', stream_id)

        if msg == {}:
            log.error('Received empty message from stream: %s', stream_id)
            return
        # Message validation
        if not isinstance(msg, Granule):
            log.error('Ingestion received a message that is not a granule: %s', msg)
            return

        rdt = RecordDictionaryTool.load_from_granule(msg)
        if rdt is None:
            log.error('Invalid granule (no RDT) for stream %s', stream_id)
            return
        if not len(rdt):
            log.debug('Empty granule for stream %s', stream_id)
            return

        dp_id_list = self.retrieve_dataprocess_for_stream(stream_id)

        for dp_id in dp_id_list:
            function, argument_list = self.retrieve_function_and_define_args(dp_id)
#......... the rest of the code is omitted here .........

Example 8: UploadCalibrationProcessing
# Required import: from pyon.event.event import EventPublisher [as alias]
# Or: from pyon.event.event.EventPublisher import close [as alias]
class UploadCalibrationProcessing(ImmediateProcess):
    """
    Upload Calibration Processing Process

    This process provides ION clients and operators with the capability to process uploaded calibration
    coefficients to calibrate data products.

    The parameters this process accepts as configuration are:
    - fuc_id: The FileUploadContext identifier, required, stores where the file was written
    """

    def on_start(self):
        ImmediateProcess.on_start(self)
        # necessary arguments, passed in via configuration kwarg to schedule_process; the 'process' namespace avoids collisions
        fuc_id = self.CFG.get_safe('process.fuc_id', None)  # FileUploadContext ID

        # Clients
        self.object_store = self.container.object_store
        self.event_publisher = EventPublisher(OT.ResetQCEvent)

        # run process
        self.process(fuc_id)

        # cleanup
        self.event_publisher.close()

    def process(self, fuc_id):
        # get the Object (dict) containing details of the uploaded file
        fuc = self.object_store.read(fuc_id)
        if fuc['filetype'] == 'ZIP':
            raise BadRequest("ZIP format not determined by project scientists yet (2014-04-21)")
            #self.process_zip(fuc)
        else:
            self.process_csv(fuc)

    def process_csv(self, fuc):
        # CSV file open here
        csv_filename = fuc.get('path', None)
        if csv_filename is None:
            raise BadRequest("uploaded file has no path")

        # keep track of the number of calibrations we actually process
        nupdates = 0

        updates = {}  # keys are reference_designators, used to update object store after parsing CSV

        with open(csv_filename, 'rb') as csvfile:
            # eliminate blank lines
            csvfile = (row for row in csvfile if len(row.strip()) > 0)
            # eliminate commented lines
            csvfile = (row for row in csvfile if not row.startswith('#'))
            # open CSV reader
            csv_reader = csv.reader(csvfile, delimiter=',')  # skip commented lines
            # iterate the rows returned by csv.reader
            for row in csv_reader:
                if len(row) != 6:
                    log.warn("invalid calibration line %s" % ','.join(row))
                    continue
                try:
                    ipn = row[0]  # instrument_property_number
                    name = row[1]  # calibration_name
                    value = float(row[2])  # calibration_value
                    units = row[3]
                    description = row[4]  # description
                    start_date = row[5]  # start_date TODO date object?
                    d = {
                        'value': value,
                        'units': units,
                        'description': description,
                        'start_date': start_date
                    }
                except ValueError as e:
                    continue  # TODO error message? or just skip?
                # get ipn key
                if ipn not in updates:
                    updates[ipn] = {}  # initialize empty dict
                if name not in updates[ipn]:
                    updates[ipn][name] = []  # will be an array of dicts
                updates[ipn][name].append(d)
                nupdates = nupdates + 1

        # insert the updates into object store
        self.update_object_store(updates)

        # update FileUploadContext object (change status to complete)
        fuc['status'] = 'UploadCalibrationProcessing process complete - %d updates added to object store' % nupdates
        self.object_store.update_doc(fuc)

        # remove uploaded file
        try:
            os.remove(csv_filename)
        except OSError:
            pass  # TODO take action to get this removed
#......... the rest of the code is omitted here .........