本文整理汇总了Python中mpx.lib.persistent.PersistentDataObject类的典型用法代码示例。如果您正苦于以下问题:Python PersistentDataObject类的具体用法?Python PersistentDataObject怎么用?Python PersistentDataObject使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了PersistentDataObject类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Logger
class Logger(ServiceNode):
    """
    Logging service node.

    Guarantees a 'msglog' child log exists, removes on-disk logs whose
    nodes have disappeared, and records the names of the child logs it
    manages in a PersistentDataObject so they survive restarts.
    """
    ##
    # @author Craig Warren
    # @param config
    # @return None
    def configure(self,config):
        ServiceNode.configure(self,config)
    ##
    # @author Craig Warren
    # starts the logger service
    # @return None
    def start(self):
        ServiceNode.start(self)
        # this will correctly add the msglog as a child
        # to the logger.
        if 'msglog' not in self.children_names():
            columns = mpx.lib.msglog.get_columns()
            log = Log()
            log.configure({'name':'msglog', 'parent':self})
            for c in columns:
                column = mpx.lib.factory('mpx.service.logger.column')
                config = c.configuration()
                config['parent'] = log
                column.configure(config)
        # Restore the list of log names persisted by a previous run.
        self._logs = PersistentDataObject(self)
        self._logs.names = []
        self._logs.load()
        # Destroy persisted logs that no longer correspond to a child
        # node of this logger (stale entries from a previous run).
        for name in self._logs.names:
            if ((not mpx.lib.log.log_exists(name)) and
                (name not in self.children_names())):
                log = mpx.lib.log.log(name)
                log.destroy()
                del(log)
        # Rebuild the managed-name list from the current children and
        # persist it for the next restart.
        self._logs.names = []
        for child in self.children_nodes():
            if not isinstance(child, Alias):
                # Don't manage other managers' logs...
                self._logs.names.append(child.name)
        self._logs.save()
    ##
    # @author Craig Warren
    # stops the logger service
    # @return None
    def stop(self):
        return ServiceNode.stop(self)
    ##
    # @author Craig Warren
    # @param log_name
    #   the name of the log to return
    # @return Log
    #   returns the log; if it can't find the log it
    #   returns None
    def get_log(self,log_name):
        for child in self.children_nodes():
            if child.name == log_name:
                return child
        return None
示例2: LastAlarm
class LastAlarm(CompositeNode,EventConsumerMixin):
    """
    Node that subscribes to AlarmTriggerEvents from its parent and
    remembers the most recent alarm, persisting the alarm's dictionary
    form so it survives restarts.
    """
    def __init__(self):
        self._last_alarm = None
        self._started = 0
        CompositeNode.__init__(self)
        EventConsumerMixin.__init__(self,self._alarm_triggered)
    def configure(self, config):
        CompositeNode.configure(self, config)
    def configuration(self):
        config = CompositeNode.configuration(self)
        return config
    def start(self):
        # Restore the persisted dictionary of the last alarm, if any.
        self._pdo = PersistentDataObject(self)
        self._pdo.last_dictionary = None
        self._pdo.load()
        self._started = 1
        self.parent.event_subscribe(self,AlarmTriggerEvent)
        CompositeNode.start(self)
    def stop(self):
        # BUGFIX: was 'selt._started = 0', which raised NameError on
        # every stop and never cleared the started flag.
        self._started = 0
        self.parent.cancel(self,AlarmTriggerEvent)
        CompositeNode.stop(self)
    def _alarm_triggered(self, alarm):
        # Event callback: cache the alarm and persist its dictionary.
        self._last_alarm = alarm
        self._pdo.last_dictionary = alarm.dictionary()
        self._pdo.save()
    def get(self, skipCache=0):
        # In-memory alarm object; None until an alarm fires.
        return self._last_alarm
    def get_dictionary(self):
        # Persisted dictionary form of the last alarm (survives restart).
        return self._pdo.last_dictionary
示例3: __init__
def __init__(self, node):
    """Initialize sequence-tracking state, then restore persisted data.

    auto_load=True makes PersistentDataObject.__init__ reload any
    previously saved attribute values immediately.
    """
    # Sequence bookkeeping: highest sequence seen plus queues of
    # pending and in-process sequence numbers.
    self.max_seq = -1
    self.pending_seqs = []
    self.inprocess_seqs = []
    # Internal save bookkeeping and its guarding lock.
    self.__last_save = {}
    self.__lock = Lock()
    PersistentDataObject.__init__(self, node, auto_load=True)
    return
示例4: TriggeredExporter
class TriggeredExporter(SynchronizedExporter):
    """
    Exporter driven by a trigger node: each time the subscribed trigger
    delivers a true value, a log export is started on its own thread.
    """
    def __init__(self):
        SynchronizedExporter.__init__(self)
        self._sid = None
        self.evt = None #dil - debug
    def handle_log(self,event):
        # Callback for values delivered by the subscription manager;
        # exports only when the trigger's value is true.
        self.debug_information('Log export triggered.')
        self.evt = event #dil - debug
        value = event.results()[1]['value']
        if isinstance(value,Exception):
            raise value
        if value: # only export when value is true
            self.debug_information('Going to start export thread.')
            # Non-blocking acquire: if an export is already running,
            # skip this request instead of queueing behind it.
            if self._lock.acquire(0):
                try:
                    thread = Thread(name=self.name, target=self.go,
                                    args=(time.time(),))
                    thread.start()
                finally:
                    self._lock.release()
            else:
                msglog.log('broadway',msglog.types.WARN,
                           ('Last export still active, ' +
                            'skipping current request.'))
    def configure(self, config):
        set_attribute(self, 'trigger',REQUIRED,config)
        SynchronizedExporter.configure(self, config)
    def configuration(self):
        config = SynchronizedExporter.configuration(self)
        get_attribute(self,'trigger',config,str)
        return config
    def start(self):
        Exporter.start(self)
        if not self.running:
            self.running = 1
            self.connection = as_node(self.connection_node)
            # Persist the last export start time across restarts.
            self._time_keeper = PersistentDataObject(self)
            self._time_keeper.start_time = 0
            self._time_keeper.load()
            self._period = self.parent.parent.period
            self._setup_trigger()
        else:
            raise EAlreadyRunning
    def _setup_trigger(self):
        # Subscribe to the trigger node; if it does not exist yet
        # (nascent node), retry once a minute.
        try:
            self._sid = SM.create_delivered(self, {1:as_node(self.trigger)})
        # BUGFIX: 'except ENotStarted, ENoSuchNode:' bound the raised
        # instance to the name ENoSuchNode and did NOT catch
        # ENoSuchNode; a tuple catches both exception types.
        except (ENotStarted, ENoSuchNode):
            # BUGFIX: was self._trigger, which does not exist (configure
            # stores self.trigger), so this path raised AttributeError
            # instead of logging and rescheduling.
            msg = 'TriggeredExporter trigger: %s does not exist - could be nascent' % self.trigger
            msglog.log('broadway',msglog.types.WARN,msg)
            scheduler.seconds_from_now_do(60, self._setup_trigger)
示例5: __init__
def __init__(self, name, password_file=PASSWD_FILE, group_file=GROUP_FILE, shadow_file=SHADOW_FILE):
    # Per-user profile persisted via PersistentDataObject; the PDO is
    # keyed by a UUID stored in the class-level USERS mapping.
    self.__lock = Lock()
    self.__password_file = password_file
    self.__group_file = group_file
    self.__shadow_file = shadow_file
    self.meta = {}
    # USERS maps user name -> UUID string; reload it before the lookup.
    self.USERS.load()
    if not self.USERS.has_key(self.name()):
        # First sighting of this user: mint a new UUID for the profile.
        # NOTE(review): self.name() is presumably derived from the
        # 'name' argument by code outside this view -- verify.
        msglog.log("broadway", msglog.types.INFO, ("No profile for user %s found, creating" " new profile" % name))
        self.USERS[self.name()] = str(UUID())
    # Bind this object to the UUID-named PDO and restore its state.
    PersistentDataObject.__init__(self, self.USERS[self.name()])
    PersistentDataObject.load(self)
示例6: start
def start(self):
    """Start the trend manager.

    Loads (and, if necessary, migrates) the persistent trend
    configuration, creates or reuses the 'trends' container node, then
    unmarshals every stored trend, deleting any whose stored dump is
    corrupt. Any startup failure clears the running flag and re-raises.
    """
    try:
        self._pdo_lock.acquire()
        try:
            if self.__running:
                return
            self.__running = True
            self._trendconfig = PersistentDictionary(filename(self), encode=None, decode=None)
            if not self._trendconfig:
                # One-time migration from old PersistentDataObject
                # storage into the PersistentDictionary.
                pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                if os.path.exists(pdodata.filename()):
                    msglog.log("broadway", msglog.types.INFO, "Migrating previous trend data")
                    pdodata.trends = {}
                    pdodata.load()
                    self._trendconfig.update(pdodata.trends)
                    del (pdodata)
        finally:
            self._pdo_lock.release()
        super(TrendManager, self).start()
        self.logger = node.as_internal_node(self.logger_url)
        # Reuse an existing 'trends' child or create the container node.
        if self.has_child("trends"):
            self.trends = self.get_child("trends")
        else:
            self.trends = CompositeNode()
            self.trends.configure({"parent": self, "name": "trends"})
            self.trends.start()
        # Unmarshal each persisted trend; collect failures so their
        # configuration (and node, if any) can be removed below rather
        # than mutating the mapping while iterating it.
        corrupt_trends = []
        for trendname, trenddump in self._trendconfig.items():
            msg = "Loading trend: %s" % trendname
            msglog.log("trendmanager", msglog.types.INFO, msg)
            try:
                trend = unmarshal(trenddump)
            except:
                corrupt_trends.append(trendname)
                msg = "Failed to load trend: %s" % trendname
                msglog.log("trendmanager", msglog.types.ERR, msg)
                msglog.exception(prefix="Handled")
        for trendname in corrupt_trends:
            try:
                msg = "Deleting trend information: %s" % trendname
                msglog.log("trendmanager", msglog.types.INFO, msg)
                self._delete_trend_configuration(trendname)
                if self.trends.has_child(trendname):
                    trend = self.trends.get_child(trendname)
                    trend.prune(force=True)
            except:
                msglog.exception(prefix="Handled")
    except:
        # Startup failed: reset the flag so a retry is possible.
        self.__running = False
        raise
    return
示例7: WritingTransporter
class WritingTransporter(Transporter):
    """
    Transporter that writes exported data to files in a directory.

    File names are <file_prefix><scheme-dependent part><file_suffix>.
    Data is written to a '.tmp' file first, made read-only, and then
    renamed into place so readers never see a partial file.
    """
    def configure(self, config):
        set_attribute(self, 'directory', '/tmp', config)
        set_attribute(self, 'file_prefix', REQUIRED, config)
        set_attribute(self, 'file_suffix', REQUIRED, config)
        # name_scheme: None, 'incremental', or 'timestamp' (see
        # _generate_filename below).
        set_attribute(self, 'name_scheme', None, config)
        set_attribute(self, 'timestamp_format', '%s', config)
        Transporter.configure(self, config)
        # Persist the last generated base filename and a counter so
        # incremental naming continues across restarts.
        self._last = PersistentDataObject(self)
        self._last.filename = None
        self._last.count = 1
        self._last.load()
    def configuration(self):
        config = Transporter.configuration(self)
        get_attribute(self, 'directory', config)
        get_attribute(self, 'file_prefix', config)
        get_attribute(self, 'file_suffix', config)
        get_attribute(self, 'name_scheme', config)
        get_attribute(self, 'timestamp_format', config)
        return config
    def transport(self, data):
        # Accepts either a string or a file-like object with read().
        if type(data) == type(''):
            data = StringIO.StringIO(data)
        filename = self._generate_filename()
        tempname = filename + '.tmp'
        file = open(tempname,'w')
        try:
            # Copy in 1 KB chunks until the source is exhausted.
            write = data.read(1024)
            while write:
                file.write(write)
                write = data.read(1024)
        finally:
            file.close()
        # Make the file read-only, then move it into its final name.
        os.chmod(tempname,0444)
        os.rename(tempname,filename)
    def _generate_filename(self):
        # Build the next output filename according to name_scheme:
        #   'incremental' -> base name plus a persistent counter
        #   'timestamp'   -> base name plus formatted schedule time,
        #                    plus a '_N' counter suffix for collisions
        # The counter restarts whenever the base filename changes.
        filename = self.file_prefix
        append = ''
        if self.name_scheme == 'incremental':
            append = '%s' % self._last.count
        elif self.name_scheme == 'timestamp':
            file_time = self.parent.time_function(self.parent.scheduled_time())
            filename = filename + time.strftime(self.timestamp_format,file_time)
            append = '_%s' % (self._last.count + 1)
        if filename != self._last.filename:
            # New base name: restart the counter and drop the suffix.
            self._last.count = 0
            append = ''
        self._last.count += 1
        self._last.filename = filename
        return os.path.join(self.directory,filename + append + self.file_suffix)
示例8: start
def start(self):
    """Start the alarm configurator.

    Opens the persistent alarm and event dictionaries, migrates any
    data left in old-style PersistentDataObject storage, resolves the
    security manager, and subscribes for StateEvents from the manager.
    """
    self.managernode = self.as_node(self.manager)
    self.synclock.acquire()
    try:
        alarmsname = '%s (%s)' % (self.name, 'alarms')
        eventsname = '%s (%s)' % (self.name, 'events')
        self.alarms = PersistentDictionary(alarmsname,
                                           encode=self.encode,
                                           decode=self.decode)
        self.events = PersistentDictionary(eventsname,
                                           encode=self.encode,
                                           decode=self.decode)
        # Migrate PDO data from old style persistence.
        pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
        if os.path.exists(pdodata.filename()):
            msglog.log('broadway', msglog.types.INFO,
                       "Migrating previous alarm and event data")
            pdodata.events = {}
            pdodata.alarms = {}
            pdodata.load()
            migrate(pdodata, self.decode)
            self.rebuildstorage()
            # Old storage is no longer needed once migrated.
            pdodata.destroy()
            del(pdodata)
    finally:
        self.synclock.release()
    self.securitymanager = self.as_node('/services/Security Manager')
    # Subscribe so alarm state changes are delivered to handle_event.
    register = self.managernode.register_for_type
    self.sub = register(self.handle_event, StateEvent)
    self.running.set()
    super(AlarmConfigurator, self).start()
示例9: start
def start(self):
    """Start the Energywise manager.

    Loads persisted trend configuration, creates or reuses the
    'trends' container node, and restarts trending for each saved
    domain; unresolvable domains have their configuration deleted.
    """
    if self.__running:
        return
    if self.debug:
        msglog.log('EnergywiseManager :', msglog.types.INFO, 'Inside start' )
    CompositeNode.start(self)
    # start_node = as_node('/services/EnergywiseManager/')
    # self.configure_trend_in_switches(start_node, 60)
    self.__running = True
    self._pdo_lock.acquire()
    # BUGFIX: the lock was previously released outside any try/finally,
    # so an exception while creating or loading the PDO left the lock
    # held forever, deadlocking every later acquirer.
    try:
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.trends = {}
        self._pdo.load()
    finally:
        self._pdo_lock.release()
    if self.has_child('trends'):
        self.trends = self.get_child('trends')
    else:
        self.trends = CompositeNode()
        self.trends.configure({'parent':self, 'name':'trends'})
        self.trends.start()
    # start trending for saved domains
    for domain,freq in self._pdo.trends.items():
        try:
            start_node = as_node(domain)
            # self.configure_trend_in_switches( start_node,freq )
            start_node.new_trend(freq)
        except:
            # Saved domain no longer resolves; drop its configuration.
            self.delete_trend_configuration(domain)
    return
示例10: start
def start(self):
    """Prepare formatter state for the FSG demo.

    Loads the exception-log timestamp PDO and builds a map of
    channel-attribute column nodes up front so format() does not have
    to rescan the node tree on every call.
    """
    self._PDO = PersistentDataObject(self,dmtype=GC_NEVER)
    # Default until load() restores the previously persisted value.
    self._PDO.exception_log_last_time = 0.0
    self._PDO.load()
    # Scan subtree of grandparent logger for channel (column) 'fsg_attrs'
    # nodes containing info required for FSG Demo, so that we don't have
    # to do the scan every time format() is called:
    self._channels = {}
    columns_node = self.parent.parent.parent.get_child('columns')
    column_nodes = columns_node.children_nodes()
    for column_node in column_nodes:
        if column_node.name == 'timestamp':
            continue
        # Every non-timestamp column must be a channel-attrs column.
        assert isinstance(column_node, ChannelAttrsColumn) \
               or isinstance(column_node, ChannelAttrsDeltaColumn), \
               'Column %s should be class ChannelAttrsColumn, but is class %s' \
               % (column_node.name, column_node.__class__.__name__)
        self._channels[column_node.name] = {
            'channel_node':column_node,'values':[]
            }
    # Resolve the exception log node if configured; absence is OK.
    self._exception_log = None
    try:
        self._exception_log = as_node(self.exception_log_url)
    except ENoSuchName:
        pass
    return
示例11: start
def start(self):
    """Start the logger service.

    Guarantees a 'msglog' child log exists, destroys persisted logs
    whose nodes are gone, then re-saves the list of managed child
    log names so it survives restarts.
    """
    ServiceNode.start(self)
    # Add the msglog as a child of this logger if it is missing.
    if 'msglog' not in self.children_names():
        msg_log = Log()
        msg_log.configure({'name':'msglog', 'parent':self})
        for source_column in mpx.lib.msglog.get_columns():
            column_node = mpx.lib.factory('mpx.service.logger.column')
            column_config = source_column.configuration()
            column_config['parent'] = msg_log
            column_node.configure(column_config)
    # Restore the persisted list of managed log names.
    self._logs = PersistentDataObject(self)
    self._logs.names = []
    self._logs.load()
    # Destroy persisted logs that no longer correspond to a child node.
    for name in self._logs.names:
        stale = ((not mpx.lib.log.log_exists(name)) and
                 (name not in self.children_names()))
        if stale:
            stale_log = mpx.lib.log.log(name)
            stale_log.destroy()
            del(stale_log)
    # Record the current children, skipping aliases so we don't manage
    # other managers' logs, and persist the refreshed list.
    self._logs.names = [child.name for child in self.children_nodes()
                        if not isinstance(child, Alias)]
    self._logs.save()
示例12: start
def start(self):
    """Start the node: restore the persisted last-alarm dictionary and
    subscribe to AlarmTriggerEvents published by the parent."""
    self._pdo = PersistentDataObject(self)
    # Default until load() restores a previously saved alarm dictionary.
    self._pdo.last_dictionary = None
    self._pdo.load()
    self._started = 1
    self.parent.event_subscribe(self,AlarmTriggerEvent)
    CompositeNode.start(self)
示例13: configure
def configure(self, config):
    """Configure the control service: set up the stats PDO used to
    detect XML file changes and establish the default write priority."""
    self._pdo = PersistentDataObject(self)
    self._pdo.stats_dict = {} #used to detect changes to xml files based on timestamp. Tuple (modify time, pickle string)
    self._pdo.load()
    # write_priority can be set for the entire control service, though
    # it may be specialized at the individual application node level.
    set_attribute(self, 'write_priority', 9, config, int)
    CompositeNode.configure(self, config)
示例14: start
def start(self):
    """Start the triggers configurator.

    Opens (or migrates old PDO data into) the persistent trigger
    dictionary, loads trigger definitions, and resolves the security
    manager when security is enabled. All of this happens under the
    PDO lock.
    """
    filename = '%s (%s)' % (self.name, 'triggers')
    self.manager = self.nodespace.as_node(self.manager)
    self._pdo_lock.acquire()
    try:
        if self._triggers is None:
            self._triggers = PersistentDictionary(
                filename, encode=None, decode=None)
        if not self._triggers:
            # One-time migration from old-style PDO persistence.
            pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.INFO,
                           "Migrating previous trigger data.")
                pdodata.triggers = {}
                pdodata.load()
                self._triggers.update(pdodata.triggers)
                # Old storage is no longer needed once migrated.
                pdodata.destroy()
                del(pdodata)
        self._loadtriggers()
        if self.secured:
            self.security_manager = self.as_node("/services/Security Manager")
        else:
            self.security_manager = None
    finally:
        self._pdo_lock.release()
    return super(TriggersConfigurator, self).start()
示例15: start
def start(self):
    """Start the security configurator.

    Restores persisted user, role, and policy pickles under the PDO
    lock, then rebuilds each category (roles first, then policies,
    then users -- users may reference the other two), logging and
    skipping any entry that fails to unpickle and reporting per-
    category load counts and timings.
    """
    self.manager = self.nodespace.as_node(self.manager)
    self._pdo_lock.acquire()
    try:
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.users = {}
        self._pdo.roles = {}
        self._pdo.policies = {}
        self._pdo.load()
        userdumps = self._pdo.users.items()
        roledumps = self._pdo.roles.items()
        policydumps = self._pdo.policies.items()
    finally: self._pdo_lock.release()
    super(SecurityConfigurator, self).start()
    def loadall(kind, plural, dumps):
        # Unpickle and instantiate every dump in 'dumps'; corrupt
        # entries are logged and skipped rather than aborting startup.
        tstart = time.time()
        for itemname, itemdump in dumps:
            try: IPickles(cPickle.loads(itemdump))()
            except:
                message = self.LOADFAILURE % (self.name, kind, itemname)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix = 'Handled')
        tlapse = time.time() - tstart
        msglog.log('broadway', msglog.types.INFO,
                   'Security Configurator loaded '
                   '%s %s in %s seconds.' % (len(dumps), plural, tlapse))
    # Same load order as before: roles, then policies, then users.
    loadall('role', 'roles', roledumps)
    loadall('policy', 'policies', policydumps)
    loadall('user', 'users', userdumps)
    return