

Python PersistentDataObject.save Method Code Examples

This article collects typical usage examples of the Python method mpx.lib.persistent.PersistentDataObject.save. If you are unsure what PersistentDataObject.save does or how to call it, the curated examples below should help. You can also explore the broader usage of the containing class, mpx.lib.persistent.PersistentDataObject.


The sections below present 15 code examples of PersistentDataObject.save, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system surface better Python code examples.
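
All fifteen examples share one idiom: construct the PDO with its owning node, assign default attribute values, call load() to overwrite the defaults with any previously saved state, mutate, then call save(). Here is a minimal sketch of that idiom, assuming an MPX runtime where mpx.lib.persistent is importable; the function name and the boot_count attribute are illustrative only, not part of the mpx API.

from mpx.lib.persistent import PersistentDataObject

def remember_boot_count(node):
    # 'node' keys the PDO's on-disk storage location.
    pdo = PersistentDataObject(node)
    pdo.boot_count = 0      # default, used when nothing was saved yet
    pdo.load()              # replaces defaults with any previously saved state
    pdo.boot_count += 1
    pdo.save()              # writes all attributes back to persistent storage
    return pdo.boot_count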

Example 1: Logger

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
class Logger(ServiceNode):

    ##
    # @author Craig Warren
    # @param config
    # @return None
    def configure(self,config):
        ServiceNode.configure(self,config)
    ##
    # @author Craig Warren
    #   starts the logger service
    # @return None
    def start(self):
        ServiceNode.start(self)
        # this will correctly add the msglog as a child
        #  to the logger.
        if 'msglog' not in self.children_names():
            columns = mpx.lib.msglog.get_columns()
            log = Log()
            log.configure({'name':'msglog', 'parent':self})
            for c in columns:
                column = mpx.lib.factory('mpx.service.logger.column')
                config = c.configuration()
                config['parent'] = log
                column.configure(config)
        self._logs = PersistentDataObject(self)
        self._logs.names = []
        self._logs.load()
        for name in self._logs.names:
            if ((not mpx.lib.log.log_exists(name)) and 
                (name not in self.children_names())):
                log = mpx.lib.log.log(name)
                log.destroy()
                del(log)
        self._logs.names = []
        for child in self.children_nodes():
            if not isinstance(child, Alias):
                # Don't manage other managers' logs...
                self._logs.names.append(child.name)
        self._logs.save()

    ##
    # @author Craig Warren
    #   stops the logger service
    # @return None
    def stop(self):
        return ServiceNode.stop(self)

    ##
    # @author Craig Warren
    # @param log_name
    #   the name of the log to return
    # @return Log
    #   returns the log; if it can't find the log, it
    #   returns None
    def get_log(self,log_name):
        for child in self.children_nodes():
            if child.name == log_name:
                return child
        return None
Author: mcruse, Project: monotone, Lines: 62, Source: logger.py

Example 2: LastAlarm

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
class LastAlarm(CompositeNode,EventConsumerMixin):
    def __init__(self):
        self._last_alarm = None
        self._started = 0
        CompositeNode.__init__(self)
        EventConsumerMixin.__init__(self,self._alarm_triggered)
    def configure(self, config):
        CompositeNode.configure(self, config)
    def configuration(self):
        config = CompositeNode.configuration(self)
        return config
    def start(self):
        self._pdo = PersistentDataObject(self)
        self._pdo.last_dictionary = None
        self._pdo.load()
        self._started = 1
        self.parent.event_subscribe(self,AlarmTriggerEvent)
        CompositeNode.start(self)
    def stop(self):
        self._started = 0
        self.parent.cancel(self,AlarmTriggerEvent)
        CompositeNode.stop(self)
    def _alarm_triggered(self, alarm):
        self._last_alarm = alarm
        self._pdo.last_dictionary = alarm.dictionary()
        self._pdo.save()
    def get(self, skipCache=0):
        return self._last_alarm
    def get_dictionary(self):
        return self._pdo.last_dictionary
Author: mcruse, Project: monotone, Lines: 32, Source: last_alarm.py

Example 3: SimplePersistentValue

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
class SimplePersistentValue(SimpleValue):
    def configure(self, config):
        SimpleValue.configure(self, config)
        self._pdo = PersistentDataObject(self)
        self._pdo.value = None
        self._pdo.conversion = None
        self._pdo.load()
        conversion = _get_name(self.conversion)
        if (self._pdo.value == None or 
            self._pdo.conversion != conversion):
            self._pdo.value = self.value
            self._pdo.conversion = conversion
            self._pdo.save()
        else:
            self.value = self._pdo.value
    def configuration(self):
        self.value = self._pdo.value
        return SimpleValue.configuration(self)
    def set(self,value,asyncOK=1):
        SimpleValue.set(self, value, asyncOK)
        self._pdo.value = self.value
        self._pdo.save()
    def get(self, skipCache=0):
        return self._pdo.value
Author: mcruse, Project: monotone, Lines: 26, Source: simple_value.py
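
Example 3 invalidates the stored value whenever the configured conversion changes. The same idea generalizes to any persisted value paired with a format tag. The following is a sketch of that generalization; the function and attribute names are illustrative, not part of the mpx API.

from mpx.lib.persistent import PersistentDataObject

def load_tagged_value(node, default, tag):
    pdo = PersistentDataObject(node)
    pdo.value = None
    pdo.tag = None
    pdo.load()
    if pdo.value is None or pdo.tag != tag:
        # Never saved, or saved under a different format: reset and persist.
        pdo.value = default
        pdo.tag = tag
        pdo.save()
    return pdo.value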

Example 4: GarbageCollector

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
class GarbageCollector(ServiceNode):
    def __init__(self):
        ServiceNode.__init__(self)
        self.debug = 0
        self._registered = ()
        self._did_save = 0
        self._post_configuration=0

        if self.debug: print 'Initialized _registered to [] for %s.' % self
    def singleton_unload_hook(self):
        return
    ##
    # @param config
    # @return None
    def configure(self,config):
        ServiceNode.configure(self,config)
    def configuration(self):
        if self.debug: print 'In GarbageCollector:configuration().'
        config = ServiceNode.configuration(self)
        return config
    ##
    #   starts the data manager service
    # @return None
    def start(self):
        from mpx.lib.persistent import PersistentDataObject
        
        ServiceNode.start(self)
        if self.debug: print 'Garbage Collector Starting!'

        self._data = PersistentDataObject(self,dmtype=GC_NEVER)
  
        self._data.registered = []
        self._data.load()
        
        if self.debug: print 'GC Data is %s.' % self._data
        if self.debug: print 'GC Data._reg is %s.' % self._data.registered

    ##
    #   stops the data manager service
    # @return None
    def stop(self):
        return ServiceNode.stop(self)

    ##
    # set_faillist is the hook which allows the system to inform the data
    # manager about which nodes failed to start up.  Each list item should
    # be a dictionary with the following members:
    # name - the name of the node (without parent information)
    # parent - the parent of the node (with any relevant parent information,
    #          e.g. /services/com1
    # type - what type of failure occurred.  Acceptable values are
    #        load and config.
    def set_faillist(self, faillist):
        if self.debug: print 'Got faillist of %s.' % faillist
        if self.debug: print 'Got reglist of %s.' % self._registered

        old_registered = self._data.registered[:]

        # By now, everyone should have had a chance to start up.
        # @fixme (real soon now, I promise):  Use the cool new method that
        # Mark and Shane suggested to consume an event from the root node
        # when all nodes have been started as a trigger for starting
        # the garbage collection process.
        self._data.registered = list(self._registered)
  
        # OK, now process our lists and see who is naughty and who is
        # nice.
        if self.debug: print '---- Starting to Process Potential Reaping List ----'
        for regnode in old_registered:
            purge_type = regnode['type']
            filename = regnode['filename']
            nodename = regnode['nodename']
            
            # If we are never supposed to purge this data, then don't bother
            # to do any more checking
            if purge_type == GC_NEVER:
                if self.debug: print '%s: Skipping because it is GC_NEVER.' % nodename
                continue

            if self.debug: print '%s: Checking.' % nodename
            
            node_did_register = 0
            node_did_fail = 0
            node_did_fail_on_config = 0
            node_did_fail_on_load = 0
            node_did_fail_on_start = 0
            parent_did_fail = 0
            should_purge = 0

            # If this node has registered with us, then we assume that
            # it started up and is present, etc.  This might not always
            # be the correct thing to do, but for now it seems like the
            # correct enough thing to do and should keep performance
            # from becoming an issue.
            if regnode in self._registered:
                if self.debug: print '%s: Appears to be happy.' % nodename
                node_did_register = 1
            else:
                # Check to see if the node or one of its ancestors failed
                for failnode in faillist:
#......... part of the code omitted here .........
Author: mcruse, Project: monotone, Lines: 103, Source: garbage_collector.py

Example 5: set_meta_value

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
 def set_meta_value(self,name,value):
     self.meta[name] = value
     PersistentDataObject.save(self)
Author: mcruse, Project: monotone, Lines: 5, Source: user.py
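
Unlike the other examples, this snippet comes from a class that subclasses PersistentDataObject directly, which is why it calls the unbound PersistentDataObject.save(self) rather than self._pdo.save(). A minimal sketch of that arrangement, assuming the constructor accepts the owning node as in the other examples; the UserRecord class and its meta attribute are illustrative only.

from mpx.lib.persistent import PersistentDataObject

class UserRecord(PersistentDataObject):
    def __init__(self, node):
        # Assumption: the base constructor takes the owning node.
        PersistentDataObject.__init__(self, node)
        self.meta = {}      # default before load()
        self.load()
    def set_meta_value(self, name, value):
        self.meta[name] = value
        PersistentDataObject.save(self)   # persist on every write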

Example 6: ExportersConfigurator

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]

#......... part of the code omitted here .........
        self.updatepdo()
        return exporter.name
    security.protect('configure_node', 'Configure')
    def configure_node(self, name, config):
        exporter = self.manager.get_child(name)
        exporter.configure(config)
        self.updatepdo()
        return exporter.name
    security.protect('node_configuration', 'View')
    def node_configuration(self, name, extended=False):
        exporter = self.manager.get_child(name)
        return exporter.configuration()
    security.protect('configure_formatter', 'Configure')
    def configure_formatter(self, exporter, config):
        return self.configure_node(exporter, {"formatter": config})
    security.protect('formatter_configuration', 'View')
    def formatter_configuration(self, exporter, extended=False):
        return self.node_configuration(exporter).get("formatter", {})
    security.protect('configure_transporter', 'Configure')
    def configure_transporter(self, exporter, config):
        return self.configure_node(exporter, {"transporter": config})
    security.protect('transporter_configuration', 'View')
    def transporter_configuration(self, exporter, extended=False):
        return self.node_configuration(exporter).get("transporter", {})
    security.protect('trigger_configuration', 'View')
    def trigger_configuration(self, name=None):
        manager = self.nodespace.as_node('/services/Alarm Manager')
        sources = [manager] + manager.get_alarms()
        configuration = dict([(source.url, []) for source in sources])
        if name:
            exporter = self.manager.get_child(name)
            configuration.update(exporter.trigger_configuration())
        configs = []
        for source,events in configuration.items():
            configs.append({"source": source, "events": events})
        return configs
    security.protect('configure_triggers', 'Configure')
    def configure_triggers(self, name, triggers=()):
        configuration = {}
        for config in triggers:
            configuration[config["source"]] = config["events"]
        exporter = self.manager.get_child(name)
        exporter.configure_triggers(configuration)
        self.updatepdo()
    security.protect('get_node_names', 'View')
    def get_node_names(self):
        return self.manager.children_names()
    def updatepdo(self):
        exporters = {}
        self._pdo_lock.acquire()
        try:
            for exporter in self.manager.get_exporters():
                exporters[exporter.name] = cPickle.dumps(IPickles(exporter))
            self._pdo.exporters = exporters
            self._pdo.save()
        finally: 
            self._pdo_lock.release()
    def handle_request(self, request):
        update_pdo = False
        response = Response(request)
        request_data = request.get_post_data_as_dictionary()
        request_data.update(request.get_query_string_as_dictionary())
        if request_data.has_key('add'):
            adapt = self.create_exporter("New Exporter")
        elif request_data.has_key('remove'):
            name = urllib.unquote_plus(request_data['remove'][0])
            self.remove_exporter(name)
            adapt = self.container
        elif request_data.has_key('edit'):
            name = urllib.unquote_plus(request_data['edit'][0])
            update_pdo = False
            adapt = self.container.get_exporter(name)
        elif request_data.has_key('configure'):
            name = urllib.unquote_plus(request_data['configure'][0])
            exporter = self.container.get_exporter(name)
            config = {'Exporter': {}, 'Formatter': {}, 'Transporter': {}}
            for attrname in request_data.keys():
                splitname = attrname.split('.')
                if len(splitname) == 2 and config.has_key(splitname[0]):
                    config[splitname[0]][splitname[1]] = urllib.unquote_plus(request_data[attrname][0])
            exportconfig = config['Exporter']
            exportconfig['formatter'] = config['Formatter']
            exportconfig['transporter'] = config['Transporter']
            exporter.configure(exportconfig)
            update_pdo = True
            adapt = exporter
        else: 
            adapt = self.container
        if request_data.has_key('actionName'):
            target = urllib.unquote_plus(request_data.get('target')[0])
            action = urllib.unquote_plus(request_data.get('actionName')[0])
            params = map(urllib.unquote_plus, request_data.get('params'))
            exporter = self.container.get_exporter(target)
            method = getattr(exporter, action)
            result = method(*params)
            update_pdo = True
        if update_pdo:
            self.updatepdo()
        webadapter = IWebContent(adapt)
        response.send(webadapter.render())
Author: mcruse, Project: monotone, Lines: 104, Source: request_handler.py

Example 7: __save

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
 def __save(self):
     result = PersistentDataObject.save(self)
     self.__snapshot(self.saved())
     return result
Author: mcruse, Project: monotone, Lines: 6, Source: trap_exporter.py

Example 8: EnergywiseManager

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
class EnergywiseManager(CompositeNode):
    def __init__(self):
        CompositeNode.__init__(self)
        self._pdo_lock = Lock()
        self._pdo = None
        self.__running = False
        self.debug = 0
        return

    def configure(self,config):
        if self.debug:
            msglog.log('EnergywiseManager:', msglog.types.INFO,
                       'Inside configure' )
        CompositeNode.configure(self, config)
        set_attribute(self, 'debug', 0, config, int)
        return

    def configuration(self):
        config = CompositeNode.configuration(self)
        get_attribute(self, 'debug', config, str)
        return config

  

   # def configure_trend_in_switches(self, start_node, frequency):
    #    for child in start_node.children_nodes():
     #       if child.children_nodes():
      #          self.configure_trend_in_switches(child, frequency)
       #     else:
                # reached a leaf: each energywise switch has trends as a child
        #        child.new_trend(frequency)
        #return

    def delete_trend_configuration(self, trend_domain):
        self._pdo_lock.acquire()
        try:
            if self._pdo.trends.has_key(trend_domain): 
                # stop logging as well
                del self._pdo.trends[trend_domain]
            self._pdo.save()
        finally:
            self._pdo_lock.release()
        return
    def delete_trends(self, trendList):
        if self.debug:
            msglog.log('EnergywiseManager:', msglog.types.INFO, 
                       'Inside delete_trends' )

        for domain in trendList.split(':'):
            if domain:
                domain_node = as_node(domain)
                domain_node.delete_trend()
                self.delete_trend_configuration(domain)
        return
         
    def start(self):
        if self.__running:
            return
        if self.debug:
            msglog.log('EnergywiseManager :', msglog.types.INFO, 'Inside start' )
        CompositeNode.start(self)
#        start_node = as_node('/services/EnergywiseManager/')
#        self.configure_trend_in_switches(start_node, 60)
        self.__running = True
        self._pdo_lock.acquire()
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.trends = {}
        self._pdo.load()
        self._pdo_lock.release()
        if self.has_child('trends'):
            self.trends = self.get_child('trends')
        else:
            self.trends = CompositeNode()
            self.trends.configure({'parent':self, 'name':'trends'})
            self.trends.start()
        # start trending for saved domains
        for domain,freq in self._pdo.trends.items():
            try:
                start_node = as_node(domain)
               # self.configure_trend_in_switches( start_node,freq )
                start_node.new_trend(freq)
            except:
                self.delete_trend_configuration(domain)
        return

    def get_trends(self):
        return self._pdo.trends.items()

    def add_trend_configuration(self, trend_period, trend_domain):
        self._pdo_lock.acquire()
        self._pdo.trends[trend_domain] = trend_period
        self._pdo.save()
        self._pdo_lock.release()
        return
    def save_trends(self, trend_list):
        # Traverse through _pdo.items and check if new domain is either subset
        # of any configured or superset. 
        # If subset return with msg already covered and dont save this
        # If superset then configure new ones and delete subset from 
        # _pdo.items
#......... part of the code omitted here .........
Author: mcruse, Project: monotone, Lines: 103, Source: energywise_manager.py
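
Note that delete_trend_configuration above releases its lock in a finally block, but add_trend_configuration does not: if save() raised there, the lock would never be released and every later PDO update would deadlock. A safer variant of the same method (a sketch, behaviorally identical on the success path):

    def add_trend_configuration(self, trend_period, trend_domain):
        self._pdo_lock.acquire()
        try:
            self._pdo.trends[trend_domain] = trend_period
            self._pdo.save()
        finally:
            self._pdo_lock.release()   # released even if save() raises
        return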

Example 9: CloudManager

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]

#......... part of the code omitted here .........
        
        formation = cloudevent()
        if (self.is_peer_in_formation(self.peer,formation) == False):
            formation = [self.peer]
            self.message('Setting Cloud Formation to self.peer; no longer in Cloud.',msglog.types.INFO)
        
        self._setup_formation(formation,cloudevent.portal)

    
    def _setup_formation(self, formation,portal):
        scheduled, self._scheduled = self._scheduled, None
        if scheduled is not None:
            try: scheduled.cancel()
            except: pass
            else: self.message('Canceled pending dispatch of formation update.')
        self.nformation.set_portal(portal)
        self.nformation.set_formation(formation)
        self.target_formation = self.nformation.compute_targets()
        self.message('Resetting unreachables during Cloud setup.')
        self.reset_unreachables()
        (dispatch,delay)=self.nformation.compute_dispatch_info()
        if (dispatch):
            self._scheduled = scheduler.after(delay, self.dispatcher.dispatch, (FormationUpdated(self),))
            self.message('Scheduled dispatch in %s seconds.' % delay)
        else: self.message('Formation of one peer, no Updated event generated.')
        
        # Save the PDO, if the formation or portal has changed
        if((self._pdo.formation != formation) or (self._pdo.portal != portal) or (self._pdo.peer != self.peer)):
            self.message('New formation/portal found , hence pickling. New Formation is :%s portal is %s' %(str(formation),portal))
            self._pdo.formation=formation[:]
            self._pdo.portal=portal
            self._pdo.peer=self.peer
            tstart = time.time()
            self._pdo.save()
            tend = time.time()
            self.message('New formation pickled and saved in %s seconds.' % (tend - tstart))
        else:
            self.message('Formation/Portal has not changed. Not pickling it. ' )

    
    def update_formation(self, new_formation,portal):
        (no_of_excluded_peers,excludes)=self.nformation.compute_excludes(new_formation)
        if no_of_excluded_peers:
            self.message( 'Notifying removed participants: %s' % (excludes,))
            excludedevent = CloudEvent(self, self.peer, excludes,self.nformation.get_portal(),['CloudFormation'], new_formation)
            self.propogate(excludedevent)
        else: 
            self.message( 'All current Cloud member in new Cloud Formation.')
        self._setup_formation(new_formation,portal)
        self.handle_local_event(new_formation, ['CloudFormation'])

    def handle_propogation_failure(self, notifier):
        cloudevent = notifier.cloudevent
        target_peer = notifier.peer
        # TODO: generate comm failure error to propogate as well.
        # Progpogate event to Cloud Managers target_peer would have notified.
        
        '''
        The target_peer can be portal or a peer.
        If it is a portal then we will not put it in unreachables and also 
        we do not propogate the event.
        Log if we are not connecting to the portal 
        '''
        portal=self.nformation.get_portal()
        if((portal != None ) and (utils.same_host(target_peer,portal))):
            msg='Portal %s is not reachable .' % portal
Author: mcruse, Project: monotone, Lines: 70, Source: manager.py

Example 10: SynchronizedExporter

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]

#......... part of the code omitted here .........
                    self._lock.release()
            else:
                msglog.log('broadway',msglog.types.WARN, 
                           ('Last export still active, ' + 
                            'skipping current request.'))
    def handle_error(self,exc):
        msglog.exception(exc)
    def configure(self, config):
        set_attribute(self,'log_multiple',1,config,int)
        set_attribute(self,'timeout',60,config,int)
        set_attribute(self,'connection_node','/services/network',config)
        set_attribute(self,'connection_attempts',3,config,int)
        Exporter.configure(self, config)
    def configuration(self):
        config = Exporter.configuration(self)
        get_attribute(self,'log_multiple',config,str)
        get_attribute(self,'connection_node',config)
        get_attribute(self,'connection_attempts',config)
        get_attribute(self,'timeout',config,int)
        return config
    def start(self):
        Exporter.start(self)
        if not self.running:
            self.running = 1
            self.connection = as_node(self.connection_node)
            self._event_count = self.log_multiple - 1
            self._time_keeper = PersistentDataObject(self)
            self._time_keeper.start_time = 0
            self._time_keeper.load()
            self._period = self.parent.parent.period
            self.parent.parent.event_subscribe(self, LogAddEntryEvent)
        else: 
            raise EAlreadyRunning
    def stop(self):
        self.running = 0
    def scheduled_time(self):
        return self._end_time
    def go(self, end_time):
        self.debug_information('Exporting.')
        self._lock.acquire()
        try:
            self._end_time = end_time
            self._export(end_time)
            self._end_time = None
            self.debug_information('Done Exporting.')
        except:
            msglog.exception()
        self._lock.release()
    def _export(self,end_time):
        attempts = 0
        connected = 0
        while attempts < self.connection_attempts:
            self.debug_information('Acquiring connection...')
            try:
                connected = self.connection.acquire()
            except:
                msglog.exception()
            if connected:
                self.debug_information('Connection acquired.')
                break
            self.debug_information('Failed to acquire.')
            attempts += 1
        else:
            self.debug_information('Connection failed, aborting.')
            raise EConnectionError('Failed to connect %s times' % attempts)
        try:
            last_break = 0
            end = end_time
            start_time = self._time_keeper.start_time
            while start_time <= end_time:
                self.debug_information('Getting data from %s to %s.' 
                                       % (start_time,end))
                data = self.log.get_range('timestamp',start_time,end)
                if not data:
                    self.debug_information('No Data to export.')
                    raise ENoData('timestamp',start_time,end)
                try:
                    self.debug_information('Calling format.')
                    output = self.formatter.format(data)
                    self.debug_information('Calling transport.')
                    self.transporter.transport(output)
                    self.debug_information('Done transporting.')
                    start_time = end + self._period
                except EBreakupTransfer, e:
                    entry = e.break_at
                    self.debug_information('Breaking up transfer.')
                    if entry['timestamp'] == last_break:
                        # prevents loop where transporter is just failing.
                        raise EIOError('EBreakupTransfer not progressing.')
                    last_break = entry['timestamp']
                    end = last_break - self._period
                    msglog.log('broadway',msglog.types.WARN,
                               'Breaking up data transfer at %s.' % end)
                else:
                    end = end_time
                    self._time_keeper.start_time = start_time
                    self._time_keeper.save()
        finally:
            if connected:
                self.connection.release()
Author: mcruse, Project: monotone, Lines: 104, Source: synchronized_exporter.py
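
The _time_keeper PDO here acts as a checkpoint: start_time is saved only after a transport succeeds, so a crash or restart resumes from the last confirmed point instead of re-sending or skipping data. Distilled into a standalone sketch (fetch and send are hypothetical callables standing in for the log query and transporter, not part of the mpx API):

from mpx.lib.persistent import PersistentDataObject

def export_new_records(node, fetch, send):
    # fetch(start) returns timestamp-ordered records newer than 'start';
    # send(records) raises on failure.
    cursor = PersistentDataObject(node)
    cursor.start_time = 0
    cursor.load()
    records = fetch(cursor.start_time)
    if records:
        send(records)    # advance the checkpoint only if this succeeds
        cursor.start_time = records[-1]['timestamp']
        cursor.save()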

Example 11: dict

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
import time
import string
import inspect
from mpx.lib.persistence import storage
from mpx.lib.persistence import datatypes
from mpx.lib.persistent import PersistentDataObject as PDO

data = dict([(str(key), string.ascii_letters) for key in range(10000)])
pdo = PDO('many-key-test')
pdict = datatypes.PersistentDictionary('many-key-test')
pdo.load()
pdodict = pdo.__dict__

items = data.items()
pdostart = time.time()
for key,value in items:
    pdodict[key] = value
    pdo.save()


pdostop = time.time()

pdictstart = time.time()
for key,value in items:
    pdict[key] = value


pdictstop = time.time()

print 'Took %0.4f seconds to set/save %d PDO attributes' % (pdostop - pdostart, len(items))
print 'Took %0.4f seconds to set/save %d PDict items' % (pdictstop - pdictstart, len(items))

Author: mcruse, Project: monotone, Lines: 31, Source: testing.py
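
Note that the PDO loop above calls save() once per key, so every iteration presumably serializes the entire object, which dominates the measured cost. If per-iteration durability is not required, a batched variant (sketch, using the same names as the benchmark) saves once after the loop:

for key, value in items:
    pdodict[key] = value
pdo.save()   # one serialization for all 10000 keys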

Example 12: FTPTransporter

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
class FTPTransporter(Transporter):
    _last = None
    ##
    # Configure object.
    #
    # @key url  The url that data is to be sent
    #           to.  For example: ftp.hostname.com/tmp/.
    # @key username  The username for the FTP session.
    # @key password  The password for the FTP session.
    # @key file_prefix  The prefix for files that are
    #                   created when data is uploaded.
    # @key file_suffix The suffix to be appended to all
    #                  files created when data is uploaded.
    # @default .dat
    # @key name_scheme  Naming scheme used to name each
    #                   created file.
    # @value timestamp  Insert timestamp between file_prefix
    #                   and file_suffix for each upload.
    # @value count  Insert incremental count of uploads between
    #               prefix and suffix.
    # @value none  Do not use a naming scheme.  Each upload
    #              will overwrite the previous upload.
    # @default timestamp.
    #
    def configure(self, config):
        set_attribute(self, 'host', REQUIRED, config)
        set_attribute(self, 'port', 21, config, int)
        set_attribute(self, 'directory', '', config)
        set_attribute(self, 'username', REQUIRED, config)
        set_attribute(self, 'password', REQUIRED, config)
        #CSCtn64870
        if (config.has_key('timeout') and config['timeout'] == ''):
            config['timeout'] = 'None'
        set_attribute(self, 'timeout', None, config, float)
        set_attribute(self, 'file_prefix', 'cisco', config)
        set_attribute(self, 'file_suffix', '.dat', config)
        set_attribute(self, 'name_scheme', 'timestamp', config)
        set_attribute(self, 'timestamp_format', '%s', config)
        set_attribute(self, 'passive_mode', 1, config, as_boolean)
        set_attribute(self, 'file_append', 0, config, as_boolean)
        Transporter.configure(self, config)
        if self._last is None:
            self._last = PersistentDataObject(self)
            self._last.filename = None
            self._last.count = 1
            self._last.load()
    def configuration(self):
        config = Transporter.configuration(self)
        get_attribute(self, 'host', config)
        get_attribute(self, 'port', config, str)
        get_attribute(self, 'directory', config)
        get_attribute(self, 'username', config)
        get_attribute(self, 'password', config)
        get_attribute(self, 'timeout', config, str)
        get_attribute(self, 'file_prefix', config)
        get_attribute(self, 'file_suffix', config)
        get_attribute(self, 'name_scheme', config)
        get_attribute(self, 'timestamp_format', config)
        get_attribute(self, 'passive_mode', config, as_onoff)
        get_attribute(self, 'file_append', config, str)
        return config
    def transport(self, data):
        filename = self._generate_filename()
        if type(data) == type(''):
            data = StringIO(data)
        ftp = ftplib.FTP()
        ftp.connect(self.host, self.port, self.timeout)
        finished = 0
        try:
            ftp.login(self.username, self.password)
            ftp.set_pasv(self.passive_mode != 0)
            if self.file_append and not self.name_scheme:
                ftp.storlines('APPE ' + self._full_file_name(filename), data)
            else:
                ftp.storlines('STOR ' + self._full_file_name(filename), data)
            self._last.save()
            finished = 1
            data.close()
        finally:
            if not finished:
                # quit hangs if an exception occurred.
                ftp.close()
            else:
                try:
                    ftp.quit()
                except:
                    ftp.close()
    def _generate_filename(self):
        append = ''
        filename = self.file_prefix
        if self.name_scheme == 'incremental':
            append = '%s' % self._last.count
        elif self.name_scheme == 'timestamp':
            filetime = self.parent.time_function(self.parent.scheduled_time())
            filename += time.strftime(self.timestamp_format, filetime)
            append = '_%s' % (self._last.count + 1)
            if filename != self._last.filename:
                self._last.count = 0
                append = ''
        self._last.count += 1
#......... part of the code omitted here .........
Author: mcruse, Project: monotone, Lines: 103, Source: standard.py

Example 13: XMLFormatter

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]

#......... part of the code omitted here .........
            for channel_name in log_rec_dict.keys():
                if channel_name == 'timestamp':
                    continue
                if not self._channels.has_key(channel_name):
                    if not channel_name in removed_channels:
                        msglog.log('fsg:xml_formatter',msglog.types.ERR, \
                                   'Channel %s has been removed from the configuration.' \
                                   % channel_name)
                        removed_channels.append(channel_name)
                    continue
                data_to_send = 1
                self._channels[channel_name]['values'].append((timestamp,log_rec_dict[channel_name],))
        channel_names = self._channels.keys() # it's a list
        # Organize all data from exception log, if any:
        exception_dicts = {} # K:trigger name, V:time-sorted list of  2tuples
                             # (timestamp, message)
        if not self._exception_log is None:
            if self._PDO.exception_log_last_time > start_time:
                start_time = self._PDO.exception_log_last_time + 0.00001 # do not re-send already-sent data
            exception_data = self._exception_log.get_range('timestamp',start_time,end_time)
            for log_rec_dict in exception_data:
                trigger_node_url = log_rec_dict['trigger_node_url']
                trigger_node = as_node(trigger_node_url)
                assert isinstance(trigger_node, FsgComparisonTrigger), \
                       'Node %s should be FsgComparisonTrigger, is %s' \
                       % (trigger_node.name, trigger_node.__class__)
                timestamp = log_rec_dict['timestamp']
                trigger_node_msg = log_rec_dict['trigger_node_msg']
                if not exception_dicts.has_key(trigger_node_url):
                    exception_dicts[trigger_node_url] = {'trigger_node_url':trigger_node_url,'timestamps':[(timestamp,trigger_node_msg,)]}
                else:
                    exception_dicts[trigger_node_url]['timestamps'].append((timestamp,trigger_node_msg,))
                self._PDO.exception_log_last_time = timestamp
                self._PDO.save()
        if (data_to_send == 0) and (len(exception_dicts) == 0):
            msglog.log('fsg:xml_formatter',msglog.types.INFO,'No data or exceptions to send.')
            return None # nothing to send
        # Create an output stream to minimize the combined size of the XML
        # file and the remaining point_dicts contents during formatting:
        stream = StreamWithCallback(self.output_callback)
        stream.set_meta('channel_names',channel_names)
        stream.set_meta('exception_data',exception_dicts.values()) # pass in a list of "values" (dicts), to allow easy iteration
        stream.set_meta('index',0) # number of point time-value lists written to XML output stream
        formatter = SGMLFormatter()
        # Write opening tags:
        formatter.open_tag('data', 
                           info=self.location_info,
                           key=self.location_key
                           )
        formatter.open_tag('device', 
                           info=self.panel_info,
                           key=self.panel_key
                           )
        output = formatter.output()
        self.debug_print(output,None,1)
        stream.write(output)
        stream.set_meta('formatter',formatter)
        stream.set_meta('remaining', '')
        data_mode = 'channels'
        if data_to_send == 0:
            data_mode = 'exceptions' # no data for channels, so skip 'em
        stream.set_meta('data_mode',data_mode)
        return stream
    def output_callback(self, stream):
        remaining = stream.get_meta_value('remaining')
        if remaining:
Author: mcruse, Project: monotone, Lines: 70, Source: xml_formatter.py

Example 14: save

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
 def save(self):
     PersistentDataObject.save(self)
     msglog.log("interfaces....BBMD", msglog.types.INFO, "Saved BBMD table to Persistent Storage")
Author: , Project: , Lines: 5, Source:

Example 15: Control

# Required import: from mpx.lib.persistent import PersistentDataObject [as alias]
# Alternatively: from mpx.lib.persistent.PersistentDataObject import save [as alias]
class Control(CompositeNode):
    ##
    # This attribute is used in the introspective generation
    # of configuration data.
    __module__ = mpx.service.control.__name__
    
    def __init__(self):
        CompositeNode.__init__(self)
        self._status = 'initialized'
        self._stale_apps = []
    def configure(self, config):
        self._pdo = PersistentDataObject(self)
        self._pdo.stats_dict = {} #used to detect changes to xml files based on timestamp. Tuple (modify time, pickle string)
        self._pdo.load()
        # write_priority can be set for the entire control service, though
        # it may be specialized at the individual application node level. 
        set_attribute(self, 'write_priority', 9, config, int)
        CompositeNode.configure(self, config)
    def configuration(self):
        config = CompositeNode.configuration(self)
        self.pdo_file = self._pdo.filename()
        get_attribute(self, 'write_priority', config)
        get_attribute(self, 'pdo_file', config, str)
        return config
    def _save_pdo(self): #no locking needed since the load and save cannot overlap
        start_time = time.time()
        self._pdo.save()
        msglog.log(self.as_node_url(),msglog.types.INFO,
                   'Control service configuration data saved in: %s seconds' % (str(time.time() - start_time),))
    def start(self):
        self._status = 'starting'
        self.stats_dict = {} #clear out stats dict to force reload of app
        self.application_change_detector(1) #starting
    def _start(self):
        CompositeNode.start(self) #get the children ready for a trip...
        #now that the children are started, go back through the list and finish up the "graphical compile"
        for n in self.children_nodes():
            if n.hasattr('map_output_connections'):
                n.map_output_connections()
        for n in self.children_nodes():
            if n.hasattr('map_reference_output_connections'):
                n.map_reference_output_connections()
        for n in self.children_nodes():
            if n.hasattr('resolve_inputs'):
                n.resolve_inputs()
        for n in self.children_nodes():
            if n.hasattr('prepare_run_list'):
                n.prepare_run_list()
        for n in self.children_nodes():
            if n.hasattr('trigger_run_list'):
                n.trigger_run_list()
    def prune_orphaned_schedules(self):
        # remove schedules under /services/time/local/TIM that have no app
        manager = as_node('/services/time/local')
        if manager.has_child('TIM'):
            try:
                sh = as_node('/services/time/local/TIM')
                name_header = 'RZSched_'
                # create list of RZSched_'s under the TIM node
                schedules = filter(lambda k:k[:len(name_header)] == name_header, sh.children_names())
                # compare appname after RZSched_, upto : with our children names
                orphans = filter(lambda k:k.split('_')[1].split(':')[0] not in self.children_names(), schedules)
                for o in orphans:
                    try:
                        sh.get_child(o).prune()
                        msglog.log('Graphical Control:', 'pruned orphaned schedule: ', o)
                    except:
                        msglog.exception()
                if len(orphans):
                    sh.save_schedule()
            except:
                msglog.exception()

    def check_and_load_application_files(self, starting=0):
        app_reloaded = starting #return value to signal that the children need to be started
        save_pdo = 0 #flag to control saving config data to pdo
        files = os.listdir(config_path) #/var/mpx/config/services/control (usually)
        xml_filenames = []
        for f in files:
            if f.find('.xml') > 0 and len(f) == (f.find('.xml') + 4): #any xml file in this folder is assumed to be a control app
                xml_filenames.append(f)
                modify_time = os.stat(config_path + f)[8]
                stale_pdo = True
                no_stats_pdo = True
                if f in self._pdo.stats_dict: #check for change since last time
                    no_stats_pdo = False
                    if self._pdo.stats_dict[f][0] == modify_time:
                        stale_pdo = False #old news, no change detected
                #self.stats_dict[f]=modify_time
                if starting or no_stats_pdo or (stale_pdo and ALLOW_APP_RELOAD): #need to (re)load application
                    if app_reloaded == 0: #only stop all application nodes for the first detected change
                        try:
                            self._status = 'Stopping %s' % (f,)
                            msglog.log(self.as_node_url(),msglog.types.INFO,
                                'Stage 0:  Stop Application templates.')
                            for c in self.children_nodes():
                                if hasattr(c, '_stop_running_app'):
                                    c._stop_running_app()
                        except:
                            msglog.exception()
#......... part of the code omitted here .........
Author: mcruse, Project: monotone, Lines: 103, Source: control.py


Note: The mpx.lib.persistent.PersistentDataObject.save examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.