

Python logger.log Function Code Examples

This article collects typical usage examples of the logger.log function from Python's shinken.log module. If you are wondering how exactly to call log, how it is used in practice, or what real-world examples of log look like, the curated code examples below should help.


The following presents 15 code examples of the log function, sorted by popularity by default.
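Before diving into the examples, here is a minimal usage sketch of the legacy Shinken logging API, based only on the patterns visible in the examples below: a plain message string built with %-formatting, an optional print_it flag (see Example 3), and the level-specific logger.error method that some snippets (Examples 8 and 10) use instead. The daemon_name variable is a hypothetical placeholder for illustration, not part of the original code.

# A minimal sketch, assuming the legacy shinken.log API shown in the examples below
from shinken.log import logger

daemon_name = 'broker-1'  # hypothetical name, for illustration only

# Plain message, the most common pattern in the examples
logger.log("[%s] Starting up" % daemon_name)

# Optional print_it flag, as in Example 3; assumed to control echoing to stdout
logger.log("[%s] Internal detail, log file only" % daemon_name, print_it=False)

# Level-specific call used in Examples 8 and 10 for error reporting
logger.error("[%s] Something went wrong" % daemon_name)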

Example 1: add

 def add(self, elt):
     cls_type = elt.__class__.my_type
     if cls_type == "brok":
         # For brok, we TAG brok with our instance_id
         elt.data["instance_id"] = 0
         self.broks_internal_raised.append(elt)
         return
     elif cls_type == "externalcommand":
         print "Adding in queue an external command", ExternalCommand.__dict__
         self.external_commands.append(elt)
     # Maybe we got a Message from a module; it's a way to ask for something,
     # like a full data dump from a scheduler, for example.
     elif cls_type == "message":
         # We got a message, great!
         print elt.__dict__
         if elt.get_type() == "NeedData":
             data = elt.get_data()
             # A full instance id means: I got no data for this scheduler,
             # so give me everything!
             if "full_instance_id" in data:
                 c_id = data["full_instance_id"]
                 logger.log("A module is asking me to get all initial data from the scheduler %d" % c_id)
                 # so we just reset the connection and the running_id; it will then get all the new data
                 try:
                     self.schedulers[c_id]["con"] = None
                     self.schedulers[c_id]["running_id"] = 0
                 except KeyError:  # maybe this instance was not known, forget it
                     print "WARNING: a module ask me a full_instance_id for an unknown ID!", c_id
Developer ID: wAmpIre, Project: shinken, Lines of code: 28, Source file: brokerdaemon.py

Example 2: load

    def load(self):
        """ Try to import the requested modules and put them in self.imported_modules.
        The previously imported modules, if any, are cleaned up first. """
        now = int(time.time())
        # We get all module files ending with .py
        modules_files = [ fname[:-3] for fname in os.listdir(self.modules_path) 
                         if fname.endswith(".py") ]

        # And directories
        modules_files.extend([ fname for fname in os.listdir(self.modules_path)
                               if os.path.isdir(os.path.join(self.modules_path, fname)) ])

        # Now we try to load them
        # So first we add their dir into the sys.path
        if not self.modules_path in sys.path:
            sys.path.append(self.modules_path)

        # We try to import them, but we keep only the ones of
        # our type
        del self.imported_modules[:]
        for fname in modules_files:
            #print "Try to load", fname
            try:
                m = __import__(fname)
                if not hasattr(m, 'properties'):
                    continue

                # We want to keep only the modules of our type
                if self.modules_type in m.properties['daemons']:
                    self.imported_modules.append(m)
            except Exception, exp:
                logger.log("Warning in importing module: %s" % exp)
Developer ID: mleinart, Project: shinken, Lines of code: 32, Source file: modulesmanager.py

Example 3: is_me

 def is_me(self):
     logger.log(
         "And arbiter is launched with the hostname:%s from an arbiter point of view of addr :%s"
         % (self.host_name, socket.getfqdn()),
         print_it=False,
     )
     return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
Developer ID: jfbutkiewicz, Project: Windows-Setup-for-Shinken, Lines of code: 7, Source file: arbiterlink.py

Example 4: compensate_system_time_change

    def compensate_system_time_change(self, difference):
        """ Compensate a system time change of difference for all hosts/services/checks/notifs """
        logger.log('Warning: A system time change of %d has been detected.  Compensating...' % difference)
        # We only need to change some value
        self.program_start = max(0, self.program_start + difference)

        # Then we compensate all hosts/services
        for h in self.sched.hosts:
            h.compensate_system_time_change(difference)
        for s in self.sched.services:
            s.compensate_system_time_change(difference)

        # Now all checks and actions
        for c in self.sched.checks.values():
            # Already launched checks should not be touched
            if c.status == 'scheduled':
                t_to_go = c.t_to_go
                ref = c.ref
                new_t = max(0, t_to_go + difference)
                # But it's not so simple, we must match the timeperiod
                new_t = ref.check_period.get_next_valid_time_from_t(new_t)
                # But maybe there is no new valid value! Not good :(
                # Flag it as an error, with an error output
                if new_t is None:
                    c.state = 'waitconsume'
                    c.exit_status = 2
                    c.output = '(Error: there is no available check time after time change!)'
                    c.check_time = time.time()
                    c.execution_time = 0
                else:
                    c.t_to_go = new_t
                    ref.next_chk = new_t

        # Now all checks and actions
        for c in self.sched.actions.values():
            # Already launched checks should not be touched
            if c.status == 'scheduled':
                t_to_go = c.t_to_go

                # Event handlers do not have a ref
                ref = getattr(c, 'ref', None)
                new_t = max(0, t_to_go + difference)

                # Notifications should be checked against the notification_period
                if c.is_a == 'notification':
                    # But it's not so simple, we must match the timeperiod
                    new_t = ref.notification_period.get_next_valid_time_from_t(new_t)
                    # And they have a creation_time variable too
                    c.creation_time = c.creation_time + difference

                # But maybe there is no new valid value! Not good :(
                # Flag it as an error, with an error output
                if new_t is None:
                    c.state = 'waitconsume'
                    c.exit_status = 2
                    c.output = '(Error: there is no available check time after time change!)'
                    c.check_time = time.time()
                    c.execution_time = 0
                else:
                    c.t_to_go = new_t
Developer ID: zoranzaric, Project: shinken, Lines of code: 60, Source file: schedulerdaemon.py

Example 5: setup_new_conf

    def setup_new_conf(self):
        conf = self.new_conf
        self.new_conf = None
        self.cur_conf = conf
        # Got our name from the globals
        if 'receiver_name' in conf['global']:
            name = conf['global']['receiver_name']
        else:
            name = 'Unnamed receiver'
        self.name = name
        self.log.load_obj(self, name)

        print "[%s] Sending us configuration %s" % (self.name, conf)

        if not self.have_modules:
            self.modules = mods = conf['global']['modules']
            self.have_modules = True
            logger.log("[%s] We received modules %s " % (self.name,  mods))

        # Set the timezone given to us by the arbiter
        use_timezone = conf['global']['use_timezone']
        if use_timezone != 'NOTSET':
            logger.log("[%s] Setting our timezone to %s" % (self.name, use_timezone))
            os.environ['TZ'] = use_timezone
            time.tzset()
Developer ID: pjjw, Project: shinken, Lines of code: 25, Source file: receiverdaemon.py

Example 6: get_scheduler_ordered_list

    def get_scheduler_ordered_list(self, r):
        # Get scheds: alive and non-spare ones first
        scheds = []
        for s in r.schedulers:
            scheds.append(s)

        # Now the spare scheds of higher realms:
        # they come after the scheds of this realm, so
        # they will be used after this realm's own
        # spares
        for higher_r in r.higher_realms:
            for s in higher_r.schedulers:
                if s.spare:
                    scheds.append(s)

        # Now we sort the scheds so we take masters, then spares,
        # then the dead ones, which we do not care about
        scheds.sort(alive_then_spare_then_deads)
        scheds.reverse()  # pop takes the last element, so reverse to get the first ones out first

        #DBG: dump
        print_sched = [s.get_name() for s in scheds]
        print_sched.reverse()
        print_string = '[%s] Schedulers order : ' % r.get_name()
        for s in print_sched:
            print_string += '%s ' % s
        logger.log(print_string)
        #END DBG

        return scheds
Developer ID: wAmpIre, Project: shinken, Lines of code: 30, Source file: dispatcher.py

Example 7: hook_save_retention

    def hook_save_retention(self, daemon):
        log_mgr = logger
        logger.log("[PickleRetentionGeneric] asking me to update the retention objects")

        # Now the flat-file method
        try:
            # Open a file near the real path, with a .tmp extension,
            # so in case of a problem we do not lose the old one
            f = open(self.path+'.tmp', 'wb')
            
            # We get the interesting retention data from the daemon itself
            all_data = daemon.get_retention_data()
            
            # And we save it on file :)

            #s = cPickle.dumps(all_data)
            #s_compress = zlib.compress(s)
            cPickle.dump(all_data, f, protocol=cPickle.HIGHEST_PROTOCOL)
            #f.write(s_compress)
            f.close()
            
            # Now move the .tmp file to the real path
            shutil.move(self.path+'.tmp', self.path)
        except IOError , exp:
            log_mgr.log("Error: retention file creation failed, %s" % str(exp))
            return
Developer ID: zoranzaric, Project: shinken, Lines of code: 26, Source file: pickle_retention_file_generic.py

Example 8: search

def search(look_at):
    # Now really run the search
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']
    
    # Ok, we will query shinken.io with a 10s timeout
    c = pycurl.Curl()
    c.setopt(c.POST, 0)
    c.setopt(c.CONNECTTIMEOUT, 10)
    c.setopt(c.TIMEOUT, 10)
    if proxy:
        c.setopt(c.PROXY, proxy)

    args = {'keywords':','.join(look_at)}
    c.setopt(c.URL, str('shinken.io/searchcli?'+urllib.urlencode(args)))
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    #c.setopt(c.VERBOSE, 1)
    c.perform()
    r = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    if r != 200:
        logger.error("There was a critical error : %s" % response.getvalue())
        sys.exit(2)
    else:    
        ret  = json.loads(response.getvalue().replace('\\/', '/'))
        status = ret.get('status')
        result   = ret.get('result')
        if status != 200:
            logger.log(result)
            return []
        return result
Developer ID: kjellski, Project: shinken, Lines of code: 32, Source file: cli.py

Example 9: main

 def main(self):
     self.set_signal_handler()
     logger.log("[%s[%d]]: Now running.." % (self.name, os.getpid()))
     while not self.interrupted:
         self.do_loop_turn()
     self.do_stop()
     logger.log("[%s]: exiting now.." % (self.name))
Developer ID: bs-github, Project: shinken, Lines of code: 7, Source file: basemodule.py

Example 10: publish_archive

def publish_archive(archive):
    # Now really publish it
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']
    
    # Ok we will push the file with a 10s timeout
    c = pycurl.Curl()
    c.setopt(c.POST, 1)
    c.setopt(c.CONNECTTIMEOUT, 10)
    c.setopt(c.TIMEOUT, 10)
    if proxy:
        c.setopt(c.PROXY, proxy)
    c.setopt(c.URL, "http://shinken.io/push")
    c.setopt(c.HTTPPOST, [("api_key", api_key),
                          ("data",
                           (c.FORM_FILE, str(archive),
                            c.FORM_CONTENTTYPE, "application/x-gzip"))
                          ])
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    c.setopt(c.VERBOSE, 1)
    c.perform()
    r = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    if r != 200:
        logger.error("There was a critical error : %s" % response.getvalue())
        sys.exit(2)
    else:    
        ret  = json.loads(response.getvalue().replace('\\/', '/'))
        status = ret.get('status')
        text   = ret.get('text')
        if status == 200:
            logger.log(text)
        else:
            logger.error(text)
Developer ID: kjellski, Project: shinken, Lines of code: 35, Source file: cli.py

Example 11: get_new_broks

    def get_new_broks(self, type='scheduler'):
        # Get the right links table for looping over
        links = self.get_links_from_type(type)
        if links is None:
            logger.log('DBG: Type unknown for connection! %s' % type)
            return

        # We check each scheduler for new broks and put
        # them in our queues
        for sched_id in links:
            try:
                con = links[sched_id]['con']
                if con is not None:  # None = not initialized
                    tmp_broks = con.get_broks()
                    for b in tmp_broks.values():
                        b.instance_id = links[sched_id]['instance_id']

                    # Ok, we can add these broks to our queues
                    self.add_broks_to_queue(tmp_broks.values())

                else: # no con? make the connection
                    self.pynag_con_init(sched_id, type=type)
            # Ok, con is not known, so we create it
            except KeyError , exp:
                print exp
                self.pynag_con_init(sched_id, type=type)
            except Pyro.errors.ProtocolError , exp:
                logger.log("[%s] Connection problem to the %s %s : %s" % (self.name, type, links[sched_id]['name'], str(exp)))
                links[sched_id]['con'] = None
Developer ID: bs-github, Project: shinken, Lines of code: 29, Source file: brokerdaemon.py

Example 12: init

    def init(self):
        logger.log("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database, 
                          self.character_set, table_prefix='nagios_', port=self.port)
        self.connect_database()

        # Cache for hosts and services
        # The structure is as follows:
        # first the instance id, then the host / (host, service desc) key to access the wanted data
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # We need to search for Centreon-specific fields, like long_output
        query = u"select TABLE_NAME from information_schema.columns where TABLE_SCHEMA='ndo' and TABLE_NAME='nagios_servicestatus' and COLUMN_NAME='long_output';"
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.log("[MySQL/NDO] Using the centreon version")

        # Cache for database id
        # In order not to query the database every time
        self.database_id_cache = {}

        # Mapping between service_id in Shinken and in the database,
        # because we can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Todo list to manage broks
        self.todo = []
Developer ID: jfbutkiewicz, Project: Windows-Setup-for-Shinken, Lines of code: 32, Source file: ndodb_mysql_broker.py

Example 13: is_correct

    def is_correct(self):
        state = True #guilty or not? :)
        cls = self.__class__

        #All of the above are checks in the notificationways part
        for prop, entry in cls.properties.items():
            if prop not in _special_properties:
                if not hasattr(self, prop) and entry.required:
                    print self.get_name(), " : I do not have", prop
                    state = False #Bad boy...

        # There is a case where there is no notificationway: when no special_prop is defined
        # at all!!
        if self.notificationways == []:
            for p in _special_properties:
                print self.get_name()," : I'm missing the property %s" % p
                state = False

        if hasattr(self, 'contact_name'):
            for c in cls.illegal_object_name_chars:
                if c in self.contact_name:
                    logger.log("%s : My contact_name got the caracter %s that is not allowed." % (self.get_name(), c))
                    state = False
        else:
            if hasattr(self, 'alias'): #take the alias if we miss the contact_name
                self.contact_name = self.alias

        return state
Developer ID: bs-github, Project: shinken, Lines of code: 28, Source file: contact.py

Example 14: no_loop_in_parents

    def no_loop_in_parents(self):
        # Ok, we say "from now, no loop :) "
        r = True

        # Create parent graph
        parents = Graph()

        # With all hosts as nodes
        for h in self:
            if h is not None:
                parents.add_node(h)

        # And now fill edges
        for h in self:
            for p in h.parents:
                if p is not None:
                    parents.add_edge(p, h)

        # Now get the list of all hosts in a loop
        host_in_loops = parents.loop_check()

        # and raise errors about it
        for h in host_in_loops:
            logger.log("Error: The host '%s' is part of a circular parent/child chain!" % h.get_name())
            r = False

        return r
Developer ID: zoranzaric, Project: shinken, Lines of code: 27, Source file: host.py

Example 15: add

 def add(self, elt):
     cls_type = elt.__class__.my_type
     if cls_type == 'brok':
         # For brok, we TAG brok with our instance_id
         elt.data['instance_id'] = 0
         self.broks_internal_raised.append(elt)
         return
     elif cls_type == 'externalcommand':
         print "Adding in queue an external command", ExternalCommand.__dict__
         self.external_commands.append(elt)
     # Maybe we got a Message from a module; it's a way to ask for something,
     # like a full data dump from a scheduler, for example.
     elif cls_type == 'message':
         # We got a message, great!
         print elt.__dict__
         if elt.get_type() == 'NeedData':
             data = elt.get_data()
             # A full instance id means: I got no data for this scheduler,
             # so give me everything!
             if 'full_instance_id' in data:
                 c_id = data['full_instance_id']
                 logger.log('A module is asking me to get all initial data from the scheduler %d' % c_id)
                 # so we just reset the connection and the running_id; it will then get all the new data
                 self.schedulers[c_id]['con'] = None
                 self.schedulers[c_id]['running_id'] = 0
Developer ID: zoranzaric, Project: shinken, Lines of code: 25, Source file: brokerdaemon.py


Note: The shinken.log.logger.log function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; for distribution and use, please refer to each project's license. Do not reproduce without permission.