

Python logger.debug Function Code Examples

This article collects and summarizes typical usage examples of the Python shinken.log.logger.debug function. If you are wondering exactly how the debug function is used, how to call it, or what real-world usage looks like, the curated examples below should help.


Fifteen code examples of the debug function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
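
Before diving into the examples, here is a minimal usage sketch of how a Shinken module typically obtains and calls this logger. The "[MyModule]" tag and the report_load function are hypothetical, and the import assumes a Shinken (Python 2) environment where the shinken package is installed:

# A minimal sketch, not taken from any of the projects below.
from shinken.log import logger

def report_load(load, warn=2.0, crit=5.0):
    # Pre-formatted message, as most of the examples below do
    logger.debug("[MyModule] current load is %s" % load)
    # Passing the format arguments separately also works, as in several examples
    logger.debug("[MyModule] thresholds: warn=%s crit=%s", warn, crit)
    if load >= crit:
        return 2
    if load >= warn:
        return 1
    return 0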

Example 1: get_page

def get_page():
    commands_list = []

    try:
        # Getting lists of informations for the commands
        time_stamp_list = []
        host_name_list = []
        service_description_list = []
        return_code_list = []
        output_list = []
        time_stamp_list = request.forms.getall(key="time_stamp")
        logger.debug("[WS_Arbiter] time_stamp_list: %s" % (time_stamp_list))
        host_name_list = request.forms.getall(key="host_name")
        logger.debug("[WS_Arbiter] host_name_list: %s" % (host_name_list))
        service_description_list = request.forms.getall(key="service_description")
        logger.debug("[WS_Arbiter] service_description_list: %s" % (service_description_list))
        return_code_list = request.forms.getall(key="return_code")
        logger.debug("[WS_Arbiter] return_code_list: %s" % (return_code_list))
        output_list = request.forms.getall(key="output")
        logger.debug("[WS_Arbiter] output_list: %s" % (output_list))
        commands_list = get_commands(
            time_stamp_list, host_name_list, service_description_list, return_code_list, output_list
        )
    except Exception, e:
        logger.error("[WS_Arbiter] failed to get the lists: %s" % str(e))
        commands_list = []
Developer: axadil, Project: mod-ws-arbiter, Lines of code: 26, Source file: module.py

Example 2: set_value

def set_value(obj_ref, output=None, perfdata=None, return_code=None):
    obj = get_object(obj_ref)
    if not obj:
        return
    output = output or obj.output
    perfdata = perfdata or obj.perf_data
    if return_code is None:
        return_code = obj.state_id

    logger.debug("[trigger] Setting %s %s %s for object %s" % (output, perfdata, return_code, obj.get_full_name()))

    if perfdata:
        output = output + ' | ' + perfdata

    now = time.time()
    cls = obj.__class__
    i = obj.launch_check(now, force=True)
    for chk in obj.checks_in_progress:
        if chk.id == i:
            logger.debug("[trigger] I found the check I want to change")
            c = chk
            # Now we 'transform the check into a result'
            # So exit_status, output and status is eaten by the host
            c.exit_status = return_code
            c.get_outputs(output, obj.max_plugins_output_length)
            c.status = 'waitconsume'
            c.check_time = now
            # IMPORTANT: tag this check as from a trigger, so we will not
            # loop in an infinite way for triggers checks!
            c.from_trigger = True
Developer: JamesYuan, Project: shinken, Lines of code: 30, Source file: trigger_functions.py

Example 3: get_live_data_log

    def get_live_data_log(self):
        """Like get_live_data, but for log objects"""
        # finalize the filter stacks
        self.mongo_time_filter_stack.and_elements(self.mongo_time_filter_stack.qsize())
        self.mongo_filter_stack.and_elements(self.mongo_filter_stack.qsize())
        if self.use_aggressive_sql:
            # Be aggressive, get preselected data from sqlite and do less
            # filtering in python. But: only a subset of Filter:-attributes
            # can be mapped to columns in the logs-table, for the others
            # we must use "always-true"-clauses. This can result in
            # funny and potentially ineffective sql-statements
            mongo_filter_func = self.mongo_filter_stack.get_stack()
        else:
            # Be conservative, get everything from the database between
            # two dates and apply the Filter:-clauses in python
            mongo_filter_func = self.mongo_time_filter_stack.get_stack()
        dbresult = []
        mongo_filter = mongo_filter_func()
        logger.debug("[Logstore MongoDB] Mongo filter is %s" % str(mongo_filter))
        # We can apply the filterstack here as well. we have columns and filtercolumns.
        # the only additional step is to enrich log lines with host/service-attributes
        # A timerange can be useful for a faster preselection of lines

        filter_element = eval('{ ' + mongo_filter + ' }')
        logger.debug("[LogstoreMongoDB] Mongo filter is %s" % str(filter_element))
        columns = ['logobject', 'attempt', 'logclass', 'command_name', 'comment', 'contact_name', 'host_name', 'lineno', 'message', 'plugin_output', 'service_description', 'state', 'state_type', 'time', 'type']
        if not self.is_connected == CONNECTED:
            logger.warning("[LogStoreMongoDB] sorry, not connected")
        else:
            dbresult = [Logline([(c,) for c in columns], [x[col] for col in columns]) for x in self.db[self.collection].find(filter_element).sort([(u'time', pymongo.ASCENDING), (u'lineno', pymongo.ASCENDING)])]
        return dbresult
Developer: Caez83, Project: mod-logstore-mongodb, Lines of code: 31, Source file: module.py

Example 4: show_minemap

def show_minemap():
    user = app.request.environ['USER']

    # Apply search filter if exists ...
    search = app.request.query.get('search', "type:host")
    if not "type:host" in search:
        search = "type:host "+search
    logger.debug("[WebUI-worldmap] search parameters '%s'", search)
    items = app.datamgr.search_hosts_and_services(search, user, get_impacts=False)
    
    # Fetch elements per page preference for user, default is 25
    elts_per_page = app.prefs_module.get_ui_user_preference(user, 'elts_per_page', 25)

    # We want to limit the number of elements
    step = int(app.request.GET.get('step', elts_per_page))
    start = int(app.request.GET.get('start', '0'))
    end = int(app.request.GET.get('end', start + step))
        
    # If we overflow, came back as normal
    total = len(items)
    if start > total:
        start = 0
        end = step

    navi = app.helper.get_navi(total, start, step=step)

    return {'navi': navi, 'search_string': search, 'items': items[start:end], 'page': "minemap"}
Developer: vizvayu, Project: mod-webui, Lines of code: 27, Source file: minemap.py

Example 5: manage_log_brok

    def manage_log_brok(self, brok):
        """
        Parse a Shinken log brok to enqueue a log line for Index insertion
        """
        d = date.today()
        index_name = self.index_prefix + "-" + d.strftime("%Y.%m.%d")

        line = brok.data["log"]
        if re.match("^\[[0-9]*\] [A-Z][a-z]*.:", line):
            # Match log which NOT have to be stored
            logger.warning("[elastic-logs] do not store: %s", line)
            return

        logline = Logline(line=line)
        logline_dict = logline.as_dict()
        logline_dict.update({"@timestamp": datetime.utcfromtimestamp(int(logline_dict["time"])).isoformat() + "Z"})
        values = {"_index": index_name, "_type": "shinken-logs", "_source": logline_dict}

        # values = logline.as_dict()
        if logline.logclass != LOGCLASS_INVALID:
            logger.debug("[elastic-logs] store log line values: %s", values)
            self.logs_cache.append(values)
        else:
            logger.info("[elastic-logs] This line is invalid: %s", line)

        return
Developer: descrepes, Project: mod-elastic-logs, Lines of code: 26, Source file: module.py

Example 6: get_graph_uris

    def get_graph_uris(self, elt, graphstart=None, graphend=None, duration=None, source='detail'):
        ''' Aggregate the get_graph_uris of all the submodules. 
            The source parameter defines the source of the calling: 
            Are we displaying graphs for the element detail page (detail), 
            or a widget in the dashboard (dashboard) ?
            
            If duration is not None, we consider it as a number of seconds to graph and 
            we call the module get_relative_graphs_uri
            
            If get_relative_graphs_uri is not a module function we compute graphstart and 
            graphend and we call the module get_graphs_uri
            
            If graphstart and graphend are not None, we call the module get_graphs_uri
        '''
        uris = []
        for mod in self.modules:
            if not duration:
                uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source))
            else:
                f = getattr(mod, 'get_relative_graph_uris', None)
                if f and callable(f):
                    uris.extend(f(elt, duration, source))
                else:
                    graphend = time.time()
                    graphstart = graphend - duration
                    uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source))
                
            logger.debug("[WebUI] Got graphs: %s", uris)

        for uri in uris:
            uri['img_src'] = '/graph?url=' + urllib.quote(uri['img_src'])

        return uris
Developer: Azef1, Project: mod-webui, Lines of code: 33, Source file: graphs.py

Example 7: do_pynag_con_init

    def do_pynag_con_init(self, id, type='scheduler'):
        # Get the good links tab for looping..
        links = self.get_links_from_type(type)
        if links is None:
            logger.debug('Type unknown for connection! %s', type)
            return

        if type == 'scheduler':
            # If sched is not active, I do not try to init
            # it is just useless
            is_active = links[id]['active']
            if not is_active:
                return

        # If we try to connect too much, we slow down our tests
        if self.is_connection_try_too_close(links[id]):
            return

        # Ok, we can now update it
        links[id]['last_connection'] = time.time()

        # DBG: print "Init connection with", links[id]['uri']
        running_id = links[id]['running_id']
        # DBG: print "Running id before connection", running_id
        uri = links[id]['uri']
        try:
            con = links[id]['con'] = HTTPClient(uri=uri, strong_ssl=links[id]['hard_ssl_name_check'])
        except HTTPExceptions, exp:
            # But the multiprocessing module is not compatible with it!
            # so we must disable it immediately after
            logger.info("Connection problem to the %s %s: %s", type, links[id]['name'], str(exp))
            links[id]['con'] = None
            return
Developer: h4wkmoon, Project: shinken, Lines of code: 33, Source file: brokerdaemon.py

Example 8: get_instance

def get_instance(plugin):
    logger.debug("[MySQLImport]: Get MySQL importer instance for plugin %s" % plugin.get_name())
    if not MySQLdb:
        raise Exception('Missing module python-mysqldb. Please install it.')
    host = plugin.host
    login = plugin.login
    password = plugin.password
    database = plugin.database
    reqlist = {}
    reqlist['hosts'] = getattr(plugin, 'reqhosts', None)
    reqlist['commands'] = getattr(plugin, 'reqcommands', None)
    reqlist['timeperiods'] = getattr(plugin, 'reqtimeperiods', None)
    reqlist['notificationways'] = getattr(plugin, 'reqnotificationways', None)
    reqlist['services'] = getattr(plugin, 'reqservices', None)
    reqlist['servicegroups'] = getattr(plugin, 'reqservicegroups', None)
    reqlist['contacts'] = getattr(plugin, 'reqcontacts', None)
    reqlist['contactgroups'] = getattr(plugin, 'reqcontactgroups', None)
    reqlist['hostgroups'] = getattr(plugin, 'reqhostgroups', None)
    reqlist['hostdependencies'] = getattr(plugin, 'reqhostdependencies', None)
    reqlist['servicedependencies'] = getattr(plugin, 'reqservicedependencies', None)
    reqlist['realms'] = getattr(plugin, 'reqrealms', None)
    reqlist['schedulers'] = getattr(plugin, 'reqschedulers', None)
    reqlist['pollers'] = getattr(plugin, 'reqpollers', None)
    reqlist['brokers'] = getattr(plugin, 'reqbrokers', None)
    reqlist['reactionners'] = getattr(plugin, 'reqreactionners', None)
    reqlist['receivers'] = getattr(plugin, 'reqreceivers', None)

    instance = MySQL_importer_arbiter(plugin, host, login, password, database, reqlist)
    return instance
Developer: dgilm, Project: mod-import-mysql, Lines of code: 29, Source file: module.py

Example 9: manage_unknown_service_check_result_brok

    def manage_unknown_service_check_result_brok(self, b):
        data = b.data

        tags = {
            "host_name": data['host_name'],
            "service_description": data['service_description']
        }

        post_data = []

        post_data.extend(
            self.get_check_result_perfdata_points(
                b.data['perf_data'],
                b.data['time_stamp'],
                tags=tags
            )
        )

        try:
            logger.debug(
                "[influxdb broker] Generated points: %s" % str(post_data))
        except UnicodeEncodeError:
            pass

        self.extend_buffer(post_data)
Developer: cyberflow, Project: mod-influxdb, Lines of code: 25, Source file: module.py

Example 10: process_check_result

    def process_check_result(self, databuffer, IV):
        # 208 is the size of fixed received data ... NSCA packets are 208+512 (720) or 208+4096 (4304)
        if not databuffer:
            logger.warning("[NSCA] Received an empty NSCA packet")
            return

        logger.debug("[NSCA] Received NSCA packet: %s", binascii.hexlify(databuffer))

        payload_length = len(databuffer) - 208
        if payload_length != 512 and payload_length != 4096:
            logger.warning("[NSCA] Received packet with unusual payload length: %d.", payload_length)
            
        if self.payload_length != -1 and payload_length != self.payload_length:
            logger.warning("[NSCA] Dropping packet with incorrect payload length.")
            return
            
        (timestamp, rc, hostname, service, output) = self.read_check_result(databuffer, IV, payload_length)
        current_time = time.time()
        check_result_age = current_time - timestamp
        if timestamp > current_time and self.check_future_packet:
            logger.warning("[NSCA] Dropping packet with future timestamp.")
        elif check_result_age > self.max_packet_age:
            logger.info(
                "[NSCA] Dropping packet with stale timestamp - packet was %s seconds old. Timestamp: %s for %s/%s" % \
                (check_result_age, timestamp, hostname, service))
        else:
            self.post_command(timestamp, rc, hostname, service, output)
Developer: shinken-monitoring, Project: mod-nsca, Lines of code: 27, Source file: module.py

Example 11: get_commands

def get_commands(time_stamps, hosts, services, return_codes, outputs):
    """Composing a command list based on the information received in
    POST request"""

    commands = []

    current_time_stamp = int(time.time())

    def _compose_command(t, h, s, r, o):
        """Simple function to create a command from the inputs"""
        cmd = ""
        if not s or s == "":
            cmd = '[%s] PROCESS_HOST_CHECK_RESULT;%s;%s;%s' % (t if t is not None else current_time_stamp, h, r, o)
        else:
            cmd = '[%s] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s;%s' % (t if t is not None else current_time_stamp, h, s, r, o)
        logger.debug("[Ws_arbiter] CMD: %s" % (cmd))
        commands.append(cmd)

    # Trivial case: empty commmand list
    if (return_codes is None or len(return_codes) == 0):
        return commands

    # Sanity check: if we get N return codes, we must have N hosts.
    # The other values could be None
    if (len(return_codes) != len(hosts)):
        logger.error("[Ws_arbiter] number of return codes (%d) does not match number of hosts (%d)" % (len(return_codes), len(hosts)))
        abort(400, "number of return codes does not match number of hosts")

    map(_compose_command, time_stamps, hosts, services, return_codes, outputs)
    logger.debug("[Ws_arbiter] commands = %s" % (str(commands)))
    return commands
Developer: David-, Project: shinken, Lines of code: 31, Source file: module.py
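
For context, the strings built by _compose_command above follow the standard Nagios/Shinken external command layout. Below is a small, self-contained sketch of the two formats; the host, service and output values are made up for illustration:

import time

# Hypothetical sample values, only to illustrate the command layout
ts = int(time.time())
host_cmd = '[%s] PROCESS_HOST_CHECK_RESULT;%s;%s;%s' % (ts, 'srv01', 0, 'OK - load is fine')
svc_cmd = '[%s] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s;%s' % (ts, 'srv01', 'HTTP', 2, 'CRITICAL - connection refused')
print(host_cmd)  # e.g. [1415629917] PROCESS_HOST_CHECK_RESULT;srv01;0;OK - load is fine
print(svc_cmd)   # e.g. [1415629917] PROCESS_SERVICE_CHECK_RESULT;srv01;HTTP;2;CRITICAL - connection refused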

Example 12: hook_tick

    def hook_tick(self, brok):
        """Each second the broker calls the hook_tick function
        Every tick try to flush the buffer
        """

        if self.buffer == []:
            return

        # Todo : why we need this?
        if self.ticks >= self.tick_limit:
            # If the number of ticks where data was not
            # sent successfully to the raw socket reaches the buffer limit.
            # Reset the buffer and reset the ticks
            self.buffer = []
            self.ticks = 0
            return

        # Real memory size
        if sum(x.__sizeof__() for x in self.buffer) > self.max_buffer_size:
            logger.debug("[RawSocket broker] Buffer size exceeded. I delete %d lines"
                         % self.lines_deleted)
            self.buffer = self.buffer[self.lines_deleted:]

        self.ticks += 1

        try:
            self.con.sendall('\n'.join(self.buffer).encode('UTF-8') + '\n')
        except IOError, err:
            logger.error("[RawSocket broker] Failed sending to the Raw network socket! IOError:%s"
                         % str(err))
            self.init()
            return
Developer: savoirfairelinux, Project: mod-rawsocket, Lines of code: 32, Source file: module.py

Example 13: set_ui_user_preference

    def set_ui_user_preference(self, user, key, value):
        if not self.is_connected:
            if not self.open():
                logger.error("[WebUI-MongoDBPreferences] error during initialization, no database connection!")
                return None

        if not user:
            logger.warning("[WebUI-MongoDBPreferences] error set_ui_user_preference, no user!")
            return None

        try:
            # check a collection exist for this user
            u = self.db.ui_user_preferences.find_one({'_id': user.get_name()})
            if not u:
                # no collection for this user? create a new one
                self.db.ui_user_preferences.save({'_id': user.get_name(), key: value})

            r = self.db.ui_user_preferences.update({'_id': user.get_name()}, {'$set': {key: value}})
            # Maybe there was no doc there, if so, create an empty one
            if not r:
                # Maybe the user exist, if so, get the whole user entry
                u = self.db.ui_user_preferences.find_one({'_id': user.get_name()})
                if not u:
                    logger.debug("[WebUI-MongoDBPreferences] No user entry for %s, I create a new one", user.get_name())
                    self.db.ui_user_preferences.save({'_id': user.get_name(), key: value})
                else:  # ok, it was just the key that was missing, just update it and save it
                    u[key] = value
                    logger.debug("[WebUI-MongoDBPreferences] Just saving the new key in the user pref")
                    self.db.ui_user_preferences.save(u)
        except Exception, e:
            logger.warning("[WebUI-MongoDBPreferences] Exception: %s", str(e))
            self.is_connected = False
            return None
Developer: Azef1, Project: mod-webui, Lines of code: 33, Source file: prefs.py

Example 14: do_recheck

def do_recheck():
    # Getting lists of informations for the commands
    time_stamp          = request.forms.get('time_stamp', int(time.time()))
    host_name           = request.forms.get('host_name', '')
    service_description = request.forms.get('service_description', '')
    logger.debug("[WS_Arbiter] Timestamp '%s' - host: '%s', service: '%s'" % (time_stamp,
                                                                              host_name,
                                                                              service_description
                                                                             )
                )

    if not host_name:
        abort(400, 'Missing parameter host_name')

    if service_description:
        # SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
        command = '[%s] SCHEDULE_FORCED_SVC_CHECK;%s;%s;%s\n' % (time_stamp,
                                                                 host_name,
                                                                 service_description,
                                                                 time_stamp)
    else:
        # SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
        command = '[%s] SCHEDULE_FORCED_HOST_CHECK;%s;%s\n' % (time_stamp,
                                                               host_name,
                                                               time_stamp)

    # We check for auth if it's not anonymously allowed
    check_auth()

    # Adding commands to the main queue()
    logger.debug("[WS_Arbiter] command =  %s" % command)
    ext = ExternalCommand(command)
    app.from_q.put(ext)
Developer: geektophe, Project: mod-ws-arbiter, Lines of code: 33, Source file: module.py

Example 15: linkify_hg_by_realms

    def linkify_hg_by_realms(self, realms):
        # Now we explode the realm value if we've got one
        # The group realm must not override a host one (warning?)
        for hg in self:
            if not hasattr(hg, 'realm'):
                continue

            # Maybe the value is void?
            if not hg.realm.strip():
                continue

            r = realms.find_by_name(hg.realm.strip())
            if r is not None:
                hg.realm = r
                logger.debug("[hostgroups] %s is in %s realm", hg.get_name(), r.get_name())
            else:
                err = "the hostgroup %s got an unknown realm '%s'" % (hg.get_name(), hg.realm)
                hg.configuration_errors.append(err)
                hg.realm = None
                continue

            for h in hg:
                if h is None:
                    continue
                if h.realm is None or h.got_default_realm:  # default value not hasattr(h, 'realm'):
                    logger.debug("[hostgroups] apply a realm %s to host %s from a hostgroup rule (%s)",  \
                        hg.realm.get_name(), h.get_name(), hg.get_name())
                    h.realm = hg.realm
                else:
                    if h.realm != hg.realm:
                        logger.warning("[hostgroups] host %s it not in the same realm than it's hostgroup %s",  \
                            h.get_name(), hg.get_name())
Developer: G2fx, Project: shinken, Lines of code: 32, Source file: hostgroup.py


Note: The shinken.log.logger.debug function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the license of the corresponding project. Do not republish without permission.