当前位置: 首页>>代码示例>>Python>>正文


Python DBMysql.fetchone方法代码示例

本文整理汇总了Python中shinken.db_mysql.DBMysql.fetchone方法的典型用法代码示例。如果您正苦于以下问题:Python DBMysql.fetchone方法的具体用法?Python DBMysql.fetchone怎么用?Python DBMysql.fetchone使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在shinken.db_mysql.DBMysql的用法示例。


在下文中一共展示了DBMysql.fetchone方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: Ndodb_Mysql_broker

# 需要导入模块: from shinken.db_mysql import DBMysql [as 别名]
# 或者: from shinken.db_mysql.DBMysql import fetchone [as 别名]
class Ndodb_Mysql_broker(BaseModule):
    def __init__(self, conf):
        """Store the MySQL connection settings and the brok-field mapping."""
        BaseModule.__init__(self, conf)

        # For each brok field: the NDO column it feeds ('name') and an
        # optional transform applied to the value before insertion.
        program_status_map = {
            "program_start": {"name": "program_start_time", "transform": de_unixify},
            "pid": {"name": "process_id", "transform": None},
            "last_alive": {"name": "status_update_time", "transform": de_unixify},
            "is_running": {"name": "is_currently_running", "transform": None},
        }
        self.mapping = {"program_status": program_status_map}

        # MySQL connection parameters, taken verbatim from the module conf.
        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set

    # Called by Broker so we can do init stuff
    # TODO : add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        """Open the NDO database connection and reset the object-id caches."""
        # Fix: use the print() function form instead of the Python-2-only
        # print statement; output is identical under Python 2 and it also
        # parses under Python 3.
        print("I connect to NDO database")
        self.db = DBMysql(
            self.host, self.user, self.password, self.database, self.character_set, table_prefix="nagios_"
        )
        self.connect_database()

        # Caches mapping host_name / (host_name, service_description) to
        # their nagios_objects object_id; will be flushed when we get a
        # new instance id or something like that.
        self.services_cache = {}
        self.hosts_cache = {}

    # Receive a brok, let BaseModule dispatch it to the matching
    # manage_*_brok handler, then run every SQL query that handler built.
    def manage_brok(self, b):
        # NDO cannot cope with instance_id == 0, so shift all ids by one.
        if "instance_id" in b.data:
            b.data["instance_id"] += 1

        queries = BaseModule.manage_brok(self, b)
        if queries is None:
            # No manage_*_brok method handles this brok type.
            return
        for query in queries:
            self.db.execute_query(query)

    # Create the database connection
    # TODO : finish (begin :) ) error catch and conf parameters...
    def connect_database(self):
        """Open the connection on the DBMysql wrapper created by init()."""
        self.db.connect_database()

    def get_host_object_id_by_name(self, host_name):
        """Return the nagios_objects id of a host, or 0 when unknown."""
        # Serve from cache when possible to spare a database round-trip.
        if host_name in self.hosts_cache:
            return self.hosts_cache[host_name]

        # NOTE(review): host_name is interpolated straight into the SQL
        # string; a quote in the name would break the query.
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='1'" % host_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        if not row:
            # Unknown host: do not cache, so a later registration is seen.
            return 0
        self.hosts_cache[host_name] = row[0]
        return row[0]

    def get_hostgroup_object_id_by_name(self, hostgroup_name):
        """Return the nagios_objects id of a hostgroup, or 0 when unknown."""
        # NOTE(review): hostgroup_name is interpolated straight into the
        # SQL string; a quote in the name would break the query.
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='3'" % hostgroup_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        # No cache here, unlike the host/service lookups.
        return row[0] if row else 0

    def get_service_object_id_by_name(self, host_name, service_description):
        """Return the nagios_objects id of a service, or 0 when unknown."""
        cache_key = (host_name, service_description)
        # Serve from cache when possible to spare a database round-trip.
        if cache_key in self.services_cache:
            return self.services_cache[cache_key]

        # NOTE(review): both names are interpolated straight into the SQL
        # string; a quote in either would break the query.
        query = u"SELECT object_id from nagios_objects where name1='%s' and name2='%s' and objecttype_id='2'" % (
            host_name,
            service_description,
        )
        self.db.execute_query(query)
        row = self.db.fetchone()
        if not row:
            # Unknown service: do not cache, so a later registration is seen.
            return 0
        self.services_cache[cache_key] = row[0]
        return row[0]

    def get_servicegroup_object_id_by_name(self, servicegroup_name):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='4'" % servicegroup_name
        self.db.execute_query(query)
        row = self.db.fetchone()
#.........这里部分代码省略.........
开发者ID:wAmpIre,项目名称:shinken,代码行数:103,代码来源:ndodb_mysql_broker.py

示例2: Ndodb_Mysql_broker

# 需要导入模块: from shinken.db_mysql import DBMysql [as 别名]
# 或者: from shinken.db_mysql.DBMysql import fetchone [as 别名]
class Ndodb_Mysql_broker(BaseModule):
    def __init__(self, conf):
        """Keep the connection settings, the sync option and the brok mapping."""
        BaseModule.__init__(self, conf)

        # For each brok field: the NDO column it feeds ('name') and an
        # optional transform applied to the value before insertion.
        program_status_map = {
            'program_start': {'name': 'program_start_time', 'transform': de_unixify},
            'pid': {'name': 'process_id', 'transform': None},
            'last_alive': {'name': 'status_update_time', 'transform': de_unixify},
            'is_running': {'name': 'is_currently_running', 'transform': None},
        }
        self.mapping = {'program_status': program_status_map}

        # MySQL connection parameters from the module configuration.
        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set
        self.port = int(getattr(conf, 'port', '3306'))

        # Centreon's ndo schema adds fields (like long_output) that the
        # vanilla ndo schema lacks; the flavour is detected later in init().
        self.centreon_version = False
        self.synchronise_database_id = int(conf.synchronise_database_id)


    # Called by Broker so we can do init stuff
    # TODO : add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        """Connect to the NDO database and prepare the runtime caches."""
        logger.log("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database,
                          self.character_set, table_prefix='nagios_', port=self.port)
        self.connect_database()

        # Caches for hosts and services.
        # The structure is as follow:
        # first the instance id, then host / (host, service desc) to
        # access the wanted data.
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # Detect the Centreon flavour of ndo by probing for the
        # centreon-specific long_output column.
        # Fix: probe the configured database instead of a hard-coded
        # 'ndo' schema, so detection also works when the NDO database
        # has another name (self.database comes from the module conf).
        query = u"select TABLE_NAME from information_schema.columns where TABLE_SCHEMA='%s' and TABLE_NAME='nagios_servicestatus' and COLUMN_NAME='long_output';" % self.database
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.log("[MySQL/NDO] Using the centreon version")

        # Cache for database id, in order not to query the database
        # every time.
        self.database_id_cache = {}

        # Mapping service_id in Shinken and in database, because we
        # can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Broks waiting for their instance_name before being managed.
        self.todo = []


    # Get a brok, parse it, and put in in database
    # We call functions like manage_ TYPEOFBROK _brok that return us queries
    def manage_brok(self, b):
        # We need to do some brok mod, so we copy it
        new_b = copy.deepcopy(b)
    
        # If we syncronize, must look for id change
        if self.synchronise_database_id != '0' and 'instance_id' in new_b.data:
            # If we use database sync, we have to synchronise database id
            # so we wait for the instance name
            if 'instance_name' not in new_b.data :
                self.todo.append(new_b)
                return  
                  
            # We convert the id to write properly in the base using the 
            # instance_name to reuse the instance_id in the base.
            else:
                new_b.data['instance_id'] = self.convert_id(new_b.data['instance_id'], new_b.data['instance_name'])
                self.todo.append(new_b)
                for brok in self.todo :
                    # We have to put the good instance ID to all brok waiting
                    # in the list then execute the query
                    brok.data['instance_id'] = new_b.data['instance_id']
                    queries = BaseModule.manage_brok(self, brok)
                    if queries is not None:
                        for q in queries :
                            self.db.execute_query(q)
                # We've finished to manage the todo, so we empty it
                self.todo = []
                return

        # Executed if we don't synchronise or there is no instance_id
        queries = BaseModule.manage_brok(self,new_b)
        
        if queries is not None:
            for q in queries :
                self.db.execute_query(q)
            return
#.........这里部分代码省略.........
开发者ID:bs-github,项目名称:shinken,代码行数:103,代码来源:ndodb_mysql_broker.py

示例3: Ndodb_Mysql_broker

# 需要导入模块: from shinken.db_mysql import DBMysql [as 别名]
# 或者: from shinken.db_mysql.DBMysql import fetchone [as 别名]
class Ndodb_Mysql_broker(BaseModule):

    """ This Class is a plugin for the Shinken Broker. It is in charge
    to brok information into the database. For the moment
    only Mysql is supported. This code is __imported__ from Broker.
    The managed_brok function is called by Broker for manage the broks. It calls
    the manage_*_brok functions that create queries, and then run queries.

    """

    def __init__(self, conf):
        """Record connection settings, table prefix and the brok mapping."""
        BaseModule.__init__(self, conf)

        # For each brok field: the NDO column it feeds ('name') and an
        # optional transform applied to the value before insertion.
        program_status_map = {
            'program_start': {'name': 'program_start_time', 'transform': de_unixify},
            'pid': {'name': 'process_id', 'transform': None},
            'last_alive': {'name': 'status_update_time', 'transform': de_unixify},
            'is_running': {'name': 'is_currently_running', 'transform': None},
            'last_log_rotation': {'name': 'last_log_rotation', 'transform': de_unixify},
            'last_command_check': {'name': 'last_command_check', 'transform': de_unixify},
        }
        self.mapping = {'program_status': program_status_map}

        # MySQL connection parameters from the module configuration.
        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set
        self.port = int(getattr(conf, 'port', '3306'))
        self.prefix = getattr(conf, 'prefix', 'nagios_')

        # Centreon's ndo schema adds fields (like long_output) that the
        # vanilla ndo schema lacks; the flavour is detected later in init().
        self.centreon_version = False
        self.synchronize_database_id = int(conf.synchronize_database_id)

    # Called by Broker so we can do init stuff
    # TODO: add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        """Connect to the NDO database and prepare the runtime caches."""
        logger.info("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database,
                          self.character_set, table_prefix=self.prefix,
                          port=self.port)
        self.connect_database()

        # Caches for hosts and services.
        # The structure is as follow:
        # first the instance id, then host / (host, service desc)
        # to access the wanted data.
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # Detect the Centreon flavour of ndo by probing for the
        # centreon-specific long_output column.
        # Fix: probe the configured database instead of a hard-coded
        # 'ndo' schema (the table prefix was already parameterized but
        # the schema name was not), so detection also works when the
        # NDO database has another name.
        query = u"select TABLE_NAME from information_schema.columns " \
                "where TABLE_SCHEMA='%s' and " \
                "TABLE_NAME='%sservicestatus' and " \
                "COLUMN_NAME='long_output';" % (self.database, self.prefix)

        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.info("[MySQL/NDO] Using the centreon version")

        # Cache for database id, in order not to query the database
        # every time.
        self.database_id_cache = {}

        # Mapping service_id in Shinken and in database, because we
        # can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Broks waiting for their instance id conversion before being
        # managed, keyed by brok instance id.
        self.todo = {}

    # Get a brok, parse it, and put in in database
    # We call functions like manage_ TYPEOFBROK _brok that return us queries
    def manage_brok(self, b):
        # We need to do some brok mod, so we copy it
        new_b = copy.deepcopy(b)

        # If we synchronize, must look for id change
        if self.synchronize_database_id != 0 and 'instance_id' in new_b.data:
            # If we use database sync, we have to synchronize database id
            # so we wait for the instance name
            brok_id = new_b.data['instance_id']
            converted_instance_id = self.convert_id(brok_id)
            if converted_instance_id is not None:
                new_b.data['instance_id'] = converted_instance_id
                queries = BaseModule.manage_brok(self, new_b)
                if queries is not None:
                    for q in queries:
                        self.db.execute_query(q)

            if converted_instance_id is None:
                if brok_id in self.todo:
#.........这里部分代码省略.........
开发者ID:sckevmit,项目名称:shinken,代码行数:103,代码来源:ndodb_mysql_broker.py

示例4: Glpidb_broker

# 需要导入模块: from shinken.db_mysql import DBMysql [as 别名]
# 或者: from shinken.db_mysql.DBMysql import fetchone [as 别名]

#.........这里部分代码省略.........
                table = 'glpi_plugin_monitoring_servicescatalogs'
            query = self.db_backend.create_update_query(table, data, where_clause)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

        # Update acknowledge table if service becomes OK
        #if self.update_acknowledges and b.data['state_id'] == 0 and b.data['last_state_id'] != 0:
        # Update acknowledge table if service is OK
        if self.update_acknowledges and b.data['state_id'] == 0:
            data = {}
            data['end_time'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
            data['expired'] = '1'

            where_clause = {'items_id': service_cache['items_id'], 'itemtype': "PluginMonitoringService"}
            query = self.db_backend.create_update_query('glpi_plugin_monitoring_acknowledges', data, where_clause)
            logger.debug("[glpidb] acknowledge query: %s", query)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

    ## Update Shinken all hosts/services state
    def record_shinken_state(self, hostname, service, b):
        # Insert/update in shinken state table
        logger.debug("[glpidb] record shinken state: %s/%s: %s", hostname, service, b.data)

        # Test if record still exists
        exists = None
        query = "SELECT COUNT(*) AS nbRecords FROM `glpi_plugin_monitoring_shinkenstates` WHERE hostname='%s' AND service='%s';" % (hostname, service)
        try:
            self.db_backend.execute_query(query)
            res = self.db_backend.fetchone()
            exists = True if res[0] > 0 else False
        except Exception as exp:
            # No more table update because table does not exist or is bad formed ...
            self.update_shinken_state = False
            logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

        # Escape SQL fields ...
        # b.data['output'] = MySQLdb.escape_string(b.data['output'])
        # b.data['long_output'] = MySQLdb.escape_string(b.data['long_output'])
        # b.data['perf_data'] = MySQLdb.escape_string(b.data['perf_data'])

        data = {}
        data['hostname'] = hostname
        data['service'] = service
        data['state'] = b.data['state_id']
        data['state_type'] = b.data['state_type']
        data['last_output'] = ("%s \n %s", b.data['output'], b.data['long_output']) if (len(b.data['long_output']) > 0) else b.data['output']
        data['last_check'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
        data['last_perfdata'] = b.data['perf_data']
        data['is_ack'] = '1' if b.data['problem_has_been_acknowledged'] else '0'

        if exists:
            where_clause = {'hostname': hostname, 'service': service}
            query = self.db_backend.create_update_query('glpi_plugin_monitoring_shinkenstates', data, where_clause)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)
        else:
            query = self.db_backend.create_insert_query('glpi_plugin_monitoring_shinkenstates', data)
            try:
                self.db_backend.execute_query(query)
开发者ID:shinken-debian-modules,项目名称:shinken-mod-glpidb,代码行数:70,代码来源:module.py


注:本文中的shinken.db_mysql.DBMysql.fetchone方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。