

Python DBMysql.execute_query Method Code Examples

This article collects typical usage examples of the Python method shinken.db_mysql.DBMysql.execute_query. If you are wondering what exactly DBMysql.execute_query does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of shinken.db_mysql.DBMysql, the class this method belongs to.


The sections below show 6 code examples of the DBMysql.execute_query method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
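
Before looking at the project-level examples, here is a minimal usage sketch assembled from the call patterns that appear below. It is only a sketch: the connection parameters (host, user, password, database) and the queried tables are placeholder values borrowed from the examples, not a recommended configuration, and the exact commit behavior of execute_query depends on the DBMysql implementation shipped with your Shinken version.

# Minimal sketch; assumes a reachable MySQL server and placeholder credentials
from shinken.db_mysql import DBMysql

db = DBMysql(host='localhost', user='root', password='root',
             database='merlin', character_set='utf8')
db.connect_database()

# Write query: run a statement that returns no rows
db.execute_query(u"DELETE FROM service WHERE instance_id = '0'")

# Read query: run a SELECT, then fetch a single row through the same wrapper
db.execute_query(u"SELECT object_id FROM nagios_objects WHERE objecttype_id = '1'")
row = db.fetchone()
object_id = row[0] if row is not None and len(row) >= 1 else 0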

Example 1: TestConfig

# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import execute_query [as alias]
class TestConfig(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def create_db(self):
        self.db = DBMysql(host='localhost', user='root', password='root', database='merlin', character_set='utf8')

    def test_connect_database(self):
        if not DBMysql:
            return
        self.create_db()
        try:
            self.db.connect_database()
        except Exception:  # arg, no database here? sic!
            pass

    def test_execute_query(self):
        if not DBMysql:
            return
        self.create_db()
        try:
            self.db.connect_database()
            q = "DELETE FROM service WHERE instance_id = '0'"
            self.db.execute_query(q)
        except Exception:
            pass
Developer: David-, Project: shinken, Lines of code: 27, Source: test_db_mysql.py

Example 2: Ndodb_Mysql_broker

# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import execute_query [as alias]
class Ndodb_Mysql_broker(BaseModule):
    def __init__(self, conf):
        BaseModule.__init__(self, conf)
        # Mapping for name of data and transform function
        self.mapping = {
            "program_status": {
                "program_start": {"name": "program_start_time", "transform": de_unixify},
                "pid": {"name": "process_id", "transform": None},
                "last_alive": {"name": "status_update_time", "transform": de_unixify},
                "is_running": {"name": "is_currently_running", "transform": None},
            }
        }

        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set

    # Called by Broker so we can do init stuff
    # TODO : add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        print "I connect to NDO database"
        self.db = DBMysql(
            self.host, self.user, self.password, self.database, self.character_set, table_prefix="nagios_"
        )
        self.connect_database()

        # Cache for hosts and services
        # will be flushed when we got a net instance id
        # or something like that
        self.services_cache = {}
        self.hosts_cache = {}

    # Get a brok, parse it, and put it in the database
    # We call functions like manage_ TYPEOFBROK _brok that return us queries
    def manage_brok(self, b):
        # We've got problem with instance_id == 0 so we add 1 every where
        if "instance_id" in b.data:
            b.data["instance_id"] = b.data["instance_id"] + 1
        # print "(Ndo) I search manager:", manager
        queries = BaseModule.manage_brok(self, b)
        if queries is not None:
            for q in queries:
                self.db.execute_query(q)
            return
        # print "(ndodb)I don't manage this brok type", b

    # Create the database connection
    # TODO : finish (begin :) ) error catch and conf parameters...
    def connect_database(self):
        self.db.connect_database()

    def get_host_object_id_by_name(self, host_name):
        # First look in cache.
        if host_name in self.hosts_cache:
            return self.hosts_cache[host_name]

        # Not in cache, not good
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='1'" % host_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            self.hosts_cache[host_name] = row[0]
            return row[0]

    def get_hostgroup_object_id_by_name(self, hostgroup_name):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='3'" % hostgroup_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]

    def get_service_object_id_by_name(self, host_name, service_description):
        # first look in cache
        if (host_name, service_description) in self.services_cache:
            return self.services_cache[(host_name, service_description)]

        # else; not in cache :(
        query = u"SELECT object_id from nagios_objects where name1='%s' and name2='%s' and objecttype_id='2'" % (
            host_name,
            service_description,
        )
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            self.services_cache[(host_name, service_description)] = row[0]
            return row[0]

    def get_servicegroup_object_id_by_name(self, servicegroup_name):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='4'" % servicegroup_name
        self.db.execute_query(query)
        row = self.db.fetchone()
#......... part of the code is omitted here .........
Developer: wAmpIre, Project: shinken, Lines of code: 103, Source: ndodb_mysql_broker.py

Example 3: Ndodb_Mysql_broker

# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import execute_query [as alias]
class Ndodb_Mysql_broker(BaseModule):

    """ This Class is a plugin for the Shinken Broker. It is in charge
    to brok information into the database. For the moment
    only Mysql is supported. This code is __imported__ from Broker.
    The managed_brok function is called by Broker for manage the broks. It calls
    the manage_*_brok functions that create queries, and then run queries.

    """

    def __init__(self, conf):
        BaseModule.__init__(self, conf)
        # Mapping for name of data and transform function
        self.mapping = {
            'program_status': {
                'program_start': {'name': 'program_start_time', 'transform': de_unixify},
                'pid': {'name': 'process_id', 'transform': None},
                'last_alive': {'name': 'status_update_time', 'transform': de_unixify},
                'is_running': {'name': 'is_currently_running', 'transform': None},
                'last_log_rotation': {'name': 'last_log_rotation', 'transform': de_unixify},
                'last_command_check': {'name': 'last_command_check', 'transform': de_unixify}
                },
            }

        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set
        self.port = int(getattr(conf, 'port', '3306'))
        self.prefix = getattr(conf, 'prefix', 'nagios_')

        # Centreon ndo add some fields like long_output
        # that are not in the vanilla ndo
        self.centreon_version = False
        self.synchronize_database_id = int(conf.synchronize_database_id)

    # Called by Broker so we can do init stuff
    # TODO: add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        logger.info("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database,
                          self.character_set, table_prefix=self.prefix,
                          port=self.port)
        self.connect_database()

        # Cache for hosts and services
        # The structure is as follow:
        # First the instance id then the host / (host,service desc)
        # to access the wanted data
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # We need to search for centreon_specific fields, like long_output
        query = u"select TABLE_NAME from information_schema.columns " \
                "where TABLE_SCHEMA='ndo' and " \
                "TABLE_NAME='%sservicestatus' and " \
                "COLUMN_NAME='long_output';" % self.prefix

        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.info("[MySQL/NDO] Using the centreon version")

        # Cache for database id
        # In order not to query the database every time
        self.database_id_cache = {}

        # Mapping service_id in Shinken and in database
        # Because we can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Todo list to manage brok
        self.todo = {}

    # Get a brok, parse it, and put it in the database
    # We call functions like manage_ TYPEOFBROK _brok that return us queries
    def manage_brok(self, b):
        # We need to do some brok mod, so we copy it
        new_b = copy.deepcopy(b)

        # If we synchronize, must look for id change
        if self.synchronize_database_id != 0 and 'instance_id' in new_b.data:
            # If we use database sync, we have to synchronize database id
            # so we wait for the instance name
            brok_id = new_b.data['instance_id']
            converted_instance_id = self.convert_id(brok_id)
            if converted_instance_id is not None:
                new_b.data['instance_id'] = converted_instance_id
                queries = BaseModule.manage_brok(self, new_b)
                if queries is not None:
                    for q in queries:
                        self.db.execute_query(q)

            if converted_instance_id is None:
                if brok_id in self.todo:
#......... part of the code is omitted here .........
Developer: sckevmit, Project: shinken, Lines of code: 103, Source: ndodb_mysql_broker.py

Example 4: Glpidb_broker

# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import execute_query [as alias]

#......... part of the code is omitted here .........
                        print "Call function for", type, prop
                        f = mapping[prop]['transform']
                        val = f(val)
                    name = prop
                    if 'name' in mapping[prop]:
                        name = mapping[prop]['name']
                    to_add.append((name, val))
                    to_del.append(prop)
                else:
                    to_del.append(prop)
            for prop in to_del:
                del new_brok.data[prop]
            for (name, val) in to_add:
                new_brok.data[name] = val
        else:
            print "No preprocess type", brok.type
            print brok.data
        return new_brok



    #Get a brok, parse it, and put it in the database
    #We call functions like manage_ TYPEOFBROK _brok that return us queries
    def manage_brok(self, b):
        type = b.type
        # To update check in glpi_plugin_monitoring_hosts
        manager = 'manage_'+type+'up_brok'
        if hasattr(self, manager):
            new_b = self.preprocess(type, b, 0)
            f = getattr(self, manager)
            queries = f(new_b)
            #Ok, we've got queries, now : run them!
            for q in queries :
                self.db_backend.execute_query(q)
        manager = 'manage_'+type+'_brok'
        if hasattr(self, manager):
            new_b = self.preprocess(type, b, '1')
            if 'host_name' in new_b.data:
                if 'plugin_monitoring_services_id' not in new_b.data:
                    return
            f = getattr(self, manager)
            queries = f(new_b)
            #Ok, we've got queries, now : run them!
            for q in queries :
                self.db_backend.execute_query(q)
            return


    #Host result
    #def manage_host_check_result_brok(self, b):
        #logger.info("GLPI : data in DB %s " % b)
        #b.data['date'] = time.strftime('%Y-%m-%d %H:%M:%S')
        #query = self.db_backend.create_insert_query('glpi_plugin_monitoring_serviceevents', b.data)
        #return [query]


    #Host result
    #def manage_host_check_resultup_brok(self, b):
        #logger.info("GLPI : data in DB %s " % b)
        #new_data = copy.deepcopy(b.data)
        #new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
        #new_data['id'] = b.data['plugin_monitoring_services_id']
        #del new_data['plugin_monitoring_services_id']
        #del new_data['perf_data']
        #del new_data['output']
        #del new_data['latency']
Developer: Morkxy, Project: shinken, Lines of code: 70, Source: glpidb_broker.py

Example 5: Ndodb_Mysql_broker

# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import execute_query [as alias]
class Ndodb_Mysql_broker(BaseModule):
    def __init__(self, conf):
        BaseModule.__init__(self, conf)
        # Mapping for name of data and transform function
        self.mapping = {
            'program_status' : {
                                'program_start' : {'name' : 'program_start_time', 'transform' : de_unixify},
                                'pid' : {'name' : 'process_id', 'transform' : None},
                                'last_alive' : {'name' : 'status_update_time', 'transform' : de_unixify},
                                'is_running' : {'name' : 'is_currently_running', 'transform' : None}
                                },
            }
        
        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set
        self.port = int(getattr(conf, 'port', '3306'))
        
        # Centreon ndo add some fields like long_output that are not in the vanilla ndo
        self.centreon_version = False
        self.synchronise_database_id = int(conf.synchronise_database_id)


    # Called by Broker so we can do init stuff
    # TODO : add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        logger.log("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database, 
                          self.character_set, table_prefix='nagios_', port=self.port)
        self.connect_database()

        # Cache for hosts and services
        # The structure is as follow:
        # First the instance id then the host / (host,service desc) to access the wanted data
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # We need to search for centreon_specific fields, like long_output
        query = u"select TABLE_NAME from information_schema.columns where TABLE_SCHEMA='ndo' and TABLE_NAME='nagios_servicestatus' and COLUMN_NAME='long_output';"
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.log("[MySQL/NDO] Using the centreon version")

        # Cache for database id
        # In order not to query the database every time
        self.database_id_cache = {}

        # Mapping service_id in Shinken and in database
        # Because we can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Todo list to manage brok
        self.todo = []


    # Get a brok, parse it, and put it in the database
    # We call functions like manage_ TYPEOFBROK _brok that return us queries
    def manage_brok(self, b):
        # We need to do some brok mod, so we copy it
        new_b = copy.deepcopy(b)
    
        # If we synchronize, we must look for an id change
        if self.synchronise_database_id != 0 and 'instance_id' in new_b.data:
            # If we use database sync, we have to synchronise database id
            # so we wait for the instance name
            if 'instance_name' not in new_b.data :
                self.todo.append(new_b)
                return  
                  
            # We convert the id to write properly in the base using the 
            # instance_name to reuse the instance_id in the base.
            else:
                new_b.data['instance_id'] = self.convert_id(new_b.data['instance_id'], new_b.data['instance_name'])
                self.todo.append(new_b)
                for brok in self.todo :
                    # We have to put the good instance ID to all brok waiting
                    # in the list then execute the query
                    brok.data['instance_id'] = new_b.data['instance_id']
                    queries = BaseModule.manage_brok(self, brok)
                    if queries is not None:
                        for q in queries :
                            self.db.execute_query(q)
                # We've finished to manage the todo, so we empty it
                self.todo = []
                return

        # Executed if we don't synchronise or there is no instance_id
        queries = BaseModule.manage_brok(self,new_b)
        
        if queries is not None:
            for q in queries :
                self.db.execute_query(q)
            return
#......... part of the code is omitted here .........
Developer: bs-github, Project: shinken, Lines of code: 103, Source: ndodb_mysql_broker.py

Example 6: Glpidb_broker

# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import execute_query [as alias]

#......... part of the code is omitted here .........
            if host_name in self.hosts_cache and self.hosts_cache[host_name]['items_id'] is not None:
                if service_id in self.services_cache and self.services_cache[service_id]['items_id'] is not None:
                    start = time.time()
                    self.record_service_check_result(b)
                    logger.debug("[glpidb] service check result: %s, %d seconds", service_id, time.time() - start)

        return

    ## Host result
    def record_host_check_result(self, b):
        host_name = b.data['host_name']
        host_cache = self.hosts_cache[host_name]
        logger.debug("[glpidb] record host check result: %s: %s", host_name, b.data)

        # Escape SQL fields ...
        # b.data['output'] = MySQLdb.escape_string(b.data['output'])
        # b.data['long_output'] = MySQLdb.escape_string(b.data['long_output'])
        # b.data['perf_data'] = MySQLdb.escape_string(b.data['perf_data'])

        if self.update_hosts:
            data = {}
            data['event'] = ("%s \n %s" % (b.data['output'], b.data['long_output'])) if (len(b.data['long_output']) > 0) else b.data['output']
            data['state'] = b.data['state']
            data['state_type'] = b.data['state_type']
            data['last_check'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
            data['perf_data'] = b.data['perf_data']
            data['latency'] = b.data['latency']
            data['execution_time'] = b.data['execution_time']
            data['is_acknowledged'] = '1' if b.data['problem_has_been_acknowledged'] else '0'

            where_clause = {'items_id': host_cache['items_id'], 'itemtype': host_cache['itemtype']}
            query = self.db_backend.create_update_query('glpi_plugin_monitoring_hosts', data, where_clause)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

        # Update acknowledge table if host becomes UP
        #if self.update_acknowledges and b.data['state_id'] == 0 and b.data['last_state_id'] != 0:
        # Update acknowledge table if host is UP
        if self.update_acknowledges and b.data['state_id'] == 0:
            data = {}
            data['end_time'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
            data['expired'] = '1'

            where_clause = {'items_id': host_cache['items_id'], 'itemtype': "PluginMonitoringHost"}
            query = self.db_backend.create_update_query('glpi_plugin_monitoring_acknowledges', data, where_clause)
            logger.debug("[glpidb] acknowledge query: %s", query)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

    ## Service result
    def record_service_check_result(self, b):
        host_name = b.data['host_name']
        service_description = b.data['service_description']
        service_id = host_name+"/"+service_description
        service_cache = self.services_cache[service_id]
        logger.debug("[glpidb] service check result: %s: %s", service_id, b.data)

        # Escape SQL fields ...
        # b.data['output'] = MySQLdb.escape_string(b.data['output'])
        # b.data['long_output'] = MySQLdb.escape_string(b.data['long_output'])
        # b.data['perf_data'] = MySQLdb.escape_string(b.data['perf_data'])
Developer: shinken-debian-modules, Project: shinken-mod-glpidb, Lines of code: 69, Source: module.py


Note: The shinken.db_mysql.DBMysql.execute_query method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.