This article collects typical code examples of the Python method shinken.db_mysql.DBMysql.create_insert_query. If you are wondering what exactly DBMysql.create_insert_query does, how it is used, and what real calls look like, the selected examples below may help. You can also look further into usage examples of the enclosing class, shinken.db_mysql.DBMysql.
Four code examples of DBMysql.create_insert_query are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
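Before the examples, a minimal usage sketch may help frame what the method does: create_insert_query(table, data) only builds an INSERT statement string from a table name and a dict of column/value pairs; executing it is a separate step. The constructor arguments and the connect_database call below are assumptions based on common shinken setups and may differ between versions, so treat this as an illustrative sketch rather than a reference.

from shinken.db_mysql import DBMysql

# Assumed constructor arguments; adjust to your shinken version and MySQL setup.
db = DBMysql(host='localhost', user='shinken', password='shinken',
             database='ndo', character_set='utf8', table_prefix='nagios_')
db.connect_database()

# Build (but do not run) an INSERT for the nagios_instances table.
data = {'instance_name': 'default', 'instance_description': 'default', 'instance_id': 1}
query = db.create_insert_query('instances', data)

# Running the statement is a separate call, as the broker examples below show.
db.execute_query(query)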
Example 1: Ndodb_Mysql_broker
# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import create_insert_query [as alias]
# ... (part of the code is omitted here) ...
"contactgroup_members",
"objects",
"hoststatus",
"servicestatus",
"instances",
"servicegroup_members",
]
res = []
for table in tables:
q = "DELETE FROM %s WHERE instance_id = '%s' " % ("nagios_" + table, instance_id)
res.append(q)
# We also clean cache, because we are not sure about this data now
print "[MySQL/NDO] Flushing caches"
self.services_cache = {}
self.hosts_cache = {}
return res
# Program status is .. status of program? :)
# Like pid, daemon mode, last activity, etc
# We aleady clean database, so insert
# TODO : fill nagios_instances
def manage_program_status_brok(self, b):
new_b = copy.deepcopy(b)
# Must delete me first
query_delete_instance = u"DELETE FROM %s WHERE instance_name = '%s' " % (
"nagios_instances",
b.data["instance_name"],
)
query_instance = self.db.create_insert_query(
"instances",
{
"instance_name": new_b.data["instance_name"],
"instance_description": new_b.data["instance_name"],
"instance_id": new_b.data["instance_id"],
},
)
to_del = ["instance_name", "command_file"]
to_add = []
mapping = self.mapping["program_status"]
for prop in new_b.data:
# ex : 'name' : 'program_start_time', 'transform'
if prop in mapping:
# print "Got a prop to change", prop
val = new_b.data[prop]
if mapping[prop]["transform"] is not None:
f = mapping[prop]["transform"]
val = f(val)
new_name = mapping[prop]["name"]
to_add.append((new_name, val))
to_del.append(prop)
for prop in to_del:
del new_b.data[prop]
for (name, val) in to_add:
new_b.data[name] = val
query = self.db.create_insert_query("programstatus", new_b.data)
return [query_delete_instance, query_instance, query]
# TODO : fill nagios_instances
def manage_update_program_status_brok(self, b):
new_b = copy.deepcopy(b)
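Note that manage_program_status_brok above only builds SQL strings and returns them in a list; nothing reaches MySQL until the broker loops over the list and calls execute_query, as Example 3 does. A simplified sketch of that dispatch step (not the exact shinken dispatcher; the handler lookup and the brok object are reduced to the essentials):

def manage_brok(self, brok):
    # Look up a handler such as manage_program_status_brok for this brok type.
    handler = getattr(self, 'manage_%s_brok' % brok.type, None)
    if handler is None:
        return
    # The handler returns a list of SQL strings (hand-written DELETEs plus
    # statements built with create_insert_query); run them one by one.
    for query in handler(brok):
        self.db.execute_query(query)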
Example 2: Ndodb_Mysql_broker
# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import create_insert_query [as alias]
# ... (part of the code is omitted here) ...
# if not, BAD THINGS MAY HAPPEN :)
def manage_clean_all_my_instance_id_brok(self, b):
    instance_id = b.data['instance_id']
    tables = ['commands', 'contacts', 'contactgroups', 'hosts',
              'hostescalations', 'hostgroups', 'notifications',
              'services', 'serviceescalations', 'programstatus',
              'servicegroups', 'timeperiods', 'hostgroup_members',
              'contactgroup_members', 'objects', 'hoststatus',
              'servicestatus', 'instances', 'servicegroup_members']
    res = []
    for table in tables:
        q = "DELETE FROM %s WHERE instance_id = '%s' " % ('nagios_' + table, instance_id)
        res.append(q)
    # We also clean the cache, because we are not sure about this data now
    logger.log("[MySQL/NDO] Flushing caches (clean from instance %d)" % instance_id)
    self.services_cache_sync = {}
    self.hosts_cache_sync = {}
    return res

# Program status is... the status of the program :)
# Like pid, daemon mode, last activity, etc.
# We already cleaned the database, so insert
# TODO: fill nagios_instances
def manage_program_status_brok(self, b):
    new_b = copy.deepcopy(b)
    # Must delete me first
    query_delete_instance = u"DELETE FROM %s WHERE instance_name = '%s' " % ('nagios_instances', b.data['instance_name'])
    query_instance = self.db.create_insert_query('instances', {'instance_name': new_b.data['instance_name'],
                                                               'instance_description': new_b.data['instance_name'],
                                                               'instance_id': new_b.data['instance_id']})
    to_del = ['instance_name', 'command_file', 'check_external_commands', 'check_service_freshness',
              'check_host_freshness']
    to_add = []
    mapping = self.mapping['program_status']
    for prop in new_b.data:
        # ex: 'name': 'program_start_time', 'transform'
        if prop in mapping:
            # print "Got a prop to change", prop
            val = new_b.data[prop]
            if mapping[prop]['transform'] is not None:
                f = mapping[prop]['transform']
                val = f(val)
            new_name = mapping[prop]['name']
            to_add.append((new_name, val))
            to_del.append(prop)
    for prop in to_del:
        del new_b.data[prop]
    for (name, val) in to_add:
        new_b.data[name] = val
    query = self.db.create_insert_query('programstatus', new_b.data)
    return [query_delete_instance, query_instance, query]

# TODO: fill nagios_instances
def manage_update_program_status_brok(self, b):
    new_b = copy.deepcopy(b)
    to_del = ['instance_name', 'command_file', 'check_external_commands', 'check_service_freshness',
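Both NDO examples treat create_insert_query as a pure string builder: pass the bare table name (the broker's DBMysql is configured with the 'nagios_' prefix) and a dict of column/value pairs, and get back one INSERT statement. The following is a hypothetical, simplified re-implementation, shown only to illustrate the shape of the generated SQL; the real shinken method also handles value escaping and uses the prefix configured on the DB object:

def create_insert_query(table, data, table_prefix='nagios_'):
    # Hypothetical simplification of the query builder used above.
    columns = ', '.join(data.keys())
    values = ', '.join("'%s'" % value for value in data.values())
    return "INSERT INTO %s%s (%s) VALUES (%s)" % (table_prefix, table, columns, values)

# Roughly what the broker gets back for the instances insert (column order follows the dict):
print(create_insert_query('instances', {'instance_name': 'default', 'instance_id': 1}))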
Example 3: Glpidb_broker
# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import create_insert_query [as alias]
# ... (part of the code is omitted here) ...
            new_brok.data[name] = val
    else:
        print "No preprocess type", brok.type
        print brok.data
    return new_brok

# Get a brok, parse it, and put it in the database
# We call functions like manage_TYPEOFBROK_brok that return us queries
def manage_brok(self, b):
    type = b.type
    # To update check in glpi_plugin_monitoring_hosts
    manager = 'manage_' + type + 'up_brok'
    if hasattr(self, manager):
        new_b = self.preprocess(type, b, 0)
        f = getattr(self, manager)
        queries = f(new_b)
        # Ok, we've got queries, now: run them!
        for q in queries:
            self.db_backend.execute_query(q)
    manager = 'manage_' + type + '_brok'
    if hasattr(self, manager):
        new_b = self.preprocess(type, b, '1')
        if 'host_name' in new_b.data:
            if 'plugin_monitoring_services_id' not in new_b.data:
                return
        f = getattr(self, manager)
        queries = f(new_b)
        # Ok, we've got queries, now: run them!
        for q in queries:
            self.db_backend.execute_query(q)
    return

# Host result
# def manage_host_check_result_brok(self, b):
#     logger.info("GLPI : data in DB %s " % b)
#     b.data['date'] = time.strftime('%Y-%m-%d %H:%M:%S')
#     query = self.db_backend.create_insert_query('glpi_plugin_monitoring_serviceevents', b.data)
#     return [query]

# Host result
# def manage_host_check_resultup_brok(self, b):
#     logger.info("GLPI : data in DB %s " % b)
#     new_data = copy.deepcopy(b.data)
#     new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
#     new_data['id'] = b.data['plugin_monitoring_services_id']
#     del new_data['plugin_monitoring_services_id']
#     del new_data['perf_data']
#     del new_data['output']
#     del new_data['latency']
#     del new_data['execution_time']
#     where_clause = {'id': new_data['id']}
#     query = self.db_backend.create_update_query('glpi_plugin_monitoring_services', new_data, where_clause)
#     return [query]

# Service result
def manage_service_check_result_brok(self, b):
    # logger.info("GLPI : data in DB %s " % b)
    try:
        b.data['plugin_monitoring_servicescatalogs_id']
        return ''
    except:
        b.data['date'] = time.strftime('%Y-%m-%d %H:%M:%S')
        # print "Add event service : ", b.data
        query = self.db_backend.create_insert_query('glpi_plugin_monitoring_serviceevents', b.data)
        return [query]
    return ''

# Service result
def manage_service_check_resultup_brok(self, b):
    """If a host is defined locally (in shinken) and not in GLPI,
    we must not edit GLPI data!
    """
    if 'plugin_monitoring_servicescatalogs_id' not in b.data and \
            'plugin_monitoring_services_id' not in b.data:
        return list()
    logger.info("GLPI : data in DB %s " % b.data)
    new_data = copy.deepcopy(b.data)
    new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
    del new_data['perf_data']
    del new_data['output']
    del new_data['latency']
    del new_data['execution_time']
    try:
        new_data['id'] = b.data['plugin_monitoring_servicescatalogs_id']
        del new_data['plugin_monitoring_servicescatalogs_id']
        table = 'glpi_plugin_monitoring_servicescatalogs'
    except:
        new_data['id'] = b.data['plugin_monitoring_services_id']
        del new_data['plugin_monitoring_services_id']
        table = 'glpi_plugin_monitoring_services'
    where_clause = {'id': new_data['id']}
    # print "Update service : ", new_data
    query = self.db_backend.create_update_query(table, new_data, where_clause)
    return [query]
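Example 3 shows the typical split in these GLPI brokers: create_insert_query appends a row to an append-only event table, while create_update_query takes the same kind of data dict plus a where_clause dict and rewrites the current-state row. The hypothetical helper below condenses that pattern; the method name, the check dict, and the chosen columns are placeholders for illustration, and self.db_backend stands for the broker's DBMysql instance:

import time

def record_service_event(self, service_id, check):
    # Append-only log table: one new row per check result.
    event = {'plugin_monitoring_services_id': service_id,
             'date': time.strftime('%Y-%m-%d %H:%M:%S'),
             'state': check['state'],
             'output': check['output']}
    insert_q = self.db_backend.create_insert_query('glpi_plugin_monitoring_serviceevents', event)

    # Current-state table: update the single row matched by the where clause.
    state = {'state': check['state'], 'last_check': event['date']}
    update_q = self.db_backend.create_update_query('glpi_plugin_monitoring_services', state, {'id': service_id})

    for q in (insert_q, update_q):
        self.db_backend.execute_query(q)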
Example 4: Glpidb_broker
# Required import: from shinken.db_mysql import DBMysql [as alias]
# Or: from shinken.db_mysql.DBMysql import create_insert_query [as alias]
# ... (part of the code is omitted here) ...
    where_clause = {'items_id': host_cache['items_id'], 'itemtype': "PluginMonitoringHost"}
    query = self.db_backend.create_update_query('glpi_plugin_monitoring_acknowledges', data, where_clause)
    logger.debug("[glpidb] acknowledge query: %s", query)
    try:
        self.db_backend.execute_query(query)
    except Exception as exp:
        logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

## Service result
def record_service_check_result(self, b):
    host_name = b.data['host_name']
    service_description = b.data['service_description']
    service_id = host_name + "/" + service_description
    service_cache = self.services_cache[service_id]
    logger.debug("[glpidb] service check result: %s: %s", service_id, b.data)

    # Escape SQL fields ...
    # b.data['output'] = MySQLdb.escape_string(b.data['output'])
    # b.data['long_output'] = MySQLdb.escape_string(b.data['long_output'])
    # b.data['perf_data'] = MySQLdb.escape_string(b.data['perf_data'])

    # Insert into serviceevents log table
    if self.update_services_events:
        data = {}
        data['plugin_monitoring_services_id'] = service_cache['items_id']
        data['date'] = datetime.datetime.fromtimestamp(int(b.data['last_chk'])).strftime('%Y-%m-%d %H:%M:%S')
        data['event'] = ("%s \n %s" % (b.data['output'], b.data['long_output'])) if (len(b.data['long_output']) > 0) else b.data['output']
        data['state'] = b.data['state']
        data['state_type'] = b.data['state_type']
        data['perf_data'] = b.data['perf_data']
        data['latency'] = b.data['latency']
        data['execution_time'] = b.data['execution_time']
        query = self.db_backend.create_insert_query('glpi_plugin_monitoring_serviceevents', data)
        try:
            self.db_backend.execute_query(query)
        except Exception as exp:
            logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

    # Update service state table
    if self.update_services:
        data = {}
        data['event'] = ("%s \n %s" % (b.data['output'], b.data['long_output'])) if (len(b.data['long_output']) > 0) else b.data['output']
        data['state'] = b.data['state']
        data['state_type'] = b.data['state_type']
        data['last_check'] = datetime.datetime.fromtimestamp(int(b.data['last_chk'])).strftime('%Y-%m-%d %H:%M:%S')
        data['is_acknowledged'] = '1' if b.data['problem_has_been_acknowledged'] else '0'
        where_clause = {'id': service_cache['items_id']}
        table = 'glpi_plugin_monitoring_services'
        if service_cache['itemtype'] == 'ServiceCatalog':
            table = 'glpi_plugin_monitoring_servicescatalogs'
        query = self.db_backend.create_update_query(table, data, where_clause)
        try:
            self.db_backend.execute_query(query)
        except Exception as exp:
            logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

    # Update acknowledge table if service becomes OK
    # if self.update_acknowledges and b.data['state_id'] == 0 and b.data['last_state_id'] != 0:
    # Update acknowledge table if service is OK
    if self.update_acknowledges and b.data['state_id'] == 0:
        data = {}
        data['end_time'] = datetime.datetime.fromtimestamp(int(b.data['last_chk'])).strftime('%Y-%m-%d %H:%M:%S')
        data['expired'] = '1'