This article collects typical usage examples of the Python function shinken.log.logger.info. If you have been wondering how exactly the info function is used, or what it can be used for, the hand-picked code examples below may help.
The following presents 15 code examples of the info function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: manage_service_check_resultup_brok
def manage_service_check_resultup_brok(self, b):
"""If a host is defined locally (in shinken) and not in GLPI,
we must not edit GLPI datas !
"""
if 'plugin_monitoring_servicescatalogs_id' not in b.data and\
'plugin_monitoring_services_id' not in b.data:
return list()
logger.info("GLPI : data in DB %s " % b.data)
new_data = copy.deepcopy(b.data)
new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
del new_data['perf_data']
del new_data['output']
del new_data['latency']
del new_data['execution_time']
try:
new_data['id'] = b.data['plugin_monitoring_servicescatalogs_id']
del new_data['plugin_monitoring_servicescatalogs_id']
table = 'glpi_plugin_monitoring_servicescatalogs'
    except KeyError:
new_data['id'] = b.data['plugin_monitoring_services_id']
del new_data['plugin_monitoring_services_id']
table = 'glpi_plugin_monitoring_services'
where_clause = {'id' : new_data['id']}
#print "Update service : ", new_data
query = self.db_backend.create_update_query(table, new_data, where_clause)
return [query]
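To see what the handler actually builds, the field shuffling above (deep-copy the brok data, stamp last_check, drop the volatile fields, rename the plugin id to id) can be exercised on its own. A minimal sketch with made-up brok data; the keys and values are illustrative only:

import copy
import time

data = {'plugin_monitoring_services_id': 42, 'state': 0, 'output': 'OK',
        'perf_data': '', 'latency': 0.1, 'execution_time': 0.2}
new_data = copy.deepcopy(data)
new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
for key in ('perf_data', 'output', 'latency', 'execution_time'):
    del new_data[key]
new_data['id'] = new_data.pop('plugin_monitoring_services_id')
print(new_data)  # ready for an UPDATE against glpi_plugin_monitoring_services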
Example 2: launch_new_checks
def launch_new_checks(self):
for chk in self.checks:
if chk.status == 'queue':
logger.info("[Android SMS] Launching SMS for command %s" % chk.command)
elts = chk.command.split(' ')
# Check the command call first
if len(elts) < 3:
chk.exit_status = 2
                chk.get_outputs('The android SMS call %s is not valid. Should be: android_sms PHONENUMBER TEXT' % chk.command, 8012)
chk.status = 'done'
chk.execution_time = 0.1
continue
# Should be android_sms PHONE TEXT
phone = elts[1]
text = ' '.join(elts[2:])
# Go call the SMS :)
try:
self.android.smsSend(phone, text)
            except Exception as exp:
chk.exit_status = 2
chk.get_outputs('The android SMS to %s got an error %s' % (phone, exp), 8012)
chk.status = 'done'
chk.execution_time = 0.1
continue
logger.info("[Android SMS] Send SMS %s to %s" % text, str(phone))
# And finish the notification
chk.exit_status = 1
chk.get_outputs('SMS sent to %s' % phone, 8012)
chk.status = 'done'
chk.execution_time = 0.01
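The command format checked above is android_sms PHONENUMBER TEXT: the first word is the command name, the second the phone number, and everything after it is joined back into the message body. A standalone sketch of that parsing, with a made-up command string:

cmd = 'android_sms +15551234567 Disk /var is CRITICAL'
elts = cmd.split(' ')
if len(elts) < 3:
    print('invalid call, should be: android_sms PHONENUMBER TEXT')
else:
    phone = elts[1]
    text = ' '.join(elts[2:])
    print('would send %r to %s' % (text, phone))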
Example 3: hook_late_configuration
def hook_late_configuration(self, arb):
""" Read config and fill database """
mac_resol = MacroResolver()
mac_resol.init(arb.conf)
for serv in arb.conf.services:
if serv.check_command.command.module_type == 'snmp_booster':
try:
# Serialize service
dict_serv = dict_serialize(serv,
mac_resol,
self.datasource)
except Exception as exp:
logger.error("[SnmpBooster] [code 0907] [%s,%s] "
"%s" % (serv.host.get_name(),
serv.get_name(),
str(exp)))
continue
# We want to make a diff between arbiter insert and poller insert. Some backend may need it.
try:
self.db_client.update_service_init(dict_serv['host'],
dict_serv['service'],
dict_serv)
except Exception as exp:
logger.error("[SnmpBooster] [code 0909] [%s,%s] "
"%s" % (dict_serv['host'],
dict_serv['service'],
str(exp)))
continue
logger.info("[SnmpBooster] [code 0908] Done parsing")
# Disconnect from database
self.db_client.disconnect()
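Both loops above follow the same per-service error-isolation pattern: serialize or store each service inside its own try/except and continue on failure, so one bad definition cannot abort the whole configuration pass. A generic sketch of the pattern, with a hypothetical serialize helper standing in for dict_serialize:

def serialize(item):
    # hypothetical stand-in for dict_serialize
    if item == 'bad':
        raise ValueError('cannot serialize')
    return {'name': item}

for item in ('a', 'bad', 'b'):
    try:
        data = serialize(item)
    except Exception as exp:
        print("skipping %s: %s" % (item, exp))
        continue
    print("stored %s" % data)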
Example 4: manage_log_brok
def manage_log_brok(self, brok):
"""
Parse a Shinken log brok to enqueue a log line for Index insertion
"""
d = date.today()
index_name = self.index_prefix + "-" + d.strftime("%Y.%m.%d")
line = brok.data["log"]
    if re.match(r"^\[[0-9]*\] [A-Z][a-z]*.:", line):
        # This matches internal log lines which should NOT be stored
logger.warning("[elastic-logs] do not store: %s", line)
return
logline = Logline(line=line)
logline_dict = logline.as_dict()
logline_dict.update({"@timestamp": datetime.utcfromtimestamp(int(logline_dict["time"])).isoformat() + "Z"})
values = {"_index": index_name, "_type": "shinken-logs", "_source": logline_dict}
# values = logline.as_dict()
if logline.logclass != LOGCLASS_INVALID:
logger.debug("[elastic-logs] store log line values: %s", values)
self.logs_cache.append(values)
else:
logger.info("[elastic-logs] This line is invalid: %s", line)
return
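The filtering regex above matches Shinken's internal chatter (a bracketed timestamp followed by a capitalized word and a colon) so that only real monitoring events reach the Elasticsearch index. A quick standalone check of the pattern against two made-up sample lines:

import re

pattern = r"^\[[0-9]*\] [A-Z][a-z]*.:"
samples = ["[1400000000] Warning : something internal",
           "[1400000000] SERVICE ALERT: srv1;disk;CRITICAL;HARD;3;full"]
for line in samples:
    verdict = 'skipped' if re.match(pattern, line) else 'stored'
    print("%s -> %s" % (verdict, line))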
Example 5: publish_archive
def publish_archive(archive):
# Now really publish it
api_key = CONFIG['shinken.io']['api_key']
c = prepare_curl_connection('/push', post=1, verbose=1)
c.setopt(c.HTTPPOST, [("api_key", api_key),
("data",
(c.FORM_FILE, str(archive),
c.FORM_CONTENTTYPE, "application/x-gzip"))
])
response = StringIO()
c.setopt(pycurl.WRITEFUNCTION, response.write)
try:
c.perform()
except pycurl.error as exp:
logger.error("There was a critical error : %s", exp)
sys.exit(2)
return
r = c.getinfo(pycurl.HTTP_CODE)
c.close()
if r != 200:
logger.error("There was a critical error : %s", response.getvalue())
sys.exit(2)
else:
ret = json.loads(response.getvalue().replace('\\/', '/'))
status = ret.get('status')
text = ret.get('text')
if status == 200:
logger.info(text)
else:
logger.error(text)
sys.exit(2)
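The upload above is a standard pycurl multipart POST: the archive is attached as a file field and the JSON reply is collected through WRITEFUNCTION. A minimal standalone sketch of the same pattern; the URL and file name are placeholders, since the real endpoint and the prepare_curl_connection helper are Shinken-internal:

import pycurl
from StringIO import StringIO  # Python 2, as in the module above

c = pycurl.Curl()
c.setopt(pycurl.URL, 'http://example.com/push')  # placeholder URL
c.setopt(pycurl.HTTPPOST, [('api_key', 'XXX'),
                           ('data', (pycurl.FORM_FILE, 'archive.tar.gz',
                                     pycurl.FORM_CONTENTTYPE, 'application/x-gzip'))])
response = StringIO()
c.setopt(pycurl.WRITEFUNCTION, response.write)
c.perform()
print(c.getinfo(pycurl.HTTP_CODE), response.getvalue())
c.close()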
Example 6: is_me
def is_me(self, lookup_name):
logger.info("And arbiter is launched with the hostname:%s "
"from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
if lookup_name:
return lookup_name == self.get_name()
else:
return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
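is_me() answers "is this configuration entry me?" either by exact name, or by comparing the declared host_name with what the OS reports. A tiny sketch of the two values it falls back on:

import socket

host_name = socket.getfqdn()  # stands in for the configured host_name
print(socket.getfqdn(), socket.gethostname())
print(host_name == socket.getfqdn() or host_name == socket.gethostname())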
Example 7: check_alive_instances
def check_alive_instances(self):
# Only for external
for inst in self.instances:
        if inst not in self.to_restart:
if inst.is_external and not inst.process.is_alive():
logger.error("The external module %s goes down unexpectedly!" % inst.get_name())
logger.info("Setting the module %s to restart" % inst.get_name())
# We clean its queues, they are no more useful
inst.clear_queues(self.manager)
self.to_restart.append(inst)
# Ok, no need to look at queue size now
continue
        # Now look at the max queue size. If it is above the limit, the module
        # probably has a huge problem and should bail out; not a perfect
        # solution, more of a watchdog.
# If max_queue_size is 0, don't check this
if self.max_queue_size == 0:
continue
# Ok, go launch the dog!
queue_size = 0
try:
queue_size = inst.to_q.qsize()
        except Exception:
pass
if queue_size > self.max_queue_size:
logger.error("The external module %s got a too high brok queue size (%s > %s)!" % (inst.get_name(), queue_size, self.max_queue_size))
logger.info("Setting the module %s to restart" % inst.get_name())
# We clean its queues, they are no more useful
inst.clear_queues(self.manager)
self.to_restart.append(inst)
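The qsize() call above is wrapped in a try/except because multiprocessing queues do not implement it on every platform (it raises NotImplementedError on macOS, for example). A standalone sketch of the watchdog check:

import time
from multiprocessing import Queue

q = Queue()
for i in range(5):
    q.put(i)
time.sleep(0.1)  # give the feeder thread a moment; qsize() is approximate
max_queue_size = 3
queue_size = 0
try:
    queue_size = q.qsize()
except Exception:
    pass  # qsize() is not implemented everywhere
if queue_size > max_queue_size:
    print("queue too high (%d > %d), module should restart" % (queue_size, max_queue_size))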
Example 8: process_check_result
def process_check_result(self, databuffer, IV):
# 208 is the size of fixed received data ... NSCA packets are 208+512 (720) or 208+4096 (4304)
if not databuffer:
logger.warning("[NSCA] Received an empty NSCA packet")
return
logger.debug("[NSCA] Received NSCA packet: %s", binascii.hexlify(databuffer))
payload_length = len(databuffer) - 208
if payload_length != 512 and payload_length != 4096:
logger.warning("[NSCA] Received packet with unusual payload length: %d.", payload_length)
if self.payload_length != -1 and payload_length != self.payload_length:
logger.warning("[NSCA] Dropping packet with incorrect payload length.")
return
(timestamp, rc, hostname, service, output) = self.read_check_result(databuffer, IV, payload_length)
current_time = time.time()
check_result_age = current_time - timestamp
if timestamp > current_time and self.check_future_packet:
logger.warning("[NSCA] Dropping packet with future timestamp.")
elif check_result_age > self.max_packet_age:
logger.info(
"[NSCA] Dropping packet with stale timestamp - packet was %s seconds old. Timestamp: %s for %s/%s" % \
(check_result_age, timestamp, hostname, service))
else:
self.post_command(timestamp, rc, hostname, service, output)
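The length bookkeeping above relies on the NSCA wire format: a fixed 208-byte header followed by a 512-byte (older clients) or 4096-byte (large-output clients) payload, for 720 or 4304 bytes total. A quick sketch of that size check:

for total in (720, 4304, 1000):
    payload = total - 208
    ok = payload in (512, 4096)
    print("%4d bytes -> payload %4d -> %s" % (total, payload, 'ok' if ok else 'unusual'))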
Example 9: get_instance
def get_instance(plugin):
""" Return a module instance for the plugin manager """
logger.info("Get a NSCA arbiter module for plugin %s" % plugin.get_name())
host = getattr(plugin, 'host', '127.0.0.1')
if host == '*':
host = ''
port = int(getattr(plugin, 'port', '5667'))
buffer_length = int(getattr(plugin, 'buffer_length', '4096'))
payload_length = int(getattr(plugin, 'payload_length', '-1'))
encryption_method = int(getattr(plugin, 'encryption_method', '0'))
backlog = int(getattr(plugin, 'backlog', '10'))
password = getattr(plugin, 'password', '')
if password == "" and encryption_method != 0:
logger.error("[NSCA] No password specified whereas there is a encryption_method defined")
logger.warning("[NSCA] Setting password to dummy to avoid crash!")
password = "dummy"
max_packet_age = min(int(getattr(plugin, 'max_packet_age', '30')), 900)
    check_future_packet = bool(int(getattr(plugin, 'check_future_packet', '0')))  # int() first: bool('0') would be True
instance = NSCA_arbiter(plugin, host, port,
buffer_length, payload_length, encryption_method, password, max_packet_age, check_future_packet,
backlog)
return instance
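get_instance() shows the standard Shinken pattern for reading module configuration: every option is fetched with getattr() and a string default, then cast to the right type. A minimal sketch of that pattern, with a hypothetical stand-in for a parsed plugin definition:

class FakePlugin(object):
    # hypothetical stand-in for a parsed module definition
    host = '*'
    port = '5667'

plugin = FakePlugin()
host = getattr(plugin, 'host', '127.0.0.1')
if host == '*':
    host = ''  # '*' means listen on all interfaces
port = int(getattr(plugin, 'port', '5667'))
encryption_method = int(getattr(plugin, 'encryption_method', '0'))
print(repr(host), port, encryption_method)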
Example 10: hook_late_configuration
def hook_late_configuration(self, arb):
# We will return external commands to the arbiter, so
# it can just manage it easily and in a generic way
ext_cmds = []
    # If the file does not exist, we launch the command
    # and we bail out
if not self._is_file_existing():
self._launch_command()
return
self._is_mapping_file_changed()
self._update_mapping()
additions, removed = self._got_mapping_changes()
for (father_k, son_k) in additions:
son_type, son_name = son_k
father_type, father_name = father_k
logger.info("[Hot dependencies] Linked son : %s and its father: %s" % (son_name, father_name))
if son_type == 'host' and father_type == 'host':
son = arb.conf.hosts.find_by_name(son_name)
father = arb.conf.hosts.find_by_name(father_name)
if son is not None and father is not None:
logger.debug("[Hot dependencies] Found! %s %s" % (son_name, father_name))
if not son.is_linked_with_host(father):
logger.debug("[Hot dependencies] Doing simple link between %s and %s" % (son.get_name(), father.get_name()))
# Add a dep link between the son and the father
son.add_host_act_dependency(father, ['w', 'u', 'd'], None, True)
else:
logger.debug("[Hot dependencies] Missing one of %s %s" % (son_name, father_name))
Example 11: get_instance
def get_instance(plugin):
logger.info("Get a RawSocket broker for plugin %s" % plugin.get_name())
#Catch errors
#path = plugin.path
instance = RawSocket_broker(plugin)
return instance
Example 12: hook_save_retention
def hook_save_retention(self, daemon):
logger.info("[PickleRetentionGeneric] asking me to update the retention objects")
# Now the flat file method
try:
        # Open a file near the path, with a .tmp extension,
        # so in case of a problem we do not lose the old one
f = open(self.path + ".tmp", "wb")
# We get interesting retention data from the daemon it self
all_data = daemon.get_retention_data()
# And we save it on file :)
# s = cPickle.dumps(all_data)
# s_compress = zlib.compress(s)
cPickle.dump(all_data, f, protocol=cPickle.HIGHEST_PROTOCOL)
# f.write(s_compress)
f.close()
        # Now move the .tmp file to the real path
shutil.move(self.path + ".tmp", self.path)
    except IOError as exp:
        logger.error("Retention file creation failed, %s" % str(exp))
return
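The .tmp-then-move dance above is the classic way to make a file save (near-)atomic: if the dump dies halfway through, the previous retention file is untouched, and the final shutil.move replaces it in one step. A standalone sketch of the pattern, with a placeholder path:

try:
    import cPickle as pickle  # Python 2, as in the module above
except ImportError:
    import pickle
import shutil

path = '/tmp/retention.dat'  # placeholder path
all_data = {'hosts': {}, 'services': {}}
f = open(path + '.tmp', 'wb')
pickle.dump(all_data, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
shutil.move(path + '.tmp', path)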
Example 13: init_http
def init_http(self):
logger.info("[WS_Arbiter] Starting WS arbiter http socket")
try:
self.srv = run(host=self.host, port=self.port, server='wsgirefselect')
    except Exception as e:
logger.error("[WS_Arbiter] Exception : %s" % str(e))
raise
Example 14: hook_save_retention
def hook_save_retention(self, daemon):
"""
main function that is called in the retention creation pass
"""
try:
self.max_workers = cpu_count()
except NotImplementedError:
pass
t0 = time.time()
logger.debug("[MongodbRetention] asking me to update the retention objects")
all_data = daemon.get_retention_data()
processes = []
for i in xrange(self.max_workers):
proc = Process(target=self.job, args=(all_data, i, self.max_workers))
proc.start()
processes.append(proc)
# Allow 30s to join the sub-processes, should be enough
for proc in processes:
proc.join(30)
logger.info("Retention information updated in Mongodb (%.2fs)" % (time.time() - t0))
Example 15: hook_save_retention
def hook_save_retention(self, daemon):
"""
main function that is called in the retention creation pass
"""
logger.debug("[MemcacheRetention] asking me to update the retention objects")
all_data = daemon.get_retention_data()
hosts = all_data['hosts']
services = all_data['services']
    # Save every host and service, one memcache key per object
for h_name in hosts:
try:
h = hosts[h_name]
key = self.normalize_key("HOST-%s" % h_name)
val = cPickle.dumps(h)
self.mc.set(key, val)
        except Exception:
            logger.error("[MemcacheRetention] error while saving host %s" % h_name)
for (h_name, s_desc) in services:
try:
key = self.normalize_key("SERVICE-%s,%s" % (h_name, s_desc))
s = services[(h_name, s_desc)]
val = cPickle.dumps(s)
self.mc.set(key, val)
        except Exception:
            logger.error("[MemcacheRetention] error while saving service %s,%s" % (h_name, s_desc))
self.mc.disconnect_all()
logger.info("Retention information updated in Memcache")