本文整理汇总了Python中shinken.log.logger.error函数的典型用法代码示例。如果您正苦于以下问题:Python error函数的具体用法?Python error怎么用?Python error使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了error函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: linkify_sd_by_s
def linkify_sd_by_s(self, hosts, services):
    """Resolve service names to Service objects on each servicedependency.

    For every servicedependency in this list, look up both the dependent
    service (dependent_host_name / dependent_service_description) and the
    parent service (host_name / service_description) in `services`, and
    replace the textual attributes with the resolved objects.  Failed
    lookups are recorded in self.configuration_errors.

    :param hosts: hosts container (not used by this method)
    :param services: Services container providing
        find_srv_by_name_and_hostname(host_name, service_description)
    """
    for sd in self:
        try:
            s_name = sd.dependent_service_description
            hst_name = sd.dependent_host_name
            # The new member list, in id
            s = services.find_srv_by_name_and_hostname(hst_name, s_name)
            if s is None:
                self.configuration_errors.append("Service %s not found for host %s"
                                                 % (s_name, hst_name))
            # NOTE(review): the attribute is overwritten even when the lookup
            # failed (s is None) -- confirm this is intentional.
            sd.dependent_service_description = s
            s_name = sd.service_description
            hst_name = sd.host_name
            # The new member list, in id
            s = services.find_srv_by_name_and_hostname(hst_name, s_name)
            if s is None:
                self.configuration_errors.append("Service %s not found for host %s"
                                                 % (s_name, hst_name))
            sd.service_description = s
        except AttributeError, exp:
            # Dependency definition missing an expected attribute: log and skip.
            logger.error("[servicedependency] fail to linkify by service %s: %s", sd, exp)
示例2: grab_package
def grab_package(pname):
    """Download package `pname` from shinken.io into an in-memory buffer.

    Performs an HTTP GET (POST flag set to 0) through the optional proxy
    from CONFIG['shinken.io'].  On a pycurl error the problem is logged
    and '' is returned.
    NOTE(review): the visible code ends inside the except handler; the
    success path after perform() is not shown in this excerpt.
    """
    cprint('Grabbing : ' , end='')
    cprint('%s' % pname, 'green')
    # Now really publish it
    proxy = CONFIG['shinken.io']['proxy']
    # NOTE(review): api_key is read but never used in the visible code.
    api_key = CONFIG['shinken.io']['api_key']
    # Ok we will push the file with a 5m timeout
    c = pycurl.Curl()
    c.setopt(c.POST, 0)
    c.setopt(c.CONNECTTIMEOUT, 30)
    c.setopt(c.TIMEOUT, 300)
    if proxy:
        c.setopt(c.PROXY, proxy)
    c.setopt(c.URL, str('shinken.io/grab/%s' % pname))
    # Collect the raw HTTP response body in memory.
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    #c.setopt(c.VERBOSE, 1)
    try:
        c.perform()
    except pycurl.error, exp:
        logger.error("There was a critical error : %s" % exp)
        return ''
示例3: get_services_by_explosion
def get_services_by_explosion(self, servicegroups):
    """Recursively expand servicegroup members and return this group's
    service members.

    Every name returned by get_servicegroup_members() is resolved in
    `servicegroups`, exploded in turn, and folded into this group with
    add_string_member().  A cycle in the group definitions is detected
    through rec_tag and logged as an error.
    """
    # Mark this group as exploded so a child that references us will not
    # explode us a second time.
    self.already_explode = True
    # rec_tag is cleared before the explosion walk starts; finding it set
    # here means we re-entered this group: the definitions form a loop.
    if self.rec_tag:
        logger.error("[servicegroup::%s] got a loop in servicegroup definition", self.get_name())
        return self.members if self.has('members') else ''
    self.rec_tag = True
    for member_name in self.get_servicegroup_members():
        group = servicegroups.find_by_name(member_name.strip())
        if group is None:
            continue
        exploded = group.get_services_by_explosion(servicegroups)
        if exploded is not None:
            self.add_string_member(exploded)
    return self.members if self.has('members') else ''
示例4: open
def open(self):
    """Open the MongoDB connection, make sure the log indexes exist and
    flag the store as connected.

    Raises LiveStatusLogStoreError when the database cannot be reached.
    """
    try:
        if self.replica_set:
            self.conn = pymongo.ReplicaSetConnection(self.mongodb_uri, replicaSet=self.replica_set, fsync=self.mongodb_fsync)
        elif ReplicaSetConnection:
            self.conn = pymongo.Connection(self.mongodb_uri, fsync=self.mongodb_fsync)
        else:
            # Old versions of pymongo do not known about fsync
            self.conn = pymongo.Connection(self.mongodb_uri)
        self.db = self.conn[self.database]
        collection = self.db[self.collection]
        # Indexes for host-scoped lookups and for plain time-ordered scans.
        collection.ensure_index(
            [('host_name', pymongo.ASCENDING), ('time', pymongo.ASCENDING), ('lineno', pymongo.ASCENDING)],
            name='logs_idx')
        collection.ensure_index(
            [('time', pymongo.ASCENDING), ('lineno', pymongo.ASCENDING)],
            name='time_1_lineno_1')
        if self.replica_set:
            # This might be a future option prefer_secondary
            #self.db.read_preference = ReadPreference.SECONDARY
            pass
        self.is_connected = CONNECTED
        # Force a rotation check on the next pass.
        self.next_log_db_rotate = time.time()
    except AutoReconnect as exc:
        # now what, ha?
        logger.error("[LogStoreMongoDB] LiveStatusLogStoreMongoDB.AutoReconnect %s" % exc)
        # The mongodb is hopefully available until this module is restarted
        raise LiveStatusLogStoreError(exc)
    except Exception as exc:
        # If there is a replica_set, but the host is a simple standalone one
        # we get a "No suitable hosts found" here.
        # But other reasons are possible too.
        logger.error("[LogStoreMongoDB] Could not open the database: %s" % exc)
        raise LiveStatusLogStoreError(exc)
示例5: publish_archive
def publish_archive(archive):
    """Upload the package `archive` to http://shinken.io/push.

    Sends a multipart POST containing the configured api_key and the
    gzipped archive file, through the optional proxy.  On a pycurl error
    the problem is logged and the function returns.
    NOTE(review): despite the comment below, the timeouts set here are
    30s connect / 300s total, and the visible code ends inside the
    except handler (the success path is not shown in this excerpt).
    """
    # Now really publish it
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']
    # Ok we will push the file with a 10s timeout
    c = pycurl.Curl()
    c.setopt(c.POST, 1)
    c.setopt(c.CONNECTTIMEOUT, 30)
    c.setopt(c.TIMEOUT, 300)
    if proxy:
        c.setopt(c.PROXY, proxy)
    c.setopt(c.URL, "http://shinken.io/push")
    # Multipart form: api_key field + the archive as a gzip file upload.
    c.setopt(c.HTTPPOST, [("api_key", api_key),
                          ("data",
                           (c.FORM_FILE, str(archive),
                            c.FORM_CONTENTTYPE, "application/x-gzip"))
                          ])
    # Collect the raw HTTP response body in memory.
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    c.setopt(c.VERBOSE, 1)
    try:
        c.perform()
    except pycurl.error, exp:
        logger.error("There was a critical error : %s" % exp)
        return
示例6: get_instance
def get_instance(plugin):
    """Build a Syslog broker module instance for `plugin`.

    Reads optional `facility` and `priority` attributes from the plugin
    configuration (defaulting to LOG_USER / LOG_INFO) and converts them
    into an aggregated syslog priority via SysLogHandler.encodePriority.
    NOTE(review): the visible code ends inside the except handler; the
    construction/return of the module instance is not shown here.
    """
    name = plugin.get_name()
    logger.info("Get a Syslog broker for plugin %s" % (name))
    # syslog.syslog priority defaults to (LOG_INFO | LOG_USER)
    facility = syslog.LOG_USER
    priority = syslog.LOG_INFO
    # Get configuration values, if any
    if hasattr(plugin, 'facility'):
        facility = plugin.facility
    if hasattr(plugin, 'priority'):
        priority = plugin.priority
    # Ensure config values have a string type compatible with
    # SysLogHandler.encodePriority
    # (Python 2: coerce unicode config values down to plain str.)
    if type(facility) in types.StringTypes:
        facility = types.StringType(facility)
    if type(priority) in types.StringTypes:
        priority = types.StringType(priority)
    # Convert facility / priority (integers or strings) to aggregated
    # priority value
    sh = SysLogHandler()
    try:
        priority = sh.encodePriority(facility, priority)
    except TypeError, e:
        # NOTE(review): despite the message, nothing actually resets
        # facility/priority to their defaults here, and the caught
        # exception `e` is not included in the log.
        logger.error("[%s] Couldn't get syslog priority, "
                     "reverting to defaults" % (name))
示例7: get_ui_availability
def get_ui_availability(self, elt, range_start=None, range_end=None):
    """Fetch daily availability records for a host or service `elt`.

    Builds a MongoDB $and query on hostname (plus service description
    when `elt` is a service), restricted to the optional
    [range_start, range_end] day-timestamp window, and reads matching
    documents from hav_collection sorted by day desc, hostname asc,
    service asc.  Returns None when no database connection is available.
    NOTE(review): `records` is filled but never returned in the visible
    code, so the function implicitly returns None on success -- confirm
    this excerpt is complete.
    """
    import pymongo
    if not self.db:
        logger.error("[mongo-logs] error Problem during init phase, no database connection")
        return None
    logger.debug("[mongo-logs] get_ui_availability, name: %s", elt)
    query = [{"hostname": elt.host_name}]
    if elt.__class__.my_type == 'service':
        query.append({"service": elt.service_description})
    if range_start:
        query.append({'day_ts': {'$gte': range_start}})
    if range_end:
        query.append({'day_ts': {'$lte': range_end}})
    query = {'$and': query}
    logger.debug("[mongo-logs] Fetching records from database with query: '%s'", query)
    records = []
    try:
        for log in self.db[self.hav_collection].find(query).sort([
                ("day",pymongo.DESCENDING),
                ("hostname",pymongo.ASCENDING),
                ("service",pymongo.ASCENDING)]):
            if '_id' in log:
                # Drop Mongo's internal document id from the results.
                del log['_id']
            records.append(log)
        logger.debug("[mongo-logs] %d records fetched from database.", len(records))
    except Exception, exp:
        logger.error("[mongo-logs] Exception when querying database: %s", str(exp))
示例8: get_hosts_by_explosion
def get_hosts_by_explosion(self, hostgroups):
    """Recursively expand hostgroup members and return this group's hosts.

    Every name returned by get_hostgroup_members() is resolved in
    `hostgroups`, exploded in turn, and folded into this group with
    add_string_member().  A cycle in the group definitions is detected
    through rec_tag and logged as an error.
    """
    # Mark this group as exploded so a child that references us will not
    # explode us a second time.
    self.already_explode = True
    # rec_tag is cleared before the explosion walk starts; finding it set
    # here means we re-entered this group: the definitions form a loop.
    if self.rec_tag:
        logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
        return self.get_hosts()
    self.rec_tag = True
    for member_name in self.get_hostgroup_members():
        group = hostgroups.find_by_name(member_name.strip())
        if group is None:
            continue
        exploded = group.get_hosts_by_explosion(hostgroups)
        if exploded is not None:
            self.add_string_member(exploded)
    return self.get_hosts()
示例9: open
def open(self):
    """
    Connect to the Mongo DB with configured URI.
    Execute a command to check if connected on master to activate immediate connection to
    the DB because we need to know if DB server is available.
    Update log rotation time to force a log rotation

    :return: True when the connection is established, False when the
             server is not available
    :raises MongoLogsError: on any unexpected error while opening the DB
    """
    # connect=False defers the real connection; the "ismaster" command
    # below forces it so availability is known right away.
    self.con = MongoClient(self.uri, connect=False)
    logger.info("[mongo-logs] trying to connect MongoDB: %s", self.uri)
    try:
        result = self.con.admin.command("ismaster")
        logger.info("[mongo-logs] connected to MongoDB, admin: %s", result)
        logger.debug("[mongo-logs] server information: %s", self.con.server_info())
        self.db = getattr(self.con, self.database)
        logger.info("[mongo-logs] connected to the database: %s (%s)", self.database, self.db)
        self.is_connected = CONNECTED
        # Schedule an immediate log rotation check.
        self.next_logs_rotation = time.time()
        logger.info('[mongo-logs] database connection established')
    except ConnectionFailure as e:
        logger.error("[mongo-logs] Server is not available: %s", str(e))
        return False
    except Exception as e:
        # Fix: the format string was missing its '%s' placeholder, so the
        # exception text passed as a lazy logging argument was dropped
        # from the emitted message.
        logger.error("[mongo-logs] Could not open the database: %s", str(e))
        raise MongoLogsError
    return True
示例10: manage_log_brok
def manage_log_brok(self, b):
    """Store the log line carried by brok `b` into the SQLite logs table.

    Skips storage when the store is read-only, when the line matches the
    "do not store" pattern, when the parsed line is invalid, or
    (optionally) when it repeats a successive OK state for the same
    host/service.
    """
    if self.read_only:
        return
    data = b.data
    line = data['log']
    if re.match("^\[[0-9]*\] [A-Z][a-z]*.:", line):
        # Match log which NOT have to be stored
        # print "Unexpected in manage_log_brok", line
        return
    try:
        logline = Logline(line=line)
        values = logline.as_tuple()
        if logline.logclass != LOGCLASS_INVALID:
            insert_log = True
            # values[12] carries the state of the parsed log line.
            current_state = int(values[12])
            if self.do_not_log_successive_ok and current_state == 0:
                # NOTE(review): values are interpolated straight into the
                # SQL string -- a host/service name containing a quote
                # would break the query (injection risk); a parameterized
                # query would be safer here.
                dbresult = self.execute("SELECT state FROM logs WHERE host_name='%s' AND service_description='%s' AND class=%d ORDER BY time DESC LIMIT 1" % (values[6],values[11],int(values[2])))
                if len(dbresult) > 0 and dbresult[0][0] == 0:
                    # Previous stored state was already OK: skip duplicate.
                    insert_log = False
            if insert_log:
                self.execute('INSERT INTO LOGS VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', values)
    except LiveStatusLogStoreError, exp:
        logger.error("[Logstore SQLite] An error occurred: %s", str(exp.args[0]))
        logger.error("[Logstore SQLite] DATABASE ERROR!!!!!!!!!!!!!!!!!")
示例11: check_alive_instances
def check_alive_instances(self):
    """Watchdog over external module instances.

    Flags for restart any external instance whose process died, and any
    instance whose input queue grew beyond max_queue_size (a value of 0
    disables the queue-size check).  Flagged instances get their queues
    cleared and are appended to self.to_restart.
    """
    # Only for external
    for inst in self.instances:
        if not inst in self.to_restart:
            if inst.is_external and not inst.process.is_alive():
                logger.error("The external module %s goes down unexpectedly!" % inst.get_name())
                logger.info("Setting the module %s to restart" % inst.get_name())
                # We clean its queues, they are no more useful
                inst.clear_queues(self.manager)
                self.to_restart.append(inst)
                # Ok, no need to look at queue size now
                continue
            # Now look for man queue size. If above value, the module should got a huge problem
            # and so bailout. It's not a perfect solution, more a watchdog
            # If max_queue_size is 0, don't check this
            if self.max_queue_size == 0:
                continue
            # Ok, go launch the dog!
            queue_size = 0
            try:
                queue_size = inst.to_q.qsize()
            except Exception, exp:
                # qsize() can fail (e.g. not implemented on some
                # platforms); fall back to the default of 0.
                pass
            if queue_size > self.max_queue_size:
                logger.error("The external module %s got a too high brok queue size (%s > %s)!" % (inst.get_name(), queue_size, self.max_queue_size))
                logger.info("Setting the module %s to restart" % inst.get_name())
                # We clean its queues, they are no more useful
                inst.clear_queues(self.manager)
                self.to_restart.append(inst)
示例12: is_correct
def is_correct(self):
    """Validate this entry: the day must be a known weekday name and the
    underlying Daterange must itself be correct."""
    valid = self.day in Daterange.weekdays
    if not valid:
        logger.error("Error: %s is not a valid day", self.day)
    # '&' (not 'and') keeps the original non-short-circuit behavior:
    # Daterange.is_correct always runs and can report its own errors.
    return valid & Daterange.is_correct(self)
示例13: hook_tick
def hook_tick(self, brok):
    """Each second the broker calls the hook_tick function
    Every tick try to flush the buffer
    """
    if self.use_pickle:
        if self.ticks >= self.tick_limit:
            # If the number of ticks where data was not
            # sent successfully to Graphite reaches the bufferlimit.
            # Reset the buffer and reset the ticks
            # (the buffered metrics are dropped at this point)
            self.buffer = []
            self.ticks = 0
            return
        self.ticks += 1
        # Format the data
        # Carbon pickle protocol: 4-byte big-endian length header
        # followed by the pickled payload.
        payload = cPickle.dumps(self.buffer)
        header = struct.pack("!L", len(payload))
        packet = header + payload
        try:
            self.con.sendall(packet)
        except IOError, err:
            # Send failed: keep the buffer so we can retry on a later
            # tick (until tick_limit is reached).
            logger.error(
                "[Graphite broker] Failed sending to the Graphite Carbon instance network socket! IOError:%s"
                % str(err)
            )
            return
        # Flush the buffer after a successful send to Graphite
        self.buffer = []
示例14: create_pack
def create_pack(self, buf, name):
    """Parse the JSON pack definition `buf` and register the resulting Pack.

    `name` is only used in log messages.  Optional fields get defaults;
    a pack definition without a 'name' field is rejected.
    """
    if not json:
        logger.warning("[Pack] cannot load the pack file '%s': missing json lib", name)
        return
    # Ok, go compile the code
    try:
        d = json.loads(buf)
        if not 'name' in d:
            logger.error("[Pack] no name in the pack '%s'", name)
            return
        p = Pack({})
        p.pack_name = d['name']
        p.description = d.get('description', '')
        p.macros = d.get('macros', {})
        # Default template list is the pack's own name.
        p.templates = d.get('templates', [p.pack_name])
        p.path = d.get('path', 'various/')
        p.doc_link = d.get('doc_link', '')
        p.services = d.get('services', {})
        p.commands = d.get('commands', [])
        # Normalize the path so later concatenations stay well-formed.
        if not p.path.endswith('/'):
            p.path += '/'
        # Ok, add it
        self[p.id] = p
    except ValueError, exp:
        logger.error("[Pack] error in loading pack file '%s': '%s'", name, exp)
示例15: publish_archive
def publish_archive(archive):
    """Upload the package `archive` to shinken.io and report the result.

    POSTs the file as a multipart form with the configured API key
    (10s timeouts, optional proxy).  On an HTTP error the process exits
    with status 2; otherwise the JSON reply's status/text are logged.
    """
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']
    # Ok we will push the file with a 10s timeout
    curl = pycurl.Curl()
    curl.setopt(curl.POST, 1)
    curl.setopt(curl.CONNECTTIMEOUT, 10)
    curl.setopt(curl.TIMEOUT, 10)
    if proxy:
        curl.setopt(curl.PROXY, proxy)
    curl.setopt(curl.URL, "http://shinken.io/push")
    # Multipart form: the API key plus the gzipped archive file.
    form = [
        ("api_key", api_key),
        ("data", (curl.FORM_FILE, str(archive),
                  curl.FORM_CONTENTTYPE, "application/x-gzip")),
    ]
    curl.setopt(curl.HTTPPOST, form)
    response = StringIO()
    curl.setopt(pycurl.WRITEFUNCTION, response.write)
    curl.setopt(curl.VERBOSE, 1)
    curl.perform()
    http_code = curl.getinfo(pycurl.HTTP_CODE)
    curl.close()
    if http_code != 200:
        logger.error("There was a critical error : %s" % response.getvalue())
        sys.exit(2)
    # Un-escape the JSON slashes before decoding the server reply.
    ret = json.loads(response.getvalue().replace('\\/', '/'))
    status = ret.get('status')
    text = ret.get('text')
    if status == 200:
        logger.log(text)
    else:
        logger.error(text)