This article collects typical usage examples of the Python utils.log_exc function. If you have been wondering what log_exc does, how to call it, or what real-world uses of it look like, the curated examples here should help.
The sections below present 15 code examples of the log_exc function, sorted by popularity by default.
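The examples come from several different projects, so the signature of log_exc varies: the Cobbler-style calls pass a logger object, while others call log_exc() with no arguments or with a message string. None of the examples include the helper itself, so the sketch below is only a hypothetical illustration of the logger-taking variant (an assumption for orientation, not code from any project quoted here): it reads sys.exc_info() inside an except block and writes the formatted traceback to the supplied logger.

# Hypothetical sketch only: the typical shape of a logger-based log_exc helper.
# The real utils.log_exc in the projects below may differ in names and details.
import sys
import traceback

def log_exc(logger):
    """Log the exception currently being handled, including its traceback."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    logger.error("Exception occurred: %s" % exc_type)
    logger.error("Exception value: %s" % exc_value)
    logger.error("Exception info:\n%s" % "".join(traceback.format_tb(exc_tb)))

Called as utils.log_exc(self.logger) from inside an except clause, a helper like this records the full traceback while still letting the caller re-raise, return an error value, or simply continue, which is how the examples below use it.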
Example 1: generate_kickstart_for_profile
def generate_kickstart_for_profile(self, g):
    g = self.api.find_profile(name=g)
    if g is None:
        return "# profile not found"
    distro = g.get_conceptual_parent()
    meta = utils.blender(self.api, False, g)
    if distro is None:
        raise CX(_("profile %(profile)s references missing distro %(distro)s") % { "profile" : g.name, "distro" : g.distro })
    kickstart_path = utils.find_kickstart(meta["kickstart"])
    if kickstart_path is not None and os.path.exists(kickstart_path):
        # the input is an *actual* file, hence we have to copy it
        try:
            meta = utils.blender(self.api, False, g)
            ksmeta = meta["ks_meta"]
            del meta["ks_meta"]
            meta.update(ksmeta)  # make available at top level
            meta["yum_repo_stanza"] = self.generate_repo_stanza(g, True)
            meta["yum_config_stanza"] = self.generate_config_stanza(g, True)
            meta["kickstart_done"] = self.generate_kickstart_signal(0, g, None)
            meta["kickstart_start"] = self.generate_kickstart_signal(1, g, None)
            meta["kernel_options"] = utils.hash_to_string(meta["kernel_options"])
            kfile = open(kickstart_path)
            data = self.templar.render(kfile, meta, None, g)
            kfile.close()
            return data
        except:
            utils.log_exc(self.api.logger)
            raise
    elif kickstart_path is not None and not os.path.exists(kickstart_path):
        if kickstart_path.find("http://") == -1 and kickstart_path.find("ftp://") == -1 and kickstart_path.find("nfs:") == -1:
            return "# Error, cannot find %s" % kickstart_path
    return "# kickstart is sourced externally, or is missing, and cannot be displayed here: %s" % meta["kickstart"]
Example 2: replace_objects_newer_on_remote
def replace_objects_newer_on_remote(self, obj_type):
    locals = utils.loh_to_hoh(self.local_data[obj_type], "uid")
    remotes = utils.loh_to_hoh(self.remote_data[obj_type], "uid")
    for (ruid, rdata) in remotes.iteritems():
        # do not add the system if it is not on the transfer list
        if not self.must_include[obj_type].has_key(rdata["name"]):
            continue
        if locals.has_key(ruid):
            ldata = locals[ruid]
            if ldata["mtime"] < rdata["mtime"]:
                if ldata["name"] != rdata["name"]:
                    self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                    self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
                creator = getattr(self.api, "new_%s" % obj_type)
                newobj = creator()
                newobj.from_datastruct(rdata)
                try:
                    self.logger.info("updating %s %s" % (obj_type, rdata["name"]))
                    self.api.add_item(obj_type, newobj)
                except Exception, e:
                    utils.log_exc(self.logger)
Example 3: replicate_data
def replicate_data(self):
    # distros
    self.logger.info("Copying Distros")
    local_distros = self.api.distros()
    try:
        remote_distros = self.remote.get_distros()
    except:
        utils.die(self.logger, "Failed to contact remote server")
    if self.sync_all or self.sync_trees:
        self.logger.info("Rsyncing Distribution Trees")
        self.rsync_it(os.path.join(self.settings.webdir, "ks_mirror"), self.settings.webdir)
    for distro in remote_distros:
        self.logger.info("Importing remote distro %s." % distro["name"])
        if os.path.exists(distro["kernel"]):
            remote_mtime = distro["mtime"]
            if self.should_add_or_replace(distro, "distros"):
                new_distro = self.api.new_distro()
                new_distro.from_datastruct(distro)
                try:
                    self.api.add_distro(new_distro)
                    self.logger.info("Copied distro %s." % distro["name"])
                except Exception, e:
                    utils.log_exc(self.logger)
                    self.logger.error("Failed to copy distro %s" % distro["name"])
            else:
                # FIXME: force logic
                self.logger.info("Not copying distro %s, sufficiently new mtime" % distro["name"])
        else:
            self.logger.error("Failed to copy distro %s, content not here yet." % distro["name"])
Example 4: remove_objects_not_on_master
def remove_objects_not_on_master(self, obj_type):
    locals = utils.loh_to_hoh(self.local_data[obj_type], "uid")
    remotes = utils.loh_to_hoh(self.remote_data[obj_type], "uid")
    for (luid, ldata) in locals.iteritems():
        if not remotes.has_key(luid):
            try:
                self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
            except Exception, e:
                utils.log_exc(self.logger)
Example 5: process
def process(self, message):
    # find principal
    from_hdr = parseaddr(message["From"])[1].lower()
    try:
        principal = UserModel().objects.get(email=from_hdr)
    except UserModel().DoesNotExist:
        # member not found
        raise MailInException("Member not found: %s" % from_hdr)
    # deliver message
    try:
        self.processRecipient(principal, message)
    except Exception, e:
        log_exc()
        raise MailInException(e)
Example 6: load_modules
def load_modules(module_path=mod_path, blacklist=None):
    logger = clogger.Logger()
    filenames = glob.glob("%s/*.py" % module_path)
    filenames += glob.glob("%s/*.pyc" % module_path)
    filenames += glob.glob("%s/*.pyo" % module_path)
    mods = set()
    for fn in filenames:
        basename = os.path.basename(fn)
        if basename == "__init__.py":
            continue
        if basename[-3:] == ".py":
            modname = basename[:-3]
        elif basename[-4:] in [".pyc", ".pyo"]:
            modname = basename[:-4]
        # No need to try importing the same module over and over if
        # we have a .py, .pyc, and .pyo
        if modname in mods:
            continue
        mods.add(modname)
        try:
            blip = __import__("modules.%s" % (modname), globals(), locals(),
                              [modname])
            if not hasattr(blip, "register"):
                if not modname.startswith("__init__"):
                    errmsg = _(
                        "%(module_path)s/%(modname)s is not a proper module")
                    print errmsg % {
                        'module_path': module_path,
                        'modname': modname
                    }
                continue
            category = blip.register()
            if category:
                MODULE_CACHE[modname] = blip
            if category not in MODULES_BY_CATEGORY:
                MODULES_BY_CATEGORY[category] = {}
            MODULES_BY_CATEGORY[category][modname] = blip
        except Exception:
            logger.info('Exception raised when loading module %s' % modname)
            log_exc(logger)
    return (MODULE_CACHE, MODULES_BY_CATEGORY)
Example 7: __call__
def __call__(self, request, *args, **kw):
    mail = request.POST.get('mail') or request.GET.get('mail')
    if not mail:
        return HttpResponse('failed', status=500)
    # convert mail
    try:
        msg = message_from_string(mail.encode('utf-8'))
    except:
        log_exc('Error parsing email')
        return HttpResponse('failed on parsing', status=500)
    # check message for loops, wrong mta hosts, etc
    try:
        config_instance.checkMessage(msg, mail, request)
    except MailInException, msg:
        log(str(msg))
        return HttpResponse('failed on checking', status=500)
Example 8: render
def render(self, request, compress=True):
    request = self._get_request(request)
    if self.resource:
        return self.resource(request).GET()
    path = self.path
    resource = traverse(path, request)
    if resource is None:
        return u''
    gresource = IResource(resource, None)
    if gresource is not None:
        try:
            return gresource.render(request)
        except Exception, err:
            log_exc(str(err))
            raise
Example 9: add_objects_not_on_local
def add_objects_not_on_local(self, obj_type):
    locals = utils.loh_to_hoh(self.local_data[obj_type], "uid")
    remotes = utils.loh_sort_by_key(self.remote_data[obj_type], "depth")
    remotes2 = utils.loh_to_hoh(self.remote_data[obj_type], "depth")
    for rdata in remotes:
        # do not add the system if it is not on the transfer list
        if not self.must_include[obj_type].has_key(rdata["name"]):
            continue
        if not locals.has_key(rdata["uid"]):
            creator = getattr(self.api, "new_%s" % obj_type)
            newobj = creator()
            newobj.from_datastruct(rdata)
            try:
                self.logger.info("adding %s %s" % (obj_type, rdata["name"]))
                self.api.add_item(obj_type, newobj)
            except Exception, e:
                utils.log_exc(self.logger)
Example 10: add_objects_not_on_local
def add_objects_not_on_local(self, obj_type):
    locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
    remotes = utils.lod_sort_by_key(self.remote_data[obj_type], "depth")
    for rdata in remotes:
        # do not add the system if it is not on the transfer list
        if not rdata["name"] in self.must_include[obj_type]:
            continue
        if not rdata["uid"] in locals:
            creator = getattr(self.api, "new_%s" % obj_type)
            newobj = creator()
            newobj.from_dict(rdata)
            try:
                self.logger.info("adding %s %s" % (obj_type, rdata["name"]))
                if not self.api.add_item(obj_type, newobj, logger=self.logger):
                    self.logger.error("failed to add %s %s" % (obj_type, rdata["name"]))
            except Exception:
                utils.log_exc(self.logger)
Example 11: createrepo_walker
def createrepo_walker(self, repo, dirname, fnames):
    """
    Used to run createrepo on a copied Yum mirror.
    """
    if os.path.exists(dirname) or repo["breed"] == "rsync":
        utils.remove_yum_olddata(dirname)
        # add any repo metadata we can use
        mdoptions = []
        if os.path.isfile("%s/repodata/repomd.xml" % (dirname)):
            if not HAS_YUM:
                utils.die(self.logger, "yum is required to use this feature")
            rmd = yum.repoMDObject.RepoMD("", "%s/repodata/repomd.xml" % (dirname))
            if rmd.repoData.has_key("group"):
                groupmdfile = rmd.getData("group").location[1]
                mdoptions.append("-g %s" % groupmdfile)
            if rmd.repoData.has_key("prestodelta"):
                # need createrepo >= 0.9.7 to add deltas
                if utils.check_dist() == "redhat" or utils.check_dist() == "suse":
                    cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
                    createrepo_ver = utils.subprocess_get(self.logger, cmd)
                    if createrepo_ver >= "0.9.7":
                        mdoptions.append("--deltas")
                    else:
                        utils.die(
                            self.logger,
                            "this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler.",
                        )
        blended = utils.blender(self.api, False, repo)
        flags = blended.get("createrepo_flags", "(ERROR: FLAGS)")
        try:
            # BOOKMARK
            cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
            utils.subprocess_call(self.logger, cmd)
        except:
            utils.log_exc(self.logger)
            self.logger.error("createrepo failed.")
        del fnames[:]  # we're in the right place
Example 12: remove_objects_not_on_master
def remove_objects_not_on_master(self, obj_type):
    locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
    remotes = utils.lod_to_dod(self.remote_data[obj_type], "uid")
    obj_pattern = getattr(self, "%s_patterns" % obj_type)
    if obj_pattern and self.prune:
        self.logger.info("Found pattern for %s. Pruning non-matching items" % obj_type)
        keep_obj = {}
        remote_names = utils.loh_to_hoh(self.remote_data[obj_type], "name")
        for name in remote_names.keys():
            if name in self.must_include[obj_type] and remote_names[name]["uid"] in remotes:
                self.logger.info("Adding %s:%s to keep list" % (name, remote_names[name]["uid"]))
                keep_obj[remote_names[name]["uid"]] = remotes[remote_names[name]["uid"]]
        remotes = keep_obj
    for (luid, ldata) in locals.iteritems():
        if luid not in remotes:
            try:
                self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
            except Exception:
                utils.log_exc(self.logger)
Example 13: signature_update
def signature_update(self, logger):
    try:
        tmpfile = tempfile.NamedTemporaryFile()
        response = urllib2.urlopen(self.settings().signature_url)
        sigjson = response.read()
        tmpfile.write(sigjson)
        tmpfile.flush()
        logger.debug("Successfully got file from %s" % self.settings().signature_url)
        # test the import without caching it
        if not utils.load_signatures(tmpfile.name, cache=False):
            logger.error("Downloaded signatures failed test load (tempfile = %s)" % tmpfile.name)
            return False
        # rewrite the real signature file and import it for real
        f = open(self.settings().signature_path, "w")
        f.write(sigjson)
        f.close()
        return utils.load_signatures(self.settings().signature_path)
    except:
        utils.log_exc(logger)
        return False
Example 14: process
def process(self, message):
    recipient = IRecipient(self.context, None)
    if recipient is None:
        raise MailInException('Recipent not found.')
    # find principal
    from_hdr = parseaddr(message['From'])[1].lower()
    try:
        principal = getPrincipalByEMail(from_hdr)
    except PrincipalLookupError:
        if IAnonymousSupport.providedBy(recipient):
            principal = getUtility(IUnauthenticatedPrincipal)
        else:
            # member not found
            raise MailInException('Member not found: %s' % from_hdr)
    # set security context
    interaction = queryInteraction()
    if interaction is not None:
        request = copy.copy(interaction.participations[0])
    else:
        request = TestRequest()
    request.setPrincipal(principal)
    request.interaction = None
    endInteraction()
    newInteraction(request)
    # deliver message
    try:
        recipient.process(message)
    except:
        log_exc()
    # restore old security context
    restoreInteraction()
Example 15: __depth_sort
# workaround for profile inheritance, must load in order
def __depth_sort(a, b):
    return cmp(a["depth"], b["depth"])

remote_profiles.sort(__depth_sort)

for profile in remote_profiles:
    self.logger.info("Importing remote profile %s" % profile["name"])
    if self.should_add_or_replace(profile, "profiles"):
        new_profile = self.api.new_profile()
        new_profile.from_datastruct(profile)
        try:
            self.api.add_profile(new_profile)
            self.logger.info("Copied profile %s." % profile["name"])
        except Exception, e:
            utils.log_exc(self.logger)
            self.logger.error("Failed to copy profile %s." % profile["name"])
    else:
        self.logger.info("Not copying profile %s, sufficiently new mtime" % profile["name"])

# images
self.logger.info("Copying Images")
remote_images = self.remote.get_images()
for image in remote_images:
    self.logger.info("Importing remote image %s" % image["name"])
    if self.should_add_or_replace(image, "images"):
        new_image = self.api.new_image()
        new_image.from_datastruct(image)
        try:
            self.api.add_image(new_image)
            self.logger.info("Copied image %s." % image["name"])