This article collects typical usage examples of the Python lsst.log.Log class. If you are asking how the Python Log class is used in practice, how to call it, or where to find working examples, the curated class code examples below may help.
The sections below present 15 code examples of the Log class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: traceSetAt
def traceSetAt(name, number):
    """!Adjust logging level to display messages with trace number <= NUMBER

    Set the levels of loggers "TRACEn.name" with n > NUMBER to INFO,
    and those with n <= NUMBER to DEBUG, so that only tracing messages
    with n <= NUMBER are shown.

    @param[in] name  The logger name
    @param[in] number  The trace number threshold for display
    """
    for i in range(6):
        level = Log.INFO if i > number else Log.DEBUG
        Log.getLogger('TRACE%d.%s' % (i, name)).setLevel(level)
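A minimal usage sketch, assuming lsst.log is importable and that the code being traced logs through "TRACEn.<name>" loggers as the docstring describes (the logger name below is illustrative):

from lsst.log import Log

traceSetAt("meas.algorithms", 2)
Log.getLogger("TRACE2.meas.algorithms").debug("shown: trace number 2 <= threshold")
Log.getLogger("TRACE5.meas.algorithms").debug("suppressed: trace number 5 > threshold")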
Example 2: __init__
def __init__(self, uri, create):
    self.log = Log.getLogger("daf.persistence.butler")
    self.root = self._pathFromURI(uri)
    if self.root and not os.path.exists(self.root):
        if not create:
            raise NoRepositroyAtRoot("No repository at {}".format(uri))
        safeMakeDir(self.root)
Example 3: main
def main():
    log = Log.getLogger('foo')
    log.setLevel(Log.INFO)

    ny, nx = 256, 256
    fwhm0 = 5.0
    psf = measAlg.DoubleGaussianPsf(21, 21, fwhm0)
    flux = 1.0e6

    # make two sets of fake data; the second set is missing a source
    nSrc = 4
    xy = randomCoords(nSrc)
    fluxs = [flux]*(nSrc - 1) + [0.7*flux]
    mimg = makeFakeImage(nx, ny, xy, fluxs, [3.0*fwhm0]*nSrc)
    mimg.writeFits("foo.fits")

    nSrcB = nSrc - 4
    mimgB = makeFakeImage(nx, ny, xy[0:nSrcB], fluxs[0:nSrcB], [3.0*fwhm0]*nSrcB)
    mimgB.writeFits("fooB.fits")

    # Run the detection
    fp = detect(mimg)

    # deblend mimgB (missing a peak) using the fp with the extra peak
    deb = measDeb.deblend(fp, mimgB, psf, fwhm0, verbose=True, rampFluxAtEdge=True, log=log)
    print("Deblended peaks: ", len(deb.peaks))

    fig = makePortionFigure(deb, mimg, mimgB)
    fig.savefig("test.png")
Example 4: __call__
def __call__(self, args):
    """Run the task on a single target.

    This implementation is nearly equivalent to the overridden one, but
    it never writes out metadata and always returns results. For memory
    efficiency reasons, the return value is exactly the one of |run|,
    rather than a :class:`~lsst.pipe.base.Struct` wrapped around it.
    """
    data_ref, kwargs = args
    if self.log is None:
        self.log = Log.getDefaultLogger()
    if hasattr(data_ref, "dataId"):
        self.log.MDC("LABEL", str(data_ref.dataId))
    elif isinstance(data_ref, (list, tuple)):
        self.log.MDC("LABEL", str([ref.dataId for ref in data_ref if hasattr(ref, "dataId")]))
    task = self.makeTask(args=args)
    result = None
    try:
        result = task.run(data_ref, **kwargs)
    except Exception as e:
        if self.doRaise:
            raise
        if hasattr(data_ref, "dataId"):
            task.log.fatal("Failed on dataId=%s: %s" % (data_ref.dataId, e))
        elif isinstance(data_ref, (list, tuple)):
            task.log.fatal("Failed on dataId=[%s]: %s" %
                           (",".join([str(_.dataId) for _ in data_ref]), e))
        else:
            task.log.fatal("Failed on dataRef=%s: %s" % (data_ref, e))
        if not isinstance(e, pipe_base.TaskError):
            traceback.print_exc(file=sys.stderr)
Example 5: __init__
def __init__(self, config=None, name=None, parentTask=None, log=None):
    self.metadata = dafBase.PropertyList()
    self._parentTask = parentTask

    if parentTask is not None:
        if name is None:
            raise RuntimeError("name is required for a subtask")
        self._name = name
        self._fullName = parentTask._computeFullName(name)
        if config is None:
            config = getattr(parentTask.config, name)
        self._taskDict = parentTask._taskDict
        loggerName = parentTask.log.getName() + '.' + name
    else:
        if name is None:
            name = getattr(self, "_DefaultName", None)
            if name is None:
                raise RuntimeError("name is required for a task unless it has attribute _DefaultName")
            name = self._DefaultName
        self._name = name
        self._fullName = self._name
        if config is None:
            config = self.ConfigClass()
        self._taskDict = dict()
        loggerName = self._fullName
        if log is not None and log.getName():
            loggerName = log.getName() + '.' + loggerName

    self.log = Log.getLogger(loggerName)
    self.config = config
    self._display = lsstDebug.Info(self.__module__).display
    self._taskDict[self._fullName] = self
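A small sketch of the hierarchical logger naming this constructor produces, assuming lsst.log is importable; a subtask's logger name is its parent's logger name plus the subtask name:

from lsst.log import Log

parent = Log.getLogger("addMult")                  # top-level task logger
child = Log.getLogger(parent.getName() + ".add")   # subtask logger
print(child.getName())                             # addMult.add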
Example 6: __init__
def __init__(self):
    # Set up defaults to send to deblender
    # Always deblend as Psf
    self.psfChisqCut1 = self.psfChisqCut2 = self.psfChisqCut2b = np.inf
    self.log = Log.getLogger('ip.diffim.DipoleDeblender')
    self.sigma2fwhm = 2. * np.sqrt(2. * np.log(2.))
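The sigma2fwhm constant above is the standard Gaussian conversion factor, FWHM = 2*sqrt(2*ln 2) * sigma, roughly 2.3548 * sigma; a quick standalone check using only numpy:

import numpy as np

sigma2fwhm = 2. * np.sqrt(2. * np.log(2.))   # ~2.3548
sigma = 2.0
print(sigma * sigma2fwhm)                    # ~4.71: FWHM for a 2-pixel sigma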
Example 7: _assignClusters
def _assignClusters(yvec, centers):
    """Return a vector of centerIds based on their distance to the centers"""
    assert len(centers) > 0

    minDist = numpy.nan*numpy.ones_like(yvec)
    clusterId = numpy.empty_like(yvec)
    clusterId.dtype = int  # zeros_like(..., dtype=int) isn't in numpy 1.5

    dbl = Log.getLogger("objectSizeStarSelector._assignClusters")
    dbl.setLevel(dbl.INFO)

    # Make sure we are logging all numpy warnings...
    oldSettings = numpy.seterr(all="warn")
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        for i, mean in enumerate(centers):
            dist = abs(yvec - mean)
            if i == 0:
                update = dist == dist  # True for all points
            else:
                update = dist < minDist

            if w:  # Only do if w is not empty i.e. contains a warning message
                dbl.trace(str(w[-1]))

            minDist[update] = dist[update]
            clusterId[update] = i
    numpy.seterr(**oldSettings)

    return clusterId
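A minimal sketch of how _assignClusters behaves on made-up data (assuming lsst.log is importable, since the function creates a logger internally); each point should be assigned the index of its nearest center:

import numpy

yvec = numpy.array([1.9, 2.1, 5.2, 4.8, 2.0])   # hypothetical widths
centers = [2.0, 5.0]
print(_assignClusters(yvec, centers))            # expected: [0 0 1 1 0]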
Example 8: testWriteCfg
def testWriteCfg(self):
    # The number of writers to use can result in too many open files.
    # We calculate this as 80% of the maximum allowed number of open files
    # for this process, or 1000, whichever is smaller.
    numWriters = 1000
    try:
        import resource
        limit = resource.getrlimit(resource.RLIMIT_NOFILE)
        allowedOpen = int(limit[0] * 0.8)
        if allowedOpen < numWriters:
            numWriters = allowedOpen
    except Exception:
        # Use the default number if we had trouble obtaining resources
        pass
    startTime = time.time()
    go = multiprocessing.Value('b', False)
    cfg = dp.RepositoryCfg(root=os.path.join(self.testDir), mapper='bar', mapperArgs={},
                           parents=None, policy=None)
    procs = [multiprocessing.Process(target=TestOneThousandWriters.writeCfg, args=(cfg, go))
             for x in range(numWriters)]
    for proc in procs:
        proc.start()
    go.value = True  # release the writers via the shared value
    for proc in procs:
        proc.join()
    endTime = time.time()
    log = Log.getLogger("daf.persistence")
    log.trace("TestOneThousandWriters took {} seconds.".format(endTime - startTime))
Example 9: __init__
def __init__(self, config):
    self.config = config
    self.log = Log.getLogger("ip.diffim.DiaSourceAnalysis")

    self.bitMask = 0
    srcBadMaskPlanes = self.config.srcBadMaskPlanes
    for maskPlane in srcBadMaskPlanes:
        self.bitMask |= afwImage.Mask.getPlaneBitMask(maskPlane)
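The bit mask above is built by OR-ing together the bit for each named mask plane; a standalone sketch of the same pattern, with hypothetical plane values in place of afwImage.Mask.getPlaneBitMask():

planeBits = {"BAD": 1 << 0, "SAT": 1 << 1, "EDGE": 1 << 2}   # assumed bit assignments
srcBadMaskPlanes = ["BAD", "SAT"]

bitMask = 0
for maskPlane in srcBadMaskPlanes:
    bitMask |= planeBits[maskPlane]
print(bin(bitMask))   # 0b11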
Example 10: testLog
def testLog(self):
    """Test the Task's logger
    """
    addMultTask = AddMultTask()
    self.assertEqual(addMultTask.log.getName(), "addMult")
    self.assertEqual(addMultTask.add.log.getName(), "addMult.add")

    log = Log.getLogger("tester")
    addMultTask = AddMultTask(log=log)
    self.assertEqual(addMultTask.log.getName(), "tester.addMult")
    self.assertEqual(addMultTask.add.log.getName(), "tester.addMult.add")
Example 11: testOverrides
def testOverrides(self):
    """Test config and log override
    """
    config = ExampleTask.ConfigClass()
    config.floatField = -99.9
    log = Log.getLogger("cmdLineTask")
    retVal = ExampleTask.parseAndRun(
        args=[DataPath, "--output", self.outPath, "--id", "visit=2"],
        config=config,
        log=log,
    )
    self.assertEqual(retVal.parsedCmd.config.floatField, -99.9)
    self.assertIs(retVal.parsedCmd.log, log)
Example 12: setUp
def setUp(self):
    # Load sample input from disk
    testDir = os.path.dirname(__file__)
    self.srcCat = afwTable.SourceCatalog.readFits(
        os.path.join(testDir, "data", "v695833-e0-c000.xy.fits"))

    self.srcCat["slot_ApFlux_instFluxErr"] = 1
    self.srcCat["slot_PsfFlux_instFluxErr"] = 1

    # The .xy.fits file has sources in the range ~ [0,2000],[0,4500],
    # which is bigger than the exposure
    self.bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(2048, 4612))
    smallExposure = afwImage.ExposureF(os.path.join(testDir, "data", "v695833-e0-c000-a00.sci.fits"))
    self.exposure = afwImage.ExposureF(self.bbox)
    self.exposure.setWcs(smallExposure.getWcs())
    self.exposure.setFilter(smallExposure.getFilter())
    self.exposure.setPhotoCalib(smallExposure.getPhotoCalib())

    coordKey = self.srcCat.getCoordKey()
    centroidKey = self.srcCat.getCentroidKey()
    wcs = self.exposure.getWcs()
    for src in self.srcCat:
        src.set(coordKey, wcs.pixelToSky(src.get(centroidKey)))

    # Make a reference loader
    butler = Butler(RefCatDir)
    self.refObjLoader = LoadIndexedReferenceObjectsTask(butler=butler)
    logLevel = Log.TRACE
    self.log = Log.getLogger('testPhotoCal')
    self.log.setLevel(logLevel)

    self.config = PhotoCalConfig()
    self.config.match.matchRadius = 0.5
    self.config.match.referenceSelection.doMagLimit = True
    self.config.match.referenceSelection.magLimit.maximum = 22.0
    self.config.match.referenceSelection.magLimit.fluxField = "i_flux"
    self.config.match.referenceSelection.doFlags = True
    self.config.match.referenceSelection.flags.good = ['photometric']
    self.config.match.referenceSelection.flags.bad = ['resolved']
    self.config.match.sourceSelection.doUnresolved = False  # Don't have star/galaxy in the srcCat

    # The test and associated data have been prepared on the basis that we
    # use the PsfFlux to perform photometry.
    self.config.fluxField = "base_PsfFlux_instFlux"
Example 13: makeDataRefList
def makeDataRefList(self, namespace):
    """Make self.refList from self.idList
    """
    if self.datasetType is None:
        raise RuntimeError("Must call setDatasetType first")
    log = Log.getLogger("meas.base.forcedPhotCcd.PerTractCcdDataIdContainer")
    skymap = None
    visitTract = collections.defaultdict(set)    # Set of tracts for each visit
    visitRefs = collections.defaultdict(list)    # List of data references for each visit
    for dataId in self.idList:
        if "tract" not in dataId:
            # Discover which tracts the data overlaps
            log.info("Reading WCS for components of dataId=%s to determine tracts", dict(dataId))
            if skymap is None:
                skymap = namespace.butler.get(namespace.config.coaddName + "Coadd_skyMap")

            for ref in namespace.butler.subset("calexp", dataId=dataId):
                if not ref.datasetExists("calexp"):
                    continue

                visit = ref.dataId["visit"]
                visitRefs[visit].append(ref)

                md = ref.get("calexp_md", immediate=True)
                wcs = lsst.afw.geom.makeSkyWcs(md)
                box = lsst.geom.Box2D(lsst.afw.image.bboxFromMetadata(md))
                # Going with just the nearest tract.  Since we're throwing all tracts for the visit
                # together, this shouldn't be a problem unless the tracts are much smaller than a CCD.
                tract = skymap.findTract(wcs.pixelToSky(box.getCenter()))
                if imageOverlapsTract(tract, wcs, box):
                    visitTract[visit].add(tract.getId())
        else:
            self.refList.extend(ref for ref in namespace.butler.subset(self.datasetType, dataId=dataId))

    # Ensure all components of a visit are kept together by putting them all in the same set of tracts
    for visit, tractSet in visitTract.items():
        for ref in visitRefs[visit]:
            for tract in tractSet:
                self.refList.append(namespace.butler.dataRef(datasetType=self.datasetType,
                                                             dataId=ref.dataId, tract=tract))

    if visitTract:
        tractCounter = collections.Counter()
        for tractSet in visitTract.values():
            tractCounter.update(tractSet)
        log.info("Number of visits for each tract: %s", dict(tractCounter))
Example 14: plot
def plot(mag, width, centers, clusterId, marker="o", markersize=2, markeredgewidth=0, ltype='-',
         magType="model", clear=True):
    log = Log.getLogger("objectSizeStarSelector.plot")
    try:
        import matplotlib.pyplot as plt
    except ImportError as e:
        log.warn("Unable to import matplotlib: %s", e)
        return

    try:
        fig
    except NameError:
        fig = plt.figure()
    else:
        if clear:
            fig.clf()

    axes = fig.add_axes((0.1, 0.1, 0.85, 0.80))

    xmin = sorted(mag)[int(0.05*len(mag))]
    xmax = sorted(mag)[int(0.95*len(mag))]

    axes.set_xlim(-17.5, -13)
    axes.set_xlim(xmin - 0.1*(xmax - xmin), xmax + 0.1*(xmax - xmin))
    axes.set_ylim(0, 10)

    colors = ["r", "g", "b", "c", "m", "k", ]
    for k, mean in enumerate(centers):
        if k == 0:
            axes.plot(axes.get_xlim(), (mean, mean,), "k%s" % ltype)

        li = (clusterId == k)
        axes.plot(mag[li], width[li], marker, markersize=markersize, markeredgewidth=markeredgewidth,
                  color=colors[k % len(colors)])

    li = (clusterId == -1)
    axes.plot(mag[li], width[li], marker, markersize=markersize, markeredgewidth=markeredgewidth,
              color='k')

    if clear:
        axes.set_xlabel("Instrumental %s mag" % magType)
        axes.set_ylabel(r"$\sqrt{(I_{xx} + I_{yy})/2}$")

    return fig
Example 15: __init__
def __init__(self, rerun=0, basedir='.', **kwargs):
    Mapper.__init__(self)
    print('TractorMapper(): ignoring kwargs', kwargs)
    self.basedir = basedir
    self.rerun = rerun
    self.log = Log.getLogger('TractorMapper')

    indir = os.path.join(self.basedir, 't%(visit)04i')
    outdir = os.path.join(indir, 'rr%(rerun)04i')

    self.filenames = {'outdir': (outdir, None, None),
                      'visitim': (os.path.join(indir, 't.fits'),  # 't_img.fits'), #img.fits'),
                                  'lsst.afw.image.ExposureF', 'ExposureF'),
                      'psf': (os.path.join(outdir, 'psf.boost'),
                              'lsst.afw.detection.Psf', 'Psf'),
                      'src': (os.path.join(outdir, 'src.boost'),
                              # dare to dream / keep dreaming
                              # os.path.join(outdir, 'src.fits'),
                              # htf did this work before?
                              # 'lsst.afw.detection.Source', 'Source'),
                              'lsst.afw.detection.PersistableSourceVector',
                              'PersistableSourceVector'),
                      'bb': (os.path.join(outdir, 'bb.pickle'),
                             None, None),
                      'pyfoots': (os.path.join(outdir, 'foots.pickle'),
                                  None, None),
                      'footprints': (os.path.join(outdir, 'foots.boost'),
                                     'lsst.afw.detection.FootprintList',
                                     'FootprintList'),
                      'truesrc': (os.path.join(indir, 'srcs.fits'),
                                  None, None),
                      }
    '''
    for datasetType in ["raw", "bias", "dark", "flat", "fringe",
                        "postISR", "postISRCCD", "sdqaAmp", "sdqaCcd",
                        "icSrc", "icMatch", "visitim", "psf", "apCorr", "calexp", "src",
                        "sourceHist", "badSourceHist", "source", "badSource",
                        "invalidSource", "object", "badObject"]:
    '''
    self.keys = ['visit', 'filter']
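The filename entries above are templates that get expanded later with %-style dictionary substitution; a standalone sketch of how such a template resolves (the data-id values are illustrative):

import os

indir = os.path.join('.', 't%(visit)04i')
outdir = os.path.join(indir, 'rr%(rerun)04i')
print(outdir % {'visit': 42, 'rerun': 0})   # ./t0042/rr0000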