This article collects typical usage examples of the Python function astrodata.adutils.logutils.get_logger. If you are unsure how get_logger is used in practice, what a realistic call looks like, or where to find working examples, the hand-picked code samples below should help.
A total of 15 code examples of the get_logger function are shown below, sorted by popularity by default.
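All 15 examples share the same basic pattern: obtain a module-level logger with logutils.get_logger(__name__) and then emit messages at the levels the Gemini recipe system understands. The minimal sketch below distills that pattern; the primitive name myPrimitive and the message strings are made up for illustration, and only logging calls that actually appear in the examples (debug, stdinfo, fullinfo, warning, error, status) are used.

from astrodata.adutils import logutils

# One logger per module, keyed by the module's name
log = logutils.get_logger(__name__)

def myPrimitive(self, rc):
    # Hypothetical primitive; real primitives are methods on a primitive
    # class and receive a reduction context (rc), as in the examples below
    log.debug("starting myPrimitive")              # developer-level tracing
    log.stdinfo("user-visible progress message")   # normal console output
    log.warning("something non-fatal happened")    # problems worth flagging
    yield rc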
Example 1: getProcessedFringe
def getProcessedFringe(self, rc):
    # Instantiate the log
    log = logutils.get_logger(__name__)

    caltype = "processed_fringe"
    source = rc["source"]
    if source is None:
        rc.run("getCalibration(caltype=%s)" % caltype)
    else:
        rc.run("getCalibration(caltype=%s, source=%s)" % (caltype, source))

    # List calibrations found.
    # Fringe correction is always optional, so don't raise errors if no
    # fringe frame is found
    first = True
    for ad in rc.get_inputs_as_astrodata():
        calurl = rc.get_cal(ad, caltype)  # get from cache
        if calurl:
            cal = AstroData(calurl)
            if cal.filename is not None:
                if first:
                    log.stdinfo("getCalibration: Results")
                    first = False
                log.stdinfo("   %s\n      for %s" % (cal.filename,
                                                     ad.filename))

    yield rc
Example 2: showCals
def showCals(self, rc):
    # Instantiate the log
    log = logutils.get_logger(__name__)

    if str(rc["showcals"]).lower() == "all":
        num = 0
        # print "pG256: showcals=all", repr(rc.calibrations)
        for calkey in rc.calibrations:
            num += 1
            log.stdinfo(rc.calibrations[calkey], category="calibrations")
        if num == 0:
            log.stdinfo("There are no calibrations in the cache.")
    else:
        for adr in rc.inputs:
            sid = IDFactory.generate_astro_data_id(adr.ad)
            num = 0
            for calkey in rc.calibrations:
                if sid in calkey:
                    num += 1
                    log.stdinfo(rc.calibrations[calkey],
                                category="calibrations")
            if num == 0:
                log.stdinfo("There are no calibrations in the cache.")

    yield rc
Example 3: separateLampOff
def separateLampOff(self, rc):
    """
    This primitive is intended to run on GCAL imaging flats.
    It goes through the input list and sorts out which frames are lamp-on
    and which are lamp-off.
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "separateLampOff", "starting"))

    # Initialize the lists of output AstroData objects
    lampon_list = []
    lampoff_list = []

    # Loop over the input frames
    for ad in rc.get_inputs_as_astrodata():
        if 'GCAL_IR_ON' in ad.types:
            log.stdinfo("%s is a lamp-on flat" % ad.data_label())
            #rc.run("addToList(purpose=lampOn)")
            lampon_list.append(ad)
        elif 'GCAL_IR_OFF' in ad.types:
            log.stdinfo("%s is a lamp-off flat" % ad.data_label())
            #rc.run("addToList(purpose=lampOff)")
            lampoff_list.append(ad)
        else:
            log.warning("Not a GCAL flatfield? Cannot tell whether %s is "
                        "lamp-on or lamp-off" % ad.data_label())

    rc.report_output(lampon_list, stream="lampOn")
    rc.report_output(lampoff_list, stream="lampOff")

    yield rc
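The stream splitting in Example 3 boils down to membership tests against each frame's type list. Below is a standalone sketch of just that logic, using plain dictionaries as stand-ins for AstroData objects; the labels and the non-GCAL type name are illustrative only.

# Stand-ins for AstroData inputs; in the primitive, ad.types carries
# Gemini type names such as 'GCAL_IR_ON' and 'GCAL_IR_OFF'.
frames = [
    {"label": "flat_lamp_on",  "types": ["GCAL_IR_ON"]},
    {"label": "flat_lamp_off", "types": ["GCAL_IR_OFF"]},
    {"label": "science_frame", "types": ["NIRI_IMAGE"]},
]

lampon_list = [f for f in frames if "GCAL_IR_ON" in f["types"]]
lampoff_list = [f for f in frames if "GCAL_IR_OFF" in f["types"]]
leftover = [f for f in frames
            if f not in lampon_list and f not in lampoff_list]

print([f["label"] for f in lampon_list])   # lamp-on frames
print([f["label"] for f in lampoff_list])  # lamp-off frames
print([f["label"] for f in leftover])      # neither: the primitive warns here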
Example 4: failCalibration
def failCalibration(self, rc):
    # Mark a given calibration "fail" and upload it to the system.
    # This is intended to be used to mark a calibration file that has
    # already been uploaded, so that it will not be returned as a valid
    # match for future data.

    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Initialize the list of output AstroData objects
    adoutput_list = []

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        # Change the two keywords -- BAD and NO = Fail
        ad.phu_set_key_value("RAWGEMQA", "BAD",
                             comment=self.keyword_comments["RAWGEMQA"])
        ad.phu_set_key_value("RAWPIREQ", "NO",
                             comment=self.keyword_comments["RAWPIREQ"])
        log.fullinfo("%s has been marked %s" % (ad.filename, ad.qa_state()))

        # Append the output AstroData object to the list of output
        # AstroData objects
        adoutput_list.append(ad)

    # Report the list of output AstroData objects to the reduction context
    rc.report_output(adoutput_list)

    # Run the storeCalibration primitive, so that the failed file gets
    # re-uploaded
    rc.run("storeCalibration")

    yield rc
Example 5: storeProcessedFlat
def storeProcessedFlat(self, rc):
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "storeProcessedFlat",
                             "starting"))

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        # Update the file name with the suffix for this primitive and
        # then report the new file to the reduction context
        ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                          strip=True)

        # Add a PROCFLAT time stamp to the PHU
        gt.mark_history(adinput=ad, keyword="PROCFLAT")

        # Refresh the AD types to reflect the new processed status
        ad.refresh_types()

    # Upload to the calibration system
    rc.run("storeCalibration")

    yield rc
Example 6: subtractLampOnLampOff
def subtractLampOnLampOff(self, rc):
    """
    This primitive subtracts the lamp-off stack from the lamp-on stack.
    It expects there to be only one file (the stack) on each stream;
    call stackLampOnLampOff to do the stacking before calling this.
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "subtractLampOnLampOff",
                             "starting"))

    # Initialize the list of output AstroData objects
    adoutput_list = []

    lampon = rc.get_stream(stream="lampOn", style="AD")[0]
    lampoff = rc.get_stream(stream="lampOff", style="AD")[0]

    log.stdinfo("Lamp ON is: %s %s" % (lampon.data_label(), lampon.filename))
    log.stdinfo("Lamp OFF is: %s %s" % (lampoff.data_label(), lampoff.filename))

    lampon.sub(lampoff)
    lampon.filename = gt.filename_updater(adinput=lampon, suffix="lampOnOff")
    adoutput_list.append(lampon)
    rc.report_output(adoutput_list)

    yield rc
Example 7: getProcessedArc
def getProcessedArc(self, rc):
    # Instantiate the log
    log = logutils.get_logger(__name__)

    caltype = "processed_arc"
    source = rc["source"]
    if source is None:
        rc.run("getCalibration(caltype=%s)" % caltype)
    else:
        rc.run("getCalibration(caltype=%s, source=%s)" % (caltype, source))

    # List calibrations found
    first = True
    for ad in rc.get_inputs_as_astrodata():
        calurl = rc.get_cal(ad, caltype)  # get from cache
        if calurl:
            cal = AstroData(calurl)
            if cal.filename is None:
                if "qa" not in rc.context:
                    raise Errors.InputError("Calibration not found for %s" %
                                            ad.filename)
            else:
                if first:
                    log.stdinfo("getCalibration: Results")
                    first = False
                log.stdinfo("   %s\n      for %s" % (cal.filename,
                                                     ad.filename))
        else:
            if "qa" not in rc.context:
                raise Errors.InputError("Calibration not found for %s" %
                                        ad.filename)

    yield rc
Example 8: storeCalibration
def storeCalibration(self, rc):
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "storeCalibration", "starting"))

    # Determine the path where the calibration will be stored
    storedcals = rc["cachedict"]["storedcals"]

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        # Construct the filename of the calibration, including the path
        fname = os.path.join(storedcals, os.path.basename(ad.filename))

        # Write the calibration to disk. Use rename=False so that
        # ad.filename does not change (i.e., does not include the
        # calibration path)
        ad.write(filename=fname, rename=False, clobber=True)
        log.stdinfo("Calibration stored as %s" % fname)

        if "upload" in rc.context:
            try:
                upload_calibration(fname)
            except:
                log.warning("Unable to upload file to calibration system")
            else:
                log.stdinfo("File %s uploaded to fitsstore." %
                            os.path.basename(ad.filename))

    yield rc
Example 9: normalizeFlat
def normalizeFlat(self, rc):
    """
    This primitive normalizes each science extension of the input
    AstroData object by its mean.
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "normalizeFlat", "starting"))

    # Define the keyword to be used for the time stamp for this primitive
    timestamp_key = self.timestamp_keys["normalizeFlat"]

    # Initialize the list of output AstroData objects
    adoutput_list = []

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        # Check whether the normalizeFlat primitive has been run previously
        if ad.phu_get_key_value(timestamp_key):
            log.warning("No changes will be made to %s, since it has "
                        "already been processed by normalizeFlat"
                        % ad.filename)

            # Append the input AstroData object to the list of output
            # AstroData objects without further processing
            adoutput_list.append(ad)
            continue

        # Loop over each science extension in the input AstroData object
        for ext in ad[SCI]:
            # Normalize the science extension: calculate its mean value
            mean = np.mean(ext.data, dtype=np.float64)

            # Divide the science extension by its mean value
            log.fullinfo("Normalizing %s[%s,%d] by dividing by the mean "
                         "= %f" % (ad.filename, ext.extname(),
                                   ext.extver(), mean))
            ext = ext.div(mean)

        # Add the appropriate time stamp to the PHU
        gt.mark_history(adinput=ad, keyword=timestamp_key)

        # Change the filename
        ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                          strip=True)

        # Append the output AstroData object to the list of output
        # AstroData objects
        adoutput_list.append(ad)

    # Report the list of output AstroData objects to the reduction context
    rc.report_output(adoutput_list)

    yield rc
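Stripped of the AstroData bookkeeping, the arithmetic in Example 9 is a single division by the extension mean. Here is a numpy-only sketch of that step; the array values are made up for illustration.

import numpy as np

# Toy "science extension" data; real flats are full detector arrays
data = np.array([[2.0, 4.0],
                 [6.0, 8.0]])

mean = np.mean(data, dtype=np.float64)   # the np.mean call from Example 9
normalized = data / mean                 # the ext.div(mean) step

print(mean)                # 5.0
print(normalized.mean())   # 1.0 -- a normalized flat averages to unity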
Example 10: cutFootprints
def cutFootprints(self, rc):
    """
    This primitive creates and appends multiple HDUs to the output AD
    object. Each HDU corresponds to a rectangular cut containing a slit
    from a MOS flat exposure, or from a cross-dispersed (XD) flat
    exposure as in the GNIRS case.

    :param logLevel: Verbosity setting for log messages to the screen.
    :type logLevel: integer from 0-6, 0=nothing to screen, 6=everything to
                    screen, OR the message level as a string (i.e.,
                    'critical', 'status', 'fullinfo', ...)
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "cutFootprints", "starting"))

    # Initialize the list of output AstroData objects
    adoutput_list = []

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        # Check that the input ad has the TRACEFP extension;
        # otherwise, create it
        if ad['TRACEFP'] is None:
            ad = trace_footprints(ad)

        log.stdinfo("Cutting footprints for: %s" % ad.filename)

        # Call the user level function
        try:
            adout = cut_footprints(ad)
        except:
            log.error("Error in cut_footprints with file: %s" % ad.filename)
            # Do NOT add this input ad to adoutput_list
            continue

        # Change the filename
        adout.filename = gt.filename_updater(adinput=ad,
                                             suffix=rc["suffix"],
                                             strip=True)

        # Append the output AstroData object to the list of output
        # AstroData objects
        adoutput_list.append(adout)

    # Report the list of output AstroData objects to the reduction context
    rc.report_output(adoutput_list)

    yield rc
Example 11: traceFootprints
def traceFootprints(self, rc):
    """
    This primitive creates and appends a 'TRACEFP' binary table HDU to
    the AD object. The content of this HDU is the footprint information
    from the spectroscopic flat in the SCI array.

    :param logLevel: Verbosity setting for log messages to the screen.
    :type logLevel: integer from 0-6, 0=nothing to screen, 6=everything to
                    screen, OR the message level as a string (i.e.,
                    'critical', 'status', 'fullinfo', ...)
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "traceFootprints", "starting"))

    # Initialize the list of output AstroData objects
    adoutput_list = []

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        # Check whether this primitive has been run previously
        if ad.phu_get_key_value("TRACEFP"):
            log.warning("%s has already been processed by traceFootprints"
                        % ad.filename)

            # Append the input AstroData object to the list of output
            # AstroData objects without further processing
            adoutput_list.append(ad)
            continue

        # Call the user level function
        try:
            adout = trace_footprints(ad, function=rc["function"],
                                     order=rc["order"],
                                     trace_threshold=rc["trace_threshold"])
        except:
            log.warning("Error in traceFootprints with file: %s" % ad.filename)
            # Skip this input; adout is not defined if the call failed
            continue

        # Change the filename
        adout.filename = gt.filename_updater(adinput=ad,
                                             suffix=rc["suffix"],
                                             strip=True)

        # Append the output AstroData object to the list of output
        # AstroData objects
        adoutput_list.append(adout)

    # Report the list of output AstroData objects to the reduction context
    rc.report_output(adoutput_list)

    yield rc
Example 12: wcalResampleToLinearCoords
def wcalResampleToLinearCoords(self, rc):
    """ Uses the Wavecal fit_image solution.
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Define the keyword to be used for the time stamp
    timestamp_key = self.timestamp_keys["wcalResampleToLinearCoords"]

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "wcalResampleToLinearCoords",
                             "starting"))

    # Initialize the list of output AstroData objects
    adoutput_list = []

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        # Check for a wavelength solution
        if ad["WAVECAL"] is None:
            if "qa" in rc.context:
                log.warning("No wavelength solution found for %s" %
                            ad.filename)
                adout = ad   # Don't do anything
            else:
                raise Errors.InputError("No wavelength solution found "
                                        "for %s" % ad.filename)
        else:
            # Wavelength solution found
            wc = Wavecal(ad)
            wc.read_wavecal_table()
            adout = wc.resample_image_asAstrodata()

        # Add the appropriate time stamp to the PHU
        gt.mark_history(adinput=adout, keyword=timestamp_key)

        # Change the filename
        adout.filename = gt.filename_updater(
            adinput=adout, suffix=rc["suffix"], strip=True)

        # Append the output AstroData object to the list of output
        # AstroData objects
        adoutput_list.append(adout)

    # Report the list of output AstroData objects to the reduction context
    rc.report_output(adoutput_list)

    yield rc
Example 13: validateData
def validateData(self, rc):
    """
    This primitive is used to validate NIRI data, specifically.

    :param repair: Set to True to repair the data, if necessary. Note:
                   this feature does not work yet.
    :type repair: Python boolean
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "validateData", "starting"))

    # Define the keyword to be used for the time stamp for this primitive
    timestamp_key = self.timestamp_keys["validateData"]

    # Initialize the list of output AstroData objects
    adoutput_list = []

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        # Check whether the validateData primitive has been run previously
        if ad.phu_get_key_value(timestamp_key):
            log.warning("No changes will be made to %s, since it has "
                        "already been processed by validateData"
                        % ad.filename)

            # Append the input AstroData object to the list of output
            # AstroData objects without further processing
            adoutput_list.append(ad)
            continue

        # Validate the input AstroData object
        log.status("No validation required for %s" % ad.filename)

        # Add the appropriate time stamp to the PHU
        gt.mark_history(adinput=ad, keyword=timestamp_key)

        # Change the filename
        ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                          strip=True)

        # Append the output AstroData object to the list of output
        # AstroData objects
        adoutput_list.append(ad)

    # Report the list of output AstroData objects to the reduction context
    rc.report_output(adoutput_list)

    yield rc
Example 14: skyCorrectFromSlit
def skyCorrectFromSlit(self, rc):
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Define the keyword to be used for the time stamp
    timestamp_key = self.timestamp_keys["skyCorrectFromSlit"]

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "skyCorrectFromSlit", "starting"))

    # Initialize the list of output AstroData objects
    adoutput_list = []

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        try:
            xbin = ad.detector_x_bin().as_pytype()
            ybin = ad.detector_y_bin().as_pytype()
            bin_factor = xbin * ybin
            roi = ad.detector_roi_setting().as_pytype()
        except:
            bin_factor = 1
            roi = "unknown"

        if bin_factor <= 2 and roi == "Full Frame" and "qa" in rc.context:
            log.warning("Frame is too large to subtract sky efficiently; "
                        "not subtracting sky for %s" % ad.filename)
            adoutput_list.append(ad)
            continue

        # Instantiate the ETI and then run the task
        gsskysub_task = eti.gsskysubeti.GsskysubETI(rc, ad)
        adout = gsskysub_task.run()

        # Add the appropriate time stamp to the PHU
        gt.mark_history(adinput=adout, keyword=timestamp_key)

        # Change the filename
        adout.filename = gt.filename_updater(
            adinput=adout, suffix=rc["suffix"], strip=True)

        # Append the output AstroData object to the list of output
        # AstroData objects
        adoutput_list.append(adout)

    # Report the list of output AstroData objects to the reduction context
    rc.report_output(adoutput_list)

    yield rc
Example 15: determineWavelengthSolution
def determineWavelengthSolution(self, rc):
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Define the keyword to be used for the time stamp
    timestamp_key = self.timestamp_keys["determineWavelengthSolution"]

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "determineWavelengthSolution",
                             "starting"))

    # Initialize the list of output AstroData objects
    adoutput_list = []

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():
        # Instantiate the ETI and then run the task.
        # Run in a try/except because gswavelength sometimes fails badly,
        # and we want to be able to continue without a wavelength
        # calibration in the QA case.
        gswavelength_task = eti.gswavelengtheti.GswavelengthETI(rc, ad)
        try:
            adout = gswavelength_task.run()
        except Errors.OutputError:
            gswavelength_task.clean()
            if "qa" in rc.context:
                log.warning("gswavelength failed for input " + ad.filename)
                adoutput_list.append(ad)
                continue
            else:
                raise Errors.ScienceError("gswavelength failed for input " +
                                          ad.filename +
                                          ". Try interactive=True")

        # Add the appropriate time stamp to the PHU
        gt.mark_history(adinput=adout, keyword=timestamp_key)

        # Change the filename
        adout.filename = gt.filename_updater(
            adinput=adout, suffix=rc["suffix"], strip=True)

        # Append the output AstroData object to the list of output
        # AstroData objects
        adoutput_list.append(adout)

    # Report the list of output AstroData objects to the reduction context
    rc.report_output(adoutput_list)

    yield rc