This article collects typical code examples showing how the Python function starutil.msg_out is used. If you have been asking yourself what msg_out does, how to call it, or what real uses of msg_out look like, the curated examples below may help.
Fifteen msg_out code examples are presented below, ordered by popularity.
Example 1: cleanup

def cleanup():
    global retain
    ParSys.cleanup()
    if retain:
        msg_out( "Retaining temporary files in {0}".format(NDG.tempdir))
    else:
        NDG.cleanup()
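Cleanup functions like this are typically driven from the script's exit path, so that temporary NDFs are removed even when the script fails. Below is a minimal sketch of that wiring, not taken from any one script: it assumes Starlink's starutil module is importable and that "retain" would normally come from a RETAIN script parameter.

import starutil
from starutil import msg_out, NDG, ParSys

retain = False   # placeholder; normally parsys["RETAIN"].value

def cleanup():
    global retain
    ParSys.cleanup()
    if retain:
        msg_out( "Retaining temporary files in {0}".format(NDG.tempdir))
    else:
        NDG.cleanup()

try:
    pass   # ... main body of the script would go here ...
finally:
    cleanup()   # runs however the script terminates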
Example 2: cleanup

def cleanup():
    global retain
    try:
        starutil.ParSys.cleanup()
        if retain:
            msg_out( "Retaining temporary files in {0}".format(NDG.tempdir))
        else:
            NDG.cleanup()
    except:
        pass
Example 3: run_calcqu

def run_calcqu(input_data,config,harmonic):
    # The following call to SMURF:CALCQU creates two HDS container files -
    # one holding a set of Q NDFs and the other holding a set of U NDFs. Create
    # these container files in the NDG temporary directory.
    qcont = NDG(1)
    qcont.comment = "qcont"
    ucont = NDG(1)
    ucont.comment = "ucont"
    msg_out( "Calculating Q and U values for each bolometer...")
    invoke("$SMURF_DIR/calcqu in={0} config=\"{1}\" lsqfit=no outq={2} outu={3} "
           "harmonic={4} fix".format(input_data,starutil.shell_quote(config),
                                     qcont,ucont,harmonic) )
    return (qcont,ucont)
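A hypothetical invocation might look like the following; the raw-data group expression, config file and harmonic value are placeholders rather than values from the original script.

# Hypothetical usage sketch: the file pattern, config and harmonic are
# placeholders, not values from the original script.
rawdata = NDG( "raw*.sdf" )      # group of raw POL2 time-series NDFs
qcont, ucont = run_calcqu( rawdata, "^calcqu.conf", 4 )
msg_out( "Q NDFs in {0}; U NDFs in {1}".format( qcont, ucont ) )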
Example 4: cleanup

def cleanup():
    global retain, new_ext_ndfs, new_lut_ndfs, new_noi_ndfs
    try:
        starutil.ParSys.cleanup()
        if retain:
            msg_out( "Retaining EXT, LUT and NOI models in {0} and temporary files in {1}".format(os.getcwd(),NDG.tempdir))
        else:
            NDG.cleanup()
            for ext in new_ext_ndfs:
                os.remove( ext )
            for lut in new_lut_ndfs:
                os.remove( lut )
            for noi in new_noi_ndfs:
                os.remove( noi )
            for res in qua:
                os.remove( res )
    except:
        pass
Example 5: force_flat

def force_flat( ins, masks ):
    """
    Forces the background regions to be flat in a set of Q or U images.

    Invocation:
        result = force_flat( ins, masks )

    Arguments:
        ins = NDG
           An NDG object specifying a group of Q or U images from which
           any low frequency background structure is to be removed.
        masks = NDG
           An NDG object specifying a corresponding group of Q or U images
           in which source pixels are bad. These are only used to mask the
           images specified by "ins". It should have the same size as "ins".

    Returned Value:
        A new NDG object containing the group of corrected Q or U images.
    """

    # How many NDFs are we processing?
    nndf = len( ins )

    # Blank out sources by copying the bad pixels from "masks" into "ins".
    msg_out( " masking...")
    qm = NDG( ins )
    invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(ins,masks,qm) )

    # Smooth the blanked NDFs using a 3 pixel Gaussian. Set wlim so that
    # small holes are filled in by the smoothing process.
    msg_out( " smoothing...")
    qs = NDG( ins )
    invoke( "$KAPPA_DIR/gausmooth in={0} out={1} fwhm=3 wlim=0.5".format(qm,qs) )

    # Fill remaining big holes using artificial data.
    msg_out( " filling...")
    qf = NDG( ins )
    invoke( "$KAPPA_DIR/fillbad in={0} out={1} niter=10 size=10 variance=no".format(qs,qf) )

    # Subtract the filled low frequency data from the original to create the
    # returned images.
    msg_out( " removing low frequency background structure...")
    result = NDG( ins )
    invoke( "$KAPPA_DIR/sub in1={0} in2={1} out={2}".format(ins,qf,result) )

    return result
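Usage is symmetric in Q and U. A hypothetical call on a group of Q images and their matching source masks (both names assumed, not from the original):

# Hypothetical usage sketch: "qin" and "qmask" are assumed NDG groups of
# Q images and matching images in which source pixels have been set bad.
qflat = force_flat( qin, qmask )
msg_out( "{0} flattened Q images created.".format( len(qflat) ) )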
Example 6: int

# See if temp files are to be retained.
retain = parsys["RETAIN"].value

# See if statistical debiasing is to be performed.
debias = parsys["DEBIAS"].value

# See if we should convert pW to Jy.
jy = parsys["JY"].value

# Determine the waveband and get the corresponding FCF values with and
# without POL2 in the beam.
try:
    filter = int( float( starutil.get_fits_header( qin[0], "FILTER", True )))
except NoValueError:
    filter = 850
    msg_out( "No value found for FITS header 'FILTER' in {0} - assuming 850".format(qin[0]))

if filter == 450:
    fcf1 = 962.0
    fcf2 = 491.0
elif filter == 850:
    fcf1 = 725.0
    fcf2 = 537.0
else:
    raise starutil.InvalidParameterError("Invalid FILTER header value "
                                         "'{0}' found in {1}.".format( filter, qin[0] ) )

# Remove any spectral axes.
qtrim = NDG(qin)
invoke( "$KAPPA_DIR/ndfcopy in={0} out={1} trim=yes".format(qin,qtrim) )
utrim = NDG(uin)
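The if/elif ladder above is a two-entry lookup; written as a dictionary it keeps the FCF pairs in one place. A sketch using only values already present in the example:

# Sketch: table-driven form of the FCF selection above. The numbers are
# the same as in the example; any other waveband raises the same error.
FCFS = { 450: (962.0, 491.0), 850: (725.0, 537.0) }
try:
    fcf1, fcf2 = FCFS[ filter ]
except KeyError:
    raise starutil.InvalidParameterError("Invalid FILTER header value "
                                         "'{0}' found in {1}.".format( filter, qin[0] ) )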
Example 7: NDG

# See if temp files are to be retained.
retain = parsys["RETAIN"].value

# The following call to SMURF:CALCQU creates two HDS container files -
# one holding a set of Q NDFs and the other holding a set of U NDFs. Create
# these container files in the NDG temporary directory.
qcont = NDG(1)
qcont.comment = "qcont"
ucont = NDG(1)
ucont.comment = "ucont"

# Create a set of Q images and a set of U images. These are put into the HDS
# container files "q_TMP.sdf" and "u_TMP.sdf". Each image contains Q or U
# values derived from a short section of raw data during which each bolometer
# moves less than half a pixel.
msg_out( "Calculating Q and U values for each bolometer...")
invoke("$SMURF_DIR/calcqu in={0} config={1} outq={2} outu={3} fix".
       format(indata,config,qcont,ucont) )

# Remove spikes from the Q and U images. The cleaned NDFs are written to
# temporary NDFs specified by two new NDG objects "qff" and "uff", which
# inherit their size from the existing groups "qcont" and "ucont".
msg_out( "Removing spikes from bolometer Q and U values...")
qff = NDG(qcont)
qff.comment = "qff"
uff = NDG(ucont)
uff.comment = "uff"
invoke( "$KAPPA_DIR/ffclean in={0} out={1} box=3 clip=\[2,2,2\]"
        .format(qcont,qff) )
invoke( "$KAPPA_DIR/ffclean in={0} out={1} box=3 clip=\[2,2,2\]"
        .format(ucont,uff) )
Example 8: remove_corr

def remove_corr( ins, masks ):
    """
    Masks the supplied set of Q or U images and then looks for and removes
    correlated components in the background regions.

    Invocation:
        result = remove_corr( ins, masks )

    Arguments:
        ins = NDG
           An NDG object specifying a group of Q or U images from which
           correlated background components are to be removed.
        masks = NDG
           An NDG object specifying a corresponding group of Q or U images
           in which source pixels are bad. These are only used to mask the
           images specified by "ins". It should have the same size as "ins".

    Returned Value:
        A new NDG object containing the group of corrected Q or U images.
    """

    # How many NDFs are we processing?
    nndf = len( ins )

    # Blank out sources by copying the bad pixels from "masks" into "ins". We
    # refer to "q" below, but the same applies whether processing Q or U.
    msg_out( " masking...")
    qm = NDG( ins )
    invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(ins,masks,qm) )

    # Find the most correlated pair of images. We use the basic correlation
    # coefficient calculated by kappa:scatter for this.
    msg_out( " Finding most correlated pair of images...")
    cmax = 0
    for i in range(0,nndf-1):
        for j in range(i + 1,nndf):
            invoke( "$KAPPA_DIR/scatter in1={0} in2={1} device=!".format(qm[i],qm[j]) )
            c = starutil.get_task_par( "corr", "scatter" )
            if abs(c) > abs(cmax):
                cmax = c
                cati = i
                catj = j

    if abs(cmax) < 0.3:
        msg_out(" No correlated images found!")
        return ins

    msg_out( " Correlation for best pair of images = {0}".format( cmax ) )

    # Find images that are reasonably correlated to the pair found above,
    # and coadd them to form a model for the correlated background
    # component. Note, the holes left by the masking are filled in by the
    # coaddition using background data from other images.
    msg_out( " Forming model...")

    # Form the average of the two most correlated images, first normalising
    # them to a common scale so that they both have equal weight.
    norm = "{0}/norm".format(NDG.tempdir)
    if not normer( qm[cati], qm[catj], 0.3, norm ):
        norm = qm[cati]

    mslist = NDG( [ qm[catj], norm ] )
    ave = "{0}/ave".format(NDG.tempdir)
    invoke( "$CCDPACK_DIR/makemos in={0} method=mean genvar=no usevar=no out={1}".format(mslist,ave) )

    # Loop round each image finding the correlation factor of the image and
    # the above average image.
    temp = "{0}/temp".format(NDG.tempdir)
    nlist = []
    ii = 0
    for i in range(0,nndf):
        c = blanker( qm[i], ave, temp )

        # If the correlation is high enough, normalize the image to the average
        # image and then include the normalised image in the list of images to be
        # coadded to form the final model.
        if abs(c) > 0.3:
            tndf = "{0}/t{1}".format(NDG.tempdir,ii)
            ii += 1
            invoke( "$KAPPA_DIR/normalize in1={1} in2={2} out={0} device=!".format(tndf,temp,ave))
            nlist.append( tndf )

    if ii == 0:
        msg_out(" No secondary correlated images found!")
        return ins

    msg_out(" Including {0} secondary correlated images in the model.".format(ii) )

    # Coadd the images created above to form the model of the correlated
    # background component. Fill any remaining bad pixels with artificial data.
    model = "{0}/model".format(NDG.tempdir)
    included = NDG( nlist )
    invoke( "$CCDPACK_DIR/makemos in={0} method=mean usevar=no genvar=no out={1}".format( included, temp ) )
    invoke( "$KAPPA_DIR/fillbad in={1} variance=no out={0} size=10 niter=10".format(model,temp) )

    # Now estimate how much of the model is present in each image and remove it.
    msg_out(" Removing model...")
    temp2 = "{0}/temp2".format(NDG.tempdir)
    # ... (remainder of this example omitted) ...
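The double loop above invokes kappa:scatter once per image pair just to obtain a correlation coefficient. For intuition, here is a self-contained numpy sketch of the same pair search, under the assumption that "images" is a list of equal-shaped 2-D arrays with NaN marking masked (bad) pixels; it is not part of the original script.

# Standalone numpy sketch of the "most correlated pair" search performed
# by the kappa:scatter loop above.
import numpy

def most_correlated_pair( images ):
    n = len( images )
    cmax, cati, catj = 0.0, -1, -1
    for i in range( 0, n - 1 ):
        for j in range( i + 1, n ):
            a = images[ i ].ravel()
            b = images[ j ].ravel()

            # Use only pixels that are good in both images.
            good = numpy.isfinite( a ) & numpy.isfinite( b )
            c = numpy.corrcoef( a[ good ], b[ good ] )[ 0, 1 ]
            if abs( c ) > abs( cmax ):
                cmax, cati, catj = c, i, j
    return ( cmax, cati, catj )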
Example 9: in

# Do not use more com files for each sub-array than are needed.
remlist = []
for subarr in ( "s8a", "s8b", "s8c", "s8d", "s4a", "s4b", "s4c", "s4d" ):
    nin = 0
    for ndf in indata:
        if subarr in ndf:
            nin += 1
    ncom = 0
    for ndf in incom:
        if subarr in ndf:
            ncom += 1
            if ncom > nin:
                remlist.append( ndf )

msg_out("Ignoring {0} surplus files in INCOM".format(len(remlist) ))
for ndf in remlist:
    incom.remove( ndf )

# See if new artificial I, Q and U maps are to be created.
newart = parsys["NEWART"].value

# If not, set the ART parameters to indicate that the specified NDFs
# must already exist.
if not newart:
    parsys["ARTI"].exists = True
    parsys["ARTQ"].exists = True
    parsys["ARTU"].exists = True
else:
    parsys["ARTI"].exists = False
    parsys["ARTQ"].exists = False
Example 10: get_filtered_skydip_data

def get_filtered_skydip_data(qarray,uarray,clip,a):
    """
    This function takes Q and U array data (output from calcqu), applies
    ffclean to remove spikes, and returns the results in numpy arrays.
    It borrows (copies) heavily from pol2cat.py (2015A).

    Invocation:
        ( qdata_total,qvar_total,udata_total,uvar_total,elevation,
          opacity_term,bad_pixel_ref ) = get_filtered_skydip_data(qarray,uarray,clip,a)

    Arguments:
        qarray = An NDF of Q array data (output from calcqu).
        uarray = An NDF of U array data (output from calcqu).
        clip = The sigma cut for ffclean.
        a = A string indicating the array (e.g. 'S8A').

    Returned Value:
        qdata_total = A numpy array with the cleaned qarray data.
        qvar_total = A numpy array with the qarray variance data.
        udata_total = A numpy array with the cleaned uarray data.
        uvar_total = A numpy array with the uarray variance data.
        elevation = A numpy array with the elevation data.
        opacity_term = A numpy array with the opacity brightness term
            (1-exp(-tau*air_mass)), where tau is calculated from the WVM data.
        bad_pixel_ref = An NDG giving a bad-pixel reference (the Q data
            masked by the U bad pixels).
    """

    # Remove spikes from the Q images for the current subarray. The cleaned NDFs
    # are written to temporary NDFs specified by the new NDG object "qff", which
    # inherits its size from the existing group "qarray".
    msg_out( "Removing spikes from {0} bolometer Q values...".format(a))
    qff = NDG(qarray)
    qff.comment = "qff"
    invoke( "$KAPPA_DIR/ffclean in={0} out={1} genvar=yes box=3 clip=\[{2}\]".format(qarray,qff,clip) )

    # Remove spikes from the U images for the current subarray. The cleaned NDFs
    # are written to temporary NDFs specified by the new NDG object "uff", which
    # inherits its size from the existing group "uarray".
    msg_out( "Removing spikes from {0} bolometer U values...".format(a))
    uff = NDG(uarray)
    uff.comment = "uff"
    invoke( "$KAPPA_DIR/ffclean in={0} out={1} genvar=yes box=3 clip=\[{2}\]"
            .format(uarray,uff,clip) )

    elevation = []
    opacity_term = []
    for stare in range(len(qff[:])):

        # Stack Q data in numpy array.
        # Get elevation information.
        elevation.append(numpy.array( float( invoke( "$KAPPA_DIR/fitsmod ndf={0} edit=print keyword=ELSTART".format( qff[ stare ] ) ) ) ) )

        # Get tau (opacity) information.
        tau_temp = numpy.array( float( invoke( "$KAPPA_DIR/fitsmod ndf={0} edit=print keyword=WVMTAUST".format( qff[ stare ] ) ) ) )

        # Convert to the observing band.
        if '4' in a:
            tau_temp = 19.04*(tau_temp-0.018) # Eq. from Dempsey et al.
        elif '8' in a:
            tau_temp = 5.36*(tau_temp-0.006)  # Eq. from Dempsey et al.
        opacity_term.append(1-numpy.exp(-1*tau_temp/numpy.sin(numpy.radians(elevation[-1]))))

        invoke( "$KAPPA_DIR/ndftrace {0} quiet".format(qff[ stare ]))
        nx = get_task_par( "dims(1)", "ndftrace" )
        ny = get_task_par( "dims(2)", "ndftrace" )

        qdata_temp = numpy.reshape( Ndf( qff[ stare ] ).data, (ny,nx))
        qdata_temp[numpy.abs(qdata_temp)>1e300] = numpy.nan
        if stare == 0:
            qdata_total = qdata_temp
        else:
            qdata_total = numpy.dstack((qdata_total,qdata_temp))

        qvar_temp = numpy.reshape( Ndf( qff[ stare ] ).var, (ny,nx))
        qvar_temp[numpy.abs(qvar_temp)>1e300] = numpy.nan
        if stare == 0:
            qvar_total = qvar_temp
        else:
            qvar_total = numpy.dstack((qvar_total,qvar_temp))

        # Stack U data in numpy array.
        invoke( "$KAPPA_DIR/ndftrace {0} quiet".format(uff[ stare ]))
        nx = get_task_par( "dims(1)", "ndftrace" )
        ny = get_task_par( "dims(2)", "ndftrace" )

        udata_temp = numpy.reshape( Ndf( uff[ stare ] ).data, (ny,nx))
        udata_temp[numpy.abs(udata_temp)>1e300] = numpy.nan
        if stare == 0:
            udata_total = udata_temp
        else:
            udata_total = numpy.dstack((udata_total,udata_temp))

        uvar_temp = numpy.reshape( Ndf( uff[ stare ] ).var, (ny,nx))
        uvar_temp[numpy.abs(uvar_temp)>1e300] = numpy.nan
        if stare == 0:
            uvar_total = uvar_temp
        else:
            uvar_total = numpy.dstack((uvar_total,uvar_temp))

    # Create bad pixel reference.
    bad_pixel_ref = NDG(1)
    invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(qff,uff,bad_pixel_ref))

    return( qdata_total,qvar_total,udata_total,uvar_total,elevation,opacity_term,bad_pixel_ref )
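The opacity handling inside the loop can be summarised on its own: the 225 GHz WVM tau is scaled to the observing band with the Dempsey et al. relations quoted in the code, and the opacity brightness term is 1-exp(-tau/sin(el)). A minimal numpy sketch, with hypothetical input names:

# Minimal sketch of the opacity term computed inside the loop above,
# using the same band conversions. "tau_wvm" is the 225 GHz WVM tau and
# "el_deg" the elevation in degrees (both hypothetical inputs).
import numpy

def opacity_term( tau_wvm, el_deg, band ):
    if band == 450:
        tau = 19.04 * ( tau_wvm - 0.018 )   # Dempsey et al., 450 um
    else:
        tau = 5.36 * ( tau_wvm - 0.006 )    # Dempsey et al., 850 um
    air_mass = 1.0 / numpy.sin( numpy.radians( el_deg ) )
    return 1.0 - numpy.exp( -tau * air_mass )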
Example 11: str

        else:
            tile_dict[ jsatile ] = tile

    # Create a list holding the paths to the tile NDFs that intersect
    # the required region.
    ntile = 0
    used_tile_list = []
    for jsatile in jsatile_list:
        key = str(jsatile)
        if key in tile_dict and tile_dict[ key ]:
            used_tile_list.append( tile_dict[ key ] )
            ntile += 1

    # Create an NDG holding the group of tile NDFs.
    if ntile > 0:
        msg_out( "{0} of the supplied tiles intersect the requested region.".format(ntile) )
        used_tiles = NDG( used_tile_list )
    else:
        raise starutil.InvalidParameterError( "None of the supplied JSA tiles "
                                              "intersect the requested region" )

# If we are using all tiles, just use the supplied group of tiles. Use
# the middle supplied tile as the reference.
else:
    used_tiles = tiles
    jsatile = int( len(tiles)/2 )
    jsatile = starutil.get_fits_header( tiles[ jsatile ], "TILENUM" )

# Paste these tile NDFs into a single image. This image still uses the
# JSA all-sky pixel grid. If we have only a single tile, then just use
# it as it is.
Example 12: msg_out

# Initialise the parameters to hold any values supplied on the command
# line. This automatically adds definitions for the additional parameters
# "MSG_FILTER", "ILEVEL", "GLEVEL" and "LOGFILE".
parsys = starutil.ParSys( params )

# It's a good idea to get parameter values early if possible, in case
# the user goes off for a coffee whilst the script is running and does not
# see a later parameter prompt or error.
restart = parsys["RESTART"].value
if restart is None:
    retain = parsys["RETAIN"].value
else:
    retain = True
    NDG.tempdir = restart
    NDG.overwrite = True
    msg_out( "Re-starting using data in {0}".format(restart) )

indata = parsys["IN"].value
outdata = parsys["OUT"].value
niter = parsys["NITER"].value
pixsize = parsys["PIXSIZE"].value
config = parsys["CONFIG"].value
ref = parsys["REF"].value
mask2 = parsys["MASK2"].value
mask3 = parsys["MASK3"].value
extra = parsys["EXTRA"].value
itermap = parsys["ITERMAP"].value

# See if we are using pre-cleaned data, in which case there is no need
# to export the cleaned data on the first iteration.
if invoke( "$KAPPA_DIR/configecho name=doclean config={0} "
Example 13: write_ip_NDF

            ipprms_pol_screen[row_val,col_val] = ipprms.x[0]
            ipprms_Co[row_val,col_val] = ipprms.x[1]
            ipprms_dc_Q[row_val,col_val] = ipprms.x[2]
            ipprms_dc_U[row_val,col_val] = ipprms.x[3]
            chi2Vals[row_val,col_val] = ipprms.fun
        else:
            returnCode[row_val,col_val] = False

# Write NDFs.
out_p0 = write_ip_NDF(ip_prms['Pf_'+a[-1]],bad_pixel_ref)
out_p1 = write_ip_NDF(ipprms_pol_screen,bad_pixel_ref)
out_c0 = write_ip_NDF(ipprms_Co,bad_pixel_ref)
out_angc = write_ip_NDF(ip_prms['Theta_ip_'+a[-1]],bad_pixel_ref)

# Fill any bad pixels with a smooth function that matches the surrounding pixels.
msg_out( "Filling in bad pixel values for {0} bolometer IP parameters...".format(a))
out_p0_filled = NDG(1)
invoke( "$KAPPA_DIR/fillbad in={0} out={1} variance=true niter=10 size=15".format(out_p0,out_p0_filled) )
out_p1_filled = NDG(1)
invoke( "$KAPPA_DIR/fillbad in={0} out={1} variance=true niter=10 size=15".format(out_p1,out_p1_filled) )
out_c0_filled = NDG(1)
invoke( "$KAPPA_DIR/fillbad in={0} out={1} variance=true niter=10 size=15".format(out_c0,out_c0_filled) )
out_angc_filled = NDG(1)
invoke( "$KAPPA_DIR/fillbad in={0} out={1} variance=true niter=10 size=15".format(out_angc,out_angc_filled) )

# Copy the individual NDFs to a single output.
invoke( "$KAPPA_DIR/ndfcopy {0} {1}".format(out_p0,outdata+'_preclean.'+str.lower(a)+'p0'))
invoke( "$KAPPA_DIR/ndfcopy {0} {1}".format(out_p1,outdata+'_preclean.'+str.lower(a)+'p1'))
invoke( "$KAPPA_DIR/ndfcopy {0} {1}".format(out_c0,outdata+'_preclean.'+str.lower(a)+'c0'))
invoke( "$KAPPA_DIR/ndfcopy {0} {1}".format(out_angc,outdata+'_preclean.'+str.lower(a)+'angc'))
Example 14: pca

def pca( indata, ncomp ):
    """
    Identifies and returns the strongest PCA components in a 3D NDF.

    Invocation:
        result = pca( indata, ncomp )

    Arguments:
        indata = NDG
           An NDG object specifying a single 3D NDF. Each plane in the cube
           is a separate image, and the images are compared using PCA.
        ncomp = int
           The number of PCA components to include in the returned NDF.

    Returned Value:
        A new NDG object containing a single 3D NDF holding just the
        strongest "ncomp" PCA components found in the input NDF.
    """

    msg_out( " finding strongest {0} components using Principal Component Analysis...".format(ncomp) )

    # Get the shape of the input NDF.
    invoke( "$KAPPA_DIR/ndftrace {0} quiet".format(indata) )
    nx = get_task_par( "dims(1)", "ndftrace" )
    ny = get_task_par( "dims(2)", "ndftrace" )
    nz = get_task_par( "dims(3)", "ndftrace" )

    # Fill any bad pixels.
    tmp = NDG(1)
    invoke( "$KAPPA_DIR/fillbad in={0} out={1} variance=no niter=10 size=10".format(indata,tmp) )

    # Read the planes from the supplied NDF. Note, numpy axis ordering is the
    # reverse of starlink axis ordering. We want a numpy array consisting of
    # "nz" elements, each being a vectorised form of a plane from the 3D NDF.
    ndfdata = numpy.reshape( Ndf( tmp[0] ).data, (nz,nx*ny) )

    # Normalize each plane to a mean of zero and a standard deviation of 1.0.
    means = []
    sigmas = []
    newdata = []
    for iplane in range(0,nz):
        plane = ndfdata[ iplane ]
        mn = plane.mean()
        sg = math.sqrt( plane.var() )
        means.append( mn )
        sigmas.append( sg )
        if sg > 0.0:
            newdata.append( (plane-mn)/sg )
    newdata = numpy.array( newdata )

    # Transpose as required by MDP.
    pcadata = numpy.transpose( newdata )

    # Find the required number of PCA components (these are the strongest
    # components).
    pca = mdp.nodes.PCANode( output_dim=ncomp )
    comp = pca.execute( pcadata )

    # Re-project the components back into the space of the input 3D NDF.
    ip = numpy.dot( comp, pca.get_recmatrix() )

    # Transpose the array so that each row is an image.
    ipt = numpy.transpose(ip)

    # Normalise them back to the original scales.
    jplane = 0
    newdata = []
    for iplane in range(0,nz):
        if sigmas[ iplane ] > 0.0:
            newplane = sigmas[ iplane ] * ipt[ jplane ] + means[ iplane ]
            jplane += 1
        else:
            newplane = ndfdata[ iplane ]
        newdata.append( newplane )
    newdata = numpy.array( newdata )

    # Dump the re-projected images out to a 3D NDF.
    result = NDG(1)
    indf = ndf.open( result[0], 'WRITE', 'NEW' )
    indf.new('_DOUBLE', 3, numpy.array([1,1,1]),numpy.array([nx,ny,nz]))
    ndfmap = indf.map( 'DATA', '_DOUBLE', 'WRITE' )
    ndfmap.numpytondf( newdata )
    indf.annul()

    # Uncomment to dump the components.
    # msg_out( "Dumping PCA comps to {0}-comps".format(result[0]) )
    # compt = numpy.transpose(comp)
    # indf = ndf.open( "{0}-comps".format(result[0]), 'WRITE', 'NEW' )
    # indf.new('_DOUBLE', 3, numpy.array([1,1,1]),numpy.array([nx,ny,ncomp]))
    # ndfmap = indf.map( 'DATA', '_DOUBLE', 'WRITE' )
    # ndfmap.numpytondf( compt )
    # indf.annul()

    return result
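A hypothetical call, assuming "cube" is an NDG holding a single 3-D NDF whose planes are the images to compare:

# Hypothetical usage sketch: keep only the 4 strongest components.
cleaned = pca( cube, 4 )
msg_out( "PCA-filtered cube written to {0}".format( cleaned[0] ) )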
Example 15: size

# Fixed clump size (FWHM in pixels on all axes).
clump_fwhm = 10

# Initial mean clump separation in pixels.
clump_separation = clump_fwhm/2.0

# Do tests for 5 different separations.
for isep in range(0, 1):

    # Initial peak value.
    peak_value = noise*0.5

    # Do tests for 5 different peak values.
    for ipeak in range(0, 1):
        starutil.msg_out( ">>> Doing sep={0} and peak={1}....".format(clump_separation,peak_value))

        # Get the dimensions of a square image that would be expected to
        # contain the target number of clumps at the current separation.
        npix = int( clump_separation*math.sqrt( nclump_target ) )

        # Create a temporary file containing circular clumps of constant size
        # and shape (except for the effects of noise).
        model = NDG(1)
        out = NDG(1)
        outcat = NDG.tempfile(".fit")
        invoke( "$CUPID_DIR/makeclumps angle=\[0,0\] beamfwhm=0 deconv=no "
                "fwhm1=\[{0},0\] fwhm2=\[{0},0\] lbnd=\[1,1\] ubnd=\[{1},{1}\] "
                "model={2} nclump={3} out={4} outcat={5} pardist=normal "
                "peak=\[{6},0\] rms={7} trunc=0.1".
                format(clump_fwhm,npix,model,nclump_target,out,outcat,