This page collects typical usage examples of the Python method JLA_library.get_date. If you are wondering what JLA_library.get_date does, how to call it, or want to see it in context, the curated examples below may help. You can also explore other usage examples from the JLA_library module.
The 15 code examples of JLA_library.get_date shown below are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
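All of the examples share one pattern: get_date() supplies a timestamp that is embedded in output filenames. A minimal sketch of that pattern, assuming only that JLA_library is importable; the exact format of the returned date string is an assumption, not documented here:

import JLA_library as JLA

date = JLA.get_date()                 # e.g. a 'YYYYMMDD'-style string (assumed)
filename = 'C_model_%s.fits' % (date)  # the naming convention used throughout
print(filename)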
Example 1: compute_model
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def compute_model(options):
    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA
    from astropy.table import Table
    from astropy.cosmology import FlatwCDM
    from scipy.interpolate import interp1d

    # ----------- Read in the configuration file ------------
    params = JLA.build_dictionary(options.config)

    # ----------- Read in the SN ordering ------------------------
    SNeList = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc'])
    nSNe = len(SNeList)

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-', '').replace('.list', '').replace('_smp', '')

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    print 'There are %d SNe' % (nSNe)

    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe = SNe[indices]

    redshift = SNe['zcmb']
    replace = (redshift < 0)
    # For SNe that do not have the CMB redshift
    redshift[replace] = SNe[replace]['zhel']
    print len(redshift)

    if options.raw:
        # Data from the bottom left hand figure of Mosher et al. 2014.
        # This is option ii) that is described above
        offsets = Table.read(JLA.get_full_path(params['modelOffset']), format='ascii.csv')
        Delta_M = interp1d(offsets['z'], offsets['offset'], kind='linear',
                           bounds_error=False, fill_value='extrapolate')(redshift)
    else:
        Om_0 = 0.303  # JLA value in the wCDM model
        cosmo1 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=Om_0, w0=-1.0)
        cosmo2 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=Om_0, w0=-1.024)
        Delta_M = 5 * numpy.log10(cosmo1.luminosity_distance(redshift) / cosmo2.luminosity_distance(redshift))

    # Build the covariance matrix. Note that only magnitudes are affected
    Zero = numpy.zeros(nSNe)
    H = numpy.concatenate((Delta_M, Zero, Zero)).reshape(3, nSNe).ravel(order='F')
    C_model = numpy.matrix(H).T * numpy.matrix(H)

    date = JLA.get_date()
    fits.writeto('C_model_%s.fits' % (date), numpy.array(C_model), clobber=True)

    return None
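The last few lines above are a pattern that recurs in several examples below: interleave the per-SN magnitude offsets with zeros for x1 and colour, then take the outer product to get a rank-1 covariance. A toy check of the interleaving, with made-up Delta_M values for two SNe:

import numpy

Delta_M = numpy.array([0.01, 0.02])   # toy magnitude offsets, 2 SNe
nSNe = len(Delta_M)
Zero = numpy.zeros(nSNe)
H = numpy.concatenate((Delta_M, Zero, Zero)).reshape(3, nSNe).ravel(order='F')
# H = [0.01, 0, 0, 0.02, 0, 0], i.e. (mb, x1, color) per SN
C_model = numpy.outer(H, H)           # equivalent to matrix(H).T * matrix(H)
print(C_model.shape)                  # (6, 6); non-zero only in the mb rows/columns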
Example 2: compute_Cstat
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def compute_Cstat(options):
    """Python program to compute C_stat
    """
    import numpy
    import astropy.io.fits as fits
    from astropy.table import Table
    import JLA_library as JLA

    # ----------- Read in the configuration file ------------
    params = JLA.build_dictionary(options.config)

    # ----------- Read in the SN ordering ------------------------
    SNeList = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc'])
    nSNe = len(SNeList)

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-', '').replace('.list', '')

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    # ----------- Read in the data --------------------------
    print 'There are %d SNe in the sample' % (nSNe)

    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe = SNe[indices]

    C_stat = numpy.zeros(9*nSNe*nSNe).reshape(3*nSNe, 3*nSNe)

    for i, SN in enumerate(SNe):
        cov = numpy.zeros(9).reshape(3, 3)
        cov[0, 0] = SN['dmb']**2.
        cov[1, 1] = SN['dx1']**2.
        cov[2, 2] = SN['dcolor']**2.
        cov[0, 1] = SN['cov_m_s']
        cov[0, 2] = SN['cov_m_c']
        cov[1, 2] = SN['cov_s_c']
        # Symmetrise
        cov = cov + cov.T - numpy.diag(cov.diagonal())
        C_stat[i*3:i*3+3, i*3:i*3+3] = cov

    # ----------- Read in the base matrix computed using salt2_stat.cc ------------
    if options.base is not None:
        C_stat += fits.getdata(options.base)

    date = JLA.get_date()
    fits.writeto('C_stat_%s.fits' % date, C_stat, clobber=True)

    return
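The symmetrisation step above fills the lower triangle from the upper one without double-counting the diagonal. A toy check with made-up variances and covariances:

import numpy

cov = numpy.zeros((3, 3))
cov[0, 0], cov[1, 1], cov[2, 2] = 0.04, 0.09, 0.01       # variances on the diagonal
cov[0, 1], cov[0, 2], cov[1, 2] = 0.002, 0.001, 0.003    # upper triangle only
cov = cov + cov.T - numpy.diag(cov.diagonal())            # subtract the diagonal once
print(numpy.allclose(cov, cov.T))                         # True: now symmetric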
Example 3: compute_dust
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def compute_dust(options):
    """Python program to compute C_dust
    """
    import numpy
    import astropy.io.fits as fits
    import os
    import JLA_library as JLA

    # ---------- Read in the SNe list -------------------------
    SNelist = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S110',
                               names=['id', 'lc'])
    for i, SN in enumerate(SNelist):
        SNelist['id'][i] = SNelist['id'][i].replace('lc-', '').replace('.list', '')

    # ----------- Read in the configuration file ------------
    params = JLA.build_dictionary(options.config)
    try:
        salt_path = JLA.get_full_path(params['defsaltModel'])
    except KeyError:
        salt_path = ''

    # ----------- The lightcurve fitting -------------------
    # Compute the offset between the nominal value of the extinction
    # and the adjusted value
    # We first compute the difference in light curve fit parameters for E(B-V) * (1+offset)
    offset = 0.1
    j = []
    for SN in SNelist:
        inputFile = SN['lc']
        print 'Fitting %s ' % (SN['lc'])
        workArea = JLA.get_full_path(options.workArea)
        dm, dx1, dc = JLA.compute_extinction_offset(SN['id'], inputFile, offset, workArea, salt_path)
        j.extend([dm, dx1, dc])

    # We want to compute the impact of an offset that is half as large, hence the
    # factor of 4 in the denominator: a covariance scales quadratically with the offset.
    # 2017/10/13: previously the offset was taken to be twice as large, and the
    # matrix was multiplied by 4 instead:
    # cdust = numpy.matrix(j).T * numpy.matrix(j) * 4.0
    cdust = numpy.matrix(j).T * numpy.matrix(j) / 4.0

    date = JLA.get_date()
    fits.writeto('C_dust_%s.fits' % date, cdust, clobber=True)

    return
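The factor of 4 follows from the quadratic scaling of a covariance: if the offsets j were measured for a perturbation delta, the covariance for a perturbation k*delta is k**2 * (j^T j). A toy confirmation with made-up (dm, dx1, dc) offsets:

import numpy

j = numpy.array([0.02, -0.01, 0.005])        # toy offsets for one SN
cdust_full = numpy.outer(j, j)               # covariance for the full offset
cdust_half = numpy.outer(j / 2.0, j / 2.0)   # covariance for half the offset
print(numpy.allclose(cdust_half, cdust_full / 4.0))   # True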
Example 4: compute_model
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def compute_model(options):
    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA
    from astropy.table import Table
    from astropy.cosmology import FlatwCDM

    # ----------- Read in the configuration file ------------
    params = JLA.build_dictionary(options.config)

    # ----------- Read in the SN ordering ------------------------
    SNeList = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc'])
    nSNe = len(SNeList)

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-', '').replace('.list', '')

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    print 'There are %d SNe' % (nSNe)

    # z = numpy.array([])
    # offset = numpy.array([])
    Om_0 = 0.303  # JLA value in the wCDM model
    cosmo1 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=Om_0, w0=-1.0)
    cosmo2 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=Om_0, w0=-1.024)

    # For the JLA SNe
    redshift = SNe['zcmb']
    replace = (redshift < 0)

    # For the non-JLA SNe
    redshift[replace] = SNe[replace]['zhel']

    Delta_M = 5 * numpy.log10(cosmo1.luminosity_distance(redshift) / cosmo2.luminosity_distance(redshift))

    # Build the covariance matrix. Note that only magnitudes are affected
    Zero = numpy.zeros(nSNe)
    H = numpy.concatenate((Delta_M, Zero, Zero)).reshape(3, nSNe).ravel(order='F')
    C_model = numpy.matrix(H).T * numpy.matrix(H)

    date = JLA.get_date()
    fits.writeto('C_model_%s.fits' % (date), numpy.array(C_model), clobber=True)

    return None
Example 5: make_FilterCurves
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def make_FilterCurves(options):
    # Assumed imports for this snippet (not shown in the original)
    from astropy.table import Table
    import JLA_library as JLA

    filterCurve = Table.read(options.input, format='fits')

    f = open("des_y3a1_std_%s.dat" % (options.filterName), 'w')
    f.write("# Written on %s\n" % (JLA.get_date()))
    f.write("# Derived from %s\n" % (options.input))
    f.write("# Wavelength (Angstroms) Transmission\n")

    # `bounds` is a module-level dictionary of wavelength limits per filter (not shown)
    selection = (filterCurve["lambda"] > bounds[options.filterName]["lower"]) & \
                (filterCurve["lambda"] < bounds[options.filterName]["upper"])
    for line in filterCurve[selection]:
        f.write("%5.1f %7.5f\n" % (line["lambda"], line[options.filterName]))
    f.close()

    return
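The snippet above refers to a module-level bounds dictionary that is not shown. A hypothetical shape it might take; the filter names and wavelength limits here are placeholders, not values from the source:

bounds = {'g': {'lower': 3800.0, 'upper': 5600.0},   # Angstroms, made up
          'r': {'lower': 5400.0, 'upper': 7300.0}}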
Example 6: compute_dust
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def compute_dust(options):
    """Python program to compute C_dust
    """
    import numpy
    import astropy.io.fits as fits
    import os
    import JLA_library as JLA

    # ---------- Read in the SNe list -------------------------
    SNelist = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S100',
                               names=['id', 'lc'])
    for i, SN in enumerate(SNelist):
        SNelist['id'][i] = SNelist['id'][i].replace('lc-', '').replace('.list', '')

    # ----------- The lightcurve fitting -------------------
    # Compute the offset between the nominal value of the extinction
    # and the adjusted value
    offset = 0.1
    j = []
    for SN in SNelist:
        inputFile = SN['lc']
        print 'Fitting %s' % (SN['id'])
        dm, dx1, dc = JLA.compute_extinction_offset(SN['id'], inputFile, offset)
        j.extend([dm, dx1, dc])

    cdust = numpy.matrix(j).T * numpy.matrix(j) * 4.0

    date = JLA.get_date()
    fits.writeto('C_dust_%s.fits' % date, cdust, clobber=True)

    return
Example 7: compute_rel_size
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def compute_rel_size(options):
    import numpy
    import astropy.io.fits as fits
    from astropy.table import Table
    import JLA_library as JLA
    from astropy.cosmology import FlatwCDM
    import os

    # ----------- Read in the configuration file ------------
    params = JLA.build_dictionary(options.config)

    # ---------- Read in the SNe list -------------------------
    SNeList = numpy.genfromtxt(options.SNlist, usecols=(0, 2), dtype='S30,S200', names=['id', 'lc'])
    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-', '').replace('.list', '')

    # ----------- Read in the JLA data --------------------------
    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')
    nSNe = len(SNe)
    print 'There are %d SNe in this sample' % (nSNe)

    # Sort it to match the listing in options.SNlist
    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe = SNe[indices]

    # ---------- Compute the Jacobian ----------------------
    # The Jacobian is an m by 4 matrix, where m is the number of SNe
    # The columns are ordered in terms of Om, w, alpha and beta
    J = []
    JLA_result = {'Om': 0.303, 'w': -1.00, 'alpha': 0.141, 'beta': 3.102, 'M_B': -19.05}
    offset = {'Om': 0.01, 'w': 0.01, 'alpha': 0.01, 'beta': 0.01, 'M_B': 0.01}
    nFit = 4

    cosmo1 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=JLA_result['Om'], w0=JLA_result['w'])

    # Varying Om
    cosmo2 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=JLA_result['Om']+offset['Om'], w0=JLA_result['w'])
    J.append(5*numpy.log10((cosmo1.luminosity_distance(SNe['zcmb'])/cosmo2.luminosity_distance(SNe['zcmb']))[:,0]))

    # Varying alpha
    J.append(1.0*offset['alpha']*SNe['x1'][:,0])

    # Varying beta
    J.append(-1.0*offset['beta']*SNe['color'][:,0])

    # Varying M_B
    J.append(offset['M_B']*numpy.ones(nSNe))

    J = numpy.matrix(numpy.concatenate((J)).reshape(nSNe, nFit, order='F') * 100.)

    # Set up the covariance matrices
    systematic_terms = ['bias', 'cal', 'host', 'dust', 'model', 'nonia', 'pecvel', 'stat']
    covmatrices = {'bias': params['bias'],
                   'cal': params['cal'],
                   'host': params['host'],
                   'dust': params['dust'],
                   'model': params['model'],
                   'nonia': params['nonia'],
                   'pecvel': params['pecvel'],
                   'stat': params['stat']}

    if options.type in systematic_terms:
        print "Using %s for the %s term" % (options.name, options.type)
        covmatrices[options.type] = options.name

    # Combine the matrices to compute the full covariance matrix, and compute its inverse
    if options.all:
        # Read in the user-provided matrix; otherwise compute it, and write it out
        C = fits.getdata(JLA.get_full_path(params['all']))
    else:
        C = add_covar_matrices(covmatrices, params['diag'])

    date = JLA.get_date()
    fits.writeto('C_total_%s.fits' % (date), C, clobber=True)

    Cinv = numpy.matrix(C).I

    # Construct eta, a 3n vector
    eta = numpy.zeros(3*nSNe)
    for i, SN in enumerate(SNe):
        eta[3*i] = SN['mb']
        eta[3*i+1] = SN['x1']
        eta[3*i+2] = SN['color']

    # Construct A, an n x 3n matrix
    A = numpy.zeros(nSNe*3*nSNe).reshape(nSNe, 3*nSNe)
    for i in range(nSNe):
        #......... part of this code is omitted here .........
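The construction of A is cut off above, but Example 13 below builds the same n by 3n projection, so the missing loop body very likely mirrors it. A self-contained sketch of that projection; alpha and beta are the JLA best-fit values quoted above, and the eta values are made up:

import numpy

nSNe = 2
eta = numpy.array([24.0, 0.1, 0.02, 25.0, -0.3, 0.05])  # (mb, x1, color) per SN, toy values
alpha, beta = 0.141, 3.102
A = numpy.zeros((nSNe, 3 * nSNe))
for i in range(nSNe):
    A[i, 3*i] = 1.0        # picks out mb
    A[i, 3*i+1] = alpha    # adds alpha * x1
    A[i, 3*i+2] = -beta    # subtracts beta * color
print(A.dot(eta))          # the standardised magnitude of each SN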
Example 8: train_SALT2
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def train_SALT2(options):
    # Assumed imports for this snippet (not shown in the original);
    # mkdir() is a helper presumably defined elsewhere in the repository
    import os
    import shutil as sh
    import subprocess as sp
    import JLA_library as JLA

    # Read in the configuration file
    params = JLA.build_dictionary(options.config)

    # Make the initialisation and training directories
    mkdir(params['trainingDir'])

    # Copy across files from the initDir to the trainingDir
    for file in os.listdir(params['initDir']):
        sh.copy(params['initDir']+'/'+file, params['trainingDir'])

    # Make the output directory
    date = JLA.get_date()
    outputDir = "/%s/data_%s_%s_%s/" % (params['outputDir'], date, params['trainingSample'], params['snpcaVersion'])
    mkdir(outputDir)

    os.chdir(params['trainingDir'])

    # Part a) First training, without the error snake
    # Step 1 - Train without the error snake
    cmd = ['pcafit',
           '-l', 'trainingsample_snls_sdss_v5.list',
           '-c', 'training_without_error_snake.conf',
           '-p', 'pca_1_opt1_final.list',
           '-d']
    sp.call(' '.join(cmd), shell=True)

    # Step 2 - Compute uncertainties
    cmd = ['write_pca_uncertainties',
           'pca_1_opt1_final.list',
           'full_weight_1.fits',
           '2',
           '1.0',
           '1.0']
    sp.call(' '.join(cmd), shell=True)

    # Step 3 - Compute the error snake
    cmd = ['Compute_error_snake',
           'trainingsample_snls_sdss_v5.list',
           'training_without_error_snake.conf',
           'pca_1_opt1_final.list',
           'full_weight_1.fits',
           'covmat_1_with_constraints.fits']
    sp.call(' '.join(cmd), shell=True)

    sh.copy('pca_1_opt1_final.list', 'pca_1_opt1_final_first.list')
    sh.copy('model_covmat_for_error_snake.fits', 'model_covmat_for_error_snake_first.fits')
    sh.copy('salt2_lc_dispersion_scaling.dat', 'salt2_lc_dispersion_scaling_first.dat')

    # Part b) Second training, with the error snake
    # Step 4 - Second training using the output from the first three steps
    cmd = ['pcafit',
           '-l', 'trainingsample_snls_sdss_v5.list',
           '-c', 'training_with_error_snake.conf',
           '-p', 'pca_1_opt1_final_first.list',
           '-d']
    sp.call(' '.join(cmd), shell=True)

    # Step 5 - Recompute uncertainties
    cmd = ['write_pca_uncertainties',
           'pca_1_opt1_final.list',
           'full_weight_1.fits',
           '2',
           '1.0',
           '1.0']
Example 9: compute_C_K
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def compute_C_K(options):
    import JLA_library as JLA
    import numpy
    import astropy.io.fits as fits

    # ----------- Read in the configuration file ------------
    params = JLA.build_dictionary(options.config)

    # ----------- We read in the JLA version of C_Kappa ------------
    if options.base:
        # CfA1 and CfA2 are not treated separately and we use the JLA uncertainties
        nDim = 42
    else:
        # CfA1 and CfA2 are treated separately, and we use the Pantheon uncertainties
        nDim = 58

    C_K_H0 = numpy.zeros(nDim * nDim).reshape(nDim, nDim)

    if options.base:
        # Read in the JLA matrix and extract the appropriate rows and columns
        # The matrix is structured in blocks with ZPs first,
        # and uncertainties in the filter curves second
        # The order is specified in salt2_calib_variations_all/saltModels.list
        # Standard, Landolt photometry is in rows 5 to 9 and rows 42 to 46
        # Keplercam is in rows 10 to 14 and 47 to 51
        # 4 Shooter is in rows 15 to 19 and 52 to 56
        # CSP is in rows 20 to 25 and 56 to 62
        C_K_JLA = fits.getdata(JLA.get_full_path(params['C_kappa_JLA']))

        # Extract the relevant columns and rows
        # ZPs first
        # Since the indices are consecutive, we do this all at once
        size = C_K_JLA.shape[0]
        C_K_H0[0:21, 0:21] = C_K_JLA[4:25, 4:25]

        # Filter curves second
        C_K_H0[21:42, 21:42] = C_K_JLA[4+size/2:25+size/2, 4+size/2:25+size/2]
    else:
        filterUncertainties = numpy.genfromtxt(JLA.get_full_path(params['filterUncertainties']),
                                               comments='#', usecols=(0, 1, 2, 3, 4), dtype='S30,f8,f8,f8,f8',
                                               names=['filter', 'zp', 'zp_off', 'wavelength', 'central'])

        # 1) ZP and filter uncertainty
        # We add a third of the offset found in Scolnic et al.
        for i, filt in enumerate(filterUncertainties):
            C_K_H0[i, i] = (filt['zp'] / 1000.)**2. + (filt['zp_off'] / 3. / 1000.)**2.
            C_K_H0[i+29, i+29] = (filt['wavelength'])**2.

        # 2a) B14 3.4.1 The uncertainty associated with the measurement of
        # the secondary CALSPEC standards
        # The uncertainty is assumed to be uncorrelated between filters
        # It only affects the diagonal terms of the ZPs
        # It is estimated from repeat STIS measurements of the standard
        # AGK+81D266; Bohlin et al. 2000 AJ 120, 437 and Bohlin 1999 ISR 99-07
        # This is the most pessimistic option. We assume that only one standard was observed
        nObs = 1              # It has been observed once
        unc_transfer = 0.003  # 0.3% uncertainty
        for i, filt1 in enumerate(filterUncertainties):
            C_K_H0[i, i] += unc_transfer**2. / nObs

        # 2b) B14 3.4.1 The uncertainty in the colour of the WD system: 0.5%
        # from 3,000-10,000
        # The uncertainty is computed with respect to the Bessell B filter,
        # which is the filter we use in computing the distance modulus
        # The absolute uncertainty at the rest frame wavelength of the B band
        # is not important here, as this is absorbed into the
        # combination of the absolute B band magnitude of SNe Ia and
        # the Hubble constant.
        slope = 0.005
        waveStart = 300
        waveEnd = 1000.
        # central = 436.0  # Corresponds to the B filter
        central = 555.6    # Used in the Pantheon sample
        # Note that 2.5 * log_10 (1+x) ~ x for |x| << 1
        for i, filt1 in enumerate(filterUncertainties):
            for j, filt2 in enumerate(filterUncertainties):
                if i >= j:
                    C_K_H0[i, j] += (slope / (waveEnd - waveStart) * (filt1['central']-central)) * \
                                    (slope / (waveEnd - waveStart) * (filt2['central']-central))

        C_K_H0 = C_K_H0 + C_K_H0.T - numpy.diag(C_K_H0.diagonal())

    # Write out the results
    date = JLA.get_date()
    hdu = fits.PrimaryHDU(C_K_H0)
    hdu.writeto("%s_%s.fits" % (options.output, date), clobber=True)

    return
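Block 2b) above (and its twin in Example 12 below) is itself a rank-1 matrix: after the final symmetrisation, the double loop over the lower triangle is equivalent to a single outer product. A toy demonstration with made-up central wavelengths in nm:

import numpy

central_wavelengths = numpy.array([475.0, 640.0, 780.0])   # toy values
slope, waveStart, waveEnd, central = 0.005, 300.0, 1000.0, 555.6
v = slope / (waveEnd - waveStart) * (central_wavelengths - central)
C_colour = numpy.outer(v, v)   # identical to the loop plus symmetrisation
print(C_colour)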
Example 10: compute_bias
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
#......... part of this code is omitted here .........
                              dtype='S10,f8,f8,f8',
                              names=['sample', 'redshift', 'bias', 'e_bias'])
    if options.plot:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        colour = {'nearby': 'b', 'SNLS': 'r', 'SDSS': 'g', 'DES': 'k'}

    for sample in numpy.unique(bias['sample']):
        selection = (bias['sample'] == sample)
        guess = [0, 0, 0]
        print bias[selection]

        plsq = leastsq(residuals, guess, args=(bias[selection]['bias'],
                                               bias[selection]['redshift'],
                                               bias[selection]['e_bias'],
                                               'poly'), full_output=1)
        if plsq[4] in [1, 2, 3, 4]:
            print 'Solution for %s found' % (sample)

        if options.plot:
            ax.errorbar(bias[selection]['redshift'],
                        bias[selection]['bias'],
                        yerr=bias[selection]['e_bias'],
                        ecolor='k',
                        color=colour[sample],
                        fmt='o',
                        label=sample)
            z = numpy.arange(numpy.min(bias[selection]['redshift']), numpy.max(bias[selection]['redshift']), 0.001)
            ax.plot(z, poly(z, plsq[0]), color=colour[sample])

        # For each SN, determine the uncertainty in the correction. We use the approach described in
        # https://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html
        # Compute the chi-sq.
        chisq = (((bias[selection]['bias']-poly(bias[selection]['redshift'], plsq[0]))/bias[selection]['e_bias'])**2.).sum()
        dof = selection.sum() - len(guess)
        print "Reduced chi-square value for sample %s is %5.2e" % (sample, chisq / dof)

        alpha = 0.315  # Confidence interval is 100 * (1-alpha)
        # Compute the upper alpha/2 value for the Student t distribution with dof degrees of freedom
        thresh = t.ppf((1-alpha/2.0), dof)

        if options.plot and sample != 'nearby':
            # The following is only valid for polynomial fitting functions,
            # and we do not compute it for the nearby sample
            upper_curve = []
            lower_curve = []
            for x in z:
                vect = numpy.matrix([1, x, x**2.])
                offset = thresh * numpy.sqrt(chisq / dof * (vect*numpy.matrix(plsq[1])*vect.T)[0, 0])
                upper_curve.append(poly(x, plsq[0])+offset)
                lower_curve.append(poly(x, plsq[0])-offset)
            ax.plot(z, lower_curve, '--', color=colour[sample])
            ax.plot(z, upper_curve, '--', color=colour[sample])

        # Compute the error in the bias
        # We increase the absolute value
        # In other words, if the bias is negative, we subtract the error to make it even more negative
        # This is to get the correct sign in the off-diagonal elements
        # We assume 100% correlation between SNe
        for i, SN in enumerate(SNe):
            if SN['zcmb'] > 0:
                redshift = SN['zcmb']
            else:
                redshift = SN['zhel']
            if JLA.survey(SN) == sample:
                # For the nearby SNe, the uncertainty in the bias correction is the bias correction itself
                if sample == 'nearby':
                    SNe['e_bias'][i] = poly(redshift, plsq[0])
                    # print SN['name'], redshift, SNe['e_bias'][i]
                else:
                    vect = numpy.matrix([1, redshift, redshift**2.])
                    if poly(redshift, plsq[0]) > 0:
                        sign = 1
                    else:
                        sign = -1
                    SNe['e_bias'][i] = sign * thresh * numpy.sqrt(chisq / dof * (vect*numpy.matrix(plsq[1])*vect.T)[0, 0])

    # We are getting some unrealistically large values
    date = JLA.get_date()

    if options.plot:
        ax.legend()
        plt.savefig('C_bias_%s.png' % (date))
        plt.close()

    # Compute the bias matrix
    Zero = numpy.zeros(nSNe)
    H = numpy.concatenate((SNe['e_bias'], Zero, Zero)).reshape(3, nSNe).ravel(order='F')
    C_bias = numpy.matrix(H).T * numpy.matrix(H)

    fits.writeto('C_bias_%s.fits' % (date), C_bias, clobber=True)

    return None
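The dashed curves above use the standard prediction band for a polynomial fit: for a design vector x = (1, z, z**2) and the parameter covariance V returned by leastsq (plsq[1]), the half-width is thresh * sqrt(chisq/dof * x V x^T). A toy evaluation, with a made-up V standing in for plsq[1]:

import numpy
from scipy.stats import t as student_t

V = numpy.diag([1e-4, 1e-4, 1e-5])    # toy parameter covariance
chisq, dof = 8.0, 6                   # toy fit quality
thresh = student_t.ppf(1 - 0.315 / 2.0, dof)
x = numpy.array([1.0, 0.5, 0.25])     # design vector at z = 0.5
half_width = thresh * numpy.sqrt(chisq / dof * x.dot(V).dot(x))
print(half_width)                     # half-width of the confidence band at z = 0.5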
Example 11: compute_Ccal
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
#......... part of this code is omitted here .........
            dM, dX, dC = JLA.computeOffsets(results[0], result)
            J.extend([dM, dX, dC])
        pool.close()  # This prevents too many open files
        if firstSN:
            J_new = numpy.array(J).reshape(nSALTmodels, 3).T
            firstSN = False
        else:
            J_new = numpy.concatenate((J_new, numpy.array(J).reshape(nSALTmodels, 3).T), axis=0)

    log.write('%d rows %d columns\n' % (J_new.shape[0], J_new.shape[1]))
    log.close()

    # Compute the new covariance matrix: J . Cal . J.T produces a 3 * n_SN by 3 * n_SN matrix
    # J = Jacobian
    J_smoothed = numpy.array(J_new) * 0.0
    J = J_new

    # We need to concatenate the different samples ...
    if options.Plot:
        try:
            os.mkdir('figures')
        except:
            pass

    nPoints = {'SNLS': 11, 'SDSS': 11, 'nearby': 11, 'high-z': 11, 'DES': 11}
    # sampleList = ['nearby', 'DES']
    sampleList = params['smoothList'].split(',')

    if options.smoothed:
        # We smooth the Jacobian
        # We roughly follow the method described in the footnote of p13 of B14
        for sample in sampleList:
            selection = (SNeList['survey'] == sample)
            J_sample = J[numpy.repeat(selection, 3)]
            for sys in range(nSALTmodels):
                # We need to convert to a numpy array
                # There is probably a better way
                redshifts = numpy.array([z for z in SNeList[selection]['z']])
                derivatives_mag = J_sample[0::3][:, sys]  # [0::3] = [0, 3, 6, ...], every 3rd one
                # print redshifts.shape, derivatives_mag.shape, nPoints[sample]
                forPlotting_mag, res_mag = JLA.smooth(redshifts, derivatives_mag, nPoints[sample])
                derivatives_x1 = J_sample[1::3][:, sys]
                forPlotting_x1, res_x1 = JLA.smooth(redshifts, derivatives_x1, nPoints[sample])
                derivatives_c = J_sample[2::3][:, sys]
                forPlotting_c, res_c = JLA.smooth(redshifts, derivatives_c, nPoints[sample])
                # We need to insert the new results into the smoothed Jacobian matrix in the correct place
                # The Jacobian is a 3 * n_SN by nSALTmodels matrix
                # The rows are ordered by the mag, stretch and colour of each SN.
                J_smoothed[numpy.repeat(selection, 3), sys] = numpy.concatenate([res_mag, res_x1, res_c]).reshape(3, selection.sum()).ravel('F')
                # If required, make some plots as a way of checking
                if options.Plot:
                    print 'Creating plot for systematic %d and sample %s' % (sys, sample)
                    fig = plt.figure()
                    ax1 = fig.add_subplot(311)
                    ax2 = fig.add_subplot(312)
                    ax3 = fig.add_subplot(313)
                    ax1.plot(redshifts, derivatives_mag, 'bo')
                    ax1.plot(forPlotting_mag[0], forPlotting_mag[1], 'r-')
                    ax1.set_ylabel('mag')
                    ax2.plot(redshifts, derivatives_x1, 'bo')
                    ax2.plot(forPlotting_x1[0], forPlotting_x1[1], 'r-')
                    ax2.set_ylabel('x1')
                    ax3.plot(redshifts, derivatives_c, 'bo')
                    ax3.plot(forPlotting_c[0], forPlotting_c[1], 'r-')
                    ax3.set_ylabel('c')
                    ax3.set_xlabel('z')
                    plt.savefig('figures/%s_sys_%d.png' % (sample, sys))
                    plt.close()

    date = JLA.get_date()
    fits.writeto('J_%s.fits' % (date), J, clobber=True)
    fits.writeto('J_smoothed_%s.fits' % (date), J_smoothed, clobber=True)

    # Some matrix arithmetic
    # C_cal is an nSALTmodels by nSALTmodels matrix
    # Read in a smoothed Jacobian specified in the options
    if options.jacobian is not None:
        J_smoothed = fits.getdata(options.jacobian)
    # else:
    #     # Replace the NaNs in your smoothed Jacobian with zero
    #     J_smoothed[numpy.isnan(J_smoothed)] = 0

    C = numpy.matrix(J_smoothed) * numpy.matrix(Cal) * numpy.matrix(J_smoothed).T

    if options.output is None:
        fits.writeto('C_cal_%s.fits' % (date), numpy.array(C), clobber=True)
    else:
        fits.writeto('%s.fits' % (options.output), numpy.array(C), clobber=True)

    return
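The final sandwich product above maps the calibration covariance into SN-parameter space: with J_smoothed of shape (3*nSNe, nSALTmodels) and Cal of shape (nSALTmodels, nSALTmodels), the result is (3*nSNe, 3*nSNe). A shape-only toy, with random stand-ins for both inputs:

import numpy

nSNe, nSALTmodels = 4, 5
J = numpy.random.randn(3 * nSNe, nSALTmodels) * 1e-3   # toy Jacobian
Cal = numpy.eye(nSALTmodels) * 1e-4                    # toy calibration covariance
C_cal = J.dot(Cal).dot(J.T)
print(C_cal.shape)                                     # (12, 12)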
Example 12: compute_C_K
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
#......... part of this code is omitted here .........
    # Cross terms. Not needed, as they are zero
    # C_K_DES[0:16, 23:39] = C_K_JLA[9:25, 9+size/2:25+size/2]
    # C_K_DES[23:39, 0:16] = C_K_JLA[9+size/2:25+size/2, 9:25]

    # Read in the table listing the uncertainties in the ZPs and
    # effective wavelengths
    filterUncertainties = numpy.genfromtxt(JLA.get_full_path(params['filterUncertainties']),
                                           comments='#', usecols=(0, 1, 2, 3, 4), dtype='S30,f8,f8,f8,f8',
                                           names=['filter', 'zp', 'zp_off', 'wavelength', 'central'])

    # For the Bc filter of CfA, and the V1 and V2 filters of CSP,
    # we assume that they have the same sized systematic uncertainties as the
    # B filter of CfA and the V1 and V2 filters of CSP
    # We could either copy these terms across or recompute them.
    # We choose to recompute them

    # Compute the terms in DES; this includes the cross terms
    # We first compute them separately, then add them to the matrix
    nFilters = len(filterUncertainties)
    C_K_new = numpy.zeros(nFilters*nFilters*4).reshape(nFilters*2, nFilters*2)

    # 1) DES controlled uncertainties
    # The uncertainty in the ZP has several components:
    # a) The uncertainty in the differential chromatic correction (set to zero for now)
    #    Note that this error is 100% correlated with the component of b) that comes from the filter curve
    # b) The uncertainty in the measurement of the transfer to the AB system
    #    using the observations of C26202
    # c) The SN field-to-field variation between DES and GAIA
    for i, filt in enumerate(filterUncertainties):
        if 'DES' in filt['filter']:
            error_I0, error_chromatic, error_AB = FGCM.prop_unc(params, filt)
            # print numpy.sqrt((error_AB)**2. + (FGCM_unc)**2. / nC26202_Observations[filt['filter']])
            C_K_new[i, i] = uniformity**2. + (error_AB)**2. + (FGCM_unc)**2. / nC26202_Observations[filt['filter']] + SMP_ZP**2.
            print '%s %5.4f' % (filt['filter'], numpy.sqrt(C_K_new[i, i]))
            C_K_new[i, i+nFilters] = (error_AB) * filt['wavelength']
            C_K_new[i+nFilters, i] = (error_AB) * filt['wavelength']
            C_K_new[i+nFilters, i+nFilters] = (filt['wavelength'])**2.
        else:
            C_K_new[i, i] = (filt['zp'] / 1000.)**2. + (filt['zp_off'] / 3. / 1000.)**2.

    # 2a) B14 3.4.1 The uncertainty associated with the measurement of
    # the secondary CALSPEC standards
    # The uncertainty is assumed to be uncorrelated between filters
    # It only affects the diagonal terms of the ZPs
    # It is estimated from repeat STIS measurements of the standard
    # AGK+81D266; Bohlin et al. 2000 AJ 120, 437 and Bohlin 1999 ISR 99-07
    nObs_C26202 = 1       # It has been observed once
    unc_transfer = 0.003  # 0.3% uncertainty
    for i, filt1 in enumerate(filterUncertainties):
        C_K_new[i, i] += unc_transfer**2. / nObs_C26202

    # 2b) B14 3.4.1 The uncertainty in the colour of the WD system: 0.5%
    # from 3,000-10,000
    # The uncertainty is computed with respect to the Bessell B filter,
    # which is the filter we use in computing the distance modulus
    # The absolute uncertainty at the rest frame wavelength of the B band
    # is not important here, as this is absorbed into the
    # combination of the absolute B band magnitude of SNe Ia and
    # the Hubble constant.
    slope = 0.005
    waveStart = 300
    waveEnd = 1000.
    # central = 436.0  # Corresponds to the B filter
    central = 555.6    # Used in the Pantheon sample
    # Note that 2.5 * log_10 (1+x) ~ x for |x| << 1
    for i, filt1 in enumerate(filterUncertainties):
        for j, filt2 in enumerate(filterUncertainties):
            if i >= j:
                C_K_new[i, j] += (slope / (waveEnd - waveStart) * (filt1['central']-central)) * \
                                 (slope / (waveEnd - waveStart) * (filt2['central']-central))

    C_K_new = C_K_new + C_K_new.T - numpy.diag(C_K_new.diagonal())

    if options.base:
        # We do not update
        sel = numpy.zeros(nDim, bool)
        sel[0:16] = True
        sel[23:39] = True
        sel2d = numpy.matrix(sel).T * numpy.matrix(sel)
        C_K_new[sel2d] = 0.0

    C_K_DES += C_K_new

    # Write out the results
    date = JLA.get_date()
    hdu = fits.PrimaryHDU(C_K_DES)
    hdu.writeto("%s_%s.fits" % (options.output, date), clobber=True)

    return
Example 13: add_covar_matrices
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
def add_covar_matrices(options):
    """
    Python program that adds the individual covariance matrices into a single matrix
    """
    import time
    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA

    params = JLA.build_dictionary(options.config)

    # Read in the terms that account for uncertainties in peculiar velocities,
    # intrinsic dispersion and lensing

    # Read in the covariance matrices
    matrices = []
    systematic_terms = ['bias', 'cal', 'host', 'dust', 'model', 'nonia', 'pecvel', 'stat']
    covmatrices = {'bias': params['bias'],
                   'cal': params['cal'],
                   'host': params['host'],
                   'dust': params['dust'],
                   'model': params['model'],
                   'nonia': params['nonia'],
                   'pecvel': params['pecvel'],
                   'stat': params['stat']}
    for term in systematic_terms:
        matrices.append(fits.getdata(JLA.get_full_path(covmatrices[term]), 0))

    # Add the matrices
    size = matrices[0].shape
    add = numpy.zeros(size[0]**2).reshape(size[0], size[0])
    for matrix in matrices:
        add += matrix

    # Write out this matrix. This is C_eta in eq. 13 of B14
    date = JLA.get_date()
    fits.writeto('C_eta_%s.fits' % (date), add, clobber=True)

    # Compute A
    nSNe = size[0] / 3
    jla_results = {'Om': 0.303, 'w': -1.027, 'alpha': 0.141, 'beta': 3.102}
    arr = numpy.zeros(nSNe*3*nSNe).reshape(nSNe, 3*nSNe)
    for i in range(nSNe):
        arr[i, 3*i] = 1.0
        arr[i, 3*i+1] = jla_results['alpha']
        arr[i, 3*i+2] = -jla_results['beta']

    cov = numpy.matrix(arr) * numpy.matrix(add) * numpy.matrix(arr).T

    # Add the diagonal terms
    sigma = numpy.genfromtxt(JLA.get_full_path(params['diag']),
                             comments='#',
                             usecols=(0, 1, 2),
                             dtype='f8,f8,f8',
                             names=['sigma_coh', 'sigma_lens', 'sigma_pecvel'])
    for i in range(nSNe):
        cov[i, i] += sigma['sigma_coh'][i]**2 + \
                     sigma['sigma_lens'][i]**2 + \
                     sigma['sigma_pecvel'][i]**2

    fits.writeto('C_total_%s.fits' % (date), cov, clobber=True)

    return
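An end-to-end toy check of the assembly above: C_total = A C_eta A^T plus a per-SN diagonal of coherent-scatter, lensing and peculiar-velocity terms. All inputs here are made up; only the shapes and the formula match the code:

import numpy

nSNe = 3
C_eta = numpy.eye(3 * nSNe) * 1e-4                 # stand-in for the summed matrices
alpha, beta = 0.141, 3.102
A = numpy.zeros((nSNe, 3 * nSNe))
for i in range(nSNe):
    A[i, 3*i], A[i, 3*i+1], A[i, 3*i+2] = 1.0, alpha, -beta
C_total = A.dot(C_eta).dot(A.T)
C_total += numpy.diag([0.1**2 + 0.05**2 + 0.02**2] * nSNe)  # toy coh/lens/pecvel terms
print(C_total.shape)                               # (3, 3): one row and column per SN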
Example 14: compute_nonIa
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
#......... part of this code is omitted here .........
    # f_bright = 0.25  # The fraction of Ibc SNe that are bright
    # Simulate the characteristics of the SNLS survey
    # Apply outlier rejection
    # All SNe that pass the cuts are included in the sample
    # One then has a mixture of SNe Ia and SNe Ibc,
    # and the average magnitude at each redshift is biased. This
    # is called the raw bias. One multiplies the raw bias by the fraction of
    # objects classified as SNe Ia*
    # The results are presented in 7 redshift bins defined in table 14 of C11
    # We use these results to generate the matrix.
    # Only the SNLS SNe in the JLA sample are considered.
    # For the photometrically selected sample and other surveys, this will probably be different
    # JLA compute this for the SNLS sample only
    # We assume that the redshift in this table refers to the left hand edge of each bin

    # ----------- Read in the configuration file ------------
    params = JLA.build_dictionary(options.config)

    data = numpy.genfromtxt(JLA.get_full_path(params['classification']), comments="#",
                            usecols=(0, 1, 2), dtype=['float', 'float', 'float'],
                            names=['redshift', 'raw_bias', 'fraction'])
    z_bin = data['redshift']
    raw_bias = data['raw_bias']
    f_star = data['fraction']

    # The covariance between SNe Ia in the same redshift bin is fully correlated;
    # otherwise, it is uncorrelated

    # ----------- Read in the configuration file ------------
    params = JLA.build_dictionary(options.config)
    SNeList = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc'])
    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-', '').replace('.list', '').replace('_smp', '')

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    # Add a bin column and a column that specifies if the covariance needs to be computed
    SNe['bin'] = 0
    SNe['eval'] = False

    # Make the order of data (in SNe) match SNeList
    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe = SNe[indices]
    nSNe = len(SNe)

    # Identify the SNLS SNe in the JLA sample
    # We use the source and the name to decide if we want to add corrections for non-Ia contamination
    # Identify the DES SNe in the DES sample
    for i, SN in enumerate(SNe):
        try:
            # If the source keyword exists
            if (SN['source'] == 'JLA' or SN['source'] == 'SNLS_spec') and SN['name'][2:4] in ['D1', 'D2', 'D3', 'D4']:
                SNe['eval'][i] = True
            elif (SN['source'] == 'SNLS_photo') and (SN['name'][2:4] in ['D1', 'D2', 'D3', 'D4'] or (SN['name'][0:2] in ['D1', 'D2', 'D3', 'D4'])):
                SNe['eval'][i] = True
        except:
            # If the source keyword does not exist
            if SN['name'][0:3] == "DES":
                SNe['eval'][i] = True

    print list(SNe['eval']).count(True)

    # Work out which redshift bin each SN belongs to
    # In numpy.digitize, the bin number starts at 1, so we subtract 1 -- need to check...
    SNe['bin'] = numpy.digitize(SNe['zhel'], z_bin) - 1

    # Build the covariance matrix
    C_nonIa = numpy.zeros(nSNe*3*nSNe*3).reshape(nSNe*3, nSNe*3)

    # It only computes the covariance for the spectroscopically confirmed SNLS SNe
    # We assume that covariance between redshift bins is uncorrelated
    # Within a redshift bin, we assume 100% covariance between SNe in that bin
    for i in range(nSNe):
        bin1 = SNe['bin'][i]
        if SNe['eval'][i]:
            print SNe['zhel'][i], bin1, raw_bias[bin1], f_star[bin1], i
        for j in range(nSNe):
            bin2 = SNe['bin'][j]
            if SNe['eval'][j] and SNe['eval'][i] and bin1 == bin2:
                C_nonIa[3*i, 3*j] = (raw_bias[bin1] * f_star[bin1])**2

    # print SNe['bin'][:239]
    # I am unable to reproduce this JLA covariance matrix
    date = JLA.get_date()
    fits.writeto('C_nonIa_%s.fits' % date, numpy.array(C_nonIa), clobber=True)

    return
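On the "subtract 1 -- need to check" comment above: for monotonically increasing bins, numpy.digitize returns 1 for values in [bins[0], bins[1]), so subtracting 1 does yield the index of the left edge of each bin, provided every redshift is at least z_bin[0]. A quick check with made-up bin edges:

import numpy

z_bin = numpy.array([0.0, 0.2, 0.4, 0.6])   # left edges of each bin, toy values
z = numpy.array([0.05, 0.25, 0.55])
print(numpy.digitize(z, z_bin) - 1)          # [0 1 2]: index of the left edge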
Example 15: merge_lightcurve_fits
# Required import: import JLA_library [as alias]
# Or: from JLA_library import get_date [as alias]
#......... part of this code is omitted here .........
    # The tables package in astropy could probably also be used to read the ascii input file
    SNeSpec = Table(numpy.genfromtxt(lightCurveFits,
                                     skip_header=1,
                                     dtype='S12,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8',
                                     names=names))
    nSNeSpec = len(SNeSpec)
    print 'There are %d SNe from the spectroscopically confirmed sample' % (nSNeSpec)

    # Add an extra column to the table
    SNeSpec['source'] = ['JLA'] * nSNeSpec

    # ---------------- Shuvo's sample ------------------------
    # Photometrically identified SNe in Shuvo's sample, if the parameter exists
    if params['photLightCurveFits'] != 'None':
        lightCurveFits = JLA.get_full_path(params['photLightCurveFits'])
        SNePhot = Table.read(lightCurveFits, format='fits')
        nSNePhot = len(SNePhot)
        print 'There are %d SNe from the photometric sample' % (nSNePhot)

        # Convert from Shuvo's names to those used by JLA
        conversion = {'name': 'name_adj', 'zcmb': None, 'zhel': 'z', 'dz': None, 'mb': 'mb',
                      'dmb': 'emb', 'x1': 'x1', 'dx1': 'ex1', 'color': 'c', 'dcolor': 'ec',
                      '3rdvar': 'col27', 'd3rdvar': 'd3rdvar', 'tmax': None, 'dtmax': None,
                      'cov_m_s': 'cov_m_x1', 'cov_m_c': 'cov_m_c', 'cov_s_c': 'cov_x1_c',
                      'set': None, 'ra': None, 'dec': None, 'biascor': None}

        # Add the uncertainty in the mass column
        SNePhot['d3rdvar'] = (SNePhot['col29'] + SNePhot['col28']) / 2. - SNePhot['col27']

        # Remove columns that are not listed in conversion
        for colname in SNePhot.colnames:
            if colname not in conversion.values():
                SNePhot.remove_column(colname)

        for key in conversion.keys():
            # Rename the column if it does not already exist
            if conversion[key] != None and conversion[key] != key:
                SNePhot.rename_column(conversion[key], key)
            elif conversion[key] == None:
                # Create it, mask it, and fill all values
                SNePhot[key] = MaskedColumn(numpy.zeros(nSNePhot), numpy.ones(nSNePhot, bool))
                SNePhot[key].fill_value = -99  # does not work as expected, so we set it explicitly in the next line
                SNePhot[key] = -99.9
            else:
                # Do nothing if the column already exists
                pass

        # Add the source column
        SNePhot['source'] = "Phot_Uddin"

    # ---------------------- CfA4 ----------------------------------
    if params['CfA4LightCurveFits'] != 'None':
        lightCurveFits = JLA.get_full_path(params['CfA4LightCurveFits'])
        f = open(lightCurveFits)
        header = f.readlines()
        f.close()
        names = header[0].strip('#').split(',')

        SNeCfA4 = Table(numpy.genfromtxt(lightCurveFits,
                                         skip_header=1,
                                         dtype='S12,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8',
                                         names=names, delimiter=','))
        nSNeCfA4 = len(SNeCfA4)

        conversion = {'name': 'name', 'zcmb': None, 'zhel': 'z', 'dz': None, 'mb': 'mb',
                      'dmb': 'emb', 'x1': 'x1', 'dx1': 'ex1', 'color': 'c', 'dcolor': 'ec',
                      '3rdvar': None, 'd3rdvar': None, 'tmax': None, 'dtmax': None,
                      'cov_m_s': 'cov_m_x1', 'cov_m_c': 'cov_m_c', 'cov_s_c': 'cov_x1_c',
                      'set': None, 'ra': None, 'dec': None, 'biascor': None}

        # Remove columns that are not listed in conversion
        for colname in SNeCfA4.colnames:
            if colname not in conversion.values():
                SNeCfA4.remove_column(colname)

        for key in conversion.keys():
            # Rename the column if it does not already exist
            if conversion[key] != None and conversion[key] != key:
                SNeCfA4.rename_column(conversion[key], key)
            elif conversion[key] == None:
                # Create it, mask it, and fill all values
                SNeCfA4[key] = MaskedColumn(numpy.zeros(nSNeCfA4), numpy.ones(nSNeCfA4, bool))
                SNeCfA4[key].fill_value = -99  # does not work as expected, so we set it explicitly in the next line
                SNeCfA4[key] = -99.9
            else:
                # Do nothing if the column already exists
                pass

        # Add the source column
        SNeCfA4['source'] = "CfA4"

    try:
        SNe = vstack([SNeSpec, SNePhot, SNeCfA4])
    except:
        SNe = SNeSpec

    # Write out the result as a FITS table
    date = JLA.get_date()
    SNe.write('%s_%s.fits' % (options.output, date), format='fits')

    return
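A minimal sketch of the placeholder-column pattern used twice above: create a fully masked column, then assign a sentinel value directly, since relying on fill_value alone was found unreliable by the original author. The table contents here are made up, and keyword arguments are used for MaskedColumn to avoid positional-argument ambiguity across astropy versions:

import numpy
from astropy.table import Table, MaskedColumn

t = Table({'name': ['SN1', 'SN2']}, masked=True)
t['zcmb'] = MaskedColumn(data=numpy.zeros(2), mask=numpy.ones(2, bool))
t['zcmb'] = -99.9   # overwrite with an explicit sentinel, as in the code above
print(t)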