This page collects typical usage examples of the Python method PALutils.createRedNoiseCovarianceMatrix. If you are wondering how to call PALutils.createRedNoiseCovarianceMatrix in practice, the hand-picked examples below may help. You can also browse further usage examples for the containing module, PALutils.
Six code examples of the PALutils.createRedNoiseCovarianceMatrix method are shown below, sorted by popularity by default.
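Before the examples, here is a minimal sketch of how the method is typically called, pieced together from the call sites below; the TOA array and the amplitude and spectral-index values are illustrative assumptions, not taken from any real data set.
import numpy as np
import PALutils

# illustrative, evenly spaced TOAs in seconds spanning roughly five years
toas = np.linspace(0.0, 5 * 3.16e7, 100)

# matrix of pairwise time lags between the two sets of TOAs
tm = PALutils.createTimeLags(toas, toas)

# red-noise covariance matrix for amplitude Amp and spectral index gam
# (gam = 13/3 is the value expected for a background of supermassive black hole binaries)
red = PALutils.createRedNoiseCovarianceMatrix(tm, 1e-14, 13.0 / 3.0)
print(red.shape)   # (100, 100)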
Example 1: optStat
# Required import: import PALutils [as alias]
# Or: from PALutils import createRedNoiseCovarianceMatrix [as alias]
# (this example also assumes: import numpy as np)
def optStat(psr, ORF, gam=4.33333):
"""
Computes the Optimal statistic as defined in Chamberlin, Creighton, Demorest et al (2013)
@param psr: List of pulsar object instances
@param ORF: Vector of pairwise overlap reduction values
    @param gam: Power spectral index of GWB (default = 13/3, i.e. SMBHBs)
@return: Opt: Optimal statistic value (A_gw^2)
    @return: sigma: 1-sigma uncertainty on Optimal statistic
@return: snr: signal-to-noise ratio of cross correlations
"""
#TODO: maybe compute ORF in code instead of reading it in. Would be less
# of a risk but a bit slower...
k = 0
npsr = len(psr)
top = 0
bot = 0
    for ll in range(0, npsr):
        for kk in range(ll+1, npsr):
# form matrix of toa residuals and compute SigmaIJ
tm = PALutils.createTimeLags(psr[ll].toas, psr[kk].toas)
# create cross covariance matrix without overall amplitude A^2
SIJ = ORF[k]/2 * PALutils.createRedNoiseCovarianceMatrix(tm, 1, gam)
# construct numerator and denominator of optimal statistic
bot += np.trace(np.dot(psr[ll].invCov, np.dot(SIJ, np.dot(psr[kk].invCov, SIJ.T))))
top += np.dot(psr[ll].res, np.dot(psr[ll].invCov, np.dot(SIJ, \
np.dot(psr[kk].invCov, psr[kk].res))))
k+=1
# compute optimal statistic
Opt = top/bot
# compute uncertainty
sigma = 1/np.sqrt(bot)
# compute SNR
snr = top/np.sqrt(bot)
# return optimal statistic and snr
return Opt, sigma, snr
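A hedged usage sketch for this example; the pulsar list psr (objects carrying .toas, .res and .invCov) and the overlap-reduction vector ORF are assumed to have been built elsewhere and are not defined here.
# assumed inputs: psr (list of pulsar objects) and ORF, a vector with one
# Hellings-Downs value per pulsar pair, ordered (0,1), (0,2), ..., (npsr-2, npsr-1)
Opt, sigma, snr = optStat(psr, ORF, gam=13.0 / 3.0)
print("A_gw^2 = {0:.3e} +/- {1:.3e} (SNR = {2:.2f})".format(Opt, sigma, snr))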
Example 2: crossPower
# Required import: import PALutils [as alias]
# Or: from PALutils import createRedNoiseCovarianceMatrix [as alias]
# (this example also assumes: import numpy as np)
def crossPower(psr, gam=13.0/3.0):
    """
    Compute the cross power defined in Eq. 9, and its uncertainty from Eq. 10,
    of Demorest et al (2012).
@param psr: List of pulsar object instances
@param gam: Power spectral index of GWB
@return: vector of cross power for each pulsar pair
@return: vector of cross power uncertainties for each pulsar pair
"""
# initialization
npsr = len(psr)
# now compute cross power
rho = []
sig = []
xi = []
for ll in range(npsr):
for kk in range(ll+1, npsr):
# matrix of time lags
tm = PALutils.createTimeLags(psr[ll].toas, psr[kk].toas)
# create cross covariance matrix without overall amplitude A^2
SIJ = PALutils.createRedNoiseCovarianceMatrix(tm, 1, gam)
# construct numerator and denominator of optimal statistic
bot = np.trace(np.dot(psr[ll].invCov, np.dot(SIJ, np.dot(psr[kk].invCov, SIJ.T))))
top = np.dot(psr[ll].res, np.dot(psr[ll].invCov, np.dot(SIJ, \
np.dot(psr[kk].invCov, psr[kk].res))))
# cross correlation and uncertainty
rho.append(top/bot)
sig.append(1/np.sqrt(bot))
return np.array(rho), np.array(sig)
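Again a minimal hedged sketch; psr is the same assumed list of pulsar objects, and the pair ordering follows directly from the nested loops above.
rho, sig = crossPower(psr, gam=13.0 / 3.0)
# one (rho, sig) entry per pulsar pair, in the order (0,1), (0,2), ..., (npsr-2, npsr-1)
for pair_rho, pair_sig in zip(rho, sig):
    print("cross power = {0:.3e} +/- {1:.3e}".format(pair_rho, pair_sig))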
Example 3: enumerate
# Required import: import PALutils [as alias]
# Or: from PALutils import createRedNoiseCovarianceMatrix [as alias]
# (this fragment also assumes: import numpy as np, import scipy.linalg as sl)
logdetTerm = []  # per-pulsar log-determinant terms; initialized here so the loop below runs
for ct, p in enumerate(psr):
efac = p.efac
equad = p.equad
Amp = p.Amp
gam = p.gam
fH = p.fH
    print(p.name, efac, equad, Amp, gam)
# get white noise covariance matrix
white = PALutils.createWhiteNoiseCovarianceMatrix(p.err, efac, equad)
# get red noise covariance matrix
tm = PALutils.createTimeLags(p.toas, p.toas, round=True)
red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)
C = np.dot(p.G.T, np.dot(red + white, p.G))
cf = sl.cho_factor(C)
logdetTerm.append(np.sum(2 * np.log(np.diag(cf[0])))) # + p.G.shape[1]*np.log(2*np.pi))
# get null model log-likelihood
nullLike = 0
for ct, p in enumerate(psr):
nullLike += -0.5 * logdetTerm[ct]
nullLike += -0.5 * np.dot(p.res, np.dot(p.invCov, p.res))
print "Null Like = {0}".format(nullLike)
# prior ranges
Example 4: addInverseCovFromNoiseFile
# Required import: import PALutils [as alias]
# Or: from PALutils import createRedNoiseCovarianceMatrix [as alias]
# (this fragment also assumes: import numpy as np)
#......... part of the code is omitted here.........
# first create G matrix from design matrix and toas
designmatrix = np.double(t2pulsar.designmatrix())
toas = np.double(t2pulsar.toas()*86400)
errs = np.double(t2pulsar.toaerrs*1e-6)
# if doing daily averaging
if dailyAverage:
# get average quantities
toas, qmatrix, errs, dmatrix, freqs, bands = PALutils.dailyAverage(t2pulsar)
# construct new daily averaged residuals and designmatrix
toas *= 86400
designmatrix = np.dot(qmatrix, dmatrix)
G = PALutils.createGmatrix(designmatrix)
# create matrix of time lags
tm = PALutils.createTimeLags(toas, toas, round=True)
# now read noise file to get model and parameters
file = open(noisefile,'r')
fH = None
tau = None
DMAmp = None
DMgam = None
for line in file.readlines():
# default parameters for different models other than pure PL
key = line.split()[0]
# get amplitude
if "Amp" == key:
Amp = float(line.split()[-1])
# get spectral index
elif "gam" == key:
gam = float(line.split()[-1])
# get efac
elif "efac" == key:
efac = float(line.split()[-1])
# get quad
elif "equad" == key:
equad = float(line.split()[-1])
# get high frequency cutoff if available
elif "fH" == key:
fH = float(line.split()[-1])
# get correlation time scale if available
elif "tau" == key:
tau = float(line.split()[-1])
# get DM Amplitude if available
elif "DMAmp" == key:
DMAmp = float(line.split()[-1])
# get DM Spectral Index if available
elif "DMgam" == key:
DMgam = float(line.split()[-1])
        # construct red and white noise covariance matrices
red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)
white = PALutils.createWhiteNoiseCovarianceMatrix(errs, efac, equad, tau=tau)
# construct post timing model marginalization covariance matrix
cov = red + white
pcov = np.dot(G.T, np.dot(cov, G))
# finally construct "inverse"
invCov = np.dot(G, np.dot(np.linalg.inv(pcov), G.T))
# create dataset for inverse covariance matrix
pulsarsgroup.create_dataset('invCov', data = invCov)
# create dataset for G matrix
pulsarsgroup.create_dataset('Gmatrix', data = G)
# record noise parameter values
pulsarsgroup.create_dataset('Amp', data = Amp)
pulsarsgroup.create_dataset('gam', data = gam)
pulsarsgroup.create_dataset('efac', data = efac)
pulsarsgroup.create_dataset('equad', data = equad)
if fH is not None:
pulsarsgroup.create_dataset('fH', data = fH)
if tau is not None:
pulsarsgroup.create_dataset('tau', data = tau)
if DMAmp is not None:
pulsarsgroup.create_dataset('DMAmp', data = DMAmp)
if DMgam is not None:
pulsarsgroup.create_dataset('DMgam', data = DMgam)
# Close the hdf5 file
self.h5file.close()
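From the key/value parsing above, the noise file is assumed to be plain text with one "key value" pair per line (fH, tau, DMAmp and DMgam are optional). The file name and numbers in this sketch are purely illustrative.
# write an illustrative noise file in the format the parser above expects
with open("J0000+0000_noise.txt", "w") as nf:
    nf.write("Amp 5.0e-15\n")     # red-noise amplitude
    nf.write("gam 4.33\n")        # red-noise spectral index
    nf.write("efac 1.1\n")        # white-noise EFAC
    nf.write("equad 1.0e-7\n")    # white-noise EQUAD
    nf.write("fH 1.0e-7\n")       # optional high-frequency cutoff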
Example 5: firstOrderLikelihood
# Required import: import PALutils [as alias]
# Or: from PALutils import createRedNoiseCovarianceMatrix [as alias]
# (this example also assumes: import numpy as np, import scipy.linalg as sl)
def firstOrderLikelihood(psr, ORF, Agw, gamgw, Ared, gred, efac, equad, \
interpolate=False):
"""
Compute the value of the first-order likelihood as defined in
Ellis, Siemens, van Haasteren (2013).
@param psr: List of pulsar object instances
@param ORF: Vector of pairwise overlap reduction values
@param Agw: Amplitude of GWB in standard strain amplitude units
@param gamgw: Power spectral index of GWB
@param Ared: Vector of amplitudes of intrinsic red noise in GWB strain units
    @param gred: Vector of power spectral indices of intrinsic red noise
@param efac: Vector of efacs
@param equad: Vector of equads
@param interpolate: Boolean to perform interpolation only with compressed
data. (default = False)
@return: Log-likelihood value
"""
npsr = len(psr)
loglike = 0
tmp = []
# start loop to evaluate auto-terms
for ll in range(npsr):
r1 = np.dot(psr[ll].G.T, psr[ll].res)
# create time lags
tm = PALutils.createTimeLags(psr[ll].toas, psr[ll].toas)
#TODO: finish option to do interpolation when using compression
# calculate auto GW covariance matrix
SC = PALutils.createRedNoiseCovarianceMatrix(tm, Agw, gamgw)
# calculate auto red noise covariance matrix
SA = PALutils.createRedNoiseCovarianceMatrix(tm, Ared[ll], gred[ll])
# create white noise covariance matrix
#TODO: add ability to use multiple efacs for different backends
white = PALutils.createWhiteNoiseCovarianceMatrix(psr[ll].err, efac[ll], equad[ll])
# total auto-covariance matrix
P = SC + SA + white
# sandwich with G matrices
Ppost = np.dot(psr[ll].G.T, np.dot(P, psr[ll].G))
# do cholesky solve
cf = sl.cho_factor(Ppost)
# solution vector P^_1 r
rr = sl.cho_solve(cf, r1)
# temporarily store P^-1 r
tmp.append(np.dot(psr[ll].G, rr))
# add to log-likelihood
loglike += -0.5 * (np.sum(np.log(2*np.pi*np.diag(cf[0])**2)) + np.dot(r1, rr))
# now compute cross terms
k = 0
for ll in range(npsr):
for kk in range(ll+1, npsr):
# create time lags
tm = PALutils.createTimeLags(psr[ll].toas, psr[kk].toas)
# create cross covariance matrix
SIJ = PALutils.createRedNoiseCovarianceMatrix(tm, 1, gamgw)
            # carry out matrix-vector operations
tmp1 = np.dot(SIJ, tmp[kk])
# add to likelihood
loglike += ORF[k]/2 * Agw**2 * np.dot(tmp[ll], tmp1)
# increment ORF counter
k += 1
return loglike
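A hedged call sketch; the per-pulsar noise values below are placeholders with the shapes the function expects, and psr and ORF are assumed to exist as in the earlier examples.
npsr = len(psr)
# placeholder per-pulsar intrinsic noise parameters (one entry per pulsar)
Ared = [1.0e-14] * npsr
gred = [3.0] * npsr
efac = [1.0] * npsr
equad = [1.0e-7] * npsr
loglike = firstOrderLikelihood(psr, ORF, 1.0e-15, 13.0 / 3.0, Ared, gred, efac, equad)
print("first-order log-likelihood = {0:.3f}".format(loglike))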
Example 6:
# Required import: import PALutils [as alias]
# Or: from PALutils import createRedNoiseCovarianceMatrix [as alias]
# (this fragment also assumes: import numpy as np)
for key in pulsargroup:
# get noise values from file TODO: change this to read directly from pulsar class
Amp = pulsargroup[key]['Amp'].value
gam = pulsargroup[key]['gam'].value
efac = pulsargroup[key]['efac'].value
equad = pulsargroup[key]['equad'].value
try:
fH = pulsargroup[key]['fH'].value
except KeyError:
fH = None
# make covariance matrix
tm = PALutils.createTimeLags(psr[ct].toas, psr[ct].toas, round=True)
red = PALutils.createRedNoiseCovarianceMatrix(tm, Amp, gam, fH=fH)
white = PALutils.createWhiteNoiseCovarianceMatrix(psr[ct].err, efac, equad)
# sandwich with G matrices
cov = red + white
noiseCov = np.dot(psr[ct].G.T, np.dot(cov, psr[ct].G))
# diagonalize GW covariance matrix and noise covariance matrix
L = np.linalg.cholesky(noiseCov)
Linv = np.linalg.inv(L)
# get generalized GW covariance matrix with Amp = 1
redGW = PALutils.createRedNoiseCovarianceMatrix(tm, 1, 4.33333)
redGW = np.dot(psr[ct].G.T, np.dot(redGW, psr[ct].G))
# sandwich with Linv matrices