This article collects typical usage examples of the Python method scipy.interpolate.UnivariateSpline.integral. If you have been wondering how exactly UnivariateSpline.integral is used, how to call it, or where to find real-world examples of it, the curated code examples below may help. You can also look further into usage examples of the containing class, scipy.interpolate.UnivariateSpline.
Below, 15 code examples of the UnivariateSpline.integral method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
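Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the sample data is made up) showing what UnivariateSpline.integral returns: the definite integral of the fitted spline between two points.

import numpy as np
from scipy.interpolate import UnivariateSpline

# Sample a known function so the integral can be checked analytically.
x = np.linspace(0.0, np.pi, 50)
y = np.sin(x)

# s=0 forces interpolation through the samples; k=3 is a cubic spline.
spl = UnivariateSpline(x, y, k=3, s=0)

# Definite integral of the spline over [0, pi]; the exact value is 2.
print(spl.integral(0.0, np.pi))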
Example 1: softening_scale
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def softening_scale(self,mq=70,auto=True,r=None,dens=None,mass=None,kernel='Gadget'):
    opt_dict={'Gadget':0.698352, 'spline':0.977693}
    rq=self.qmass(mq)

    if auto==True:
        prof=Profile(self.p,Ngrid=512,xmin=0.001*rq,xmax=10*rq,kind='lin')
        r=prof.grid.gx
        dens=prof.dens

    dens_spline=UnivariateSpline(r, dens, k=1,s=0,ext=1)
    der_dens=dens_spline.derivative()
    derdens=der_dens(r)
    ap=UnivariateSpline(r, r*r*dens*derdens*derdens, k=1,s=0,ext=1)
    bp=UnivariateSpline(r, r*r*dens*dens, k=1,s=0,ext=1)

    B=bp.integral(0,rq)
    A=ap.integral(0,rq)/(mass*mq/100.)
    C=(B/(A))**(1/5)

    cost=opt_dict[kernel]
    N=len(self.p.Id)**(1/5)

    return C*cost/N
Example 2: integrated_rate_test
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def integrated_rate_test(mx=100., annih_prod='BB'):
    # This currently doesn't work
    file_path = MAIN_PATH + "/Spectrum/"
    file_path += '{}'.format(int(mx)) + 'GeV_' + annih_prod + '_DMspectrum.dat'
    spectrum = np.loadtxt(file_path)

    imax = 0
    for i in range(len(spectrum)):
        if spectrum[i, 1] < 10 or i == (len(spectrum) - 1):
            imax = i
            break
    spectrum = spectrum[0:imax, :]

    Nevents = 10. ** 5.
    spectrum[:, 1] /= Nevents

    test = interp1d(np.log10(spectrum[:, 0] / mx), np.log10(mx * np.log(10.) * spectrum[:, 1]), kind='cubic', bounds_error=False, fill_value=0.)
    test2 = interp1d(spectrum[:, 0], spectrum[:, 0] * spectrum[:, 1], kind='cubic', bounds_error=False, fill_value=0.)
    e_gamma_tab = np.logspace(0., np.log10(spectrum[-1, 0]), 200)
    print np.column_stack((np.log10(spectrum[:, 0] / mx), np.log10(mx * np.log(10.) * spectrum[:, 1])))

    xtab = np.linspace(np.log10(1. / mx), 0., 200)
    ng2 = np.trapz(10.**test(xtab) / 10. ** xtab, xtab) / np.log(10.)
    mean_e2 = np.trapz(test2(e_gamma_tab), e_gamma_tab)

    rate_interp = UnivariateSpline(spectrum[:, 0], spectrum[:, 1])
    avg_e_interp = UnivariateSpline(spectrum[:, 0], spectrum[:, 0] * spectrum[:, 1])
    num_gamma = rate_interp.integral(1., spectrum[-1, 0])
    mean_e = avg_e_interp.integral(1., spectrum[-1, 0])

    print 'DM Mass: ', mx
    print 'Annihilation Products: ', annih_prod
    print 'Number of Gammas > 1 GeV: ', num_gamma, ng2
    print '<E> Gamma: ', mean_e, mean_e2
    return
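For orientation, and assuming the two file columns are the photon energy E and the per-annihilation spectrum dN/dE (as the variable names suggest; the notation below is mine), the two spline integrals above compute

N_\gamma = \int_{1\,\mathrm{GeV}}^{E_{\max}} \frac{dN}{dE}\,dE ,
\qquad
\int_{1\,\mathrm{GeV}}^{E_{\max}} E\,\frac{dN}{dE}\,dE ,

the second of which is printed as '<E> Gamma' even though it is not divided by N_gamma. The np.trapz results ng2 and mean_e2 serve as independent cross-checks of the spline integrals.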
Example 3: softening_scale
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def softening_scale(self,mq=70,auto=True,r=None,dens=None,mass=None,kernel='Gadget',type=None):
    """
    Calculate the optimal softening scale following Dehnen (2012): eps = cost * a(dens) * N**-0.2.
    The output is in the same units as r. If auto==True, the r and dens arguments are ignored.
    :param mq: Mass fraction at which to calculate the softening scale.
    :param auto: If True, build the r-dens grid with the grid and Profile classes,
                 using 512 points from 0.001*rq to 10*rq.
    :param r: Array with the sampling radii.
    :param dens: Array with the density at the sampling radii. Its units need to be consistent with mass/r^3.
    :param mass: Total mass of the system; the method automatically uses the fraction mq/100*mass.
    :param kernel: Kernel to use. Different kernels have different constants C. The implemented kernels are:
                   -spline: generic cubic spline (as in Dehnen, 2012)
                   -Gadget: calculate the softening scale using the spline kernel of Gadget2
    :return: the softening scale.
    """
    opt_dict={'Gadget':0.698352, 'spline':0.977693}
    rq=self.qmass(mq,type=type)

    if auto==True:
        prof=Profile(self.p,Ngrid=512,xmin=0.01*rq,xmax=10*rq,kind='lin',type=type)
        r=prof.grid.gx
        dens=prof.dens

    dens_spline=UnivariateSpline(r, dens, k=1,s=0,ext=1)
    der_dens=dens_spline.derivative()
    derdens=der_dens(r)
    ap=UnivariateSpline(r, r*r*dens*derdens*derdens, k=1,s=0,ext=1)
    bp=UnivariateSpline(r, r*r*dens*dens, k=1,s=0,ext=1)

    B=bp.integral(0,rq)
    A=ap.integral(0,rq)/(mass*mq/100.)
    C=(B/(A))**(1/5)

    cost=opt_dict[kernel]
    N=len(self.p.Id)**(1/5)

    return C*cost/N
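Reading the quantities directly off the two softening_scale examples above (the symbols are mine, not from the source project), the two spline integrals implement

B = \int_0^{r_q} r^2\,\rho(r)^2\,dr ,
\qquad
A = \frac{1}{M\,m_q/100}\int_0^{r_q} r^2\,\rho(r)\,\rho'(r)^2\,dr ,
\qquad
\epsilon = C_{\mathrm{kernel}}\left(\frac{B}{A}\right)^{1/5} N_{\mathrm{part}}^{-1/5} ,

with C_kernel taken from opt_dict (about 0.698 for the Gadget2 spline kernel and 0.978 for the generic cubic spline).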
Example 4: get_profile_func
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def get_profile_func(self, profile_x, profile_y):
    from scipy.interpolate import UnivariateSpline
    profile_ = UnivariateSpline(profile_x, profile_y, k=3, s=0,
                                bbox=[0, 1])
    integ = profile_.integral(0, 1)

    def profile(o, x, slitpos):
        return profile_(slitpos) / integ

    return profile
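A standalone sketch of the normalization idea used above; the slit-profile data here is invented, and only the UnivariateSpline/integral calls mirror the example.

import numpy as np
from scipy.interpolate import UnivariateSpline

# Made-up slit profile sampled on [0, 1].
slitpos = np.linspace(0.0, 1.0, 64)
flux = np.exp(-0.5 * ((slitpos - 0.5) / 0.1) ** 2)

profile_ = UnivariateSpline(slitpos, flux, k=3, s=0, bbox=[0, 1])
integ = profile_.integral(0, 1)

# Dividing by `integ` makes the profile integrate to ~1 over the slit.
check = UnivariateSpline(slitpos, flux / integ, k=3, s=0, bbox=[0, 1])
print(check.integral(0, 1))  # approximately 1.0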
Example 5: work
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def work(z):
    Lband, M_AB, S_nu, Phi = _qlf.qlf(band, z, Lbolbins)
    Phi /= (U.MPC ** 3)
    Dc = cosmology.Dc(z)
    DL = Dc * (1 + z)
    distance_modulus = 5 * numpy.log10(DL / (0.01 * U.KPC))
    m_AB = M_AB + distance_modulus
    spl = UnivariateSpline(-m_AB, Phi, k=5)
    print z, DL, m_AB[0], m_AB[-1], Phi.sum(), m_AB.max(), m_AB.min()
    integrated = spl.integral(-faint, -bright)
    if integrated < 0: integrated = 0
    return integrated
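Note that the spline abscissa is -m_AB so that it is monotonically increasing; with that change of variables (my notation, not from the source), the returned quantity is

\int_{-m_{\mathrm{faint}}}^{-m_{\mathrm{bright}}} \Phi\,d(-m_{\mathrm{AB}}) = \int_{m_{\mathrm{bright}}}^{m_{\mathrm{faint}}} \Phi(m_{\mathrm{AB}})\,dm_{\mathrm{AB}} ,

i.e. the luminosity function integrated between the bright and faint apparent-magnitude limits, clipped at zero if the k=5 spline overshoots into negative values.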
Example 6: integral3d
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def integral3d(self, a=None, b=None):
    """
    Return the definite integral of the spline of (r**2 * values**2) between two given points a and b.

    Args:
        a: First point. rmesh[0] if a is None.
        b: Last point. rmesh[-1] if b is None.
    """
    a = self.rmesh[0] if a is None else a
    b = self.rmesh[-1] if b is None else b
    r2v2_spline = UnivariateSpline(self.rmesh, (self.rmesh * self.values) ** 2, s=0)
    return r2v2_spline.integral(a, b)
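A minimal standalone sketch of the same pattern, with a trapezoidal cross-check; the radial mesh and values are made up, not taken from the source project.

import numpy as np
from scipy.interpolate import UnivariateSpline

# Hypothetical radial mesh and radial function values.
rmesh = np.linspace(0.01, 10.0, 200)
values = np.exp(-rmesh)

# Spline of (r * v(r))**2, integrated over the full mesh.
r2v2_spline = UnivariateSpline(rmesh, (rmesh * values) ** 2, s=0)
print(r2v2_spline.integral(rmesh[0], rmesh[-1]))
print(np.trapz((rmesh * values) ** 2, rmesh))  # should agree closely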
Example 7: check_logders
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def check_logders(self):
    """Check the quality of the log derivatives."""
    merits = {}
    for (state, pp_ld) in self.pp_logders.items():
        ae_ld = self.ae_logders[state]
        rmesh = ae_ld.rmesh
        f = np.abs(np.tan(ae_ld.values) - np.tan(pp_ld.values))

        from scipy.interpolate import UnivariateSpline
        spline = UnivariateSpline(rmesh, f, s=0)
        merits[state] = spline.integral(rmesh[0], rmesh[-1]) / (rmesh[-1] - rmesh[0])

    return merits
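In formula form (my notation), the merit assigned to each state above is the mesh-averaged absolute difference between the tangents of the all-electron and pseudopotential log-derivative values,

\mathrm{merit} = \frac{1}{r_N - r_0}\int_{r_0}^{r_N} \big|\tan v_{\mathrm{AE}}(r) - \tan v_{\mathrm{PP}}(r)\big|\,dr ,

where the integral is evaluated with an interpolating spline (s=0) over the radial mesh.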
Example 8: integral
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def integral(self, a, b):
    # capturing contributions outside domain of interpolation
    below_dx = np.max([0.0, self.bnds[0] - a])
    above_dx = np.max([0.0, b - self.bnds[1]])
    outside_contribution = (below_dx + above_dx) * self.fill_value

    # adjusting interval to spline domain
    a_f = np.max([a, self.bnds[0]])
    b_f = np.min([b, self.bnds[1]])

    if a_f >= b_f:
        return outside_contribution
    else:
        return outside_contribution + UnivariateSpline.integral(self, a_f, b_f)
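Example 8 overrides integral in a subclass so that the region outside the fitted range contributes a constant fill value. Here is a minimal, self-contained sketch of how such a subclass might be wired up; the class name FilledSpline, the bnds/fill_value attributes and the test data are assumptions for illustration, not taken from the source project.

import numpy as np
from scipy.interpolate import UnivariateSpline

class FilledSpline(UnivariateSpline):
    """Hypothetical spline whose integral treats points outside the data
    range as a constant fill_value (illustration only)."""

    def __init__(self, x, y, fill_value=0.0, **kwargs):
        UnivariateSpline.__init__(self, x, y, **kwargs)
        self.bnds = (x[0], x[-1])
        self.fill_value = fill_value

    def integral(self, a, b):
        below_dx = max(0.0, self.bnds[0] - a)
        above_dx = max(0.0, b - self.bnds[1])
        outside = (below_dx + above_dx) * self.fill_value
        a_f, b_f = max(a, self.bnds[0]), min(b, self.bnds[1])
        if a_f >= b_f:
            return outside
        return outside + UnivariateSpline.integral(self, a_f, b_f)

x = np.linspace(0.0, 1.0, 20)
spl = FilledSpline(x, x ** 2, fill_value=1.0, k=3, s=0)
print(spl.integral(-1.0, 2.0))  # ~1/3 inside [0, 1] plus 2.0 * fill_value outside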
Example 9: get_profile_func_ab
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def get_profile_func_ab(self, profile_x, profile_y):
    from scipy.interpolate import UnivariateSpline
    profile_ = UnivariateSpline(profile_x, profile_y, k=3, s=0,
                                bbox=[0, 1])
    roots = list(profile_.roots())
    #assert(len(roots) == 1)

    integ_list = []
    from itertools import izip, cycle
    for ss, int_r1, int_r2 in izip(cycle([1, -1]),
                                   [0] + roots,
                                   roots + [1]):
        #print ss, int_r1, int_r2
        integ_list.append(profile_.integral(int_r1, int_r2))
    integ = np.abs(np.sum(integ_list))

    def profile(o, x, slitpos):
        return profile_(slitpos) / integ

    return profile
Example 10: process
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
# ......... (part of the code omitted here) .........
# estimate lsf
ordermap_bpixed = order_map.copy()
ordermap_bpixed[pix_mask] = 0
ordermap_bpixed[~np.isfinite(orderflat)] = 0
#
if IF_POINT_SOURCE:  # if point source
    x1, x2 = 800, 1200
    bins, lsf_list = ap.extract_lsf(ordermap_bpixed, slitpos_map,
                                    data_minus_flattened,
                                    x1, x2, bins=None)

    hh0 = np.sum(lsf_list, axis=0)
    peak1, peak2 = max(hh0), -min(hh0)
    lsf_x = 0.5*(bins[1:]+bins[:-1])
    lsf_y = hh0/(peak1+peak2)

    from scipy.interpolate import UnivariateSpline
    lsf_ = UnivariateSpline(lsf_x, lsf_y, k=3, s=0,
                            bbox=[0, 1])
    roots = list(lsf_.roots())
    #assert(len(roots) == 1)

    integ_list = []
    from itertools import izip, cycle
    for ss, int_r1, int_r2 in izip(cycle([1, -1]),
                                   [0] + roots,
                                   roots + [1]):
        #print ss, int_r1, int_r2
        integ_list.append(lsf_.integral(int_r1, int_r2))
    integ = np.abs(np.sum(integ_list))

    def lsf(o, x, slitpos):
        return lsf_(slitpos) / integ

    # make weight map
    profile_map = ap.make_profile_map(order_map, slitpos_map, lsf)

    # extract spec
    s_list, v_list = ap.extract_stellar(ordermap_bpixed,
                                        profile_map,
                                        variance_map,
                                        data_minus_flattened,
                                        slitoffset_map=slitoffset_map)

    # make synth_spec : profile * spectra
    synth_map = ap.make_synth_map(order_map, slitpos_map,
                                  profile_map, s_list,
                                  slitoffset_map=slitoffset_map)

    sig_map = (data_minus_flattened - synth_map)**2/variance_map
    ## mark sig_map > 100 as cosmic ray. The threshold needs to be fixed.

    # reextract with new variance map and CR rejected
    variance_map_r = variance_map0 + np.abs(synth_map)/gain
    variance_map2 = np.max([variance_map, variance_map_r], axis=0)
    variance_map2[np.abs(sig_map) > 100] = np.nan

    # extract spec
Example 11: estimate
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def estimate(self, observedLC):
    """!
    Estimate intrinsicFlux, period, eccentricity, omega, tau, & a2sini
    """
    ## intrinsicFluxEst
    maxPeriodFactor = 10.0
    model = LombScargleFast().fit(observedLC.t, observedLC.y, observedLC.yerr)
    periods, power = model.periodogram_auto(nyquist_factor = observedLC.numCadences)
    model.optimizer.period_range = (2.0*np.mean(observedLC.t[1:] - observedLC.t[:-1]), maxPeriodFactor*observedLC.T)
    periodEst = model.best_period

    numIntrinsicFlux = 100
    lowestFlux = np.min(observedLC.y[np.where(observedLC.mask == 1.0)])
    highestFlux = np.max(observedLC.y[np.where(observedLC.mask == 1.0)])
    intrinsicFlux = np.linspace(np.min(observedLC.y[np.where(observedLC.mask == 1.0)]), np.max(observedLC.y[np.where(observedLC.mask == 1.0)]), num = numIntrinsicFlux)
    intrinsicFluxList = list()
    totalIntegralList = list()
    for f in xrange(1, numIntrinsicFlux - 1):
        beamedLC = observedLC.copy()
        beamedLC.x = np.require(np.zeros(beamedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        for i in xrange(beamedLC.numCadences):
            beamedLC.y[i] = observedLC.y[i]/intrinsicFlux[f]
            beamedLC.yerr[i] = observedLC.yerr[i]/intrinsicFlux[f]
        dopplerLC = beamedLC.copy()
        dopplerLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        for i in xrange(observedLC.numCadences):
            dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
            dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
        dzdtLC = dopplerLC.copy()
        dzdtLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        for i in xrange(observedLC.numCadences):
            dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
            dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
        foldedLC = dzdtLC.fold(periodEst)
        foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        integralSpline = UnivariateSpline(foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)], 1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k = 3, s = None, check_finite = True)
        totalIntegral = math.fabs(integralSpline.integral(foldedLC.t[0], foldedLC.t[-1]))
        intrinsicFluxList.append(intrinsicFlux[f])
        totalIntegralList.append(totalIntegral)
    intrinsicFluxEst = intrinsicFluxList[np.where(np.array(totalIntegralList) == np.min(np.array(totalIntegralList)))[0][0]]

    ## periodEst
    for i in xrange(beamedLC.numCadences):
        beamedLC.y[i] = observedLC.y[i]/intrinsicFluxEst
        beamedLC.yerr[i] = observedLC.yerr[i]/intrinsicFluxEst
        dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
        dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
        dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
        dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
    model = LombScargleFast().fit(dzdtLC.t, dzdtLC.y, dzdtLC.yerr)
    periods, power = model.periodogram_auto(nyquist_factor = dzdtLC.numCadences)
    model.optimizer.period_range = (2.0*np.mean(dzdtLC.t[1:] - dzdtLC.t[:-1]), maxPeriodFactor*dzdtLC.T)
    periodEst = model.best_period

    ## eccentricityEst & omega2Est
    # First find a full period going from rising to falling.
    risingSpline = UnivariateSpline(dzdtLC.t[np.where(dzdtLC.mask == 1.0)], dzdtLC.y[np.where(dzdtLC.mask == 1.0)], 1.0/dzdtLC.yerr[np.where(dzdtLC.mask == 1.0)], k = 3, s = None, check_finite = True)
    risingSplineRoots = risingSpline.roots()
    firstRoot = risingSplineRoots[0]
    if risingSpline.derivatives(risingSplineRoots[0])[1] > 0.0:
        tRising = risingSplineRoots[0]
    else:
        tRising = risingSplineRoots[1]
    # Now fold the LC starting at tRising and going for a full period.
    foldedLC = dzdtLC.fold(periodEst, tStart = tRising)
    foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
    # Fit the folded LC with a spline to figure out alpha and beta
    fitLC = foldedLC.copy()
    foldedSpline = UnivariateSpline(foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)], 1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k = 3, s = 2*foldedLC.numCadences, check_finite = True)
    for i in xrange(fitLC.numCadences):
        fitLC.x[i] = foldedSpline(fitLC.t[i])
    # Now get the roots and find the falling root
    tZeros = foldedSpline.roots()
    if tZeros.shape[0] == 1:  # We have found just tFalling
        tFalling = tZeros[0]
        tRising = fitLC.t[0]
        startIndex = 0
        tFull = fitLC.t[-1]
        stopIndex = fitLC.numCadences
    elif tZeros.shape[0] == 2:  # We have found tFalling and one of tRising or tFull
        if foldedSpline.derivatives(tZeros[0])[1] < 0.0:
            tFalling = tZeros[0]
            tFull = tZeros[1]
            stopIndex = np.where(fitLC.t < tFull)[0][-1]
            tRising = fitLC.t[0]
            startIndex = 0
        elif foldedSpline.derivatives(tZeros[0])[1] > 0.0:
            if foldedSpline.derivatives(tZeros[1])[1] < 0.0:
                tRising = tZeros[0]
                startIndex = np.where(fitLC.t > tRising)[0][0]
                tFalling = tZeros[1]
                tFull = fitLC.t[-1]
                stopIndex = fitLC.numCadences
            else:
                raise RuntimeError('Could not determine alpha & omega correctly because the first root is rising but the second root is not falling!')
    elif tZeros.shape[0] == 3:
        tRising = tZeros[0]
        startIndex = np.where(fitLC.t > tRising)[0][0]
        tFalling = tZeros[1]
        tFull = tZeros[2]
        stopIndex = np.where(fitLC.t < tFull)[0][-1]
# ......... (remainder of the code omitted here) .........
Example 12: GeneralModel
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
class GeneralModel(Model.Model):

    def __init__(self,R,dens,rc=1,Mmax=1, G='kpc km2 / (M_sun s2)', denorm=True, use_c=False):
        """
        The purpose of the general model is to start from a density law R-dens to build a galaxy model.
        Note: because of how the model is built, it always assumes that for R>rmax the density is 0, the mass
        stays constant at its maximum value and the potential goes as M/r. For models that only reach their
        maximum mass at infinity this can be a problem, so one should use models with finite mass, or truncate
        and sample them up to the radius where the mass reaches its maximum value. For non-truncated models it
        is better to use analytic models if possible.
        Also, in the calculation of the potential, Rinf is set equal to the last point of R, since for R>Rmax
        dens=0 anyway and the integral int_Rmax^inf dens*r*dr is always 0.
        :param R: list of radii; it needs to be in the form r/rc
        :param dens: list of dens at radii R. It can also be a function or a lambda function that depends
                     only on the variable R=r/rc
        :param rc: Scale length of the model; the input R will be multiplied by rc before starting the calculations
        :param Mmax: Physical value of the mass at Rmax (the last point of the R grid). The physical units of dens,
                     pot and mass will depend on the units of Mmax
        :param G: Value of the gravitational constant G; it can be a number or a string.
                  If G=1, the physical value of the potential will be Phi/G.
                  If a string, it must follow the unit rules of the module astropy.constants.
                  E.g. to have G in units of kpc3/Msun s2, the input string is 'kpc3 / (M_sun s2)'.
                  See http://astrofrog-debug.readthedocs.org/en/latest/constants/
        :param denorm: If True, the output values of mass, dens and pot will be denormalized using Mmax and G.
        :param use_c: If True, calculate pot and mass with a C loop. WARNING: it produces noisier results
        """
        self.rc=rc
        self.Mmax=Mmax

        if isinstance(G,float) or isinstance(G,int): self.G=G
        else:
            GG=conG.to(G)
            self.G=GG.value

        if isinstance(dens,list) or isinstance(dens,tuple) or isinstance(dens,np.ndarray): self.dens_arr=np.array(dens,dtype=float,order='C')
        else:
            self.dens_arr=dens(R)

        self.R=np.array(R,dtype=float,order='C')*self.rc
        self.mass_arr=np.empty_like(self.dens_arr,dtype=float,order='C')
        self.pot_arr=np.empty_like(self.dens_arr,dtype=float,order='C')
        self.use_c=use_c
        self._use_nparray=False
        self._dens=UnivariateSpline(self.R,self.dens_arr, k=1, s=0, ext=1)  # for R>rmax, dens=0

        if self.use_c==True:
            # build an absolute path so the relative path to the shared library works
            dll_name='model_c_ext/GeneralModel.so'
            dllabspath = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + dll_name
            lib = ct.CDLL(dllabspath)

            mass_func=lib.evalmass
            mass_func.restype=None
            mass_func.argtypes=[ndpointer(ct.c_double, flags="C_CONTIGUOUS"), ndpointer(ct.c_double, flags="C_CONTIGUOUS"),ct.c_int,ndpointer(ct.c_double, flags="C_CONTIGUOUS")]
            mass_func(self.R,self.dens_arr,len(self.dens_arr),self.mass_arr)
            self._mass_int=UnivariateSpline(self.R,self.mass_arr, k=1, s=0, ext=3)  # ext=3: constant for R>Rmax; there are no more particles and the mass stays the same

            pot_func=lib.evalpot
            pot_func.restype=None
            pot_func.argtypes=[ndpointer(ct.c_double, flags="C_CONTIGUOUS"),ndpointer(ct.c_double, flags="C_CONTIGUOUS"),ndpointer(ct.c_double, flags="C_CONTIGUOUS"),ct.c_int,ndpointer(ct.c_double, flags="C_CONTIGUOUS")]
            pot_func(self.R,self.dens_arr,self.mass_arr,len(self.dens_arr),self.pot_arr)
            self._pot_int=UnivariateSpline(self.R,self.pot_arr, k=1, s=0, ext=1)
        else:
            self._dm2=UnivariateSpline(self.R,self.R*self.R*self.dens_arr, k=2, s=0,ext=1)
            self._dm=UnivariateSpline(self.R,self.R*self.dens_arr, k=1, s=0,ext=1)

            # Evaluate mass and pot on the input R grid
            # mass
            func=np.vectorize(self._dm2.integral)
            self.mass_arr=func(0,self.R)
            # pot
            a=(1/self.R)*self.mass_arr
            func=np.vectorize(self._dm.integral)
            b=func(self.R,self.R[-1])
            self.pot_arr=a+b

        if denorm==True: self._set_denorm(self.Mmax)
        else:
            self.Mc=1
            self.dc=1
            self.pc=1

    def _evaluatedens(self,R):
        return self.dc*self._dens(R)

    def _evaluatemass(self,R):
        return self.Mc*self._dm2.integral(0,R)

    def _evaluatemassc(self,R):
        return self.Mc*self._mass_int(R)

# ......... (remainder of the code omitted here) .........
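In the pure-Python branch above (use_c=False), the two helper splines _dm2 and _dm are integrated to build the mass and potential grids. Up to the normalization constants applied afterwards (Mc, pc and the factors of G absorbed into them; the notation below is mine), this corresponds to

M(R) \propto \int_0^{R}\rho(r)\,r^2\,dr ,
\qquad
\Phi(R) \propto \frac{M(R)}{R} + \int_R^{R_{\max}}\rho(r)\,r\,dr .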
Example 13: len
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
if(len(curr_data) <= 3):
    curr_data = np.concatenate([curr_data, np.zeros((3,num_params))])

time = np.arange(0, len(curr_data), 1)  # the sample 'times' (0 to number of samples)

acc_X = curr_data[:,0]
acc_Y = curr_data[:,1]
acc_Z = curr_data[:,2]

# fit the spline, then take its antiderivatives

# the interpolation representation
tck_X = UnivariateSpline(time, acc_X, s=0)
# integrals
tck_X.integral = tck_X.antiderivative()
tck_X.integral_2 = tck_X.antiderivative(2)

# the interpolation representation
tck_Y = UnivariateSpline(time, acc_Y, s=0)
# integrals
tck_Y.integral = tck_Y.antiderivative()
tck_Y.integral_2 = tck_Y.antiderivative(2)

# the interpolation representation
tck_Z = UnivariateSpline(time, acc_Z, s=0)
# integrals
tck_Z.integral = tck_Z.antiderivative()
tck_Z.integral_2 = tck_Z.antiderivative(2)

Developer: galenwilkerson, Project: Handwriting-Recognition-using-acceleration-data, Lines: 33, Source: draw_letters.py
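Example 13 stores antiderivative splines on the integral/integral_2 attributes, shadowing the bound integral method on those instances. The connection between the two APIs is that evaluating the antiderivative at the endpoints reproduces the definite integral. A minimal sketch with made-up data (not from the project above):

import numpy as np
from scipy.interpolate import UnivariateSpline

# Made-up acceleration samples at unit-spaced 'times'.
time = np.arange(0, 20, 1)
acc = np.sin(0.3 * time)

spl = UnivariateSpline(time, acc, s=0)
F = spl.antiderivative()    # one integration (velocity-like spline)

print(spl.integral(0, 19))  # definite integral over the full range
print(F(19) - F(0))         # same value, up to floating-point error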
Example 14: estimate
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def estimate(self, observedLC):
    """!
    Estimate intrinsicFlux, period, eccentricity, omega, tau, & a2sini
    """
    # fluxEst
    if observedLC.numCadences > 50:
        model = gatspy.periodic.LombScargleFast(optimizer_kwds={"quiet": True}).fit(observedLC.t,
                                                                                    observedLC.y,
                                                                                    observedLC.yerr)
    else:
        model = gatspy.periodic.LombScargle(optimizer_kwds={"quiet": True}).fit(observedLC.t,
                                                                                observedLC.y,
                                                                                observedLC.yerr)
    periods, power = model.periodogram_auto(nyquist_factor=observedLC.numCadences)
    model.optimizer.period_range = (
        2.0*np.mean(observedLC.t[1:] - observedLC.t[:-1]), observedLC.T)
    periodEst = model.best_period

    numIntrinsicFlux = 100
    lowestFlux = np.min(observedLC.y[np.where(observedLC.mask == 1.0)])
    highestFlux = np.max(observedLC.y[np.where(observedLC.mask == 1.0)])
    intrinsicFlux = np.linspace(np.min(observedLC.y[np.where(observedLC.mask == 1.0)]), np.max(
        observedLC.y[np.where(observedLC.mask == 1.0)]), num=numIntrinsicFlux)
    intrinsicFluxList = list()
    totalIntegralList = list()
    for f in xrange(1, numIntrinsicFlux - 1):
        beamedLC = observedLC.copy()
        beamedLC.x = np.require(np.zeros(beamedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        for i in xrange(beamedLC.numCadences):
            beamedLC.y[i] = observedLC.y[i]/intrinsicFlux[f]
            beamedLC.yerr[i] = observedLC.yerr[i]/intrinsicFlux[f]
        dopplerLC = beamedLC.copy()
        dopplerLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        for i in xrange(observedLC.numCadences):
            dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
            dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
        dzdtLC = dopplerLC.copy()
        dzdtLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        for i in xrange(observedLC.numCadences):
            dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
            dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
        foldedLC = dzdtLC.fold(periodEst)
        foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        integralSpline = UnivariateSpline(
            foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)],
            1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k=3, s=None, check_finite=True)
        totalIntegral = math.fabs(integralSpline.integral(foldedLC.t[0], foldedLC.t[-1]))
        intrinsicFluxList.append(intrinsicFlux[f])
        totalIntegralList.append(totalIntegral)
    fluxEst = intrinsicFluxList[
        np.where(np.array(totalIntegralList) == np.min(np.array(totalIntegralList)))[0][0]]

    # periodEst
    for i in xrange(beamedLC.numCadences):
        beamedLC.y[i] = observedLC.y[i]/fluxEst
        beamedLC.yerr[i] = observedLC.yerr[i]/fluxEst
        dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
        dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
        dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
        dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
    if observedLC.numCadences > 50:
        model = gatspy.periodic.LombScargleFast(optimizer_kwds={"quiet": True}).fit(dzdtLC.t,
                                                                                    dzdtLC.y,
                                                                                    dzdtLC.yerr)
    else:
        model = gatspy.periodic.LombScargle(optimizer_kwds={"quiet": True}).fit(dzdtLC.t,
                                                                                dzdtLC.y,
                                                                                dzdtLC.yerr)
    periods, power = model.periodogram_auto(nyquist_factor=dzdtLC.numCadences)
    model.optimizer.period_range = (2.0*np.mean(dzdtLC.t[1:] - dzdtLC.t[:-1]), dzdtLC.T)
    periodEst = model.best_period

    # eccentricityEst & omega2Est
    # First find a full period going from rising to falling.
    risingSpline = UnivariateSpline(
        dzdtLC.t[np.where(dzdtLC.mask == 1.0)], dzdtLC.y[np.where(dzdtLC.mask == 1.0)],
        1.0/dzdtLC.yerr[np.where(dzdtLC.mask == 1.0)], k=3, s=None, check_finite=True)
    risingSplineRoots = risingSpline.roots()
    firstRoot = risingSplineRoots[0]
    if risingSpline.derivatives(risingSplineRoots[0])[1] > 0.0:
        tRising = risingSplineRoots[0]
    else:
        tRising = risingSplineRoots[1]
    # Now fold the LC starting at tRising and going for a full period.
    foldedLC = dzdtLC.fold(periodEst, tStart=tRising)
    foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
    # Fit the folded LC with a spline to figure out alpha and beta
    fitLC = foldedLC.copy()
    foldedSpline = UnivariateSpline(
        foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)],
        1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k=3, s=2*foldedLC.numCadences,
        check_finite=True)
    for i in xrange(fitLC.numCadences):
        fitLC.x[i] = foldedSpline(fitLC.t[i])
    # Now get the roots and find the falling root
    tZeros = foldedSpline.roots()

    # Find tRising, tFalling, tFull, startIndex, & stopIndex via DBSCAN #######################
    # Find the number of clusters
    '''dbsObj = DBSCAN(eps = periodEst/10.0, min_samples = 1)
    db = dbsObj.fit(tZeros.reshape(-1,1))
# ......... (remainder of the code omitted here) .........
Example 15: get_field_rotation_power_from_PK
# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Alternatively: from scipy.interpolate.UnivariateSpline import integral [as alias]
def get_field_rotation_power_from_PK(params, PK, chi_source, lmax=20000, acc=1, lsamp=None):
    results = camb.get_background(params)
    nz = int(100 * acc)
    if lmax < 3000:
        raise ValueError('field rotation assumed lmax > 3000')
    ls = np.hstack((np.arange(2, 400, 1), np.arange(401, 2600, int(10. / acc)),
                    np.arange(2650, lmax, int(50. / acc)), np.arange(lmax, lmax + 1))).astype(np.float64)

    # get grid of C_L(chi_s,k) for different redshifts
    chimaxs = np.linspace(0, chi_source, nz)
    cls = np.zeros((nz, ls.size))
    for i, chimax in enumerate(chimaxs[1:]):
        cl = cl_kappa_limber(results, PK, ls, nz, chimax)
        cls[i + 1, :] = cl
    cls[0, :] = 0
    cl_chi = RectBivariateSpline(chimaxs, ls, cls)

    # Get M(l,l') matrix
    chis = np.linspace(0, chi_source, nz, dtype=np.float64)
    zs = results.redshift_at_comoving_radial_distance(chis)
    dchis = (chis[2:] - chis[:-2]) / 2
    chis = chis[1:-1]
    zs = zs[1:-1]
    win = (1 / chis - 1 / chi_source) ** 2 / chis ** 2
    w = np.ones(chis.shape)
    cchi = cl_chi(chis, ls, grid=True)

    M = np.zeros((ls.size, ls.size))
    for i, l in enumerate(ls):
        k = (l + 0.5) / chis
        w[:] = 1
        w[k < 1e-4] = 0
        w[k >= PK.kmax] = 0
        cl = np.dot(dchis * w * PK.P(zs, k, grid=False) * win / k ** 4, cchi)
        M[i, :] = cl * l ** 4  # note we don't attempt to be accurate beyond lowest Limber
    Mf = RectBivariateSpline(ls, ls, np.log(M))

    # L sampling for output
    if lsamp is None:
        lsamp = np.hstack((np.arange(2, 20, 2), np.arange(25, 200, 10 // acc), np.arange(220, 1200, 30 // acc),
                           np.arange(1300, min(lmax // 2, 2600), 150 // acc),
                           np.arange(3000, lmax // 2 + 1, 1000 // acc)))

    # Get field rotation (curl) spectrum.
    diagm = np.diag(M)
    diagmsp = UnivariateSpline(ls, diagm, s=0)

    def high_curl_integrand(ll, lp):
        lp = lp.astype(np.int)
        r2 = (np.float64(ll) / lp) ** 2
        return lp * r2 * diagmsp(lp) / np.pi

    clcurl = np.zeros(lsamp.shape)
    lsall = np.arange(2, lmax + 1, dtype=np.float64)

    for i, ll in enumerate(lsamp):
        l = np.float64(ll)
        lmin = lsall[0]
        lpmax = min(lmax, int(max(1000, l * 2)))
        if ll < 500:
            lcalc = lsall[0:lpmax - 2]
        else:
            # sampling in l', with denser around l~l'
            lcalc = np.hstack((lsall[0:20:4],
                               lsall[29:ll - 200:35],
                               lsall[ll - 190:ll + 210:6],
                               lsall[ll + 220:lpmax + 60:60]))
        tmps = np.zeros(lcalc.shape)
        for ix, lp in enumerate(lcalc):
            llp = int(lp)
            lp = np.float64(lp)
            if abs(ll - llp) > 200 and lp > 200:
                nphi = 2 * int(min(lp / 10 * acc, 200)) + 1
            elif ll > 2000:
                nphi = 2 * int(lp / 10 * acc) + 1
            else:
                nphi = 2 * int(lp) + 1
            dphi = 2 * np.pi / nphi
            phi = np.linspace(dphi, (nphi - 1) / 2 * dphi, (nphi - 1) // 2)  # even and don't need zero
            w = 2 * np.ones(phi.size)
            cosphi = np.cos(phi)
            lrat = lp / l
            lfact = np.sqrt(1 + lrat ** 2 - 2 * cosphi * lrat)
            lnorm = l * lfact
            lfact[lfact <= 0] = 1
            w[lnorm < lmin] = 0
            w[lnorm > lmax] = 0
            lnorm = np.maximum(lmin, np.minimum(lmax, lnorm))
            tmps[ix] += lp * np.dot(w, (np.sin(phi) / lfact ** 2 * (cosphi - lrat)) ** 2 *
                                    np.exp(Mf(lnorm, lp, grid=False))) * dphi
        sp = UnivariateSpline(lcalc, tmps, s=0)
        clcurl[i] = sp.integral(2, lpmax - 1) * 4 / (2 * np.pi) ** 2
        if lpmax < lmax:
            tail = np.sum(high_curl_integrand(ll, lsall[lpmax - 2:]))
            clcurl[i] += tail
# ......... (remainder of the code omitted here) .........