This article collects typical usage examples of the scipy.integrate.trapz function in Python. If you are wondering what exactly trapz does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
Fifteen code examples of trapz are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: trapez_moms
from scipy.integrate import trapz

def trapez_moms(x_arg, y_arg, mom):
    """Return the numerically integrated moment of order `mom` of y(x)."""
    if mom == 0:
        ret = trapz(y_arg, x_arg)
    else:
        ret = trapz(y_arg * x_arg**mom, x_arg)
    return ret
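A minimal usage sketch, assuming the function above is in scope (the grid and Gaussian density below are made up for illustration): the zeroth moment of a normalized density is ~1 and the first moment recovers its mean.

import numpy as np

x = np.linspace(-5.0, 5.0, 2001)
pdf = np.exp(-0.5 * (x - 1.0)**2) / np.sqrt(2.0 * np.pi)  # Gaussian with mean 1

print(trapez_moms(x, pdf, 0))  # ~1.0 (normalization)
print(trapez_moms(x, pdf, 1))  # ~1.0 (mean)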
Example 2: dbltrapz
import numpy as np
from scipy import integrate

def dbltrapz(f, x, y):
    """Perform a double trapezoidal integration of a vectorized function
    f(x, y), where x and y are the arrays of sample points to integrate over.

    Implementation taken from:
    http://mail.scipy.org/pipermail/scipy-user/2011-February/028592.html

    For example, integrate f(x, y) = exp(-x**2 * y)
    over 1 < x < 5 and 1 < y < 10.
    Mathematica gives ~0.0891:
    N[Integrate[Integrate[Exp[-(x^2*y)], {x, 1, 5}], {y, 1, 10}], 20]

    Using this function:

    >>> import numpy as np
    >>> f = lambda x, y: np.exp(-(x**2 * y))
    >>> result = dbltrapz(f, np.linspace(1, 5, 1000), np.linspace(1, 10, 1000))
    >>> print(np.allclose(result, 0.089071862226039234609, rtol=1e-5, atol=1e-5))
    True
    """
    yy = y[:, np.newaxis]
    xx = x[np.newaxis, :]
    integrand = f(xx, yy)
    return integrate.trapz(integrate.trapz(integrand, yy, axis=0), x, axis=0)
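As a quick cross-check (a sketch, not part of the original snippet, and assuming dbltrapz as defined above), the same integral can be evaluated with scipy.integrate.dblquad, which takes the integrand as f(y, x) and callables for the inner limits:

import numpy as np
from scipy.integrate import dblquad

# dblquad expects f(y, x); x runs over [1, 5], y over [1, 10]
quad_val, _ = dblquad(lambda y, x: np.exp(-(x**2 * y)),
                      1, 5, lambda x: 1, lambda x: 10)

trapz_val = dbltrapz(lambda x, y: np.exp(-(x**2 * y)),
                     np.linspace(1, 5, 1000), np.linspace(1, 10, 1000))
print(quad_val, trapz_val)  # both ~0.0891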
Example 3: cPofZ
def cPofZ(self, arr, zx):
    # zmax is hardcoded for the time being; this should be made configurable
    zmax = 1.5
    Ng = len(arr)
    dz = 0.001
    if not hasattr(self, "cnorm"):
        Nx = int(zmax / dz)
        xar = np.linspace(0, zmax, Nx)
        rect = np.zeros((Ng, Nx))
        for i, z in enumerate(xar):
            rect[:, i] = self.PofZ(arr, float(z), dz) / dz
        self.cnorm = trapz(rect, xar, axis=1)
    if isinstance(zx, float):
        # scalar upper limit
        Nx = int(zx / dz)
        xar = np.linspace(0, zx, Nx)
        rect = np.zeros((Ng, Nx))
        for i, z in enumerate(xar):
            rect[:, i] = self.PofZ(arr, float(z), dz) / dz
        unnormC = trapz(rect, xar, axis=1)
    else:
        # array of upper limits, one per object
        zxm = zx.max()
        Nx = int(zxm / dz)
        xar = np.linspace(0, zxm, Nx)
        rect = np.zeros((Ng, Nx))
        for i, z in enumerate(xar):
            rect[:, i] = self.PofZ(arr, float(z), dz) / dz
            rect[np.where(zx > z), i] = 0.0
        unnormC = trapz(rect, xar, axis=1)
    return unnormC / self.cnorm
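The pattern above, tabulating P(z), normalizing by a trapz over the full redshift range, and integrating only up to zx for the cumulative value, can be sketched in a standalone way. The toy Gaussian P(z) below is a stand-in for the class's PofZ and is not from the original code:

import numpy as np
from scipy.integrate import trapz

zmax, dz = 1.5, 0.001
z = np.arange(0.0, zmax, dz)
pofz = np.exp(-0.5 * ((z - 0.6) / 0.1)**2)         # toy P(z), unnormalized

norm = trapz(pofz, z)                              # plays the role of self.cnorm
zx = 0.7
cumulative = trapz(pofz[z <= zx], z[z <= zx]) / norm
print(cumulative)                                  # P(z < 0.7) for the toy P(z)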
Example 4: get_SZ
def get_SZ(self, psd, geometry):
    """Compute the scattering matrices for the given PSD and geometries.

    Returns:
        The new amplitude (S) and phase (Z) matrices.
    """
    if (self._S_table is None) or (self._Z_table is None):
        raise AttributeError(
            "Initialize or load the scattering table first.")

    if (not isinstance(psd, PSD)) or self._previous_psd != psd:
        self._S_dict = {}
        self._Z_dict = {}
        psd_w = psd(self._psd_D)

        for geom in self.geometries:
            self._S_dict[geom] = \
                trapz(self._S_table[geom] * psd_w, self._psd_D)
            self._Z_dict[geom] = \
                trapz(self._Z_table[geom] * psd_w, self._psd_D)

        self._previous_psd = psd

    return (self._S_dict[geometry], self._Z_dict[geometry])
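The core operation is a PSD-weighted integral over particle size: each per-size scattering matrix is multiplied by the PSD value at that size and trapz integrates along the diameter grid. A minimal numpy sketch of that pattern (the table and PSD below are invented, not the library's data):

import numpy as np
from scipy.integrate import trapz

D = np.linspace(0.1, 8.0, 200)                               # diameter grid [mm]
S_table = np.exp(-D)[:, None, None] * np.ones((200, 2, 2))   # fake per-size 2x2 S matrices
psd_w = np.exp(-0.5 * ((D - 2.0) / 0.5)**2)                  # fake PSD n(D)

# integrate along the size axis; the result is one bulk 2x2 matrix
S_bulk = trapz(S_table * psd_w[:, None, None], D, axis=0)
print(S_bulk)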
示例5: SF_SD
def SF_SD(m, wavelength, dp, ndp, minAngle=0, maxAngle=180, angularResolution=0.5, space='theta', angleMeasure='radians', normalization=None):
# http://pymiescatt.readthedocs.io/en/latest/forward.html#SF_SD
_steps = int(1+(maxAngle-minAngle)/angularResolution)
ndp = coerceDType(ndp)
dp = coerceDType(dp)
SL = np.zeros(_steps)
SR = np.zeros(_steps)
SU = np.zeros(_steps)
kwargs = {'minAngle':minAngle,
'maxAngle':maxAngle,
'angularResolution':angularResolution,
'space':space,
'normalization':None}
for n,d in zip(ndp,dp):
measure,l,r,u = ScatteringFunction(m,wavelength,d,**kwargs)
SL += l*n
SR += r*n
SU += u*n
if normalization in ['n','N','number','particles']:
_n = trapz(ndp,dp)
SL /= _n
SR /= _n
SU /= _n
elif normalization in ['m','M','max','MAX']:
SL /= np.max(SL)
SR /= np.max(SR)
SU /= np.max(SU)
elif normalization in ['t','T','total','TOTAL']:
SL /= trapz(SL,measure)
SR /= trapz(SR,measure)
SU /= trapz(SU,measure)
return measure,SL,SR,SU
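The three normalization modes each divide by a single scalar: the total particle number trapz(ndp, dp), the curve maximum, or the curve's own integral over the angular measure. A compact sketch of the last two on a toy curve (the names and values below are placeholders, not PyMieScatt output):

import numpy as np
from scipy.integrate import trapz

theta = np.linspace(0.0, np.pi, 361)       # angular measure in radians
S = 1.0 + np.cos(theta)**2                 # toy scattering function

S_max_norm = S / np.max(S)                 # 'max' normalization: peak becomes 1
S_tot_norm = S / trapz(S, theta)           # 'total' normalization: area becomes 1
print(np.max(S_max_norm), trapz(S_tot_norm, theta))   # 1.0, ~1.0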
Example 6: integrate
def integrate(self, t0, tf, weights=None):
    """Integrate the data contained in the integrator from t0 to tf,
    inclusive. Applies weights to the data according to the function
    weights(times - t0).
    """
    if t0 < self.times[0] or tf > self.times[-1]:
        return None

    interp = spi.interp1d(x=np.squeeze(self.times),
                          y=np.squeeze(self.vals), axis=0)

    # TODO Clean up using bisect
    istart = next(i for i, x in enumerate(self.times) if x > t0)
    ifinal = next((i for i, x in enumerate(self.times) if x > tf), -1)

    times = [t0] + self.times[istart:ifinal] + [tf]
    ref = [interp(t0)] + self.vals[istart:ifinal] + [interp(tf)]
    times = np.array(times)
    ref = np.asarray(ref)

    if weights is not None:
        w = weights(times - t0)
        ref = ref * w
        z = spt.trapz(y=w, x=times)
    else:
        z = 1.0

    return spt.trapz(y=ref, x=times) / z
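The weighted case reduces to a weighted time average: trapz of w*y divided by trapz of w, both over the same time grid. A standalone sketch of that ratio (the exponential weight is only an example):

import numpy as np
from scipy.integrate import trapz

t = np.linspace(0.0, 5.0, 501)
y = np.sin(t)
w = np.exp(-(t - t[0]))                    # weights(times - t0) with t0 = t[0]

weighted_avg = trapz(w * y, t) / trapz(w, t)
print(weighted_avg)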
Example 7: compute_expected_coordinates1D
def compute_expected_coordinates1D(grid, x0):
    """
    Computes the expected value and variance of a 1D grid along its
    (only) axis.

    :param grid: 1D grid to integrate
    :param x0: coordinates of all points along the integration axis
    :type grid: numpy array
    :type x0: numpy array
    :rtype: float
    :returns:
        * exp_x0 : expected value
        * var_x0 : variance
    """
    # normalize the 1D grid so it integrates to one
    grid_integral = compute_integral1D(grid, x0)
    prob_x0 = grid / grid_integral

    # expected value and variance of the resulting 1D marginal
    exp_x0 = si.trapz(x0 * prob_x0, x=x0)
    var_x0 = si.trapz((x0 - exp_x0) * (x0 - exp_x0) * prob_x0, x=x0)

    return exp_x0, var_x0
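A quick standalone check of the same idea (compute_integral1D is replaced here by a direct trapz call, and the Gaussian grid is synthetic): the recovered expectation and variance should match the parameters used to build it.

import numpy as np
from scipy.integrate import trapz

x0 = np.linspace(-5.0, 5.0, 2001)
mu, sigma = 0.5, 1.2
grid = np.exp(-0.5 * ((x0 - mu) / sigma)**2)        # unnormalized 1D grid

prob = grid / trapz(grid, x=x0)                     # normalize
exp_x0 = trapz(x0 * prob, x=x0)
var_x0 = trapz((x0 - exp_x0)**2 * prob, x=x0)
print(exp_x0, var_x0)                               # ~0.5, ~1.44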
Example 8: get_angular_integrated
def get_angular_integrated(self, psd, geometry, property_name):
    if self._angular_table is None:
        raise AttributeError(
            "Initialize or load the table of angular-integrated "
            "quantities first."
        )

    psd_w = psd(self._psd_D)

    def sca_xsect(geom):
        return trapz(
            self._angular_table["sca_xsect"][geom] * psd_w,
            self._psd_D
        )

    if property_name == "sca_xsect":
        sca_prop = sca_xsect(geometry)
    elif property_name == "ext_xsect":
        sca_prop = trapz(
            self._angular_table["ext_xsect"][geometry] * psd_w,
            self._psd_D
        )
    elif property_name == "asym":
        sca_xsect_int = sca_xsect(geometry)
        if sca_xsect_int > 0:
            sca_prop = trapz(
                self._angular_table["asym"][geometry] *
                self._angular_table["sca_xsect"][geometry] * psd_w,
                self._psd_D
            )
            sca_prop /= sca_xsect_int
        else:
            sca_prop = 0.0

    return sca_prop
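The "asym" branch is a cross-section-weighted average: each size's asymmetry parameter is weighted by its scattering cross section and the PSD, then divided by the PSD-integrated cross section. A toy sketch of that ratio (all arrays below are invented):

import numpy as np
from scipy.integrate import trapz

D = np.linspace(0.1, 8.0, 200)
psd_w = np.exp(-D)                          # fake PSD
sca_xsect = D**2                            # fake per-size scattering cross sections
asym = 0.5 + 0.05 * D                       # fake per-size asymmetry parameters

sca_int = trapz(sca_xsect * psd_w, D)
asym_bulk = trapz(asym * sca_xsect * psd_w, D) / sca_int if sca_int > 0 else 0.0
print(asym_bulk)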
Example 9: _ZZlk
def _ZZlk(self):
    hist = np.zeros(self.result_bins)
    histLB = np.zeros(self.result_bins)
    self.ZZ_phist = [np.zeros(self.prob_pts) for i in range(4)]

    for i in range(self.blocks):
        # integer (floor) division keeps the mgrid slice limits integral
        pp00, pp01, pp10 = \
            np.mgrid[i * self.prob_pts // self.blocks:(i + 1) * self.prob_pts // self.blocks,
                     0:self.prob_pts, 0:self.prob_pts] / float(self.prob_pts)
        plk = self.diag_likelihood(self.N_ZZ, pp00, pp01, pp10)

        hist += np.histogram(pp01 + pp10, bins=self.result_bins,
                             range=(0, 1), density=False, weights=plk)[0]
        histLB += np.histogram(pp01 + pp10 - 2 * np.sqrt(pp00 * (1. - pp00 - pp01 - pp10)),
                               bins=self.result_bins, range=(0, 1),
                               density=False, weights=plk)[0]

        for i, pp in enumerate([pp00, pp01, pp10, 1. - pp00 - pp01 - pp10]):
            self.ZZ_phist[i] += np.histogram(pp, bins=self.prob_pts,
                                             range=(0, 1), density=False,
                                             weights=plk)[0]

    # normalize the histograms to unit area using trapz
    I = integrate.trapz(hist, dx=self.ZZ_dx)
    hist /= I
    ILB = integrate.trapz(histLB, dx=self.ZZ_dx)
    histLB /= ILB
    for h in self.ZZ_phist:
        Ip = integrate.trapz(h, dx=1. / self.prob_pts)
        h /= Ip

    return hist, histLB
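The normalization step at the end simply divides a histogram by its trapezoidal area so that it integrates to one. A minimal sketch of that step with a uniform bin width dx (the sampled data are synthetic):

import numpy as np
from scipy import integrate

samples = np.random.default_rng(0).normal(0.5, 0.1, 10000)
bins = 100
hist, edges = np.histogram(samples, bins=bins, range=(0, 1), weights=None)

dx = 1.0 / bins
hist = hist / integrate.trapz(hist, dx=dx)   # now the histogram has unit area
print(integrate.trapz(hist, dx=dx))          # ~1.0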
Example 10: draw_reabsorption
def draw_reabsorption(self, columns, reabsorption=1):
    try:
        f = plt.figure(self.graphnumber_reab)
        plt.xlabel('Wavelength, nm')
        plt.ylabel('Intensity, a.u.')

        Xe = self.data['Xe']
        Ec = self.data['Ec']
        Ec = Ec / integrate.trapz(Ec, dx=self.step_E)
        Ed = self.data['Ed']
        Ed = Ed / integrate.trapz(Ed, dx=self.step_E) * reabsorption

        # pad the shorter spectra with zeros so they match the wavelength axis
        if len(Xe) > len(Ec):
            Ec = np.append(Ec, np.zeros(len(Xe) - len(Ec)))
        if len(Xe) > len(Ed):
            Ed = np.append(Ed, np.zeros(len(Xe) - len(Ed)))

        plt.plot(Xe, Ec)
        plt.plot(Xe, Ed)
        f.canvas.set_window_title('Reabsorption (' + str(reabsorption) + '; Xe: Ec, Ed)')
        plt.title('Xe: Ec, Ed')
        f.show()
        self.graphnumber_reab += 1
        self.logger.info('Reabsorption calculation finished')
    except Exception:
        self.logger.info('No reabsorption graph could be drawn')
Example 11: integration
def integration(self, is_quadrant=0, use_flux_per_mrad2_or_mm2=1):
    if (self.X is None or self.Y is None):
        raise Exception("X and Y must be arrays for integration")
    if self.X.shape != self.Y.shape:
        raise Exception("X and Y must have the same shape")

    if len(self.X.shape) == 2:
        X = np.linspace(self.X.min(), self.X.max(), self.X.shape[0])
        Y = np.linspace(self.Y.min(), self.Y.max(), self.Y.shape[1])
        res1 = integrate.trapz(self.intensity, X)
        res = integrate.trapz(res1, Y)
        # res = integrate.simps(integrate.simps(self.intensity, X), Y)
        # res = self.intensity.sum() * (self.X[1,0] - self.X[0,0]) * (self.Y[0,1] - self.Y[0,0])  # regular grid only
    else:  # X and Y are 1d arrays
        if len(self.X) == 1:
            res = self.intensity[0]
        else:  # arbitrary choice: integrate along the arc length of the (X, Y) path
            XY = np.zeros_like(self.X)
            for i in range(1, len(self.X)):
                XY[i] = XY[i - 1] + np.sqrt((self.X[i] - self.X[i - 1]) ** 2 +
                                            (self.Y[i] - self.Y[i - 1]) ** 2)
            res = np.trapz(self.intensity, XY)

    # The flux is in phot/s/0.1%bw/mrad2 (or .../mm2) while the grid is in rad
    # (or m), so we must account for this:
    if use_flux_per_mrad2_or_mm2:
        res *= 1e6

    # if the calculation covers only one quadrant, the full integral is four
    # times the computed value
    if is_quadrant:
        res *= 4

    return res
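For the 2-D branch, the pattern is a nested trapz: integrate the intensity along one axis first, then integrate the resulting 1-D profile along the other. A standalone sketch on a regular grid (the toy intensity map is invented):

import numpy as np
from scipy import integrate

x = np.linspace(-1e-3, 1e-3, 201)             # e.g. rad
y = np.linspace(-1e-3, 1e-3, 201)
X, Y = np.meshgrid(x, y, indexing='ij')
intensity = np.exp(-(X**2 + Y**2) / (2 * (3e-4)**2))   # toy intensity map

res1 = integrate.trapz(intensity, y, axis=1)  # integrate over y for each x
res = integrate.trapz(res1, x)                # then over x
print(res)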
Example 12: calculate_variance
def calculate_variance(beta):
    """
    This function calculates the variance of curve beta.

    :param beta: numpy ndarray of shape (2, M) of M samples
    :rtype: numpy ndarray
    :return variance: variance
    """
    n, T = beta.shape
    betadot = gradient(beta, 1. / (T - 1))
    betadot = betadot[1]
    normbetadot = zeros(T)
    centroid = calculatecentroid(beta)
    integrand = zeros((n, n, T))
    t = linspace(0, 1, T)

    for i in range(0, T):
        normbetadot[i] = norm(betadot[:, i])
        a1 = (beta[:, i] - centroid)
        a1 = a1.reshape((n, 1))
        integrand[:, :, i] = a1.dot(a1.T) * normbetadot[i]

    l = trapz(normbetadot, t)               # total curve length
    variance = trapz(integrand, t, axis=2)  # integrate the outer products along the curve
    variance /= l

    return variance
Example 13: calculate_energy
def calculate_energy(alphadot, T=100, k=5):
    """
    Calculates the energy along a path.

    :param alphadot: numpy ndarray of shape (n, T, k) of path velocity samples
    :param T: number of samples along each curve (default = 100)
    :param k: number of curves along the path (default = 5)
    :rtype: numpy scalar
    :return E: energy
    """
    integrand1 = zeros((k, T))
    integrand2 = zeros(k)

    for i in range(0, k):
        for j in range(1, T):
            tmp = alphadot[:, j, i].T
            integrand1[i, j] = tmp.dot(alphadot[:, j, i])
        # inner integral over the curve parameter
        integrand2[i] = trapz(integrand1[i, :], linspace(0, 1, T))

    # outer integral along the path
    E = 0.5 * trapz(integrand2, linspace(0, 1, k))
    return E
Example 14: read_intensity
def read_intensity(self, freq=[0.0], t_start=0.0, t_end=100.0):
    '''
    Calculates the intensity of certain frequencies in the FID by evaluating
    the Fourier coefficients for those specific frequencies.

    Parameters
    ----------
    freq (list, float): List of frequencies for which the intensity should be
        calculated (frequencies in MHz)

    Returns
    -------
    intensity (1D-array, float): Array of the intensities

    Notes
    -----
    none
    '''
    if len(self.fid) > 0:
        fid = slice_fid(self.fid, t_start, t_end)
        intensity = np.array([])
        for x in freq:
            cos2 = np.cos(fid[:, 0] * x * 1.E6 * 2. * np.pi)
            sin2 = np.sin(fid[:, 0] * x * 1.E6 * 2. * np.pi)
            a = inte.trapz(cos2 * fid[:, 1], fid[:, 0]) / abs(fid[-1, 0]) * 2.
            b = inte.trapz(sin2 * fid[:, 1], fid[:, 0]) / abs(fid[-1, 0]) * 2.
            intensity = np.hstack((intensity, np.sqrt(a**2 + b**2)))
        return intensity
    else:
        return np.array([])
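The same Fourier-coefficient trick can be checked on a synthetic FID (a sketch, not the original class): for a pure cosine at frequency f, the recovered intensity sqrt(a**2 + b**2) should be close to its amplitude. Time is in seconds and frequency in MHz, as in the snippet above.

import numpy as np
from scipy import integrate as inte

f_mhz = 5.0                                     # test frequency in MHz
t = np.linspace(0.0, 20e-6, 20001)              # 20 microseconds of "FID"
signal = 0.8 * np.cos(2 * np.pi * f_mhz * 1e6 * t)

cos2 = np.cos(2 * np.pi * f_mhz * 1e6 * t)
sin2 = np.sin(2 * np.pi * f_mhz * 1e6 * t)
a = inte.trapz(cos2 * signal, t) / abs(t[-1]) * 2.0
b = inte.trapz(sin2 * signal, t) / abs(t[-1]) * 2.0
print(np.sqrt(a**2 + b**2))                     # ~0.8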
Example 15: calculate_MOC_initial_condition
def calculate_MOC_initial_condition(number_of_classes, U=0.177, D=0.03):
    data = genfromtxt("paper_data/re6400/upstream.csv")
    # The data provided are diameters in mm; convert to volume.
    mm2m = 0.001
    v_data = pi / 6 * (data[:, 0] * mm2m)**3
    p_data = data[:, 1]
    p_interpolated = interp1d(
        v_data, p_data, kind='nearest', fill_value=0, bounds_error=False)

    # From Galinat we know that the phase volumetric fraction is 1.7%-3%.
    alpha = (0.03 + 0.017) / 2
    A = pi / 4 * D**2
    Vc = 1.0 * A  # assume a unit-height column

    # Estimate the total number of drops from the total concentration.
    v = linspace(0, 1.2 * max(v_data), number_of_classes + 1)
    dv = v[1]
    p_empirical = p_interpolated(v) / trapz(p_interpolated(v), x=v)
    cdf_empirical = cumsum(p_empirical) * dv  # cumulative distribution
    meanVd = trapz(v * p_empirical, x=v)      # dispersed-phase mean volume
    N0 = alpha * Vc / meanVd                  # number of drops in a 1 m column
    # print("Total number of drops in unit column {0:0.0f}".format(N0))
    # print("Particle influx will be: {0:0.2f} drops/s".format(U * N0))
    # print(U * N0 * meanVd / (U * A))
    return dv, clip(
        N0 * (cdf_empirical[1:] - cdf_empirical[:-1]),
        a_min=1e-4, a_max=None
    )
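The discretization pattern at the end (normalize the interpolated density with trapz, build the CDF, then difference it to get per-class numbers) can be reproduced without the CSV file. The Gaussian density and total count below are stand-ins for the measured distribution and the estimated N0:

import numpy as np
from scipy.integrate import trapz

number_of_classes = 50
v = np.linspace(0.0, 1.0e-8, number_of_classes + 1)      # volume grid [m^3]
dv = v[1]

# stand-in for the interpolated experimental density p(v)
p_raw = np.exp(-0.5 * ((v - 4e-9) / 1e-9)**2)
p_empirical = p_raw / trapz(p_raw, x=v)                  # unit-area density
cdf_empirical = np.cumsum(p_empirical) * dv              # cumulative distribution

N0 = 1e5                                                 # assumed total drop count
N_per_class = N0 * (cdf_empirical[1:] - cdf_empirical[:-1])
print(N_per_class.sum())                                 # close to N0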