This article collects typical usage examples of the scipy.interpolate.interp1d function in Python. If you have been wondering how interp1d is used in practice, what its arguments mean, or what real code that calls it looks like, the curated examples here should help.
Fifteen code examples of the interp1d function are shown below, ordered by popularity by default.
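Before the individual examples, here is a minimal, self-contained sketch of the basic interp1d call pattern. The sample arrays are synthetic and only meant to illustrate the default linear mode, the kind argument, and the bounds_error/fill_value options.

import numpy as np
from scipy.interpolate import interp1d

# Sample points (synthetic data for illustration only)
x = np.linspace(0, 10, 11)
y = np.sin(x)

# interp1d returns a callable that evaluates the interpolant
f_linear = interp1d(x, y)                    # linear by default
f_cubic = interp1d(x, y, kind='cubic')       # cubic spline through the same points
f_safe = interp1d(x, y, bounds_error=False,  # no exception outside [0, 10];
                  fill_value=0.0)            # return 0.0 there instead

x_new = np.linspace(0, 10, 101)
print(f_linear(x_new)[:5], f_cubic(5.5), f_safe(12.0))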
Example 1: kcorr
def kcorr(l_o, fl_o, band, z, axis=0):
    '''
    Compute the K-correction K_QR for `band` at redshift `z`, given an
    observed-frame wavelength grid `l_o` and observed fluxes `fl_o`.
    '''
# read in filter table
band_tab = t.Table.read('filters/{}_SDSS.res'.format(band),
names=['lam', 'f'], format='ascii')
# set up interpolator
band_interp = interp1d(x=band_tab['lam'].quantity.value,
y=band_tab['f'], fill_value=0.,
bounds_error=False)
l_o = l_o.to('AA')
l_e = l_o / (1. + z)
R_o = band_interp(l_o)
R_e = band_interp(l_e)
fl_e_ = interp1d(x=l_e, y=fl_o,
bounds_error=False, fill_value='extrapolate')
fl_o_ = interp1d(x=l_o, y=fl_o,
bounds_error=False, fill_value='extrapolate')
n = np.trapz(x=l_o,
y=(R_o * l_o * fl_o_(l_o / (1. + z))),
axis=axis)
d = np.trapz(x=l_e,
y=(R_e * l_e * fl_e_(l_e)),
axis=axis)
F = n / d
K_QR = -2.5 * np.log10(F.to('').value / (1. + z))
return K_QR
Example 2: get_fn
def get_fn(data, fp):
""" Given some scores data and a false negatives rate
find the corresponding false positive rate in the ROC curve.
If the point does not exist, we will interpolate it.
"""
if fp in data.fpr:
pos = np.where(data.fpr == fp)
fnr, thr = np.mean(data.fnr[pos]), np.mean(data.thrs[pos])
else:
# Set data for interpolation
x = np.sort(data.fpr)
        # Set up a new range which includes the wanted value
xnew = np.arange(fp, x[-1])
# Interpolate the FN
y = np.sort(data.tpr)
f = interpolate.interp1d(x, y)
tpr = f(xnew)[0]
fnr = 1 - tpr
        # Interpolate the threshold
y = np.sort(data.thrs)
f = interpolate.interp1d(x, y)
thr = f(xnew)[0]
print("Dado el valor de fp: {0}, el valor de fnr es: {1} y el umbral: {2} "
.format(fp, fnr, thr))
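A hedged usage sketch for get_fn: the `RocData` container and its field values below are invented for illustration and are not part of the original example; they just mirror the attributes the function expects.

from collections import namedtuple
import numpy as np

# Hypothetical container with the fields get_fn reads
RocData = namedtuple("RocData", ["fpr", "fnr", "tpr", "thrs"])

data = RocData(fpr=np.array([0.0, 0.1, 0.2, 0.5, 1.0]),
               fnr=np.array([1.0, 0.4, 0.2, 0.05, 0.0]),
               tpr=np.array([0.0, 0.6, 0.8, 0.95, 1.0]),
               thrs=np.array([0.9, 0.7, 0.5, 0.3, 0.1]))

get_fn(data, 0.1)   # exact point: averages the stored fnr and threshold
get_fn(data, 0.3)   # missing point: interpolated from the sorted curves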
Example 3: position_interpolator
def position_interpolator(background):
global positions
if not isfile(POSITIONS_DUMP_FILENAME):
def callback(event, x, y, flags, parameters):
            if event == 1:  # cv2.EVENT_LBUTTONDOWN
positions.append(Coordinate(x, y))
cv2.namedWindow("Interpolator")
cv2.setMouseCallback("Interpolator", callback)
while True:
cv2.imshow("Interpolator", background.array)
if cv2.waitKey() & 0xFF == 27:
break
cv2.destroyWindow("Interpolator")
        with open(POSITIONS_DUMP_FILENAME, "wb") as positions_dump_file:
            pickle.dump(positions, positions_dump_file)
    else:
        with open(POSITIONS_DUMP_FILENAME, "rb") as positions_dump_file:
            positions = pickle.load(positions_dump_file)
    t = [i * STEP for i in range(len(positions))]
    x = [p.x for p in positions]
    y = [p.y for p in positions]
f_x = interpolate.interp1d(t, x, kind = "quadratic")
f_y = interpolate.interp1d(t, y, kind = "quadratic")
return PositionInterpolator(f_x, f_y)
Example 4: display
def display(self,item):
self.currentFile=self.listShots.currentItem().text()
signal1=self.signalChoice1.currentText()
signal2=self.signalChoice2.currentText()
time1,data1,sampling1=readHdf5.getData(self.currentFile,signal1,self.env)
time2,data2,sampling2=readHdf5.getData(self.currentFile,signal2,self.env)
if sampling2>=sampling1:
self.data2=interp1d(time2,data2)(time1)
self.timei=time1
self.data1=data1
else:
self.data1=interp1d(time1,data1)(time2)
self.timei=time2
self.data2=data2
self.p1.clear()
    self.p1.plot(self.timei, self.data1)
self.p2.clear()
self.p2.plot(self.timei,self.data2)
#self.p2.linkXAxis(self.p1)
self.lr1=pg.LinearRegionItem([self.timei[0],self.timei[-1]])
self.lr1.setZValue(-10)
self.lr2=pg.LinearRegionItem([self.timei[0],self.timei[-1]])
self.lr2.setZValue(-10)
self.p1.addItem(self.lr1)
self.p2.addItem(self.lr2)
self.lr1.sigRegionChanged.connect(self.updatePlot1)
self.lr2.sigRegionChanged.connect(self.updatePlot2)
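The core pattern in display() is resampling the faster-sampled signal onto the slower signal's time base so the two can be compared point by point. A minimal standalone sketch of that pattern, with synthetic signals invented for illustration:

import numpy as np
from scipy.interpolate import interp1d

# Two synthetic signals on different time bases
t_slow = np.linspace(0.0, 1.0, 101)
t_fast = np.linspace(0.0, 1.0, 1001)
sig_slow = np.sin(2 * np.pi * 5 * t_slow)
sig_fast = np.cos(2 * np.pi * 5 * t_fast)

# Resample the faster signal onto the slower time base, as display() does
sig_fast_on_slow = interp1d(t_fast, sig_fast)(t_slow)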
Example 5: loadAcorrCoeff
def loadAcorrCoeff( calFile ):
ACA_Caldata = loadtxt(calFile)
#bitDist = np.transpose(ACA_Caldata[2:10])
#analogPower = ACA_Caldata[0]
digitalPower = ACA_Caldata[1]
fitCoeff = ACA_Caldata[10:12]
return interp1d(digitalPower, fitCoeff[0], kind='cubic'), interp1d(digitalPower, fitCoeff[1], kind='cubic')
Example 6: rebin_data
def rebin_data(self, grid, use_psf=True):
"""Calculates the center of mass of the grid and then
rebins so that the center pixel really is the center of the array
For this we do a 2-d interpolation on the grid
"""
a = psf_fitter.psffit(abs(grid), circle=False, rotate=1)
xcen = a[2]
ycen = a[2]
xlen, ylen = grid.shape
xval = arange(xlen)
yval = arange(ylen)
xint = interp1d(xval, self.xpos_abs)
yint = interp1d(yval, self.ypos_abs)
xintcen = self.xmax_pos-xint(xcen)
yintcen = self.ymax_pos-yint(ycen)
    print(self.xmax_pos, xintcen, self.ymax_pos, yintcen)
f_real = interp2d(self.xpos_rel, self.ypos_rel, real(grid))
f_imag = interp2d(self.xpos_rel, self.ypos_rel, imag(grid))
xnew = self.xpos_rel - xintcen
ynew = self.ypos_rel - yintcen
recen_grid = f_real(xnew, ynew) + 1j*f_imag(xnew, ynew)
    print(nd.center_of_mass(abs(recen_grid)))
return recen_grid
Example 7: DRIVplot
def DRIVplot(folder,keys):
T = 281
APiterator = [5,10]
AP = Analysis.AnalyseFile()
P = Analysis.AnalyseFile()
if folder[0]['IVtemp'] == T:
scale = 1e6
plt.hold(True)
plt.title('NLIV in P and AP at ' + str(T) + 'K')
plt.xlabel('Current ($\mu$A)')
plt.ylabel('V$_{NL}$ ($\mu$V)')
for f in folder:
if f['iterator'] in APiterator:
AP.add_column(f.Voltage,str(f['iterator']))
else:
P.add_column(f.Voltage,str(f['iterator']))
AP.apply(func,0,replace=False,header='Mean NLVoltage')
P.apply(func,0,replace=False,header='Mean NLVoltage')
I = numpy.arange(-295e-6,295e-6,1e-6)
ap = interpolate.interp1d(f.column('Current'),AP.column('Mean NLV'))
p = interpolate.interp1d(f.column('Current'),P.column('Mean NLV'))
        print(P)
plt.title(' ',verticalalignment='bottom')
plt.xlabel('Current ($\mu$A)')
#plt.ylabel('V$_{NL}$/|I| (V/A)')
plt.ylabel('$\Delta$V$_{NL}$/|I| (mV/A)')
plt.plot(f.column('Current')*scale,1e3*(P.column('Mean NLV')-AP.column('Mean NLV'))/abs(f.column('Current')),label =''+str(T)+ ' K')
#plt.plot(f.column('Current')*scale,1e3*(P.column('Mean NLV'))/abs(f.column('Current')),label ='P at '+str(T)+ ' K')
#plt.plot(f.column('Current')*scale,1e3*(AP.column('Mean NLV'))/abs(f.column('Current')),label ='AP at '+str(T)+ ' K')
plt.legend(loc='upper left')
else:
return 1
Example 8: interpolate
def interpolate(points, lam, flux, method):
"""
NAME:
interpolate
PURPOSE:
General purpose function that can call and use various scipy.interpolate
methods. Defined for convienience.
INPUTS:
points Set of new points to get interpolated values for.
lam The wavelengths of the data points
flux The fluxes of the data points
method The method of interpolation to use. Valide values include
'interp1d:linear', 'interp1d:quadratic', and 'splrep'.
OUTPUTS:
Interpolated set of values for each corresponding input point.
EXAMPLE:
interpFlux = interpolate(interpLam, lam, flux)
"""
if method == 'interp1d:linear':
f = interp1d(lam, flux, assume_sorted = True)
return f(points)
if method == 'interp1d:quadratic':
f = interp1d(lam, flux, kind = 'quadratic', assume_sorted = True)
return f(points)
if method == 'splrep':
return splev(points, splrep(lam, flux))
raise Exception("You didn't choose a proper interpolating method")
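A short usage sketch of this wrapper. The wavelength and flux arrays are synthetic, invented only to show the three method strings side by side (the snippet assumes interp1d, splrep, and splev have been imported from scipy.interpolate).

import numpy as np

# Synthetic spectrum (illustrative values, not from the original source)
lam = np.linspace(4000.0, 7000.0, 50)   # wavelengths in Angstroms
flux = np.exp(-((lam - 5500.0) / 400.0) ** 2)

interpLam = np.linspace(4100.0, 6900.0, 500)

flux_lin = interpolate(interpLam, lam, flux, 'interp1d:linear')
flux_quad = interpolate(interpLam, lam, flux, 'interp1d:quadratic')
flux_spl = interpolate(interpLam, lam, flux, 'splrep')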
Example 9: getF1F2Params
def getF1F2Params(element = None):
"""Returns f1 and f2 scattering factors"""
alldata = np.array([])
global F1F2
with open(os.path.join(datadir, 'f1f2_Henke.dat'),'r') as infile:
for line in infile:
if line.split()[0] == '#S':
if len(alldata):
f1 = interpolate.interp1d(alldata[:,0], alldata[:,1] - thisZ)
f2 = interpolate.interp1d(alldata[:,0], alldata[:,2] * -1.)
F1F2[thisElement] = (f1, f2)
if thisElement == element:
infile.close()
return F1F2[element]
s = line.split()
thisElement = s[2]
thisZ = int(s[1])
alldata = np.array([])
elif line[0] == '#':
continue
else:
data = np.array(line.split()).astype('float32')
if not len(alldata):
alldata = data
else:
alldata = np.vstack((alldata, data))
return alldata
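A hedged usage sketch: it assumes the f1f2_Henke.dat table is present in datadir and that the global F1F2 dictionary exists; the element label 'Si' and the 1000 eV evaluation point are purely illustrative.

# getF1F2Params returns a pair of interp1d callables for the requested element
f1_interp, f2_interp = getF1F2Params('Si')
f1_at_e = f1_interp(1000.0)   # evaluate f1 at an energy within the tabulated range
f2_at_e = f2_interp(1000.0)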
Example 10: read_BCs
def read_BCs(log_g=LOG_G):
"""Read BCs table (Girardi 2004), compute interp1d functions at log_g."""
table_gir = at.read(model_dir+"bctab_p00.txt")
# Save relevant arrays as variables
colTeff = table_gir["Teff"]
collogg = table_gir["logg"]
colBCg = table_gir["g"]
colBCr = table_gir["r"]
colBCi = table_gir["i"]
# Only keep log_g for dwarfs
iM37g = np.where(collogg==log_g)[0]
# Compute interpolation functions
bcfuncg = interp1d(colTeff[iM37g], colBCg[iM37g], kind='linear')
bcfuncr = interp1d(colTeff[iM37g], colBCr[iM37g], kind='linear')
bcfunci = interp1d(colTeff[iM37g], colBCi[iM37g], kind='linear')
# Save the slopes separately, for computing uncertainties later
slopesBC = np.zeros((len(colTeff[iM37g]) - 1,3))
slopesBC[:,0]= np.abs(np.diff(colBCg[iM37g]) / np.diff(colTeff[iM37g]))
slopesBC[:,1]= np.abs(np.diff(colBCr[iM37g]) / np.diff(colTeff[iM37g]))
slopesBC[:,2]= np.abs(np.diff(colBCi[iM37g]) / np.diff(colTeff[iM37g]))
# Teff ranges where the interpolation functions are valid
# (for g,r,i)
teffrange = [min(colTeff[iM37g])*1.00001,max(colTeff[iM37g])*0.99999]
bcs = {"g":colBCg,"r":colBCr,"i":colBCi}
funcs = {"g":bcfuncg,"r":bcfuncr,"i":bcfunci}
slopes_dict = {"g":slopesBC[:, 0],"r":slopesBC[:, 1],"i":slopesBC[:, 2]}
teff_bins = colTeff[iM37g]
return funcs, teffrange, slopes_dict, teff_bins
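A brief usage sketch of the returned structures, assuming the Girardi bolometric-correction table is available under model_dir; the evaluation temperature is chosen from teffrange so it is guaranteed to lie inside the valid interpolation interval.

funcs, teffrange, slopes_dict, teff_bins = read_BCs()
teff = 0.5 * (teffrange[0] + teffrange[1])   # a Teff safely inside the valid range
bc_g = funcs["g"](teff)                      # bolometric correction in g at that Teff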
Example 11: read_SEDs
def read_SEDs():
"""Read SEDs table (Adam's table)."""
kh = at.read(model_dir+'kraushillenbrand5.dat')
# Save relevant arrays as variables
coltemp = kh["Teff"]
gmag = kh["Mg"]
rmag = kh["Mr"]
imag = kh["Mi"]
numrows = len(rmag)
# Interpolation functions for Teff as a function of Absolute Magnitude
gfunc = interp1d(gmag, coltemp, kind='linear')
rfunc = interp1d(rmag, coltemp, kind='linear')
ifunc = interp1d(imag, coltemp, kind='linear')
# Save the slopes separately, for computing uncertainties later
slopes = np.zeros((numrows - 1, 3))
slopes[:, 0] = np.abs(np.diff(coltemp) / np.diff(gmag))
slopes[:, 1] = np.abs(np.diff(coltemp) / np.diff(rmag))
slopes[:, 2] = np.abs(np.diff(coltemp) / np.diff(imag))
# Magnitude ranges where the interpolation functions are valid
# (for g,r,i)
magranges = {"g":[-0.39,20.98], "r":[-0.04,18.48], "i":[0.34,15.85]}
mags = {"g":gmag,"r":rmag,"i":imag}
funcs = {"g":gfunc,"r":rfunc,"i":ifunc}
slopes_dict = {"g":slopes[:, 0],"r":slopes[:, 1],"i":slopes[:, 2]}
return mags, funcs, magranges, slopes_dict
Example 12: refinex2
def refinex2(x,y,tol=1e-3,maxiter=10):
from scipy.interpolate import interp1d
# assume y is appropriately normalized
print "refinex: tol=%g, maxiter=%i\n" %(tol,maxiter)
for iter in range(maxiter):
y1 = interp1d(x,y,kind="linear",axis=0)
y3 = interp1d(x,y,kind="cubic" ,axis=0)
nx,ny = y.shape
xi = 0.5*(x[:-1]+x[1:])
yi = y3(xi)
ei = abs(yi-y1(xi)).max(1)
ii = np.nonzero(ei>tol)
ix = np.arange(1,nx)
ni = len(ii[0])
print " iter %i... added %i points" %(iter,ni)
if ni>0:
x = np.insert(x,ix[ii],xi[ii])
y = np.insert(y,ix[ii],yi[ii],axis=0)
else:
break
return x,y
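A hedged usage sketch for refinex2: the coarse grid and the two-column y array below are made up to show the expected shapes (y must be 2-D with one row per x value, since the interpolation is done with axis=0).

import numpy as np

# Coarse grid and a two-column function sampled on it (synthetic data)
x = np.linspace(0.0, np.pi, 8)
y = np.column_stack([np.sin(x), np.cos(x)])

# Insert points wherever linear and cubic interp1d disagree by more than tol
x_fine, y_fine = refinex2(x, y, tol=1e-4, maxiter=5)
print(len(x), "->", len(x_fine), "grid points")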
Example 13: rainin_singlechannel_pipetting_model
def rainin_singlechannel_pipetting_model(volume):
""" Data obtained from
https://www.shoprainin.com/Pipettes/Single-Channel-Manual-Pipettes/RAININ-Classic/Rainin-Classic-Pipette-PR-10/p/17008649
Parameters
----------
volume - volume pipetted in microliters
Notes
-----
This is the pipette used for pipetting cyclohexane into octanol
Returns
-------
Expected Inaccuracy, Imprecision
"""
imprecision_function = interp1d(
[1.0, 5.0, 10.0], # volume range (uL)
[0.012, 0.006, 0.004]) # relative imprecision for these volumes from rainin website
inaccuracy_function = interp1d(
[1.0, 5.0, 10.0], # volume range (uL)
[0.025, 0.015, 0.01]) # relative inaccuracy for these volumes from rainin website
return [inaccuracy_function(volume), imprecision_function(volume)]
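A brief usage note: the 5 µL request below is just an illustrative volume within the 1-10 µL calibration range; volumes outside that range would raise a ValueError, since bounds_error defaults to True in interp1d.

# Expected relative inaccuracy and imprecision for a 5 uL transfer
inaccuracy, imprecision = rainin_singlechannel_pipetting_model(5.0)
print(inaccuracy, imprecision)   # 0.015 and 0.006 at the 5 uL calibration point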
Example 14: rainin_multichannel_pipetting_model
def rainin_multichannel_pipetting_model(volume):
""" Data obtained from
https://www.shoprainin.com/Pipettes/Multichannel-Manual-Pipettes/Pipet-Lite-XLS%2B/Pipet-Lite-Multi-Pipette-L8-200XLS%2B/p/17013805
Parameters
----------
volume - volume pipetted in microliters
Notes
-----
This is the pipette used for pipetting octanol for the cyclohexane dilution into octanol.
Returns
-------
Expected Inaccuracy, Imprecision
"""
imprecision_function = interp1d(
[20.0, 100.0, 200.0], # volume range (uL)
[0.01, 0.0025,0.0015]) # relative imprecision for these volumes from rainin website
inaccuracy_function = interp1d(
[20.0, 100.0, 200.0], # volume range (uL)
[0.025, 0.008, 0.008]) # relative inaccuracy for these volumes from rainin website
return [inaccuracy_function(volume), imprecision_function(volume)]
Example 15: test_closeness_nest_lsodar
def test_closeness_nest_lsodar(self):
# Compare models to the LSODAR implementation.
simtime = 100.
# get lsodar reference
lsodar = np.loadtxt(os.path.join(path, 'test_aeif_data_lsodar.dat')).T
V_interp = interp1d(lsodar[0, :], lsodar[1, :])
w_interp = interp1d(lsodar[0, :], lsodar[2, :])
# create the neurons and devices
neurons = {model: nest.Create(model, params=aeif_param)
for model in models}
multimeters = {model: nest.Create("multimeter") for model in models}
# connect them and simulate
for model, mm in iter(multimeters.items()):
nest.SetStatus(mm, {"interval": self.resol,
"record_from": ["V_m", "w"]})
nest.Connect(mm, neurons[model])
nest.Simulate(simtime)
# relative differences: interpolate LSODAR to match NEST times
mm0 = next(iter(multimeters.values()))
nest_times = nest.GetStatus(mm0, "events")[0]["times"]
reference = {'V_m': V_interp(nest_times), 'w': w_interp(nest_times)}
rel_diff = self.compute_difference(multimeters, aeif_param, reference,
['V_m', 'w'])
self.assert_pass_tolerance(rel_diff, di_tolerances_lsodar)