This page collects typical usage examples of the Python method Stoner.Data.mean. If you are unsure exactly what Data.mean does or how to call it, the curated code examples below should help. You can also read further about the class that provides the method, Stoner.Data.
Three code examples of Data.mean are shown below; by default they are ordered by popularity. You can upvote the examples you like or find useful, and your votes help the system recommend better Python code samples.
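Before the longer examples, here is a minimal sketch of the call itself. The column names and data values are made up purely for illustration; only the Data constructor, the column_headers attribute and the mean() call are taken from the examples below.

import numpy as np
from Stoner import Data

# Build a small two-column Data object and average the "Temp" column.
d = Data(np.column_stack([np.linspace(0, 1, 5), [10.0, 12.0, 11.0, 13.0, 14.0]]))
d.column_headers = ["Time", "Temp"]
print(d.mean("Temp"))  # arithmetic mean of the Temp column -> 12.0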
Example 1: norm_group
# Required import: from Stoner import Data [as alias]
# Alternatively: from Stoner.Data import mean [as alias]
import numpy as np
import scipy

from Stoner import Data


def norm_group(pos, _, **kargs):
    """Take the drain current for each file in the group, build an analysis file and work out the mean drain."""
    if "signal" in kargs:
        signal = kargs["signal"]
    else:
        signal = "fluo"
    lfit = kargs["lfit"]
    rfit = kargs["rfit"]
    posfile = Data()
    posfile.metadata = pos[0].metadata
    posfile = posfile & pos[0].column(0)
    posfile.column_headers = ['Energy']
    for f in pos:
        print(str(f["run"]) + str(f.find_col(signal)))
        posfile = posfile & f.column(signal)
    # Average the signal columns (everything except Energy) row by row
    posfile.add_column(lambda r: np.mean(r[1:]), "mean drain")
    ec = posfile.find_col('Energy')
    md = posfile.find_col('mean drain')
    # Fit a straight line to the pre-edge region defined by lfit and subtract it
    linearfit = scipy.poly1d(posfile.polyfit(ec, md, 1, lambda x, y: lfit[0] <= x <= lfit[1]))
    posfile.add_column(lambda r: r[md] - linearfit(r[ec]), 'minus linear')
    # Normalise to the mean background-subtracted signal in the post-edge window rfit
    highend = posfile.mean('minus', lambda r: rfit[0] <= r[ec] <= rfit[1])
    ml = posfile.find_col('minus linear')
    posfile.add_column(lambda r: r[ml] / highend, "normalised")
    if "group_key" in kargs:
        posfile[kargs["group_key"]] = pos.key
    return posfile
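For context, norm_group is written as a callback that receives a group of scans (for example, one branch of a Stoner DataFolder). A hypothetical direct call might look like the following sketch; the file names, the presence of a "run" metadata key, and the lfit/rfit energy windows are all assumptions made for illustration.

from Stoner import Data

# Hypothetical driver: "pos" can be any sequence of Data scans that share an
# Energy column (column 0) and carry a "run" metadata entry.
scans = [Data("scan_0001.txt"), Data("scan_0002.txt")]   # made-up file names
normalised = norm_group(scans, None,
                        signal="fluo",
                        lfit=(690.0, 700.0),   # made-up pre-edge fit window
                        rfit=(730.0, 740.0))   # made-up normalisation window
print(normalised.column_headers)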
Example 2: Analysis_test
# Required import: from Stoner import Data [as alias]
# Alternatively: from Stoner.Data import mean [as alias]
import unittest
from os import path

import numpy as np

from Stoner import Data

pth = path.dirname(__file__)  # assumed: in the original test suite this points at the directory holding "sample-data"


class Analysis_test(unittest.TestCase):

    """Path to sample Data File"""

    datadir = path.join(pth, "sample-data")

    def setUp(self):
        self.d1 = Data(path.join(self.datadir, "OVF1.ovf"))
        self.d2 = Data(path.join(self.datadir, "TDI_Format_RT.txt"))
        self.d3 = Data(path.join(self.datadir, "New-XRay-Data.dql"))
        self.d4 = Data(np.column_stack([np.ones(100), np.ones(100) * 2]), setas="xy")

    def test_functions(self):
        # Test section:
        self.s1 = self.d1.section(z=(12, 13))
        self.assertTrue(142.710 < self.d2.mean("Temp") < 142.711, "Failed on the mean test.")
        self.assertTrue(round(self.d2.span("Temp")[0], 1) == 4.3 and round(self.d2.span("Temp")[1], 1) == 291.6, "Span test failed.")
        f = self.d2.split(lambda r: r["Temp"] < 150)
        self.assertTrue(len(f[0]) == 838, "Split failed to work.")
        self.assertEqual(len(self.d3.threshold(2000, rising=True, falling=True, all_vals=True)), 5, "Threshold failure.")
        self.d4.add(0, 1, "Add")
        self.d4.subtract(1, 0, header="Subtract")
        self.d4.multiply(0, 1, header="Multiply")
        self.d4.divide(0, 1, header="Divide")
        self.d4.diffsum(0, 1, header="Diffsum")
        self.assertTrue(np.all(self.d4[0] == np.array([-0.5, -1, -3, 3, -1, 2])), "Test column ops failed.")
        d = Data(np.zeros((100, 1)))
        d.add(0, 1.0)
        self.assertEqual(np.sum(d[:, 0]), 100., "Add with a float didn't work")
        d.add(0, np.ones(100))
        self.assertEqual(np.sum(d[:, 0]), 200., "Add with an array failed.")

    def test_peaks(self):
        d = self.d3.clone
        d.peaks(width=8, poly=4, significance=100, modify=True)
        self.assertEqual(len(d), 11, "Failed on peaks test.")

    def test_threshold(self):
        # Set up some zigzag data:
        # minima at 0, 100, 200, 300, 400, maxima at 50, 150, 250, 350 and zeros in between
        ar = np.zeros((400, 2))
        ar[:, 0] = np.arange(0, len(ar))
        for i in range(4):
            ar[i * 100:i * 100 + 50, 1] = np.linspace(-1, 1, 50)
        for i in range(4):
            ar[i * 100 + 50:i * 100 + 100, 1] = np.linspace(1, -1, 50)
        d = Data(ar, setas='xy')
        self.assertTrue(len(d.threshold(0, rising=True, falling=False, all_vals=True)) == 4)
        self.assertTrue(len(d.threshold(0, rising=False, falling=True, all_vals=True)) == 4)
        self.assertTrue(len(d.threshold(0, interpolate=False, rising=False, falling=True, all_vals=True)) == 4)
        self.assertTrue(d.threshold(0, all_vals=True)[1] == 124.5)
        self.thresh = d
        self.assertTrue(np.sum(d.threshold([0.0, 0.5, 1.0]) - np.array([[24.5, 36.74999999, 49.]])) < 1E-6, "Multiple threshold failed.")
        self.assertAlmostEqual(d.threshold(0, interpolate=False, all_vals=True)[1], 124.5, 6, "Threshold without interpolation failed.")
        result = d.threshold(0, interpolate=False, all_vals=True, xcol=False)
        self.assertTrue(np.allclose(result, np.array([[24.5, 0.], [124.5, 0.], [224.5, 0.], [324.5, 0.]])),
                        "Failed threshold with False xcol - result was {}".format(result))

    def test_apply(self):
        self.app = Data(np.zeros((100, 1)), setas="y")
        self.app.apply(lambda r: r.i[0], header="Counter")

        def calc(r, omega=1.0, k=1.0):
            return np.sin(r.y * omega)

        self.app.apply(calc, replace=False, header="Sin", _extra={"omega": 0.1}, k=1.0)
        self.app.apply(lambda r: r.__class__([r[1], r[0]]), replace=True, header=["Index", "Sin"])
        self.app.setas = "xy"
        self.assertAlmostEqual(self.app.integrate(), -64.1722191259037, msg="Integrate after applies failed.")
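Being a standard unittest.TestCase, the class above can be run directly; a minimal runner appended to the containing test module would be:

import unittest

if __name__ == "__main__":
    unittest.main()  # discover and run the Analysis_test cases defined above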
Example 3: enumerate
# Required import: from Stoner import Data [as alias]
# Alternatively: from Stoner.Data import mean [as alias]
from numpy import isnan

from Stoner import Data
from Stoner.Fit import linear  # straight-line model used in the curve fit below

filename = '../sample-data/6221-Lockin-DAQ Temperature Control !0001.txt'
t_col = ": T2"  # Temperature column label
r_cols = ("Sample 4::R", "Sample 7::R")  # Resistance column labels
iterator = "iterator"  # Temperature ramp iteration column label
threshold = 0.85  # Fraction of the transition to fit to

data = Data(filename)  # Use False to get a dialog box to select the file containing the Tc data
# Define the working x and y axes
# Split the one file into a folder of two files by the iterator column
fldr = data.split(iterator)
result = Data()
for data in fldr:  # For each iteration ramp in the Tc data
    row = [data.mean(iterator)]
    data.figure(figsize=(8, 4))
    for i, r_col in enumerate(r_cols):
        data.setas(x=t_col, y=r_col)
        data.del_rows(isnan(data.y))
        # Normalise the data on the y axis to between +/- 1
        data.normalise(base=(-1., 1.), replace=True)
        # Swap the x and y axes around so that R is x and T is y
        data = ~data
        # Curve fit a straight line, using only the central 90% of the resistance transition
        data.curve_fit(linear, bounds=lambda x, r: -threshold < x < threshold, result=True, p0=[7.0, 0.0])  # result=True records the fit in the metadata
        # Plot the results