This article collects typical usage examples of the Stoner.Data.column_headers method in Python. If you are unsure exactly how to use Data.column_headers, or are looking for concrete examples of it in action, the hand-picked code samples below may help. You can also explore further usage examples of the containing class, Stoner.Data.
The following shows 7 code examples of Data.column_headers, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
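Before the collected examples, here is a minimal sketch of the attribute itself, assuming a small in-memory dataset (the column names and values are purely illustrative):
import numpy as np
from Stoner import Data

d = Data(np.column_stack((np.arange(5), np.arange(5) ** 2)))
d.column_headers = ["x", "x squared"]   # assign names to the two columns
print(d.column_headers)                 # -> ['x', 'x squared']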
Example 1: norm_group
# Required import: from Stoner import Data [as alias]
# Or alternatively: from Stoner.Data import column_headers [as alias]
import numpy as np
import scipy

from Stoner import Data


def norm_group(pos, _, **kargs):
    """Take the drain current for each file in the group, build an analysis file and work out the mean drain."""
    if "signal" in kargs:
        signal = kargs["signal"]
    else:
        signal = "fluo"
    lfit = kargs["lfit"]
    rfit = kargs["rfit"]
    posfile = Data()
    posfile.metadata = pos[0].metadata
    posfile = posfile & pos[0].column(0)
    posfile.column_headers = ['Energy']
    for f in pos:
        print(str(f["run"]) + str(f.find_col(signal)))
        posfile = posfile & f.column(signal)
    posfile.add_column(lambda r: np.mean(r[1:]), "mean drain")
    ec = posfile.find_col('Energy')
    md = posfile.find_col('mean drain')
    linearfit = scipy.poly1d(posfile.polyfit(ec, md, 1, lambda x, y: lfit[0] <= x <= lfit[1]))
    posfile.add_column(lambda r: r[md] - linearfit(r[ec]), 'minus linear')
    highend = posfile.mean('minus', lambda r: rfit[0] <= r[ec] <= rfit[1])
    ml = posfile.find_col('minus linear')
    posfile.add_column(lambda r: r[ml] / highend, "normalised")
    if "group_key" in kargs:
        posfile[kargs["group_key"]] = pos.key
    return posfile
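A possible way to call this function, assuming `scans` is a group (or list) of already-loaded Data files that each carry a "run" metadata key and a "fluo" signal column; the energy windows passed as lfit and rfit are made-up values:
analysis = norm_group(scans, None, signal="fluo", lfit=(690.0, 700.0), rfit=(730.0, 740.0))
print(analysis.column_headers)  # Energy, the per-scan signals, mean drain, minus linear, normalised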
Example 2: hist
# Required import: from Stoner import Data [as alias]
# Or alternatively: from Stoner.Data import column_headers [as alias]
import numpy as np

from Stoner import Data


def hist(im, *args, **kargs):
    """Pass through to :py:func:`matplotlib.pyplot.hist` function."""
    counts, edges = np.histogram(im.ravel(), *args, **kargs)
    centres = (edges[1:] + edges[:-1]) / 2
    new = Data(np.column_stack((centres, counts)))
    new.column_headers = ["Intensity", "Frequency"]
    new.setas = "xy"
    return new
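A possible call, assuming `image` is a 2-D intensity array; any extra arguments are passed straight to numpy.histogram, so the bin count below is only an illustration:
d = hist(image, bins=64)   # image is an assumed 2-D array of intensities
d.plot()                   # Intensity on x, Frequency on y, thanks to setas = "xy"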
Example 3: profile_line
# Required import: from Stoner import Data [as alias]
# Or alternatively: from Stoner.Data import column_headers [as alias]
import numpy as np
from skimage import measure

from Stoner import Data

# _scale and istuple are helper functions from the Stoner package (not shown in this snippet)


def profile_line(img, src=None, dst=None, linewidth=1, order=1, mode="constant", cval=0.0, constrain=True, **kargs):
    """Wrapper for the scikit-image method of the same name to get a line profile.

    Parameters:
        img (ImageArray): Image data to take the line section of.
        src, dst (2-tuple of int or float): Start and end of the line profile. If the co-ordinates
            are given as integers then they are assumed to be pixel co-ordinates; floats are
            assumed to be real-space co-ordinates using the embedded metadata.
        linewidth (int): The width of the profile to be taken.
        order (int 1-3): Order of interpolation used to find image data when not aligned to a point.
        mode (str): How to handle data outside of the image.
        cval (float): The constant value to assume for data outside of the image if mode is "constant".
        constrain (bool): Ensure the src and dst are within the image (default True).

    Returns:
        A :py:class:`Stoner.Data` object containing the line profile data and the metadata from the image.
    """
    scale = img.get("MicronsPerPixel", 1.0)
    r, c = img.shape
    if src is None and dst is None:
        if "x" in kargs:
            src = (kargs["x"], 0)
            dst = (kargs["x"], r)
        if "y" in kargs:
            src = (0, kargs["y"])
            dst = (c, kargs["y"])
    if isinstance(src, float):
        src = (src, src)
    if isinstance(dst, float):
        dst = (dst, dst)
    dst = _scale(dst, scale)
    src = _scale(src, scale)
    if not istuple(src, int, int):
        raise ValueError("src co-ordinates are not a 2-tuple of ints.")
    if not istuple(dst, int, int):
        raise ValueError("dst co-ordinates are not a 2-tuple of ints.")
    if constrain:
        fix = lambda x, mx: int(round(sorted([0, x, mx])[1]))
        r, c = img.shape
        src = list(src)
        src = (fix(src[0], r), fix(src[1], c))
        dst = (fix(dst[0], r), fix(dst[1], c))

    result = measure.profile_line(img, src, dst, linewidth, order, mode, cval)
    points = measure.profile._line_profile_coordinates(src, dst, linewidth)[:, :, 0]
    ret = Data()
    ret.data = points.T
    ret.setas = "xy"
    ret &= np.sqrt(ret.x ** 2 + ret.y ** 2) * scale
    ret &= result
    ret.column_headers = ["X", "Y", "Distance", "Intensity"]
    ret.setas = "..xy"
    ret.metadata = img.metadata.copy()
    return ret
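A possible call, assuming `img` is a Stoner ImageArray and the end points are given as pixel co-ordinates (the numbers are illustrative only):
section = profile_line(img, src=(0, 0), dst=(100, 100), linewidth=3)
section.plot()  # plots Intensity against Distance, as selected by setas = "..xy"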
Example 4:
# Required import: from Stoner import Data [as alias]
# Or alternatively: from Stoner.Data import column_headers [as alias]
# Now get the section of the data file that has the peak positions.
# This is really doing the hard work.
# We differentiate the data using a Savitzky-Golay filter with a 5 point window fitting quartics.
# This has proved most successful for me looking at some MdV data.
# We then threshold for zero crossings of the derivative
# and check the second derivative to see whether we like the peak as significant. This is the significance parameter
# and seems to be largely empirical.
# Finally we interpolate back to the complete data set to make sure we get the angle as well as the counts.
d.lmfit(ExponentialModel, result=True, replace=False, header="Envelope")
d.subtract("Counts", "Envelope", replace=False, header="peaks")
d.setas = "xy"
t = Data(d.interpolate(d.peaks(significance=sensitivity, width=8, poly=4)))
t.column_headers = copy(d.column_headers)
d %= 'peaks'
t %= 'peaks'
d.setas = "xy"
d.labels[d.find_col('Angle')] = r"Reflection Angle $\theta$"
t.del_rows(0, lambda x, y: x < critical_edge)
t.setas = "xy"
t.template.fig_width = 7.0
t.template.fig_height = 5.0
t.plot(fmt='go', plotter=pyplot.semilogy)
main_fig = d.plot(figure=t.fig, plotter=pyplot.semilogy)
d.show()
# Now convert the angle to sin^2
t.apply(lambda x: np.sin(np.radians(x[0] / 2.0)) ** 2, 0, header=r"$sin^2\theta$")
# Now create the m^2 order
m = np.arange(len(t)) + fringe_offset
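The usual next step is a straight-line fit of sin^2(theta) against m^2, from which the film thickness follows via the Kiessig-fringe relation sin^2(theta) ≈ sin^2(theta_c) + m^2 λ^2 / (4 t^2). A minimal sketch of that fit, assuming `lam` holds the wavelength in the same length units as the desired thickness (this continuation is not part of the original listing):
slope, intercept = np.polyfit(m ** 2, t.column(0), 1)  # column 0 now holds sin^2(theta)
thickness = lam / (2.0 * np.sqrt(slope))                # lam is an assumed wavelength value
print("Estimated film thickness:", thickness)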
Example 5: plane
# Required import: from Stoner import Data [as alias]
# Or alternatively: from Stoner.Data import column_headers [as alias]
import matplotlib.cm as cmap
import matplotlib.pyplot as plt
from numpy import column_stack, linspace, meshgrid
from numpy.random import normal

from Stoner import Data


def plane(coord, a, b, c):
    """Function to define a plane."""
    return c - (coord[0] * a + coord[1] * b)


coeffs = [1, -0.5, -1]
col = linspace(-10, 10, 6)
X, Y = meshgrid(col, col)
Z = plane((X, Y), *coeffs) + normal(size=X.shape, scale=7.0)
d = Data(
    column_stack((X.ravel(), Y.ravel(), Z.ravel())),
    filename="Fitting a Plane",
    setas="xyz",
)
d.column_headers = ["X", "Y", "Z"]
d.figure(projection="3d")
d.plot_xyz(plotter="scatter")
popt, pcov = d.curve_fit(plane, [0, 1], 2, result=True)
d.setas = "xy.z"
d.plot_xyz(linewidth=0, cmap=cmap.jet)
txt = "$z=c-ax+by$\n"
txt += "\n".join([d.format("plane:{}".format(k), latex=True) for k in ["a", "b", "c"]])
ax = plt.gca(projection="3d")
ax.text(15, 5, -50, txt)
d.draw()
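With result=True, the fitted parameters end up in the metadata under the plane: prefix (the same keys d.format reads above); a quick, assumed way to inspect them and their uncertainties:
for name in ["a", "b", "c"]:
    print(name, d["plane:{}".format(name)], "+/-", d["plane:{} err".format(name)])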
Example 6:
# Required import: from Stoner import Data [as alias]
# Or alternatively: from Stoner.Data import column_headers [as alias]
data.del_rows(isnan(data.y))
# Normalise data on y axis between +/- 1
data.normalise(base=(-1.0, 1.0), replace=True)
# Swap x and y axes around so that R is x and T is y
data = ~data
# Curve fit a straight line, using only the central 90% of the resistance transition
data.curve_fit(linear, bounds=lambda x, r: -threshold < x < threshold, result=True, p0=[7.0, 0.0])  # result=True to record fit into metadata
# Plot the results
data.setas[-1] = "y"
data.subplot(1, len(r_cols), i + 1)
data.plot(fmt=["k.", "r-"])
data.annotate_fit(linear, x=-1.0, y=7.3, fontsize="small")
data.title = "Ramp {}".format(data[iterator][0])
row.extend([data["linear:intercept"], data["linear:intercept err"]])
data.tight_layout()
result += np.array(row)
result.column_headers = ["Ramp", "Sample 4 R", "dR", "Sample 7 R", "dR"]
result.setas = "xyeye"
result.plot(fmt=["k.", "r."])
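The fragment assumes a model called `linear` defined earlier in the script; judging from the metadata keys it reads back ("linear:intercept" and "linear:intercept err"), a minimal stand-in would be a plain straight-line function such as the one below (an assumption, not the original definition):
def linear(x, intercept, slope):
    # Straight line y = intercept + slope * x for the resistance-transition fit
    return intercept + slope * x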
Example 7: print
# Required import: from Stoner import Data [as alias]
# Or alternatively: from Stoner.Data import column_headers [as alias]
for s in fldr.groups:  # Fit each FMR spectrum
    subfldr = fldr[s]
    subfldr.metadata["Field Sign"] = s
    print("s={}".format(s))
    result = []
    for ix, res in enumerate(subfldr.each.iter(do_fit)):
        result.append(res)
    data, headers = zip(*result)
    new_data = data[0]
    for r in data[1:]:
        new_data = append(new_data, r, axis=0)
    result = Data(new_data)
    result.column_headers = headers[0]
    # Now plot all the fits
    subfldr.plots_per_page = 6  # Plot results
    subfldr.plot(figsize=(8, 8), extra=extra)
    # Work with the overall results
    result.setas(y="H_res", e="H_res.stderr", x="Freq")
    result.y = result.y / mu_0  # Convert to A/m
    result.e = result.e / mu_0
    resfldr += result  # Stash the results

# Merge the two field signs into a single file, taking care of the error columns too
result = resfldr[0].clone