This article collects typical usage examples of the Python function pylab.genfromtxt: what genfromtxt does, how to call it, and what it looks like in real code. The curated examples below should answer those questions.
A total of 15 code examples of the genfromtxt function are shown, sorted by popularity by default.
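Note that pylab simply re-exports NumPy's genfromtxt, so everything below applies equally to numpy.genfromtxt. As a baseline, here is a minimal call on a small two-column table (the data is invented for illustration; a file name or path works the same way):

import io
from pylab import genfromtxt  # the same function as numpy.genfromtxt

# three rows, two whitespace-separated columns
data = genfromtxt(io.StringIO("0.1 5.2\n0.2 4.8\n0.3 4.1\n"))
print(data.shape)  # -> (3, 2)
print(data[:, 0])  # first column -> [0.1 0.2 0.3]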
Example 1: showSAXSProfiles
def showSAXSProfiles(exp_data, model_data):
    from matplotlib import pyplot
    from pylab import genfromtxt
    # Read experimental data
    mat0 = genfromtxt(exp_data)
    # Read model data
    mat1 = genfromtxt(model_data)
    # Plot data
    pyplot.plot(mat0[:, 0], mat0[:, 1], label="Experimental")
    pyplot.plot(mat1[:, 0], mat1[:, 1], label="Model")
    pyplot.legend()
    pyplot.show()
Example 2: parseResults
import pylab as pyl

def parseResults(files):
    ''' Reads all of the results files and puts them into a list with the
    results. Returns field, dither, fiber, and redshift.
    '''
    r = []
    for f in files:
        print(f)
        cluster, field, dither = f.split('_')
        data = pyl.genfromtxt(f, delimiter='\t', names=True, dtype=None)
        try:
            for fiber, z, Q in zip(data['Fiber'], data['Redshift'],
                                   data['Quality']):
                if Q == 0:
                    r.append((field, 'D' + str(dither.rstrip('.results')),
                              fiber, z))
        except TypeError:
            # a file with a single data row gives a 0-d structured array,
            # so the fields have to be converted to scalars instead
            fiber = int(data['Fiber'])
            z = float(data['Redshift'])
            Q = int(data['Quality'])
            if Q == 0:
                r.append(
                    (field, 'D' + str(dither.rstrip('.results')), fiber, z))
    print(len(r), 'objects read')
    return r
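With names=True the first row of the file is used as column names and dtype=None lets genfromtxt guess each column's type, so the result is a structured array addressed by field name; that is what makes data['Fiber'] and friends work above (and, when a file holds only one row, why the 0-d fallback branch is needed). A minimal sketch with made-up contents:

import io
import pylab as pyl

tsv_text = "Fiber\tRedshift\tQuality\n12\t0.31\t0\n47\t0.88\t1\n"
data = pyl.genfromtxt(io.StringIO(tsv_text), delimiter='\t',
                      names=True, dtype=None)
print(data['Fiber'])    # -> [12 47]
print(data['Quality'])  # -> [0 1]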
Example 3: draw_plot
def draw_plot(self):
    # assumes: import io, os, pylab and `from datetime import datetime` at module level
    global LAST_CALL, LAST_MODIFIED_DATE, IMAGE_BUFFER
    data_filename = "stats.txt"
    try:
        mtime = os.path.getmtime(data_filename)
    except OSError:
        mtime = 0
    modified_date = datetime.fromtimestamp(mtime)
    # serve the cached image if the data file has not changed since the last call
    if LAST_CALL == self.path and modified_date == LAST_MODIFIED_DATE:
        IMAGE_BUFFER.seek(0)
        return IMAGE_BUFFER
    LAST_CALL = self.path
    LAST_MODIFIED_DATE = modified_date
    data = pylab.genfromtxt(data_filename, delimiter=',', dtype=int)
    y_data = data[:, 0]
    x_data = data[:, 1]
    if self.op == 'game':
        y_data = y_data[-self.game_count:]
        x_data = x_data[-self.game_count:]
    pylab.plot(x_data, y_data, '-')
    # pylab.show()
    IMAGE_BUFFER = io.BytesIO()
    pylab.savefig(IMAGE_BUFFER, format='png')
    IMAGE_BUFFER.seek(0)
    # pylab.legend()
    # pylab.title("Title of Plot")
    # pylab.xlabel("X Axis Label")
    # pylab.ylabel("Y Axis Label")
    pylab.close()
    return IMAGE_BUFFER
Example 4: main
import os
import pylab as pl

def main():
    is_transparent = False
    f = open("pi_data.txt", "r")
    # This is a little different than normal because of the complex data for the Floquet
    # stability multipliers. With the "dtype" option we get a 1-D array of tuples, so slicing
    # is a little more awkward: a single value has to look like data[#][#], NOT data[#, #].
    data = pl.genfromtxt(f, comments="e", dtype="complex,complex,float")
    eigs1 = pl.array([])
    eigs2 = pl.array([])
    A = pl.array([])
    for i, j in enumerate(data):
        eigs1 = pl.append(eigs1, j[0])
        eigs2 = pl.append(eigs2, j[1])
        A = pl.append(A, j[2])
    fig1, ax1 = pl.subplots(2, 2, sharex=True)
    ax1[0, 0].plot(A, [k.real for k in eigs1], color="Black")
    ax1[1, 0].plot(A, [k.imag for k in eigs1], color="Black")
    ax1[0, 1].plot(A, [k.real for k in eigs2], color="Black")
    ax1[1, 1].plot(A, [k.imag for k in eigs2], color="Black")
    ax1[0, 0].set_ylabel(r"Re[$\lambda_1$]", fontsize=25)
    ax1[1, 0].set_ylabel(r"Im[$\lambda_1$]", fontsize=25)
    ax1[0, 1].set_ylabel(r"Re[$\lambda_2$]", fontsize=25)
    ax1[1, 1].set_ylabel(r"Im[$\lambda_2$]", fontsize=25)
    ax1[1, 0].set_xlabel("$A$", fontsize=25)
    ax1[1, 1].set_xlabel("$A$", fontsize=25)
    fig1.tight_layout()
    fig1.savefig("paper_A_vs_eigs.png", dpi=300, transparent=is_transparent)
    os.system("open paper_A_vs_eigs.png")
Example 5: readSparseSystem
import re
import scipy.sparse
import pylab as pl

def readSparseSystem(filename):
    """
    Converts iSAM library's output (when printing sparse
    matrices) to a scipy sparse matrix.

    Returns
    -------
    a sparse COO matrix, the Cholesky factor R of the
    information matrix
    """
    # the first line holds the matrix dimensions as "<rows>x<cols>"
    f = open(filename, 'r')
    line = f.readline()
    f.close()
    dimStr = re.search('[0-9]+x[0-9]+', line).group(0)
    dim = int(dimStr.split('x')[0])
    data = pl.genfromtxt(filename)
    # drop the first (header) row; the rest are (row, col, value) triplets
    data = data[1:, :]
    rows = data[:, 0].astype(int)
    cols = data[:, 1].astype(int)
    vals = data[:, 2]
    R = scipy.sparse.coo_matrix((vals, (rows, cols)), shape=(dim, dim))
    return R
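Here the dimension line is read separately and then sliced off the parsed array with data[1:, :]; genfromtxt can also drop it at parse time with skip_header. A sketch ("isam_R.txt" is a hypothetical file name, assumed to contain that one header line followed by plain numeric triplets):

import pylab as pl

# skip the dimension line and parse the remaining "row col value" triplets
data = pl.genfromtxt("isam_R.txt", skip_header=1)
rows = data[:, 0].astype(int)
cols = data[:, 1].astype(int)
vals = data[:, 2]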
Example 6: main
import os
import numpy as np
import pylab as pl

def main():
    f = open("final_position.txt", "r")
    data = pl.genfromtxt(f, comments="L")
    # keep only every fourth row
    x = pl.array([])
    y = pl.array([])
    for i, j in enumerate(data[:-7, 2]):
        if i % 4 == 0:
            x = pl.append(x, data[i, 4])
            y = pl.append(y, j)
    print(x)
    print(y)
    fit = np.polyfit(x, y, 2)
    print(fit)
    # fited = fit[0] + fit[1]*x + fit[2]*x**2
    fited = np.poly1d(fit)
    print(fited)
    # pl.plot(pl.append(x, [.262, .264, .266]), fited(pl.append(x, [.262, .264, .266])), color="black")
    pl.scatter(x, y, color="black")
    pl.xlabel("$A$", fontsize="30")
    pl.ylabel("$x$", fontsize="30")
    pl.savefig("fin_pts.png", transparent=True, dpi=300)
    os.system("open fin_pts.png")
Example 7: main
import argparse
import os
import pylab as pl

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dir", action="store", dest="dir", type=str, required=True)
    inargs = parser.parse_args()
    os.chdir(os.path.expanduser("~/Data/EC/4DBlock/Old/" + inargs.dir))
    os.mkdir("Last")
    b_num = get_b_num()  # helper defined elsewhere in the original project
    os.system("cp info.txt Last/")
    all_files = os.listdir(".")
    for i, j in enumerate(all_files):
        if "poindat" in j:
            curfile = open(j, "r")
            to_file = open("Last/" + j, "w")
            # copy the two header lines, then parse the rest with genfromtxt
            to_file.write(curfile.readline())
            to_file.write(curfile.readline())
            cur_dat = pl.genfromtxt(curfile, comments="Z")
            for l in range(b_num):
                to_file.write(
                    str(cur_dat[-b_num + l, 0])
                    + " "
                    + str(cur_dat[-b_num + l, 1])
                    + " "
                    + str(cur_dat[-b_num + l, 2])
                    + " "
                    + str(cur_dat[-b_num + l, 3])
                )
                to_file.write("\n")
            to_file.close()
            curfile.close()
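Because genfromtxt accepts an already-open file object and parses from its current position, consuming the header lines with readline() first (as above) is an alternative to skip_header when those lines also need to be copied elsewhere. A small sketch with an invented two-line header:

import io
import pylab as pl

stream = io.StringIO("run 42\nsweep A=0.25\n1.0 2.0\n3.0 4.0\n")
header1 = stream.readline()   # "run 42\n"
header2 = stream.readline()   # "sweep A=0.25\n"
data = pl.genfromtxt(stream)  # parses only the remaining numeric lines
print(data)                   # -> [[1. 2.] [3. 4.]]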
Example 8: sheat_vs_tempature
import os
import pylab as pl

def sheat_vs_tempature(ancl):
    if 'ssheat_data.txt' not in os.listdir('.'):
        print('Need specific heat data')
        os.system('say Need specific heat data')
    if 'temp_granular_sliced.txt' not in os.listdir('.'):
        print('Need granular tempature data')
        os.system('say Need granular tempature data')
    tempature_file = open('temp_granular_sliced.txt', 'r')
    sheat_file = open('ssheat_data.txt', 'r')
    # first line is labels
    tempature_labels = tempature_file.readline()
    tempature_plotting_data = pl.genfromtxt(tempature_file)
    tempature_arr = tempature_plotting_data[:, 1]
    # first line is labels
    sheat_labels = sheat_file.readline()
    sheat_plotting_data = pl.genfromtxt(sheat_file)
    # first column is the sweep variable
    var_arr = sheat_plotting_data[:, 0]
    # the delta-E-squared term is the next column
    delta_E_sqrd = sheat_plotting_data[:, 1]
    s_heat_arr = sheat_plotting_data[:, 2]
    fig = pl.figure()
    ax = fig.add_subplot(111)
    # form of errorbar(x, y, xerr=xerr_arr, yerr=yerr_arr)
    pl.scatter(tempature_arr, s_heat_arr, c='k')
    # pl.errorbar(var_arr, averages_2, yerr=std_arr, c='b', ls='none', fmt='o')
    ax.set_xlabel(r'T_g', fontsize=30)
    ax.set_ylabel('Specific heat per particle', fontsize=20)
    fig.tight_layout()
    fig.savefig('T_vs_s_heat.png', dpi=300)
    pl.close(fig)
    print('\a')
    os.system('say finnished plotting tempature against specific heat')
Example 9: spatio_temporal
import os
import pylab as pl

def spatio_temporal(ancl):
    os.mkdir('SpatioTemporalVels')
    print('RIGHT NOW THIS IS ONLY FOR VX!!!!!!!')
    p_arr = pl.arange(0, ancl.N)
    # How many cycles do we want to look at?
    how_many = 10
    var_arr = pl.array([])
    for i, j in enumerate(os.listdir('.')):
        if 'poindat.txt' not in j:
            continue
        print('working on file ' + j)
        poin_num = int(j[:j.find('p')])
        cur_file = open(j, 'r')
        # first line stores the swept parameter; genfromtxt parses the rest
        cur_sweep_var = float(cur_file.readline().split()[-1])
        cur_data = pl.genfromtxt(cur_file)
        cur_file.close()
        var_arr = pl.append(var_arr, cur_sweep_var)
        count = 0
        grid = cur_data[-int(how_many * 2.0 * pl.pi / ancl.dt):, :ancl.N]
        # In 1D, because particles never cross each other, we can order them in the
        # images to match their physical order.
        grid_ordered = pl.zeros(pl.shape(grid))
        # can just use the initial conditions to figure out where each one is
        init_x = cur_data[0, ancl.N:2 * ancl.N]
        sorted_x = sorted(init_x)
        for a, alpha in enumerate(sorted_x):
            for b, beta in enumerate(init_x):
                if alpha == beta:
                    grid_ordered[:, a] = grid[:, b]
        print('shape of grid_ordered: ' + str(pl.shape(grid_ordered)))
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.imshow(grid_ordered, interpolation="nearest", aspect='auto')
        ax.set_xlabel('Particle', fontsize=30)
        # ax.set_aspect('equal')
        ax.set_ylabel(r'$ t $', fontsize=30)
        fig.tight_layout()
        fig.savefig('SpatioTemporalVels/%(number)04d.png' % {'number': poin_num})
        pl.close(fig)
Example 10: __init__
def __init__(self, xml_tree):
    # assumes: import io, pylab at module level; xml_tree is an ElementTree instance
    self.root = xml_tree.getroot()
    self.misc = self.root.find('Misc')
    self.duration = self.misc.find('Duration')
    self.final_t = float(self.duration.attrib['Time'])
    self.pressure = self.misc.find('Pressure')
    self.pressure_avg = float(self.pressure.attrib['Avg'])
    self.pressure_tensor = self.pressure.find('Tensor')
    # parse the whitespace-separated tensor text straight from an in-memory stream
    # (on Python 3 use io.StringIO, since Element.text is a str, not bytes)
    stream = io.BytesIO()
    stream.write(self.pressure_tensor.text)
    stream.seek(0)
    self.pressure_tensor = pylab.genfromtxt(stream)
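genfromtxt is happy to read from any in-memory stream, which is how the pressure tensor text above is parsed without a temporary file. The BytesIO write/seek pattern is Python 2 style; on Python 3, where Element.text is a str, the equivalent would look like this (a sketch, with the tensor text invented):

import io
import pylab

tensor_text = "1.0 0.0 0.0\n0.0 1.0 0.0\n0.0 0.0 1.0\n"  # stand-in for pressure_tensor.text
pressure_tensor = pylab.genfromtxt(io.StringIO(tensor_text))
print(pressure_tensor.shape)  # -> (3, 3)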
Example 11: get_fit
import pylab as pl

def get_fit(which):
    f = open("final_position.txt", "r")
    data = pl.genfromtxt(f, comments="L")
    if which == "x":
        datnum = 2
    if which == "vx":
        datnum = 0
    x = pl.array([])
    y = pl.array([])
    for i, j in enumerate(data[:-7, datnum]):
        if i % 2 == 0:
            x = pl.append(x, data[i, 4])
            y = pl.append(y, j)
    fit = pl.polyfit(x, y, 2)
    fitted = pl.poly1d(fit)
    return fitted
Example 12: loadPlayByPlay
import pylab

def loadPlayByPlay(csvfile, vbose=0):
    skeys = ['game_id', 'type', 'playerName', 'posTeam', 'awayTeam', 'homeTeam']
    ikeys = ['seas', 'igame_id', 'dwn', 'ytg', 'yfog', 'yds']
    fkeys = []
    lines = [l.strip() for l in open(csvfile).readlines()]
    hd = lines[0]
    ks = hd.split(',')
    dt = []
    for k in ks:
        # postgres copy-to-file makes headers lower-case; this is a kludge
        if k == 'playername':
            k = 'playerName'
        elif k == 'posteam':
            k = 'posTeam'
        elif k == 'away_team':
            k = 'awayTeam'
        elif k == 'awayteam':
            k = 'awayTeam'
        elif k == 'home_team':
            k = 'homeTeam'
        elif k == 'hometeam':
            k = 'homeTeam'
        if k in skeys:
            tmp = (k, 'S16')
        elif k in ikeys:
            tmp = (k, 'i4')
        else:
            tmp = (k, 'f8')
        if vbose >= 1:
            print(k, tmp)
        dt.append(tmp)
    dt = pylab.dtype(dt)
    data = pylab.genfromtxt(csvfile, dtype=dt, delimiter=',', skip_header=1)
    return data
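Instead of letting dtype=None guess column types, this example (and Example 13 below) builds an explicit structured dtype from the CSV header and passes it to genfromtxt together with skip_header=1. A minimal sketch with an invented two-column file:

import io
import pylab

csv_text = "player,yds\nSmith,12\nJones,7\n"
dt = pylab.dtype([('player', 'S16'), ('yds', 'i4')])
data = pylab.genfromtxt(io.StringIO(csv_text), dtype=dt, delimiter=',', skip_header=1)
print(data['yds'])     # -> [12 7]
print(data['player'])  # 'S16' fields come back as byte strings, e.g. b'Smith'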
Example 13: readCsv
import pylab

def readCsv(ifile):
    skeys = ['date', 'homeTeam', 'awayTeam', 'game_id', 'player', 'posteam', 'oldstate', 'newstate']
    ikeys = ['seas', 'igame_id', 'yds']
    fkeys = []
    dt = []
    lines = [l.strip() for l in open(ifile).readlines()]
    hd = lines[0]
    ks = hd.split(',')
    for k in ks:
        if k in skeys:
            tmp = (k, 'S64')
        elif k in ikeys:
            tmp = (k, 'i4')
        elif k in fkeys:
            tmp = (k, 'f4')
        else:
            tmp = (k, 'f8')
        dt.append(tmp)
    dt = pylab.dtype(dt)
    data = pylab.genfromtxt(ifile, dtype=dt, skip_header=1, delimiter=',')
    return data
Example 14: main
import aplpy
import pylab as pyl

def main(cluster):
    cluster = cluster + '_r_mosaic.fits'
    f = pyl.figure(1, figsize=(8, 8))
    gc = aplpy.FITSFigure(cluster, north=True, figure=f)
    gc.show_grayscale(stretch='arcsinh')
    gc.set_theme('publication')
    gc.set_tick_labels_format(xformat='hh:mm:ss', yformat='dd:mm:ss')
    # gc.set_tick_labels_size('small')
    data = pyl.genfromtxt('./../analysis_all/redshifts/' +
                          cluster.split('_')[0] + '_redshifts.csv',
                          delimiter=',', names=True, dtype=None)
    try:
        # filter out the specz's
        x = pyl.isnan(data['Specz'])
        # draw the specz's
        gc.show_markers(data['ra'][~x], data['dec'][~x], edgecolor='#ffbf00',
                        facecolor='none', marker='D', s=200)
    except ValueError:
        print('no Speczs found')
    # draw observed but not redshifted
    x = data['Q'] == 2
    gc.show_markers(data['ra'][x], data['dec'][x], edgecolor='#a60628',
                    facecolor='none', marker='s', s=150)
    # draw redshifted
    x = (data['Q'] == 0) | (data['Q'] == 1)
    gc.show_markers(data['ra'][x], data['dec'][x], edgecolor='#188487',
                    facecolor='none', marker='o', s=150)
    pyl.tight_layout()
    pyl.show()
Example 15: crunchZfile
import pylab as pl

def crunchZfile(f, aCol, sCol, bCol, normFactor):
    r'''
    Takes a zAveraged... data file generated from the crunchData
    function of this library and produces the arithmetic mean
    as well as the standard error over all seeds. The error is
    propagated as e = sqrt( \sum_k (c_k e_k)^2 ), where the e_k are the
    individual seeds' standard errors and the c_k are weighting
    coefficients obeying \sum_k c_k = 1.
    '''
    avgs, stds, bins = pl.genfromtxt(f, usecols=(aCol, sCol, bCol),
                                     unpack=True, delimiter=',')
    # get rid of any items which are not numbers..
    # this is some beautiful Python juju.
    bins = bins[pl.logical_not(pl.isnan(bins))]
    stds = stds[pl.logical_not(pl.isnan(stds))]
    avgs = avgs[pl.logical_not(pl.isnan(avgs))]
    # normalize data.
    stds *= normFactor
    avgs *= normFactor
    # weight each seed by its bin count
    weights = bins / pl.sum(bins)
    avgs *= weights
    stds *= weights  # over-estimates error bars
    stds *= stds
    avg = pl.sum(avgs)
    stdErr = pl.sum(stds)
    stdErr = stdErr**0.5
    return avg, stdErr
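The usecols/unpack combination above pulls out exactly three columns and returns them as separate 1-D arrays, so they can be bound to three names in a single assignment; missing entries come back as nan, which is why the function immediately filters with isnan. A minimal sketch (the column layout is invented for illustration):

import io
import pylab as pl

csv_text = "0.51,0.02,100\n0.49,0.03,80\n0.50,,90\n"  # note the missing value in the last row
avgs, stds, bins = pl.genfromtxt(io.StringIO(csv_text), usecols=(0, 1, 2),
                                 unpack=True, delimiter=',')
print(stds)                                  # -> [0.02 0.03  nan]
print(stds[pl.logical_not(pl.isnan(stds))])  # -> [0.02 0.03]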