This page collects typical usage examples of Python's numpy.genfromtxt function. If you are unsure what genfromtxt does, how to call it, or want to see it used in real code, the curated examples below should help.
The 15 code examples of genfromtxt shown below are ordered by popularity by default.
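Before the numbered examples, here is a minimal, self-contained sketch of the most common genfromtxt arguments; the inline CSV and column names are invented for illustration.

import numpy as np
from io import StringIO

# A small in-memory CSV stands in for a file on disk.
csv = StringIO("year,magnitude\n2001,4.5\n2002,5.1\n2003,4.8\n")

# delimiter selects the field separator; skip_header drops the title row.
data = np.genfromtxt(csv, delimiter=",", skip_header=1)
print(data.shape)         # (3, 2)
print(data[:, 1].mean())  # mean of the second column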
Example 1: compare
import numpy as np
import pylab

def compare(mooseCsv, nrnCsv):
    mooseData = None
    nrnData = None
    with open(mooseCsv, "r") as f:
        mooseTxt = f.read().split("\n")
        # genfromtxt accepts the list of lines directly; transpose so that
        # each row of the result is one column of the CSV.
        mooseHeader = mooseTxt[0].split(",")
        mooseData = np.genfromtxt(mooseTxt[1:], delimiter=',').T
    with open(nrnCsv, "r") as f:
        nrnTxt = f.read().split("\n")
        nrnHeader = nrnTxt[0].split(',')
        # Scale the NEURON traces by 1e-3 to match MOOSE units.
        nrnData = 1e-3 * np.genfromtxt(nrnTxt[1:], delimiter=',').T
    nrnTimeVec, nrnData = nrnData[0], nrnData[1:]
    mooseTimeVec, mooseData = mooseData[0], mooseData[1:]
    for i, comptName in enumerate(nrnHeader[1:]):
        if i == 1:
            break  # compare only the first compartment
        nrnComptName = comptName.replace("table_", "")
        # get_index is a helper defined elsewhere in this module.
        mooseComptId, mooseComptName = get_index(nrnComptName, mooseHeader[1:])
        print("%s %s - moose equivalent %s %s" % (i, nrnComptName, mooseComptId,
                                                  mooseComptName))
        pylab.plot(mooseTimeVec, mooseData[mooseComptId], label="MOOSE: " + mooseComptName)
        pylab.plot(nrnTimeVec, nrnData[i], label="Neuron: " + nrnComptName)
    pylab.legend(loc='best', framealpha=0.4)
    pylab.show()
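Example 1 relies on genfromtxt accepting any iterable of lines, not just a file name or handle. A minimal sketch of that behaviour, with invented values:

import numpy as np

lines = ["0.0,1.0,2.0", "0.1,1.5,2.5", "0.2,2.0,3.0"]
# A list of strings parses exactly like the lines of a file; transposing
# gives one row per original column, as in compare() above.
arr = np.genfromtxt(lines, delimiter=",").T
time_vec, traces = arr[0], arr[1:]
print(time_vec)      # [0.  0.1 0.2]
print(traces.shape)  # (2, 3)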
Example 2: setUp
def setUp(self):
    """
    Set up the test catalogues and verification tables.
    """
    # Read initial dataset
    filename = os.path.join(self.BASE_DATA_PATH,
                            'completeness_test_cat.csv')
    test_data = np.genfromtxt(filename, delimiter=',', skip_header=1)
    # Create catalogue A
    self.catalogueA = Catalogue.make_from_dict(
        {'year': test_data[:, 3], 'magnitude': test_data[:, 17]})
    # Read second dataset
    filename = os.path.join(self.BASE_DATA_PATH,
                            'recurrence_test_cat_B.csv')
    test_data = np.genfromtxt(filename, delimiter=',', skip_header=1)
    # Create catalogue B
    self.catalogueB = Catalogue.make_from_dict(
        {'year': test_data[:, 3], 'magnitude': test_data[:, 17]})
    # Read verification table A
    filename = os.path.join(self.BASE_DATA_PATH,
                            'recurrence_table_test_A.csv')
    self.true_tableA = np.genfromtxt(filename, delimiter=',')
    # Read verification table B
    filename = os.path.join(self.BASE_DATA_PATH,
                            'recurrence_table_test_B.csv')
    self.true_tableB = np.genfromtxt(filename, delimiter=',')
Example 3: learn
def learn(tuned_parameters, model):
    # produceFeature(trainfile)
    dataset = genfromtxt(open('Data/' + trainfile, 'r'), delimiter=',', dtype='f8')
    target = [x[0] for x in dataset]
    train = [x[1:] for x in dataset]
    # produceFeature(testfile)
    test = genfromtxt(open('Data/' + testfile, 'r'), delimiter=',', dtype='f8')
    test_target = [x[1:] for x in test]
    trainnp = np.asarray(train)
    targetnp = np.asarray(target)
    # turn the data into a (samples, features) matrix:
    X, y = trainnp, targetnp
    # Split the dataset into two equal parts
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0)
    scores = ['precision', 'recall']
    for score in scores:
        print("# Tuning hyper-parameters for %s" % score)
        print()
        clf = GridSearchCV(model, tuned_parameters, cv=5,
                           scoring='%s_weighted' % score)
        clf.fit(X_train, y_train)
        print("Best parameters set found on development set:")
        print()
        print(clf.best_params_)
        print()
        print("Grid scores on development set:")
        print()
        # grid_scores_ was removed in scikit-learn 0.20; on newer
        # versions use clf.cv_results_ instead.
        for params, mean_score, scores in clf.grid_scores_:
            print("%0.3f (+/-%0.03f) for %r"
                  % (mean_score, scores.std() * 2, params))
        print()
        print("Detailed classification report:")
        print()
        print("The model is trained on the full development set.")
        print("The scores are computed on the full evaluation set.")
        print()
        y_true, y_pred = y_test, clf.predict(X_test)
        print(classification_report(y_true, y_pred))
        print()
Author: evanslight | Project: Exploring-Twitter-Sentiment-Analysis-and-the-Weather | Lines: 60 | Source: Sentimentanalysis_parameter_gridsearch.py
Example 4: make_dtopo
def make_dtopo():
    '''
    Make a GeoClaw dtopo file.
    '''
    from numpy import genfromtxt, zeros
    # Run params
    f = '/Users/dmelgarm/Research/Slip_Inv/tohoku_tsunami/'
    stafile = 'tohoku.sta'
    dlon = 0.033333
    dlat = 0.033333
    dt = 5
    stat_or_dyn = 's'
    # Get station list: names from column 0, lon/lat from columns 1-2.
    # dtype=str (rather than 'S4') keeps the names as text under Python 3,
    # so they can be used to build file names below.
    sta = genfromtxt(f + 'data/station_info/' + stafile, usecols=0, dtype=str)
    s = genfromtxt(f + 'data/station_info/' + stafile, usecols=[1, 2])
    lon = s[:, 0]
    lat = s[:, 1]
    if stat_or_dyn.lower() == 's':
        n = zeros(len(sta))
        e = n.copy()
        u = n.copy()
        for ksta in range(len(sta)):
            print(ksta)
            neu = genfromtxt(f + 'output/forward_models/' + str(sta[ksta]) + '.static.neu')
            n[ksta] = neu[0]
            e[ksta] = neu[1]
            u[ksta] = neu[2]
            print(neu[2])
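make_dtopo reads the same station file twice: once with usecols=0 and a string dtype for the names, once with usecols=[1, 2] for the coordinates. A hedged, standalone sketch of that pattern (the station data is invented):

import numpy as np
from io import StringIO

sta_txt = "STA1 140.10 38.20\nSTA2 141.00 39.00\n"  # invented station list

# usecols=0 with dtype=str extracts just the name column ...
names = np.genfromtxt(StringIO(sta_txt), usecols=0, dtype=str)
# ... while usecols=[1, 2] reads only the numeric coordinate columns.
coords = np.genfromtxt(StringIO(sta_txt), usecols=[1, 2])
print(names)         # ['STA1' 'STA2']
print(coords[:, 0])  # [140.1 141. ]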
Example 5: wide_dataset_large
def wide_dataset_large():
    print("Reading in Arcene training data for binomial modeling.")
    trainDataResponse = np.genfromtxt(pyunit_utils.locate("smalldata/arcene/arcene_train_labels.labels"), delimiter=' ')
    trainDataResponse = np.where(trainDataResponse == -1, 0, 1)
    trainDataFeatures = np.genfromtxt(pyunit_utils.locate("smalldata/arcene/arcene_train.data"), delimiter=' ')
    xtrain = np.transpose(trainDataFeatures).tolist()
    ytrain = trainDataResponse.tolist()
    trainData = h2o.H2OFrame.fromPython([ytrain] + xtrain)
    trainData[0] = trainData[0].asfactor()

    print("Run model on 3250 columns of Arcene with strong rules off.")
    model = H2OGeneralizedLinearEstimator(family="binomial", lambda_search=False, alpha=1)
    model.train(x=list(range(1, 3250)), y=0, training_frame=trainData)

    print("Test model on validation set.")
    validDataResponse = np.genfromtxt(pyunit_utils.locate("smalldata/arcene/arcene_valid_labels.labels"), delimiter=' ')
    validDataResponse = np.where(validDataResponse == -1, 0, 1)
    validDataFeatures = np.genfromtxt(pyunit_utils.locate("smalldata/arcene/arcene_valid.data"), delimiter=' ')
    xvalid = np.transpose(validDataFeatures).tolist()
    yvalid = validDataResponse.tolist()
    validData = h2o.H2OFrame.fromPython([yvalid] + xvalid)
    prediction = model.predict(validData)

    print("Check performance of predictions.")
    performance = model.model_performance(validData)

    print("Check that prediction AUC is better than guessing (0.5).")
    assert performance.auc() > 0.5, "predictions should be better than pure chance"
Example 6: read
def read(input_file="POSITIONS.OUT"):
    """ Reads a geometry """
    m = np.genfromtxt(input_file).transpose()
    g = geometry()  # create geometry
    g.dimensionality = 0
    g.x = m[0]
    g.y = m[1]
    g.x = g.x - sum(g.x) / len(g.x)  # center x around the origin
    g.y = g.y - sum(g.y) / len(g.y)  # center y around the origin
    g.z = m[2]
    g.xyz2r()  # create r coordinates
    try:
        lat = np.genfromtxt("LATTICE.OUT")  # read lattice
    except Exception:
        lat = None
    if lat is None:  # no lattice file: keep a zero-dimensional geometry
        return g
    try:  # two dimensional
        g.a1 = np.array([lat[0, 0], lat[0, 1], 0.0])
        g.a2 = np.array([lat[1, 0], lat[1, 1], 0.0])
        g.dimensionality = 2
        return g
    except Exception:
        pass
    try:  # one dimensional: LATTICE.OUT holds a single cell distance
        g.celldis = lat
        g.dimensionality = 1
        return g
    except Exception:
        pass
    g.dimensionality = 0
    return g
Example 7: test_dtype_with_object
def test_dtype_with_object(self):
    "Test using an explicit dtype with an object"
    from datetime import datetime
    from io import StringIO
    data = """
    1; 2001-01-01
    2; 2002-01-31
    """
    ndtype = [('idx', int), ('code', object)]
    # Parse the second column into datetime objects.
    func = lambda s: datetime.strptime(s.strip(), "%Y-%m-%d")
    converters = {1: func}
    test = np.genfromtxt(StringIO(data), delimiter=";", dtype=ndtype,
                         converters=converters)
    control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
                       dtype=ndtype)
    assert_equal(test, control)
    # Nested dtypes with object fields are expected to fail for now.
    ndtype = [('nest', [('idx', int), ('code', object)])]
    try:
        test = np.genfromtxt(StringIO(data), delimiter=";",
                             dtype=ndtype, converters=converters)
    except NotImplementedError:
        pass
    else:
        errmsg = "Nested dtype involving objects should be supported."
        raise AssertionError(errmsg)
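The converters mapping in Example 7 is the general hook for per-column parsing. A stripped-down, runnable variant of the same idea:

import numpy as np
from io import StringIO
from datetime import datetime

data = StringIO("1;2001-01-01\n2;2002-01-31\n")
# Column 1 is handed to the converter as a string and parsed into a
# datetime, so its dtype must be object; column 0 stays a plain int.
conv = {1: lambda s: datetime.strptime(s.strip(), "%Y-%m-%d")}
out = np.genfromtxt(data, delimiter=";",
                    dtype=[("idx", int), ("date", object)],
                    converters=conv)
print(out["date"][0].year)  # 2001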
Example 8: plotme
def plotme(typeid, num, h):
    yMax = '6'
    cuibmFolder = '/scratch/src/cuIBM'
    caseFolder = cuibmFolder + '/validation/error/cylinder/' + typeid + num
    validationData = cuibmFolder + '/validation-data/cylinderRe40-KL95.txt'
    print(caseFolder + '/forces')
    my_data = genfromtxt(caseFolder + '/forces', dtype=float, delimiter='\t')
    time = [my_data[i][0] for i in range(1, len(my_data))]
    force = [my_data[i][1] * 2 for i in range(1, len(my_data))]
    validation_data = genfromtxt(validationData, dtype=float, delimiter='\t')
    validation_time = [validation_data[i][0] * 0.5 for i in range(1, len(validation_data))]
    validation_force = [validation_data[i][1] for i in range(1, len(validation_data))]
    plt.plot(validation_time, validation_force, 'o', color='red', markersize=8,
             label='Koumoutsakos and Leonard, 1995')
    plt.plot(time, force, '-', color='blue', linewidth=2, label='Present Work')
    plt.title('Flow over an impulsively started cylinder at Reynolds number 40')
    plt.legend(loc='upper right', numpoints=1, fancybox=True)
    plt.xlabel('Non-dimensional time')
    plt.ylabel('Drag Coefficient')
    plt.xlim([0, 3])
    plt.ylim([0, int(yMax)])
    plt.savefig('/scratch/src/cuIBM/validation/error/cylinder/' + typeid + h + '.pdf')
    plt.clf()
Example 9: co
def co():
    import numpy as np
    import os
    home = os.path.expanduser('~')
    band = input('Select the band:')  # raw_input in Python 2
    if band == 'pacs':
        upper, lower = 200, 54
    if band == 'spire':
        upper, lower = 671, 200
    level = np.genfromtxt(home + '/data/co_level.txt', dtype=str)
    ref = np.genfromtxt(home + '/data/co_ref.txt', dtype=str)
    for i in range(len(level)):
        for j in range(len(ref)):
            if ref[j, 1] == level[i, 0]:
                ref[j, 0] = level[i, 2]
                ref[j, 1] = level[i, 3]
            if ref[j, 2] == level[i, 0]:
                ref[j, 2] = level[i, 3]
    # Convert frequency (GHz) to wavelength (micron)
    c = 2.998e8
    ref[:, 4] = c / ref[:, 4].astype(float) / 1e9 * 1e6
    ref = ref[np.argsort(ref[:, 4].astype(float))]
    ref_sort = ref
    dummy = np.copy(ref[:, 0])
    ref_sort[:, 0], ref_sort[:, 1], ref_sort[:, 2], ref_sort[:, 3], ref_sort[:, 4] = \
        ref[:, 1], ref[:, 2], ref[:, 4], ref[:, 3], ref[:, 5]
    ref_sort[:, 5] = dummy
    ind = np.where((ref_sort[:, 2].astype(float) >= lower) &
                   (ref_sort[:, 2].astype(float) <= upper))
    slt_trans = ref_sort[ind, :]
    print(slt_trans)
    print(len(slt_trans[0, :, 0]))
    foo = open(home + '/data/co_ref_sort.txt', 'w')
    np.savetxt(foo, ref_sort, fmt='%s')
    foo.close()
Example 10: load
def load(self):
    # load data
    values = []
    if verbose: print()
    if verbose: print("visualization: loading chains ...")
    f = "prob-chain0.dump"
    if not os.path.exists(f):
        raise Exception("visualization: chains not available yet.")
    try:
        # I think the first column is the probabilities, the second is without prior
        probabilities = numpy.genfromtxt(f, skip_footer=1, dtype='f')[:, 0]
    except Exception as e:
        raise Exception("visualization: chains couldn't be loaded; perhaps no data yet: " + str(e))
    for p in self.params:
        f = "%s-chain-0.prob.dump" % p['name']
        if verbose: print("  loading chain %s" % f)
        if not os.path.exists(f):
            raise Exception("visualization: chains not available yet.")
        try:
            v = numpy.genfromtxt(f, skip_footer=1, dtype='f')
        except Exception as e:
            raise Exception("visualization: chains couldn't be loaded; perhaps no data yet: " + str(e))
        values.append(v)
    nvalues = min(map(len, values))
    if verbose: print("visualization: loading chains finished; %d values" % nvalues)
    # Trim all chains to a common length, keep the last nlast samples, thin by nevery.
    self.values = [v[:nvalues][-self.nlast::nevery] for v in values]
    self.probabilities = probabilities[:nvalues][-self.nlast::nevery]
Example 11: read_gf_from_txt
def read_gf_from_txt(block_txtfiles, block_name):
    """
    Read a GfReFreq from text files with the format (w, Re(G), Im(G)) for a single block.

    Parameters
    ----------
    block_txtfiles: Rank 2 square np.array(str) or list[list[str]]
        The text files containing the GF data that need to be read for the block.
        e.g. [['up_eg1.dat']] for a one-dimensional block and
        [['up_eg1_1.dat','up_eg2_1.dat'],
         ['up_eg1_2.dat','up_eg2_2.dat']] for a 2x2 block.
    block_name: str
        Name of the block.

    Returns
    -------
    g: GfReFreq
        The real frequency Green's function read in.

    Notes
    -----
    A BlockGf must be constructed from multiple GfReFreq objects if desired.
    The mesh must be the same for all files read in.
    Non-uniform meshes are not supported.
    """
    block_txtfiles = np.array(block_txtfiles)  # Must be an array to use certain functions
    N1, N2 = block_txtfiles.shape
    mesh = np.genfromtxt(block_txtfiles[0, 0], usecols=[0])  # Mesh must be the same for all files
    g = GfReFreq(indices=list(range(N1)), window=(np.min(mesh), np.max(mesh)),
                 n_points=len(mesh), name=block_name)
    for i, j in product(range(N1), range(N2)):
        data = np.genfromtxt(block_txtfiles[i, j], usecols=[1, 2])
        g.data[:, i, j] = data[:, 0] + 1j * data[:, 1]
    return g
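A hypothetical call for a one-dimensional block, assuming the surrounding module has imported GfReFreq from TRIQS and that 'up_eg1.dat' holds three whitespace-separated columns (w, Re(G), Im(G)) as the docstring describes:

# File name and block label are invented for illustration.
g_up = read_gf_from_txt([['up_eg1.dat']], 'up')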
Example 12: load_hop
def load_hop(s, hop=hop_script_path):
    """
    Loads the hop catalog for the given RAMSES snapshot. If the
    catalog doesn't exist, it tries to run hop to create one via the
    'script_hop.sh' script found in the RAMSES distribution. The hop
    output should be in a 'hop' directory in the base directory of the
    simulation.

    **Input**:

    *s* : loaded RAMSES snapshot

    **Optional Keywords**:

    *hop* : path to `script_hop.sh`
    """
    if s.filename[-1] == '/':
        name = s.filename[-6:-1]
        filename = s.filename[:-13] + 'hop/grp%s.pos' % name
    else:
        name = s.filename[-5:]
        filename = s.filename[:-12] + 'hop/grp%s.pos' % name
    try:
        data = np.genfromtxt(filename, unpack=True)
    except IOError:
        import os
        dir = s.filename[:-12] if len(s.filename[:-12]) else './'
        os.system('cd %s;/home/itp/roskar/ramses/galaxy_formation/script_hop.sh %d;cd ..' % (dir, int(name)))
        data = np.genfromtxt(filename, unpack=True)
    return data
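load_hop uses unpack=True, which transposes the result so that each column of the catalog becomes its own row. A minimal sketch with invented values:

import numpy as np
from io import StringIO

txt = StringIO("1.0 10.0\n2.0 20.0\n3.0 30.0\n")
# unpack=True returns the transpose, so columns unpack straight into variables.
x, y = np.genfromtxt(txt, unpack=True)
print(x)  # [1. 2. 3.]
print(y)  # [10. 20. 30.]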
Example 13: main
import numpy
from numpy import array
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import spsolve

def main():
    print("Solve small matrix...")
    R = array([0, 0, 1, 1, 1, 2, 2])
    C = array([0, 1, 0, 1, 2, 1, 2])
    V = array([4.0, -1.0, -1.0, 4.0, -1.0, -1.0, 4.0])
    b = array([3.0, 2.0, 3.0])
    A = coo_matrix((V, (R, C)), shape=(3, 3))
    # convert to csr format for efficiency
    x = spsolve(A.tocsr(), b)
    print("x = ", x)

    print("Solve psd matrix...")
    # skip the first row (n, nnz); skiprows was removed from genfromtxt,
    # so use skip_header instead
    A = numpy.genfromtxt('../data/psd.txt', skip_header=1)
    b = numpy.genfromtxt('../data/b.txt')
    # coo_matrix requires integer index arrays, so cast the float columns
    coo = coo_matrix((A[:, 2], (A[:, 0].astype(int), A[:, 1].astype(int))))
    x = spsolve(coo.tocsr(), b)
    print('x = ', x)

    print("Solve big matrix...")
    A = numpy.genfromtxt('../data/mat_helmholtz.txt', skip_header=1)
    coo = coo_matrix((A[:, 2], (A[:, 0].astype(int), A[:, 1].astype(int))))
    n = coo.shape[0]
    b = numpy.ones(n)
    x = spsolve(coo.tocsr(), b)
    print('x = ', x)
Example 14: main
def main(options):
    freq_range = range(options["from"], options["to"] + 1)
    # Open in text mode ("rt"/"wt") so formatted strings can be written in Python 3.
    gt_file = gzip.open(options["gt_file"], "rt")
    pos_file = gzip.open(options["pos_file"], "rt")
    out_haps = gzip.open(options["out_root"] + "/haps.gz", "wt")
    out_haps_fn = [gzip.open(options["out_root"] + "/haps.f" + str(x) + ".gz", "wt") for x in freq_range]
    out_samples = open(options["out_root"] + "/samples.txt", "w")
    gt = np.genfromtxt(gt_file, delimiter=1)  # fixed-width fields, 1 character each
    pos = np.genfromtxt(pos_file)
    pos = np.floor(pos * options["chr_len"]).astype(int)
    gt = gt.transpose().astype(int)
    # On some platforms np.genfromtxt picks up the line endings as an
    # extra column, so trim to the expected number of rows.
    gt = gt[range(len(pos)), ]
    (nsnp, nind) = gt.shape
    ACs = np.sum(gt, axis=1)
    MACs = np.minimum(ACs, nind - ACs)
    for i in range(nsnp):
        out_haps.write(("\t".join(["%d"] * (nind + 1)) + "\n") % ((pos[i],) + tuple(gt[i, ])))
        if MACs[i] >= options["from"] and MACs[i] <= options["to"]:
            idx = MACs[i] - options["from"]
            out_haps_fn[idx].write(("\t".join(["%d"] * (nind + 1)) + "\n") % ((pos[i],) + tuple(gt[i, ])))
    for i in range(nind // 2):
        out_samples.write("SIM%d\n" % (i + 1,))
    for fil in [gt_file, pos_file, out_haps] + out_haps_fn:
        fil.close()
Example 15: main
def main():
    trainset = np.genfromtxt(open('train.csv', 'r'), delimiter=',')[1:]
    X = np.array([x[1:8] for x in trainset])
    y = np.array([x[8] for x in trainset])
    import math
    # Replace missing values (genfromtxt yields NaN for empty fields)
    for i, x in enumerate(X):
        for j, xx in enumerate(x):
            if math.isnan(xx):
                X[i][j] = 26.6
    testset = np.genfromtxt(open('test.csv', 'r'), delimiter=',')[1:]
    test = np.array([x[1:8] for x in testset])
    for i, x in enumerate(test):
        for j, xx in enumerate(x):
            if math.isnan(xx):
                test[i][j] = 26.6
    X, test = decomposition_pca(X, test)
    bdt = AdaBoostClassifier(base_estimator=KNeighborsClassifier(n_neighbors=20, algorithm='auto'),
                             algorithm="SAMME", n_estimators=200)
    bdt.fit(X, y)
    print('PassengerId,Survived')
    for i, t in enumerate(test):
        # newer scikit-learn expects a 2-D array for predict
        print('%d,%d' % (i + 892, int(bdt.predict(t.reshape(1, -1))[0])))
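The manual isnan loops in Example 15 can usually be replaced by genfromtxt's own missing-data handling: filling_values substitutes a default wherever a field is empty. A hedged sketch with an invented two-column CSV:

import numpy as np
from io import StringIO

csv = StringIO("1,22.0\n2,\n3,31.5\n")  # the second row has an empty field
# filling_values patches missing entries at read time, so no NaN
# post-processing pass is needed afterwards.
data = np.genfromtxt(csv, delimiter=",", filling_values=26.6)
print(data[1, 1])  # 26.6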