This article collects typical usage examples of the numpy.recfromtxt function in Python. If you are wondering how to call recfromtxt, what its arguments do, or what it looks like in real code, the curated examples below may help.
A total of 15 code examples of the recfromtxt function are shown below, sorted by popularity by default.
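Before the project examples, here is a minimal sketch of the basic call. The file name data.csv and its columns are hypothetical and used only for illustration. Note that recfromtxt is a thin convenience wrapper around np.genfromtxt that returns a record array, and it has been deprecated in newer NumPy releases, so a roughly equivalent genfromtxt call is also shown.

import numpy as np

# data.csv (hypothetical contents):
# name,ra,dec
# src1,10.68,41.27
# src2,83.82,-5.39
data = np.recfromtxt('data.csv', delimiter=',', names=True, dtype=None, encoding='utf-8')
print(data.ra)       # record arrays expose columns as attributes...
print(data['dec'])   # ...and by field name

# roughly equivalent call for NumPy versions where recfromtxt is deprecated or removed:
data = np.genfromtxt('data.csv', delimiter=',', names=True, dtype=None,
                     encoding='utf-8').view(np.recarray)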
Example 1: check_n_in_aper
def check_n_in_aper(radius_factor=1, k=100):
    for catfile in find_files(bcdphot_out_path, "*_combined_hdr_catalog.txt"):
        print()
        print(catfile)
        names = open(catfile).readline().split()[1:]
        cat = np.recfromtxt(catfile, names=names)
        xscfile = catfile.replace('combined_hdr_catalog.txt', '2mass_xsc.tbl')
        print(xscfile)
        names = open(xscfile).read().split('\n')[76].split('|')[1:-1]
        xsc = np.recfromtxt(xscfile, skip_header=80, names=names)
        n_in_aper = []
        coords = radec_to_coords(cat.ra, cat.dec)
        kdt = KDT(coords)
        for i in range(xsc.size):
            r_deg = xsc.r_ext[i] / 3600.
            idx, ds = spherematch2(xsc.ra[i], xsc.dec[i], cat.ra, cat.dec,
                                   kdt, tolerance=radius_factor * r_deg, k=k)
            n_in_aper.append(ds.size)
        for i in [(i, n_in_aper.count(i)) for i in set(n_in_aper)]:
            print(i)
Example 2: read_data
def read_data(data_files):
    x_files, y_files = data_files
    XX, Y = None, None
    for x_file in x_files:
        print('Reading..', x_file)
        sys.stdout.flush()
        X_subject = np.recfromtxt(x_file, delimiter=',')
        if XX is None:
            XX = X_subject
        else:
            XX = np.concatenate((XX, X_subject))
    XX = XX.reshape((XX.shape[0], 3, config.width, config.height, config.time_slice))
    # print(XX.shape)
    for y_file in y_files:
        print('Reading..', y_file)
        sys.stdout.flush()
        Y_subject = np.recfromtxt(y_file)
        if Y is None:
            Y = Y_subject
        else:
            Y = np.concatenate((Y, Y_subject))
    return XX, Y
Example 3: initialize_database
def initialize_database(self, configuration_dir=None):
    """Read in GMOS filter/grating information, for matching to headers."""
    if configuration_dir is None:
        configuration_dir = default_configuration_dir

    logger.info('Reading Filter information')
    gmos_filters = np.recfromtxt(
        os.path.join(configuration_dir, 'GMOSfilters.dat'),
        names=['name', 'wave_start', 'wave_end', 'fname'])
    for line in gmos_filters:
        new_filter = GMOSFilter(name=line['name'],
                                wavelength_start_value=line['wave_start'],
                                wavelength_start_unit='nm',
                                wavelength_end_value=line['wave_end'],
                                wavelength_end_unit='nm',
                                fname=line['fname'],
                                path=os.path.join(configuration_dir,
                                                  'filter_data'))
        self.session.add(new_filter)

    open_filter = GMOSFilter(name='open', wavelength_start_value=0,
                             wavelength_start_unit='nm',
                             wavelength_end_value=np.inf,
                             wavelength_end_unit='nm',
                             fname=None, path=None)
    self.session.add(open_filter)

    gmos_gratings = np.recfromtxt(
        os.path.join(configuration_dir, 'GMOSgratings.dat'),
        names=['name', 'ruling_density', 'blaze_wave', 'R', 'coverage',
               'wave_start', 'wave_end', 'wave_offset', 'y_offset'])
    logger.info('Reading grating information')
    for line in gmos_gratings:
        new_grating = GMOSGrating(
            name=line['name'],
            ruling_density_value=line['ruling_density'],
            blaze_wavelength_value=line['blaze_wave'],
            R=line['R'],
            coverage_value=line['coverage'],
            wavelength_start_value=line['wave_start'],
            wavelength_end_value=line['wave_end'],
            wavelength_offset_value=line['wave_offset'],
            y_offset_value=line['y_offset'])
        self.session.add(new_grating)

    mirror = GMOSGrating(name='mirror',
                         ruling_density_value=0.0,
                         blaze_wavelength_value=0.0, R=0.0,
                         coverage_value=np.inf,
                         wavelength_start_value=0.0,
                         wavelength_end_value=np.inf,
                         wavelength_offset_value=0.0,
                         y_offset_value=0.0)
    self.session.add(mirror)
    self.session.commit()
Example 4: _get_data
def _get_data():
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    with open(filepath + '/statecrime.csv', 'rb') as f:
        try:
            data = np.recfromtxt(f, delimiter=",", names=True,
                                 dtype=None, encoding='utf-8')
        except TypeError:
            # older NumPy releases do not accept the encoding keyword
            data = np.recfromtxt(f, delimiter=",", names=True, dtype=None)
    return data
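The try/except TypeError fallback above is a common way to stay compatible with NumPy releases that predate the encoding keyword of genfromtxt/recfromtxt. A minimal sketch of the same idea as a reusable helper follows; the name read_csv_rec and the seek(0) reset are illustrative assumptions, not part of the original example.

import numpy as np

def read_csv_rec(path):
    # read a CSV file into a record array, tolerating NumPy versions
    # that do not accept the 'encoding' keyword
    with open(path, 'rb') as f:
        try:
            return np.recfromtxt(f, delimiter=',', names=True,
                                 dtype=None, encoding='utf-8')
        except TypeError:
            # the failed call raises before reading, but reset the file to be safe
            f.seek(0)
            return np.recfromtxt(f, delimiter=',', names=True, dtype=None)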
Example 5: handle_special
def handle_special(self, q, p):
    if q[0] == '\\':
        NULL = p.stdout.readline()
        if len(q.split()) > 1:
            names = p.stdout.readline().split(',')
            r = np.recfromtxt(p.stdout, skip_footer=2, delimiter=',', names=names)
        else:
            names = p.stdout.readline().split(',')
            r = np.recfromtxt(p.stdout, skip_footer=1, delimiter=',', names=names)
    else:
        names = p.stdout.readline().split(',')
        r = np.recfromtxt(p.stdout, skip_footer=1, delimiter=',', names=names)
    return r
Example 6: layoutFromTxt
def layoutFromTxt(filename):
    """Read plate layout from text file and return a structured array."""
    if not os.path.isfile(filename):
        msg = "No Plate Layout provided. File not found {}".format(filename)
        raise IOError(msg)
    try:
        rec = np.recfromtxt(filename, dtype=LayoutDtype, skip_header=True)
    except ValueError:
        rec = np.recfromtxt(filename, dtype=LayoutDtype, delimiter="\t",
                            skip_header=True)
    return rec
Example 7: getList
def getList(file):
    posList = np.recfromtxt(file)
    l = [posList['f0'], posList['f2'], posList['f3']]
    l = np.array(l)
    l = l.T
    names = posList['f4']
    return l, names
Example 8: _get_data
def _get_data():
    filepath = dirname(abspath(__file__))
    with open(filepath + "/anes96.csv", "rb") as f:
        data = recfromtxt(f, delimiter="\t", names=True, dtype=float)
        logpopul = log(data["popul"] + 0.1)
        data = nprf.append_fields(data, "logpopul", logpopul, usemask=False,
                                  asrecarray=True)
    return data
Example 9: _get_data
def _get_data():
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    with open(filepath + '/engel.csv', 'rb') as f:
        data = np.recfromtxt(f, delimiter=",", names=True, dtype=float)
    return data
Example 10: __init__
def __init__(self, path,
             coordkeys=("time time_bounds TFLAG ETFLAG latitude latitude_bounds "
                        "longitude longitude_bounds lat lat_bnds lon lon_bnds "
                        "etam_pressure etai_pressure layer_bounds layer47 layer").split(),
             delimiter=',', names=True, **kwds):
    """
    path      - place to find csv file
    coordkeys - use these keys as dimensions and coordinate variables
    delimiter - use this as delimiter (default = ',')
    names     - see help in recfromtxt (default = True)
    kwds      - np.recfromtxt keywords

    * Note: currently only works when all coordinate variables are 1-d
    """
    kwds['names'] = names
    kwds['delimiter'] = delimiter
    data = np.recfromtxt(path, **kwds)
    dimkeys = [dk for dk in data.dtype.names if dk in coordkeys]
    varkeys = [vk for vk in data.dtype.names if vk not in coordkeys]
    for dk in dimkeys:
        dv = np.unique(data[dk])
        dv.sort()
        self.createDimension(dk, len(dv))
        dvar = self.createVariable(dk, dv.dtype.char, (dk,))
        dvar[:] = dv
    for vk in varkeys:
        vv = data[vk]
        var = self.createVariable(vk, vv.dtype.char, tuple(dimkeys))
        for idx in np.ndindex(var.shape):
            thisidx = np.sum([data[dk] == self.variables[dk][di]
                              for di, dk in zip(idx, dimkeys)],
                             axis=0) == len(dimkeys)
            if thisidx.any():
                var[idx] = vv[thisidx]
Example 11: SNrest
def SNrest():
    path = "../data/restframe/"
    objnames, band, mjd, mag, magerr, stype = [], [], [], [], [], []
    formatcode = ('|S16,'.rstrip('#') + 'f8,' * 6 + '|S16,' + 4 * 'f8,' +
                  '|S16,' * 3 + 'f8,' * 2 + '|S16,' + 'f8,' * 2)
    filenames = os.listdir(path)
    for filename in filenames:
        data = np.recfromtxt(os.path.join(path, filename), usecols=(0, 1, 2, 3, 4),
                             dtype=formatcode, names=True, skip_header=13,
                             case_sensitive='lower', invalid_raise=False)
        name = np.empty(len(data.band), dtype='S20')
        name.fill(filename)
        objnames.append(name)
        data.band = [x.lower() for x in data.band]
        band.append(data.band)
        mjd.append(data.phase)
        mag.append(data.mag)
        magerr.append(data.err)
    objnames = np.fromiter(itertools.chain.from_iterable(objnames), dtype='S20')
    band = np.fromiter(itertools.chain.from_iterable(band), dtype='S16')
    mjd = np.fromiter(itertools.chain.from_iterable(mjd), dtype='float')
    mag = np.fromiter(itertools.chain.from_iterable(mag), dtype='float')
    magerr = np.fromiter(itertools.chain.from_iterable(magerr), dtype='float')
    stype = np.full(len(objnames), 1)
    LC = Lightcurve(objnames, band, mjd, mag, magerr, stype)
    return LC
Example 12: readin_aavso
def readin_aavso(filename):
    formatcode = ('f8,' * 4 + '|S16,' * 20).rstrip(',')
    data = np.recfromtxt(filename, delimiter='\t', names=True, dtype=formatcode,
                         autostrip=True, case_sensitive='lower', invalid_raise=False)
    ind = np.where((data.band == 'V') & (data.uncertainty > 0) &
                   (np.isnan(data.uncertainty) == 0) & (data.uncertainty < 0.02))
    banddata = data[ind]
    return banddata
Example 13: read
def read(filename):
    """
    Read a table (.tbl) file.

    Parameters:

    * filename  Name of table file to read.

    Returns: (comments, rec)

    * comments  List of comments (strings terminated with newline).
    * rec       Records array with named fields.
    """
    # pull out the comment lines from the file (start with #)
    f = open(filename, 'r')
    comments = [l for l in f if l[0] == '#']
    f.close()
    # find the line beginning with # NAMES and parse out the column names
    nl = [i for i, l in enumerate(comments) if l[:7] == "# NAMES"]
    if len(nl) != 1:
        raise IOError("%s does not have a # NAMES line" % (filename))
    dtd = {'names': comments.pop(nl[0])[7:].split()}
    # find the line beginning with # FORMATS and parse out the column formats
    dl = [i for i, l in enumerate(comments) if l[:9] == "# FORMATS"]
    if len(dl) != 1:
        raise IOError("%s does not have a # FORMATS line" % (filename))
    dtd['formats'] = comments.pop(dl[0])[9:].split()
    # return the data as a records array
    return comments, np.atleast_1d(np.recfromtxt(filename, dtype=dtd))
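As a usage sketch, a hypothetical table file (mytable.tbl, with contents invented for illustration) that this reader would accept might look like the commented-out block below, followed by the resulting call:

# mytable.tbl (hypothetical contents):
#   # NAMES peak height width
#   # FORMATS i4 f8 f8
#   1 10.5 0.3
#   2 8.2 0.4
comments, rec = read('mytable.tbl')
# dtype becomes {'names': ['peak', 'height', 'width'], 'formats': ['i4', 'f8', 'f8']}
# rec['height'] -> array([10.5, 8.2])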
Example 14: __init__
def __init__(self, pathlike):
    if isinstance(pathlike, str):
        paths = glob(pathlike)
    else:
        paths = pathlike
    paths.sort()
    pfile = self
    pfile._vars = dict()
    files = [open(path) for path in paths]
    datas = [np.recfromtxt(f, names=True, case_sensitive=True) for f in files]
    data = np.ma.concatenate(datas)
    desired_unit = dict(O3='ppb', GMAO_TEMP='K', PRESS='hPa', TEMP='K')
    unit_factor = {'ppt': 1e12, 'ppb': 1e9}
    pfile.createDimension('time', data.shape[0])
    for ki, key in enumerate(data.dtype.names):
        typecode = data[key].dtype.char
        if typecode not in ('c', 'S'):
            unit = desired_unit.get(key, 'ppt')
            factor = unit_factor.get(unit, 1)
            values = np.ma.masked_values(data[key], -1000) * factor
        else:
            unit = 'unknown'
            values = data[key]
        pfile.createVariable(key, typecode, dimensions=('time',), units=unit,
                             values=values)
Example 15: load
def load():
    """
    Load the star98 data and return a Dataset class instance.

    Returns
    -------
    Load instance:
        a class of the data with array attributes 'endog' and 'exog'
    """
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    names = ["NABOVE", "NBELOW", "LOWINC", "PERASIAN", "PERBLACK", "PERHISP",
             "PERMINTE", "AVYRSEXP", "AVSALK", "PERSPENK", "PTRATIO", "PCTAF",
             "PCTCHRT", "PCTYRRND", "PERMINTE_AVYRSEXP", "PERMINTE_AVSAL",
             "AVYRSEXP_AVSAL", "PERSPEN_PTRATIO", "PERSPEN_PCTAF", "PTRATIO_PCTAF",
             "PERMINTE_AVYRSEXP_AVSAL", "PERSPEN_PTRATIO_PCTAF"]
    data = recfromtxt(open(filepath + '/star98.csv', "rb"), delimiter=",",
                      names=names, skip_header=1, dtype=float)
    names = list(data.dtype.names)
    # endog = (successes, failures)
    NABOVE = array(data[names[1]]).astype(float)  # successes
    NBELOW = (array(data[names[0]]).astype(float)
              - array(data[names[1]]).astype(float))  # now it's failures
    endog = column_stack((NABOVE, NBELOW))
    endog_name = names[:2]
    exog = column_stack(data[i] for i in names[2:]).astype(float)
    exog_name = names[2:]
    dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
                      endog_name=endog_name, exog_name=exog_name)
    return dataset