本文整理汇总了Python中numpy.recarray函数的典型用法代码示例。如果您正苦于以下问题:Python recarray函数的具体用法?Python recarray怎么用?Python recarray使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了recarray函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _get_subheaders
def _get_subheaders(self):
    """Retrieve all subheaders and return a list of subheader recarrays.

    One 512-byte subheader is read from ``self.fileobj`` per matrix-list
    entry and wrapped in a 0-d ``np.recarray`` of dtype
    ``self._subhdrdtype`` (byte-swapped when the file's endianness differs
    from the native one).
    """
    subheaders = []
    # NOTE(review): `header` and `endianness` are assigned but never used
    # below -- candidates for removal.
    header = self._header
    endianness = self.endianness
    dt = self._subhdrdtype
    # Re-express the subheader dtype in the file's byte order if needed.
    if not self.endianness is native_code:
        dt = self._subhdrdtype.newbyteorder(self.endianness)
    if self._header['num_frames'] > 1:
        # Multi-frame file: one subheader per matrix-list entry.
        for item in self._mlist._mlist:
            if item[1] == 0:
                # A zero block pointer marks the end of valid entries.
                break
            self.fileobj.seek(0)
            # item[1] looks like a 1-based 512-byte block index
            # (hence the -1) -- TODO confirm against the format spec.
            offset = (int(item[1])-1)*512
            self.fileobj.seek(offset)
            tmpdat = self.fileobj.read(512)
            sh = (np.recarray(shape=(), dtype=dt,
                              buf=tmpdat))
            # Copy so each stored recarray owns its data instead of
            # aliasing the (reused) read buffer.
            subheaders.append(sh.copy())
    else:
        # Single-frame file: read the one subheader pointed to by the
        # first matrix-list entry.
        self.fileobj.seek(0)
        offset = (int(self._mlist._mlist[0][1])-1)*512
        self.fileobj.seek(offset)
        tmpdat = self.fileobj.read(512)
        sh = (np.recarray(shape=(), dtype=dt,
                          buf=tmpdat))
        subheaders.append(sh)
    return subheaders
示例2: test_usecase1
def test_usecase1(self):
    """Compiled usecase1 must produce the same result as the pure-Python
    version on record arrays with an unaligned dtype."""
    pyfunc = usecase1
    # Three float64 fields, deliberately without alignment padding.
    mystruct_dt = numpy.dtype([('p', numpy.float64),
                               ('row', numpy.float64),
                               ('col', numpy.float64)])
    mystruct = numpy_support.from_dtype(mystruct_dt)
    cres = compile_isolated(pyfunc, (mystruct[:], mystruct[:]))
    cfunc = cres.entry_point
    st1 = numpy.recarray(3, dtype=mystruct_dt)
    st2 = numpy.recarray(3, dtype=mystruct_dt)
    # Fill every field of both arrays with 1..N.
    for rec in (st1, st2):
        seq = numpy.arange(rec.size) + 1
        rec.p = seq
        rec.row = seq
        rec.col = seq
    expect1 = st1.copy()
    expect2 = st2.copy()
    got1 = expect1.copy()
    got2 = expect2.copy()
    # Run the interpreted and the compiled version on identical inputs.
    pyfunc(expect1, expect2)
    cfunc(got1, got2)
    self.assertTrue(numpy.all(expect1 == got1))
    self.assertTrue(numpy.all(expect2 == got2))
示例3: __init__
def __init__(self, analyzer):
    """Allocate the record arrays that hold per-frame analysis results.

    All shapes are derived from the analyzer's camera/rectifier geometry;
    the nested field dtypes come from the analyzer's sub-analyzers.
    """
    n = analyzer.frames_in_flight
    fw, fh = analyzer.camera.frame_size
    c = analyzer.camera.channels
    rw, rh = analyzer.rectifier.image_size
    # One record per in-flight frame.
    self.frames = numpy.recarray(n, [
        ("index", "u4"),
        ("timestamp", "f8"),
        ("image", "u1", (fh, fw, c)),
        ("image_f", "f4", (fh, fw, c)),
        ("table", analyzer.table_tracker.dtype),
        ("rectification", "f4", (rh, rw, c)),
        ("background", analyzer.background_analyzer.dtype),
        ("team_foosmen", analyzer.team_foosmen_analyzer.dtype, len(analyzer.table.teams)),
        ("ball", analyzer.ball_analyzer.dtype),
        ("rod", [
            # One sub-field per rod, named "<rod type>_<team>".
            ("%s_%s" % (rod.type.name, rod.team.name), analyzer.rod_analyzer[i].dtype) for i, rod in enumerate(analyzer.table.rods)
        ])
    ])
    # Scalar (0-d) record for the current background model.
    self.background = numpy.recarray((), [
        ("color_mean", "f4", (rh, rw, c)),
        ("variance", "f8", (c, c)),
        ("q_estimation", "f4", (rh, rw))
    ])
    # One record per team for the foosmen color model.
    self.team_foosmen = numpy.recarray(len(analyzer.table.teams), [
        ("color_mean", "f4", (rh, rw, c)),
        ("variance", "f8", (c, c)),
    ])
示例4: test_structured_arrays
def test_structured_arrays(self):
    """typeof() must map structured/record arrays to types.Array with the
    expected dtype, ndim, layout and alignedness."""
    def check(arr, dtype, ndim, layout, aligned):
        ty = typeof(arr)
        self.assertIsInstance(ty, types.Array)
        for actual, expected in ((ty.dtype, dtype),
                                 (ty.ndim, ndim),
                                 (ty.layout, layout),
                                 (ty.aligned, aligned)):
            self.assertEqual(actual, expected)
    # Unaligned struct dtype: plain ndarray and recarray behave the same.
    dtype = np.dtype([('m', np.int32), ('n', 'S5')])
    rec_ty = numpy_support.from_struct_dtype(dtype)
    for make in (np.empty, np.recarray):
        check(make(4, dtype=dtype), rec_ty, 1, "C", False)
    # Aligned struct dtype.
    dtype = np.dtype([('m', np.int32), ('n', 'S5')], align=True)
    rec_ty = numpy_support.from_struct_dtype(dtype)
    # On Numpy 1.6, align=True doesn't align the itemsize
    actual_aligned = numpy_support.version >= (1, 7)
    for make in (np.empty, np.recarray):
        check(make(4, dtype=dtype), rec_ty, 1, "C", actual_aligned)
示例5: addfield
def addfield(mrecord, newfield, newfieldname=None):
    """Adds a new field to the masked record array, using `newfield` as data
    and `newfieldname` as name. If `newfieldname` is None, the new field name is
    set to 'fi', where `i` is the number of existing fields.

    Returns a new MaskedRecords instance; `mrecord` itself is not modified.
    """
    _data = mrecord._data
    _mask = mrecord._mask
    # Fall back to a generated name when none is given or the requested
    # name collides with a reserved field name.
    if newfieldname is None or newfieldname in reserved_fields:
        newfieldname = 'f%i' % len(_data.dtype)
    newfield = ma.array(newfield)
    # Get the new data ............
    # Create a new empty recarray with the extended dtype
    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
    newdata = recarray(_data.shape, newdtype)
    # Add the existing fields (each fields value is a (dtype, offset) pair,
    # which setfield/getfield take unpacked)
    [newdata.setfield(_data.getfield(*f), *f)
     for f in _data.dtype.fields.values()]
    # Add the new field
    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
    newdata = newdata.view(MaskedRecords)
    # Get the new mask .............
    # Create a new empty recarray (one bool per field)
    newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
    newmask = recarray(_data.shape, newmdtype)
    # Add the old masks
    [newmask.setfield(_mask.getfield(*f), *f)
     for f in _mask.dtype.fields.values()]
    # Add the mask of the new field
    newmask.setfield(getmaskarray(newfield),
                     *newmask.dtype.fields[newfieldname])
    newdata._mask = newmask
    return newdata
示例6: test_multiple_args_records
def test_multiple_args_records(self):
    """Out-of-range integer arguments passed alongside a record array
    must raise (OverflowError on Python >= 2.7, TypeError on 2.6).
    """
    pyfunc = foobar
    mystruct_dt = np.dtype([('p', np.float64),
                            ('row', np.float64),
                            ('col', np.float64)])
    mystruct = numpy_support.from_dtype(mystruct_dt)
    cres = compile_isolated(pyfunc, [mystruct[:], types.uint64, types.uint64],
                            return_type=mystruct[:])
    cfunc = cres.entry_point
    st1 = np.recarray(3, dtype=mystruct_dt)
    st2 = np.recarray(3, dtype=mystruct_dt)
    st1.p = np.arange(st1.size) + 1
    st1.row = np.arange(st1.size) + 1
    st1.col = np.arange(st1.size) + 1
    st2.p = np.arange(st2.size) + 1
    st2.row = np.arange(st2.size) + 1
    st2.col = np.arange(st2.size) + 1
    test_fail_args = ((st1, -1, st2), (st1, st2, -1))
    # TypeError is raised instead of OverflowError on Python 2.6
    expected_exc = OverflowError if sys.version_info >= (2, 7) else TypeError
    # BUGFIX: the loop previously sat *inside* a single assertRaises block,
    # so the context manager exited on the first raising call and the
    # second argument tuple was never exercised. Each tuple now gets its
    # own assertRaises.
    for a, b, c in test_fail_args:
        with self.assertRaises(expected_exc):
            cfunc(a, b, c)
示例7: events
def events(self):
    '''return a rec array with all events of this channel

    The result has columns 't' (float64 time) and 'states' (dtype given by
    ``self.__model.state_type()``) and is computed once, then cached in
    ``self.__events``. Rows are the event-data rows whose 'chid' matches
    this channel's index, always including the first and last frame.
    '''
    # BUGFIX/idiom: compare against None with `is`, not `==` -- equality
    # may invoke an arbitrary __eq__ (and returns an array for numpy
    # operands); identity is the correct None test.
    if self.__events is None:
        #the event data from where to extract
        data = self.__eventdata._data
        #select all events for this channel
        eventmask = data['chid'] == self.__index
        # if there are no events at all, return an empty recarray
        if len(data) == 0:
            self.__events = numpy.recarray(shape = (0),dtype = [('t', '<f8'), ('states', self.__model.state_type())])
            return self.__events
        #always keep the first and last frame
        eventmask[0] = True
        eventmask[-1] = True
        #create recarray that stores the events of this channel
        # this works for DYK
        # self.__events = numpy.recarray(shape = (eventmask.sum()),dtype = [('t', '<f8'), ('states', '|i1', 8)])
        self.__events = numpy.recarray(shape = (eventmask.sum()),dtype = [('t', '<f8'), ('states', self.__model.state_type())])
        # copy time and this channel's subspace of the state column
        self.__events['t'] = data[eventmask]['t']
        # this works for DYK
        #self.__events['states'] = data[eventmask]['states'][:,self.__index,:]
        self.__events['states'] = data[eventmask]['states'][:,self.clusterindex(),self.__index]
    return self.__events
示例8: Xi2_line_ratios
def Xi2_line_ratios(obs_ratios, arxvPDR):
    '''Computes the Xi2 statistic given the observed lines and a PDR arxv.

    Returns a recarray with fields x, y, z (model grid coordinates),
    t (the Av) and v (the Xi2 value) -- one row per model per Av -- with
    rows whose Xi2 is not finite filtered out.
    '''
    # 0-d seed array; real rows are hstack-ed onto it below and the seed
    # entry is stripped before returning.
    allData = numpy.recarray([],[('x', 'f8'),('y', 'f8'),('z', 'f8'),('t', 'f8'),('v', 'f8'),])
    models = {}
    specStrs, codes = obs_ratios.species_and_codes()
    #collecting all the line intensities of the ratios involved in the observations (obs_ratio)
    #from the model database. Processing one Av at a time...
    for i, AvStr in enumerate(arxvPDR.radexDbs):
        Av = numpy.float64(AvStr)
        #array which will hold the grid points and the values for this Av
        data = numpy.recarray((arxvPDR.nMeshes), allData.dtype.descr)
        #getting the emissions for each line from the PDR database for all the models for the current Av
        for code in codes:
            models[code] = 10.0**arxvPDR.get_emissions_from_databases(line={'type':'radex-lvg', 'code':code}, Av_use=Av)
        #defining the array which will hold the Xi2 for all the models for this Av
        Xi2 = numpy.zeros(arxvPDR.nMeshes, 'f8')
        #compute the Xi2, accumulating over the observed ratios
        for obs_ratio in obs_ratios:
            #the line codes involved in this ratio
            code1, code2 = codes_from_ratio(obs_ratio)
            #the ratios for all the models at this Av for this particular line ratio
            model_ratio = models[code1] / models[code2]
            #computing the Xi2 from observed value (v) and error (e)
            f_o = obs_ratios[obs_ratio]['v']
            f_e = obs_ratios[obs_ratio]['e']
            f_m = model_ratio
            Xi2 += ((f_m - f_o)/f_e)**2.0
        #
        data.x = arxvPDR.grid_x
        data.y = arxvPDR.grid_y
        data.z = arxvPDR.grid_z
        data.t = Av
        data.v = Xi2
        allData = numpy.hstack((allData, data) )
    #removing the first (seed) entry
    allData = allData[1::]
    #filtering out the points which have NaNs
    inds_not_nan = numpy.where( numpy.isfinite(allData['v']) )
    # BUGFIX: a second, unreachable `return allData[1::]` that followed
    # this return was dead code and has been removed.
    return allData[inds_not_nan]
示例9: events
def events(self):
    """Return a recarray with all events of this cluster (computed once,
    then cached in ``self.__events``).

    Columns: 't' (time), 'noch' (number of open channels), 'chid', and
    'states', whose sub-dtype depends on whether the model is `dyk` or
    `deterministic`.
    """
    # BUGFIX/idiom: `is None` replaces isinstance(..., types.NoneType),
    # which relied on the Python-2-only `types.NoneType` attribute.
    if self.__events is None:
        # the indices of the channels within this cluster
        cidx = [channel.index() for channel in self]
        # the eventdata from where to extract
        data = self.__eventdata._data
        #select all events for this cluster
        eventmask = data['clid'] == self.__index
        #always keep the first and last frame
        eventmask[0] = True
        eventmask[-1] = True
        #create recarray that stores the events of this cluster;
        #the 'states' field layout depends on the model
        if self.__model == dyk:
            self.__events = numpy.recarray(shape = (eventmask.sum()),dtype = [('t', '<f8'), ('noch', '<i2'), ('chid', int), ('states', '|i1', (len(cidx), 8))])
        elif self.__model == deterministic:
            self.__events = numpy.recarray(shape = (eventmask.sum()),dtype = [('t', '<f8'), ('noch', '<i2'), ('chid', int), ('states', bool, (len(cidx),))])
        # copy time, chid and this cluster's slice of the state column
        self.__events['t'] = data[eventmask]['t']
        self.__events['chid'] = data[eventmask]['chid']
        self.__events['states'] = data[eventmask]['states'][:,self.__index,...]
        # cache the number of open channels per event
        model = self.__eventdata.model()
        self.__events['noch'] = model.open(self.__events).sum(-1)
    return self.__events
示例10: selectOnSharpeRatio
def selectOnSharpeRatio(self, ls_symbols, top_n_equities=10):
    ''' Choose the best portfolio over the stock universe,
    according to their sharpe ratio

    Returns the indices of the `top_n_equities` symbols sorted by
    descending Sharpe ratio.
    '''
    #TODO: change this to a DataAccess utilitie --------------
    symbols, files = getAllFromCSV()
    datalength = len(recfromcsv(files[0])['close'])
    print('Datalength: {}'.format(datalength))
    #---------------------------------------------------------
    #Initiaing data arrays: one named float column per symbol
    closes = np.recarray((datalength,), dtype=[(symbol, 'float') for symbol in symbols])
    daily_ret = np.recarray((datalength - 1,), dtype=[(symbol, 'float') for symbol in symbols])
    average_returns = np.zeros(len(files))
    return_stdev = np.zeros(len(files))
    sharpe_ratios = np.zeros(len(files))
    cumulative_returns = np.recarray((datalength-1,), dtype=[(symbol, 'float') for symbol in symbols])
    # Here is the meat
    #TODO: data = dataobj.getData(ls_symbols)
    for i, symbol in enumerate(ls_symbols):
        # NOTE(review): `data` is never defined in this scope (see the TODO
        # above) -- as written this loop raises NameError.
        if len(data) != datalength:
            continue
        # NOTE(review): `file` is also undefined here; probably meant
        # `files[i]` or `symbol`.
        print('Processing {} file'.format(file))
        closes[symbols[i]] = data['close'][::-1]
        # NOTE(review): dailyReturns() is called with no arguments --
        # presumably it should receive the closes; verify its definition.
        daily_ret[symbols[i]] = dailyReturns()
        # We now can compute:
        average_returns[i] = daily_ret[symbols[i]].mean()
        # NOTE(review): ndarray has no .stdev() method; numpy spells it
        # .std() -- this line raises AttributeError as written.
        return_stdev[i] = daily_ret[symbols[i]].stdev()
        sharpe_ratios[i] = (average_returns[i] / return_stdev[i]) * np.sqrt(datalength) # compare to course
        print('\tavg: {}, stdev: {}, sharpe ratio: {}'.format(average_returns[i], return_stdev[i], sharpe_ratios[i]))
    # Highest Sharpe ratios first, truncated to the requested count.
    sorted_sharpe_indices = np.argsort(sharpe_ratios)[::-1][0:top_n_equities]
    #TODO: return a disct as {symbol: sharpe_ratio}, or a df with all 3 components
    return sorted_sharpe_indices
示例11: test_add_data_then_read
def test_add_data_then_read(self):
    """Adding two single-row recarrays must leave both rows readable
    from the backend, in insertion order."""
    row_dtype = [("f0", "<f8"), ("f1", "<f8"), ("f2", "<f8")]
    for row in ((1, 2, 3), (4, 5, 6)):
        rec = np.recarray((1,), dtype=row_dtype)
        rec[0] = row
        self.data.addData(rec)
    self.assert_data_in_backend(self.data, [[1, 2, 3], [4, 5, 6]])
示例12: create_neurohdf_file
def create_neurohdf_file(filename, data):
    """Write the microcircuit `data` dict to a NeuroHDF (HDF5) file.

    `data` is expected to hold 'vert' and 'conn' sub-dicts (and optionally
    'meta'); each sub-key becomes an HDF5 dataset under the
    "Microcircuit" group.
    """
    with closing(h5py.File(filename, 'w')) as hfile:
        hfile.attrs['neurohdf_version'] = '0.1'
        mcgroup = hfile.create_group("Microcircuit")
        mcgroup.attrs['node_type'] = 'irregular_dataset'
        vert = mcgroup.create_group("vertices")
        conn = mcgroup.create_group("connectivity")
        vert.create_dataset("id", data=data['vert']['id'])
        vert.create_dataset("location", data=data['vert']['location'])
        verttype=vert.create_dataset("type", data=data['vert']['type'])
        # create rec array with two columns, value and name
        # NOTE(review): h5py.new_vlen is the deprecated pre-2.x API
        # (h5py.special_dtype(vlen=str) is the modern spelling) -- confirm
        # the h5py version in use.
        my_dtype = np.dtype([('value', 'l'), ('name', h5py.new_vlen(str))])
        helpdict={VerticesTypeSkeletonRootNode['id']: VerticesTypeSkeletonRootNode['name'],
                  VerticesTypeSkeletonNode['id']: VerticesTypeSkeletonNode['name'],
                  VerticesTypeConnectorNode['id']: VerticesTypeConnectorNode['name']
                  }
        # map each numeric vertex-type value to its human-readable name
        arr=np.recarray( len(helpdict), dtype=my_dtype )
        for i,kv in enumerate(helpdict.items()):
            arr[i][0] = kv[0]
            arr[i][1] = kv[1]
        verttype.attrs['value_name']=arr
        vert.create_dataset("confidence", data=data['vert']['confidence'])
        vert.create_dataset("userid", data=data['vert']['userid'])
        vert.create_dataset("radius", data=data['vert']['radius'])
        vert.create_dataset("skeletonid", data=data['vert']['skeletonid'])
        vert.create_dataset("creation_time", data=data['vert']['creation_time'])
        vert.create_dataset("modification_time", data=data['vert']['modification_time'])
        conn.create_dataset("id", data=data['conn']['id'])
        # NOTE(review): dict.has_key is Python-2 only; `in` is the portable
        # membership test.
        if data['conn'].has_key('type'):
            conntype=conn.create_dataset("type", data=data['conn']['type'])
            helpdict={ConnectivityNeurite['id']: ConnectivityNeurite['name'],
                      ConnectivityPresynaptic['id']: ConnectivityPresynaptic['name'],
                      ConnectivityPostsynaptic['id']: ConnectivityPostsynaptic['name']
                      }
            # same value->name mapping pattern as for vertex types
            arr=np.recarray( len(helpdict), dtype=my_dtype )
            for i,kv in enumerate(helpdict.items()):
                arr[i][0] = kv[0]
                arr[i][1] = kv[1]
            conntype.attrs['value_name']=arr
        if data['conn'].has_key('skeletonid'):
            conn.create_dataset("skeletonid", data=data['conn']['skeletonid'])
        if data.has_key('meta'):
            metadata=mcgroup.create_group('metadata')
            # create recarray with two columns, skeletonid and string
            my_dtype = np.dtype([('skeletonid', 'l'), ('name', h5py.new_vlen(str))])
            arr=np.recarray( len(data['meta']), dtype=my_dtype )
            for i,kv in enumerate(data['meta'].items()):
                arr[i][0] = kv[0]
                arr[i][1] = kv[1]
            metadata.create_dataset('skeleton_name', data=arr )
示例13: __init__
def __init__(self, stid, nlat, elon, elev):
    """Station container: identifier, coordinates, elevation, and two
    pre-allocated measurement recarrays (NPTSt and NPTSp records)."""
    self.stid = stid
    self.nlat = nlat
    self.elon = elon
    self.elev = elev
    # Measured data: integer flux plus solar altitude and lunar phase.
    sample_dtype = {"names": ("flux", "sun_alt", "moon_phase"),
                    "formats": (np.int64, np.float64, np.float64)}
    self.datat = np.recarray((NPTSt,), dtype=sample_dtype)
    self.datap = np.recarray((NPTSp,), dtype=sample_dtype)
示例14: make_polynomial_psf_params
def make_polynomial_psf_params(ntrain, nvalidate, nvisualize):
    """ Make training/testing data for PSF with params varying as polynomials.

    Returns (training_data, validate_data, vis_data): recarrays of
    `star_type` records (u, v, hlr, g1, g2, u0, v0, flux); vis_data is
    reshaped to (nvisualize, nvisualize).
    """
    bd = galsim.BaseDeviate(5772156649+314159)
    ud = galsim.UniformDeviate(bd)
    # Make randomish Chebyshev polynomial coefficients:
    # 5 different surfaces (hlr, g1, g2, u0, v0), up to 3rd order in x and y.
    coefs = np.empty((4, 4, 5), dtype=float)
    for (i, j, k), _ in np.ndenumerate(coefs):
        coefs[i, j, k] = 2*ud() - 1.0

    def params_at(u, v):
        # Evaluate the 5 polynomial surfaces at (u, v);
        # chebval2d(...)/6 keeps each value in roughly [-0.5, 0.5].
        # (Extracted helper: this block was previously triplicated.)
        vals = np.polynomial.chebyshev.chebval2d(u, v, coefs)/6
        hlr = vals[0] * 0.1 + 0.35
        g1 = vals[1] * 0.1
        g2 = vals[2] * 0.1
        u0 = vals[3]
        v0 = vals[4]
        return hlr, g1, g2, u0, v0

    training_data = np.recarray((ntrain,), dtype=star_type)
    for i in range(ntrain):
        # RNG draw order (u, v, flux) matches the original code exactly.
        u = ud()
        v = ud()
        flux = ud()*50+100
        training_data[i] = (u, v) + params_at(u, v) + (flux,)

    validate_data = np.recarray((nvalidate,), dtype=star_type)
    for i in range(nvalidate):
        # Validation points are restricted to the central [0.25, 0.75] box.
        u = ud()*0.5 + 0.25
        v = ud()*0.5 + 0.25
        validate_data[i] = (u, v) + params_at(u, v) + (1.0,)

    # Dense regular grid for visualization.
    vis_data = np.recarray((nvisualize*nvisualize), dtype=star_type)
    u = v = np.linspace(0, 1, nvisualize)
    u, v = np.meshgrid(u, v)
    for i, (u1, v1) in enumerate(zip(u.ravel(), v.ravel())):
        vis_data[i] = (u1, v1) + params_at(u1, v1) + (1.0,)
    return training_data, validate_data, vis_data.reshape((nvisualize, nvisualize))
示例15: test_save_results
def test_save_results(self):
    """save_results must handle 1d, 2d, 3d, and very large outcome arrays.

    Each case writes the archive and removes it again; the copy-pasted
    setup/save/remove sequence is factored into a local helper.
    """
    fn = u'../data/test.tar.gz'

    def run_case(nr_experiments, outcome_a):
        # Build a minimal experiments recarray, save, then clean up.
        experiments = np.recarray((nr_experiments,),
                                  dtype=[('x', float), ('y', float)])
        results = (experiments, {'a': outcome_a})
        save_results(results, fn)
        os.remove(fn)

    # 1d outcomes
    run_case(10000, np.random.rand(10000, 1))
    # 2d outcomes (experiments x timesteps)
    run_case(10000, np.zeros((10000, 100)))
    # 3d outcomes (experiments x timesteps x replications)
    run_case(10000, np.zeros((10000, 100, 10)))
    # very large number of experiments
    run_case(500000, np.zeros((500000, 100)))