本文整理汇总了Python中spacepy.datamodel.dmarray函数的典型用法代码示例。如果您正苦于以下问题:Python dmarray函数的具体用法?Python dmarray怎么用?Python dmarray使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了dmarray函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: FIRE_HiRes_L1_L2
def FIRE_HiRes_L1_L2(datafile, ephemfile):
    """Interpolate ephemeris quantities onto the HiRes L1 time base and
    write the result as a JSON-headed ASCII L2 file.

    Parameters
    ----------
    datafile : str
        Path to the JSON-headed ASCII HiRes L1 data file.
    ephemfile : str
        Path to the JSON-headed ASCII ephemeris file.

    Returns
    -------
    data : spacepy.datamodel.SpaceData
        The trimmed L1 data with interpolated ephemeris fields added.
    """
    full_data = dm.readJSONheadedASCII(datafile)
    ephem = dm.readJSONheadedASCII(ephemfile)
    # Restrict the data to the span covered by the ephemeris.
    data = Trim_data_file(full_data, ephem)
    ephem_fields = ['Lsimple', 'CDMAG_MLT']
    # Interpolate on TAI seconds so the time axes are monotonic floats.
    dt = spt.Ticktock(data['Epoch']).TAI
    et = spt.Ticktock(ephem['DateTime']).TAI
    for field in ephem_fields:
        print(field)  # progress output; print() form works on Py2 and Py3
        data[field] = dm.dmarray(tb.interpol(dt, et, ephem[field]))
    # Geodetic latitude/longitude are the two columns of Rgeod_LatLon.
    data['Lat'] = dm.dmarray(tb.interpol(dt, et, ephem['Rgeod_LatLon'][:, 0]))
    data['Lon'] = dm.dmarray(tb.interpol(dt, et, ephem['Rgeod_LatLon'][:, 1]))
    # NOTE(review): the original allocated eflux = np.zeros(n_lines, 12),
    # which raises TypeError (shape must be a tuple) and was never used;
    # it has been removed along with the unused `labels`.
    # Output name is built from the ephemeris day: <prefix><day>-HiRes_L2.txt
    day = ephem['DateTime'][0][0:10]
    outfile = datafile[:-23] + day + '-HiRes_L2.txt'
    dm.toJSONheadedASCII(outfile, data)
    return data
示例2: test_defaults
def test_defaults(self):
    """run it and check that defaults were set correctly"""
    spec = spectrogram(self.data, variables=self.kwargs['variables'])
    expected = {
        'bins': [dm.dmarray([730120.0, 730135.30769231, 730150.61538462,
                             730165.92307692, 730181.23076923, 730196.53846154,
                             730211.84615385, 730227.15384615, 730242.46153846,
                             730257.76923077, 730273.07692308, 730288.38461538,
                             730303.69230769, 730319.]),
                 dm.dmarray([0.00169679, 0.07848775, 0.1552787, 0.23206965,
                             0.30886061, 0.38565156, 0.46244251, 0.53923347,
                             0.61602442, 0.69281538, 0.76960633, 0.84639728,
                             0.92318824, 0.99997919])],
        'variables': ['xval', 'yval', 'zval'],
        'ylim': (0.0012085702179961411, 0.99323954710300699),
        'zlim': (0.001696792515639145, 0.99997919064162388),
    }
    for key in expected:
        if key == 'variables':
            # exact list comparison for the variable names
            self.assertEqual(spec.specSettings[key], expected[key])
        elif key == 'bins':
            # bin edges only need to agree loosely with the reference
            np.testing.assert_almost_equal(spec.specSettings[key],
                                           expected[key], decimal=2)
        else:
            np.testing.assert_almost_equal(spec.specSettings[key],
                                           expected[key], decimal=6)
    # adding more data after creation is not supported
    self.assertRaises(NotImplementedError, spec.add_data, self.data)
示例3: _parse
def _parse(self, lines):
    '''
    Given raw ascii input as a list of lines, parse into object.

    Two formats are handled: IAGA-2002 (detected by a leading " Format"
    header and delegated to parse_iaga) and the legacy one-line-per-day
    format with 24 hourly Dst values per line.
    '''
    if lines[0][0:7] == ' Format':  # IAGA-2002 formatted file.
        data = parse_iaga(lines, iagacode='DST')
        for key in data:
            self[key] = data[key]
        self['dst'].attrs['units'] = 'nT'
        self.attrs['npts'] = data['time'].size
        return
    # Legacy format: one line per day, 24 hourly values per line.
    self.attrs['npts'] = len(lines)
    time = []
    dst = np.zeros(24 * self.attrs['npts'])
    for i, line in enumerate(lines):
        # Get year, month, day.  Century digits (cols 14-16) may be
        # blank/non-numeric in older files; default the century to 1900.
        # (Narrowed from a bare except: only int() can raise here.)
        try:
            yy = int(line[14:16]) * 100
        except ValueError:
            yy = 1900
        yy = yy + int(line[3:5])
        dd = int(line[8:10])
        mm = int(line[5:7])
        # Parse the 24 hourly values: 4 characters each, starting col 20.
        for j in range(0, 24):
            time.append(dt.datetime(yy, mm, dd, j))
            loc = 20 + 4 * j
            dst[24 * i + j] = float(line[loc:loc + 4])
    self['time'] = dmarray(time)
    self['dst'] = dmarray(dst, attrs={'units': 'nT'})
示例4: __init__
def __init__(self, inlst):
    """Build a SpaceData of start/stop times from decoded packets.

    Parameters
    ----------
    inlst : list
        Sequence of (epoch, value) pairs from the packet decoder.
        # assumes each value is a 1-element array-like -- TODO confirm
    """
    # zip(*inlst) is an iterator on Python 3; materialize it before
    # indexing (the original zip(*inlst)[0] only worked on Python 2).
    cols = list(zip(*inlst))
    dt = cols[0]
    data = np.hstack(cols[1]).reshape((-1, 1))
    dat = dm.SpaceData()
    dat['time'] = dm.dmarray(data[:, 0])
    # ISTP-style metadata for the 'time' variable.
    time_attrs = {
        'CATDESC': 'Start or stop time',
        'FIELDNAM': 'time',
        'LABLAXIS': 'Start or stop time',
        'SCALETYP': 'linear',
        'UNITS': 'ms',
        'VALIDMIN': datetime.datetime(1990, 1, 1),
        'VALIDMAX': datetime.datetime(2029, 12, 31, 23, 59, 59, 999000),
        'VAR_TYPE': 'support_data',
        'VAR_NOTES': 'Time data started or stopped',
        'DEPEND_0': 'Epoch',
        'FILLVAL': 'None',
    }
    for key, val in time_attrs.items():
        dat['time'].attrs[key] = val
    dat['Epoch'] = dm.dmarray(dt)
    # ISTP-style metadata for the 'Epoch' variable.
    epoch_attrs = {
        'CATDESC': 'Default Time',
        'FIELDNAM': 'Epoch',
        'LABLAXIS': 'Epoch',
        'SCALETYP': 'linear',
        'UNITS': 'ms',
        'VALIDMIN': datetime.datetime(1990, 1, 1),
        'VALIDMAX': datetime.datetime(2029, 12, 31, 23, 59, 59, 999000),
        'VAR_TYPE': 'support_data',
        'TIME_BASE': '0 AD',
        'MONOTON': 'INCREASE',
        'VAR_NOTES': 'Epoch at each configuration point',
    }
    for key, val in epoch_attrs.items():
        dat['Epoch'].attrs[key] = val
    self.data = dat
示例5: test_resample1
def test_resample1(self):
    '''resample should give consistent results'''
    ans = dm.SpaceData()
    ans.attrs['foo'] = 'bar'
    ans['a'] = [1., 3., 5., 7.]
    ans['b'] = dm.dmarray([5., 7., 9., 11.])
    ans['b'].attrs['marco'] = 'polo'
    ans['Epoch'] = [datetime.datetime(2010, 1, 1, 1, 0),
                    datetime.datetime(2010, 1, 1, 3, 0),
                    datetime.datetime(2010, 1, 1, 5, 0),
                    datetime.datetime(2010, 1, 1, 7, 0)]
    a = dm.SpaceData()
    a['a'] = dm.dmarray(range(10))
    a['b'] = dm.dmarray(range(10)) + 4
    a['b'].attrs['marco'] = 'polo'
    a['c'] = dm.dmarray(range(3)) + 10
    a.attrs['foo'] = 'bar'
    times = [datetime.datetime(2010, 1, 1) + datetime.timedelta(hours=i)
             for i in range(10)]
    out = dm.resample(a, times, winsize=datetime.timedelta(hours=2),
                      overlap=datetime.timedelta(hours=0))
    for k, v in out.items():
        np.testing.assert_equal(v, ans[k])
    self.assertEqual(ans.attrs, out.attrs)
    self.assertEqual(ans['b'].attrs['marco'], 'polo')
    # BUG FIX: assertTrue(x, 'Epoch') treats 'Epoch' as the failure
    # *message* and passes for any truthy x; an equality check was intended.
    self.assertEqual(out['b'].attrs['DEPEND_0'], 'Epoch')
    # 'c' is shorter than the time base and must be dropped by resample
    self.assertFalse('c' in out)
示例6: __init__
def __init__(self, filename, *args, **kwargs):
    '''
    Reads the data; sorts into arrays.

    Parameters
    ----------
    filename : str
        Path to the MLT-slice output file to load.
    '''
    import datetime as dt
    from spacepy.datamodel import dmarray
    from matplotlib.dates import date2num
    super(MltSlice, self).__init__(*args, **kwargs)  # Init as PbData.
    self.attrs['file'] = filename
    # Use a context manager so the handle is closed deterministically
    # (the original leaked the open file).
    with open(filename, 'r') as f:
        # Parse header: line 1 carries the MLT value, line 2 the L grid.
        self.attrs['mlt'] = float(f.readline().split()[-1])
        self['L'] = dmarray(np.array(f.readline().split()[1:], dtype=float),
                            {'units': '$R_E$'})
        # Remainder of file: one record per time step.
        lines = f.readlines()
    # Preallocate: density is [time, L]; time is an object array of datetimes.
    self['n'] = dmarray(np.zeros([len(lines), len(self['L'])]),
                        {'units': 'cm^{-3}'})
    self['time'] = dmarray(np.zeros(len(lines), dtype=object))
    for i, l in enumerate(lines):
        p = l.split()
        # Columns 0-6: year month day hour min sec msec; rest: densities.
        self['time'][i] = dt.datetime(int(p[0]), int(p[1]), int(p[2]),
                                      int(p[3]), int(p[4]), int(p[5]),
                                      int(p[6]) * 1000)
        self['n'][i, :] = p[7:]
    # Some "hidden" variables for plotting.
    self._dtime = date2num(self['time'])
    self._dy = self['L'][1] - self['L'][0]
示例7: setUp
def setUp(self):
    """Create reproducible random inputs for the spectrogram tests."""
    super(spectrogramTests, self).setUp()
    self.kwargs = {'variables': ['xval', 'yval', 'zval']}
    np.random.seed(8675309)  # fixed seed -> deterministic samples
    samples = {name: dm.dmarray(np.random.random_sample(200))
               for name in self.kwargs['variables']}
    self.data = dm.SpaceData(**samples)
示例8: test_resample_shape
def test_resample_shape(self):
    '''resample should give consistent results, 1d or 2d'''
    data = dm.SpaceData()
    data['a'] = dm.dmarray(range(10 * 3 * 4)).reshape(10, 3, 4)
    data['b'] = dm.dmarray(range(10)) + 4
    data['c'] = dm.dmarray(range(3)) + 10
    start = datetime.datetime(2010, 1, 1)
    times = [start + datetime.timedelta(hours=hr) for hr in range(10)]
    # resample cannot handle a 3-D variable; expect IndexError
    self.assertRaises(IndexError, dm.resample, data, times,
                      datetime.timedelta(hours=2),
                      datetime.timedelta(hours=0))
示例9: __init__
def __init__(self, inlst):
    """Decode Context packets into a SpaceData with ISTP-style metadata.

    Parameters
    ----------
    inlst : list
        Sequence of (epoch, values) pairs from the packet decoder; exits
        the process if empty.
    """
    if not inlst:
        print("** No packets decoded cannot continue **")
        sys.exit(1)
    # zip(*inlst) is an iterator on Python 3; materialize it before
    # indexing (the original zip(*inlst)[0] only worked on Python 2).
    cols = list(zip(*inlst))
    dt = cols[0]
    data = np.hstack(cols[1]).reshape((-1, 2))
    # go through Context and change the data type and set the None to fill
    fill = -2**16 - 1
    tmp = np.zeros(data.shape, dtype=int)
    for (i, j), val in np.ndenumerate(data):
        try:
            tmp[i, j] = val
        except (TypeError, ValueError):
            tmp[i, j] = fill
    dat = dm.SpaceData()
    dat['Context'] = dm.dmarray(tmp[:])
    # Metadata for the Context variable.
    context_attrs = {
        'CATDESC': 'Context data',
        'FIELDNAM': 'Context',
        'ELEMENT_LABELS': ("Det_0", "Det_1"),
        'ELEMENT_NAMES': ("Det_0", "Det_1"),
        'LABEL': 'Context data',
        'SCALE_TYPE': 'log',
        'UNITS': '',
        'VALID_MIN': 0,
        'VALID_MAX': 2**15 - 1,
        'VAR_TYPE': 'data',
        'VAR_NOTES': 'Context data 6s average',
        'DEPEND_0': 'Epoch',
        'FILL_VALUE': fill,
    }
    for key, val in context_attrs.items():
        dat['Context'].attrs[key] = val
    dat['Epoch'] = dm.dmarray(dt)
    # Metadata for the Epoch variable.
    epoch_attrs = {
        'CATDESC': 'Default Time',
        'FIELDNAM': 'Epoch',
        'LABEL': 'Epoch',
        'SCALE_TYPE': 'linear',
        'VAR_TYPE': 'support_data',
        'TIME_BASE': '0 AD',
        'MONOTON': 'INCREASE',
        'VAR_NOTES': 'Epoch at each configuration point',
    }
    for key, val in epoch_attrs.items():
        dat['Epoch'].attrs[key] = val
    # go through and remove duplicate times and data; np.unique also sorts
    print("Looking for duplicate measurements")
    arr, dt_ind, return_inverse = np.unique(dat['Epoch'], return_index=True,
                                            return_inverse=True)
    print("Found {0} duplicates of {1}".format(
        len(return_inverse) - len(dt_ind), len(return_inverse)))
    dat['Epoch'] = arr
    dat['Context'] = dat['Context'][dt_ind]
    self.data = dat
示例10: __init__
def __init__(self, inlst):
    """Build a SpaceData of FIRE on/off intervals from decoded packets.

    Parameters
    ----------
    inlst : list
        Sequence of (epoch, value) pairs; alternating entries are treated
        as FIRE on (even index) / FIRE off (odd index).
    """
    # zip(*inlst) is an iterator on Python 3; materialize it before
    # indexing (the original zip(*inlst)[0] only worked on Python 2).
    cols = list(zip(*inlst))
    dt = cols[0]
    data = np.hstack(cols[1]).reshape((-1, 1))
    dat = dm.SpaceData()
    dat['Time'] = dm.dmarray(data[:, 0])
    # Metadata for the 'Time' variable.
    time_attrs = {
        'CATDESC': 'Start or stop Time',
        'FIELDNAM': 'Time',
        'LABLAXIS': 'Start or stop Time',
        'SCALETYP': 'linear',
        'VALIDMIN': datetime.datetime(1990, 1, 1),
        'VALIDMAX': datetime.datetime(2029, 12, 31, 23, 59, 59, 999000),
        'VAR_TYPE': 'support_data',
        'VAR_NOTES': 'Time data started or stopped',
        'DEPEND_0': 'Epoch',
        'FILLVAL': 'None',
    }
    for key, val in time_attrs.items():
        dat['Time'].attrs[key] = val
    dat['Epoch'] = dm.dmarray(dt)
    # Metadata for the 'Epoch' variable.
    epoch_attrs = {
        'CATDESC': 'Default Time',
        'FIELDNAM': 'Epoch',
        'LABLAXIS': 'Epoch',
        'SCALETYP': 'linear',
        'VALIDMIN': datetime.datetime(1990, 1, 1),
        'VALIDMAX': datetime.datetime(2029, 12, 31, 23, 59, 59, 999000),
        'VAR_TYPE': 'support_data',
        'TIME_BASE': '0 AD',
        'MONOTON': 'INCREASE',
        'VAR_NOTES': 'Epoch at each configuration point',
    }
    for key, val in epoch_attrs.items():
        dat['Epoch'].attrs[key] = val
    dat['Mode'] = dm.dmarray(np.zeros(len(dt), dtype=int))
    dat['Mode'][...] = -1
    mode_attrs = {
        'FIELDNAM': 'Mode',
        'FILLVAL': -1,
        'LABLAXIS': 'FIRE Mode',
        'SCALETYP': 'linear',
        'VALIDMIN': 0,
        'VALIDMAX': 1,
        'VAR_TYPE': 'support_data',
        'VAR_NOTES': 'Is the line FIRE on (=1) or FIRE off (=0)',
    }
    for key, val in mode_attrs.items():
        dat['Mode'].attrs[key] = val
    # Intervals alternate: even rows are "on", odd rows are "off".
    dat['Mode'][::2] = 1
    dat['Mode'][1::2] = 0
    dat['Duration'] = dm.dmarray(np.zeros(len(dt), dtype=int))
    dat['Duration'][...] = -1
    duration_attrs = {
        'FIELDNAM': 'Duration',
        'FILLVAL': -1,
        'LABLAXIS': 'FIRE Duration',
        'SCALETYP': 'linear',
        'VALIDMIN': 0,
        'VALIDMAX': 100000,
        'VAR_TYPE': 'support_data',
        'VAR_NOTES': 'Duration of the on or off',
    }
    for key, val in duration_attrs.items():
        dat['Duration'].attrs[key] = val
    # Duration in whole seconds (timedelta microseconds intentionally
    # dropped, matching days*86400 + seconds).
    df = np.asarray([v1 - v2 for v1, v2 in zip(dat['Time'], dat['Epoch'])])
    dat['Duration'][...] = np.asarray([v.days * 24 * 60 * 60 + v.seconds
                                       for v in df])
    self.data = dat
示例11: test_toRecArray
def test_toRecArray(self):
    '''a record array can be created from a SpaceData'''
    sd = dm.SpaceData()
    sd['x'] = dm.dmarray([1.0, 2.0])
    sd['y'] = dm.dmarray([2, 4])
    rec = dm.toRecArray(sd)
    # values survive the conversion...
    np.testing.assert_equal(rec['x'], [1.0, 2.0])
    np.testing.assert_equal(rec['y'], [2, 4])
    # ...and so do the field dtypes
    expected_dtype = np.dtype((np.record, [('x', '<f8'), ('y', '<i8'), ]))
    self.assertEqual(rec.dtype, expected_dtype)
示例12: test_HDF5roundtrip2GZIP
def test_HDF5roundtrip2GZIP(self):
    """Data can go to hdf without altering datetimes in the datamodel with compression"""
    sd = dm.SpaceData()
    sd['foo'] = dm.SpaceData()
    dm.toHDF5(self.testfile, sd, compression='gzip')
    loaded = dm.fromHDF5(self.testfile)
    self.assertEqual(sd['foo'], loaded['foo'])
    # writing a datetime array must not mutate the in-memory copy
    sd['bar'] = dm.dmarray([datetime.datetime(2000, 1, 1)])
    dm.toHDF5(self.testfile, sd, compression='gzip')
    self.assertEqual(sd['bar'], dm.dmarray([datetime.datetime(2000, 1, 1)]))
示例13: calc_deg
def calc_deg(self):
    """
    Gitm defaults to radians for lat and lon, which is sometimes difficult
    to use. This method creates *dLat* and *dLon*, which is lat and lon
    in degrees.
    """
    from numpy import pi
    # Convert each angular coordinate that is present; expression kept
    # as (x * 180.0 / pi) for bit-identical results.
    for src, dest in (("Latitude", "dLat"), ("Longitude", "dLon")):
        if src in self:
            self[dest] = dmarray(self[src] * 180.0 / pi,
                                 attrs={"units": "degrees"})
示例14: test_creation_dmarray
def test_creation_dmarray(self):
    """When a dmarray is created it should have attrs empty or not"""
    self.assertTrue(hasattr(self.dat, 'attrs'))
    self.assertEqual(self.dat.attrs['a'], 'a')
    plain = dm.dmarray([1, 2, 3])
    self.assertTrue(hasattr(plain, 'attrs'))
    self.assertEqual(plain.attrs, {})
    tagged = dm.dmarray([1, 2, 3], attrs={'coord': 'GSM'})
    # creating a second array must not touch the first one's attrs
    self.assertEqual(plain.attrs, {})
    self.assertEqual(tagged.attrs, {'coord': 'GSM'})
    typed = dm.dmarray([1, 2, 3], dtype=float, attrs={'coord': 'GSM'})
    np.testing.assert_almost_equal([1, 2, 3], typed)
示例15: GSMtoMLT
def GSMtoMLT(gsm, dt):
    """
    convert GSM values to MLT in the lgm way

    Parameters
    ----------
    gsm : array_like
        Nx3 array_like of the GSM position
    dt : array_like
        N element array_like of datetime objects

    Returns
    -------
    out : numpy.array
        N element array of the MLT values
    """
    def doConv(gsm, dt):
        # Chain GSM -> WGS84 -> EDMAG through Lgm and extract the MLT.
        Pgsm = Lgm_Vector.Lgm_Vector(*gsm)
        Pwgs = Lgm_Vector.Lgm_Vector()
        Pmlt = Lgm_Vector.Lgm_Vector()
        cT = pointer(Lgm_CTrans())
        Lgm_Set_Coord_Transforms(dateToDateLong(dt), dateToFPHours(dt), cT)
        Lgm_Convert_Coords(pointer(Pgsm), pointer(Pwgs), GSM_TO_WGS84, cT)
        Lgm_Convert_Coords(pointer(Pwgs), pointer(Pmlt), WGS84_TO_EDMAG, cT)
        R, MLat, MLon, MLT = c_double(), c_double(), c_double(), c_double(),
        Lgm_EDMAG_to_R_MLAT_MLON_MLT(pointer(Pmlt), pointer(R), pointer(MLat),
                                     pointer(MLon), pointer(MLT), cT)
        return MLT.value
    gsm_ = numpy.asanyarray(gsm)
    # Accept either a single datetime (broadcast) or a sequence.
    if isinstance(dt, datetime.datetime):
        dt_ = numpy.asanyarray([dt])
    else:
        dt_ = numpy.asanyarray(dt)
    if gsm_.ndim == 2:
        if gsm_.shape[1] != 3:
            raise ValueError("Invalid vector shape")
        if gsm_.shape[0] != dt_.size:
            if dt_.size == 1:
                # Replicate the single time across all positions.
                dt_ = dm.dmarray([dt_] * gsm_.shape[0])
            else:
                raise ValueError("Array size mismatch")
        ans = dm.dmarray(numpy.empty(len(dt_)), dtype=numpy.double,
                         attrs={'coord_system': 'EDMAG'})
        # itertools.izip does not exist on Python 3; the builtin zip is
        # behaviorally identical here (and works on Python 2 as well).
        for ii, (gsm_val, dt_val) in enumerate(zip(gsm_, dt_)):
            ans[ii] = doConv(gsm_val, dt_val)
    else:
        if dt_.size == 1:
            ans = dm.dmarray([doConv(gsm_, dt_)],
                             attrs={'coord_system': 'EDMAG'})
        else:
            ans = dm.dmarray(doConv(gsm_, dt_),
                             attrs={'coord_system': 'EDMAG'})
    return ans