This article collects typical usage examples of the Python function numpy.core.numeric.dtype. If you are wondering what dtype does, how to call it, or what it looks like in real code, the curated examples below should help.
The sections that follow show 15 code examples of the dtype function, sorted by popularity by default. You can upvote the examples you like or find useful; that feedback helps the system recommend better Python code samples.
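Before the examples, a minimal sketch of what the function does. It assumes a NumPy install in which numpy.core.numeric is still importable (newer releases move the module under numpy._core); the dtype it exposes is the same dtype constructor available at the top level of numpy, so both names behave identically:

import numpy.core.numeric as nx

dt = nx.dtype('f8,i4,a5')          # comma-separated format string -> structured dtype
print(dt.names)                    # ('f0', 'f1', 'f2')
print(dt.itemsize)                 # bytes per record: 8 + 4 + 5 = 17
print(nx.dtype('int32').char)      # single-character typecode: 'i'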
Example 1: test_upgrade
def test_upgrade(self):
    "Tests the upgrade method."
    converter = StringConverter()
    assert_equal(converter._status, 0)
    # test int
    assert_equal(converter.upgrade(b'0'), 0)
    assert_equal(converter._status, 1)
    # On systems where integer defaults to 32-bit, the statuses will be
    # offset by one, so we check for this here.
    import numpy.core.numeric as nx
    status_offset = int(nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize)
    # test int > 2**32
    assert_equal(converter.upgrade(b'17179869184'), 17179869184)
    assert_equal(converter._status, 1 + status_offset)
    # test float
    assert_allclose(converter.upgrade(b'0.'), 0.0)
    assert_equal(converter._status, 2 + status_offset)
    # test complex
    assert_equal(converter.upgrade(b'0j'), complex('0j'))
    assert_equal(converter._status, 3 + status_offset)
    # test str
    assert_equal(converter.upgrade(b'a'), b'a')
    assert_equal(converter._status, len(converter._mapper) - 1)
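The platform-dependent offset used in this test can be inspected on its own; a minimal sketch mirroring the two lines above (newer NumPy releases emit a DeprecationWarning when the abstract numpy.integer type is converted to a dtype):

import numpy.core.numeric as nx

# 0 where the default integer is already 64-bit (typical 64-bit Linux/macOS),
# 1 where it defaults to 32-bit (e.g. 64-bit Windows builds).
status_offset = int(nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize)
print(nx.dtype(nx.integer), nx.dtype(nx.int64), status_offset)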
Example 2: __init__
def __init__(self, pyfunc, otypes='', doc=None):
    self.thefunc = pyfunc
    self.ufunc = None
    nin, ndefault = _get_nargs(pyfunc)
    if nin == 0 and ndefault == 0:
        self.nin = None
        self.nin_wo_defaults = None
    else:
        self.nin = nin
        self.nin_wo_defaults = nin - ndefault
    self.nout = None
    if doc is None:
        self.__doc__ = pyfunc.__doc__
    else:
        self.__doc__ = doc
    if isinstance(otypes, types.StringType):
        self.otypes = otypes
        for char in self.otypes:
            if char not in typecodes['All']:
                raise ValueError("invalid otype specified")
    elif iterable(otypes):
        self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
    else:
        raise ValueError("output types must be a string of typecode "
                         "characters or a list of data-types")
    self.lastcallargs = 0
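The otypes branch above normalizes every output type to its one-character typecode via dtype(...).char; a standalone sketch of that conversion (plain NumPy, with the alias _nx chosen to match the snippet):

import numpy.core.numeric as _nx

otypes = [float, _nx.dtype('int32'), complex]
# each entry becomes its single-character typecode, e.g. 'd', 'i', 'D'
chars = ''.join([_nx.dtype(x).char for x in otypes])
print(chars)    # 'diD' on most platforms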
Example 3: _parseFormats
def _parseFormats(self, formats, aligned=0):
    """ Parse the field formats """
    if formats is None:
        raise ValueError("Need formats argument")
    if isinstance(formats, list):
        if len(formats) < 2:
            formats.append('')
        formats = ','.join(formats)
    dtype = sb.dtype(formats, aligned)
    fields = dtype.fields
    if fields is None:
        dtype = sb.dtype([('f1', dtype)], aligned)
        fields = dtype.fields
    keys = dtype.names
    self._f_formats = [fields[key][0] for key in keys]
    self._offsets = [fields[key][1] for key in keys]
    self._nfields = len(keys)
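The same parsing can be reproduced outside the class; a small sketch assuming sb is numpy.core.numeric, as in the records module this method belongs to:

import numpy.core.numeric as sb

dt = sb.dtype(','.join(['f8', 'i4', 'a5']), align=True)
for name in dt.names:
    fmt, offset = dt.fields[name][:2]   # (field dtype, byte offset)
    print(name, fmt, offset)
# with align=True the 'i4' field starts on a 4-byte boundary, so the
# offsets come out as 0, 8, 12 and the itemsize is padded accordingly.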
Example 4: view
def view(self, obj):
    try:
        if issubclass(obj, ndarray):
            return ndarray.view(self, obj)
    except TypeError:
        pass
    dtype = sb.dtype(obj)
    if dtype.fields is None:
        return self.__array__().view(dtype)
    return ndarray.view(self, obj)
Example 5: _createdescr
def _createdescr(self, byteorder):
    descr = sb.dtype({'names': self._names,
                      'formats': self._f_formats,
                      'offsets': self._offsets,
                      'titles': self._titles})
    if byteorder is not None:
        byteorder = _byteorderconv[byteorder[0]]
        descr = descr.newbyteorder(byteorder)
    self._descr = descr
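The dictionary form of dtype used here, with explicit offsets and titles, can be tried on its own; a minimal sketch with illustrative field names:

import numpy.core.numeric as sb

descr = sb.dtype({'names':   ['x', 'tag'],
                  'formats': ['f8', 'S4'],
                  'offsets': [0, 8],
                  'titles':  ['abscissa', 'label']})
print(descr.fields['x'])        # (dtype('float64'), 0, 'abscissa')
print(descr.newbyteorder('>'))  # same layout, numeric fields switched to big-endian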
Example 6: fromrecords
def fromrecords(reclist, dates=None, freq=None, start_date=None,
                dtype=None, shape=None, formats=None, names=None,
                titles=None, aligned=False, byteorder=None):
    """Creates a MaskedRecords from a list of records.

    The data in the same field can be heterogeneous; they will be promoted
    to the highest data type. This method is intended for creating
    smaller record arrays. If used to create large arrays without formats
    defined, it can be slow.

    If formats is None, this will auto-detect formats. Use a list of
    tuples rather than a list of lists for faster processing.
    """
    # reclist is in fact a mrecarray .................
    if isinstance(reclist, MultiTimeSeries):
        mdescr = reclist.dtype
        shape = reclist.shape
        return MultiTimeSeries(reclist, dtype=mdescr)
    # No format, no dtype: create from the records themselves ......
    _data = mrecfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
                            names=names, titles=titles, aligned=aligned,
                            byteorder=byteorder)
    _dtype = _data.dtype
    # Check the names for a '_dates' .................
    newdates = None
    _names = list(_dtype.names)
    reserved = [n for n in _names if n.lower() in ["dates", "_dates"]]
    if len(reserved) > 0:
        newdates = _data[reserved[-1]]
        for n in reserved:
            _names.remove(n)
        _dtype = numeric.dtype([t for t in _dtype.descr if t[0] not in reserved])
        _data = [_data[n] for n in _names]
    #
    newdates = __getdates(dates=dates, newdates=newdates, length=len(_data),
                          freq=freq, start_date=start_date)
    #
    return MultiTimeSeries(_data, dates=newdates, dtype=_dtype, names=_names)
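The dtype trick in the middle of this function, rebuilding a structured dtype with some fields dropped, works on any structured array; a NumPy-only sketch with illustrative field names:

import numpy as np
import numpy.core.numeric as numeric

arr = np.zeros(3, dtype=[('_dates', 'i8'), ('price', 'f8'), ('qty', 'i4')])
reserved = ['_dates']
# keep the non-reserved fields, preserving their order and formats
trimmed = numeric.dtype([t for t in arr.dtype.descr if t[0] not in reserved])
print(trimmed)    # dtype([('price', '<f8'), ('qty', '<i4')])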
Example 7: __new__
def __new__(subtype, data, dtype=None, copy=True):
    warnings.warn('the matrix subclass is not the recommended way to '
                  'represent matrices or deal with linear algebra (see '
                  'https://docs.scipy.org/doc/numpy/user/'
                  'numpy-for-matlab-users.html). '
                  'Please adjust your code to use regular ndarray.',
                  PendingDeprecationWarning, stacklevel=2)
    if isinstance(data, matrix):
        dtype2 = data.dtype
        if (dtype is None):
            dtype = dtype2
        if (dtype2 == dtype) and (not copy):
            return data
        return data.astype(dtype)

    if isinstance(data, N.ndarray):
        if dtype is None:
            intype = data.dtype
        else:
            intype = N.dtype(dtype)
        new = data.view(subtype)
        if intype != data.dtype:
            return new.astype(intype)
        if copy: return new.copy()
        else: return new

    if isinstance(data, str):
        data = _convert_from_string(data)

    # now convert data to an array
    arr = N.array(data, dtype=dtype, copy=copy)
    ndim = arr.ndim
    shape = arr.shape
    if (ndim > 2):
        raise ValueError("matrix must be 2-dimensional")
    elif ndim == 0:
        shape = (1, 1)
    elif ndim == 1:
        shape = (1, shape[0])

    order = 'C'
    if (ndim == 2) and arr.flags.fortran:
        order = 'F'

    if not (order or arr.flags.contiguous):
        arr = arr.copy()

    ret = N.ndarray.__new__(subtype, shape, arr.dtype,
                            buffer=arr,
                            order=order)
    return ret
Example 8: test_upgrade
def test_upgrade(self):
    "Tests the upgrade method."
    converter = StringConverter()
    assert_equal(converter._status, 0)
    # test int
    assert_equal(converter.upgrade('0'), 0)
    assert_equal(converter._status, 1)
    # On systems where long defaults to 32-bit, the statuses will be
    # offset by one, so we check for this here.
    import numpy.core.numeric as nx
    status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)
    # test int > 2**32
    assert_equal(converter.upgrade('17179869184'), 17179869184)
    assert_equal(converter._status, 1 + status_offset)
    # test float
    assert_allclose(converter.upgrade('0.'), 0.0)
    assert_equal(converter._status, 2 + status_offset)
    # test complex
    assert_equal(converter.upgrade('0j'), complex('0j'))
    assert_equal(converter._status, 3 + status_offset)
    # test str
    # note that the longdouble type has been skipped, so the
    # _status increases by 2. Everything should succeed with
    # unicode conversion (5).
    for s in ['a', u'a', b'a']:
        res = converter.upgrade(s)
        assert_(type(res) is unicode)
        assert_equal(res, u'a')
        assert_equal(converter._status, 5 + status_offset)
Example 9: __new__
def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
            formats=None, names=None, titles=None,
            byteorder=None, aligned=False):
    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr
    if buf is None:
        self = ndarray.__new__(subtype, shape, (record, descr))
    else:
        self = ndarray.__new__(subtype, shape, (record, descr),
                               buffer=buf, offset=offset,
                               strides=strides)
    return self
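A short usage sketch of the same idea through the public API; np.recarray wraps the base descriptor in the record scalar type, so fields become attributes:

import numpy as np

descr = np.dtype('f8,i4,a5')
r = np.recarray((3,), dtype=descr)   # uninitialized 3-record array
r[0] = (0.5, 10, b'abcde')
print(r.f0[0], r.f1[0], r.f2[0])     # field access as attributes: 0.5 10 b'abcde'
print(r.dtype.type)                  # <class 'numpy.record'>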
Example 10: __new__
def __new__(subtype, data, dtype=None, copy=True):
    if isinstance(data, matrix):
        dtype2 = data.dtype
        if (dtype is None):
            dtype = dtype2
        if (dtype2 == dtype) and (not copy):
            return data
        return data.astype(dtype)

    if isinstance(data, N.ndarray):
        if dtype is None:
            intype = data.dtype
        else:
            intype = N.dtype(dtype)
        new = data.view(subtype)
        if intype != data.dtype:
            return new.astype(intype)
        if copy: return new.copy()
        else: return new

    if isinstance(data, str):
        data = _convert_from_string(data)

    # now convert data to an array
    arr = N.array(data, dtype=dtype, copy=copy)
    ndim = arr.ndim
    shape = arr.shape
    if (ndim > 2):
        raise ValueError("matrix must be 2-dimensional")
    elif ndim == 0:
        shape = (1, 1)
    elif ndim == 1:
        shape = (1, shape[0])

    order = False
    if (ndim == 2) and arr.flags.fortran:
        order = True

    if not (order or arr.flags.contiguous):
        arr = arr.copy()

    ret = N.ndarray.__new__(subtype, shape, arr.dtype,
                            buffer=arr,
                            order=order)
    return ret
Example 11: fromrecords
def fromrecords(recList, dtype=None, intNullVal=None):
    """This function was taken from np.core.records and updated to
    support conversion of null integers to intNullVal.
    """
    nfields = len(recList[0])
    shape = None
    descr = sb.dtype((np.core.records.record, dtype))
    try:
        retval = sb.array(recList, dtype=descr)
    except TypeError:  # list of lists instead of list of tuples
        shape = (len(recList),)
        _array = np.core.records.recarray(shape, descr)
        try:
            for k in range(_array.size):
                _array[k] = tuple(recList[k])
        except TypeError:
            convs = []
            ncols = len(dtype.fields)
            for _k in dtype.names:
                _v = dtype.fields[_k]
                if _v[0] in [np.int16, np.int32, np.int64]:
                    convs.append(lambda x: intNullVal if x is None else x)
                else:
                    convs.append(lambda x: x)
            convs = tuple(convs)
            convF = lambda x: [convs[_](x[_]) for _ in range(ncols)]
            # resume from the record that failed, substituting intNullVal for None
            for k in range(k, _array.size):
                try:
                    _array[k] = tuple(recList[k])
                except TypeError:
                    _array[k] = tuple(convF(recList[k]))
        return _array
    else:
        if shape is not None and retval.shape != shape:
            retval.shape = shape
    res = retval.view(np.core.records.recarray)
    return res
Example 12: fromstring
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
               names=None, titles=None, aligned=False, byteorder=None):
    """Create a (read-only) record array from binary data contained in
    a string."""
    if dtype is None and formats is None:
        raise ValueError("Must have dtype= or formats=")
    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr
    itemsize = descr.itemsize
    if (shape is None or shape == 0 or shape == -1):
        shape = (len(datastring) - offset) // itemsize
    _array = recarray(shape, descr, buf=datastring, offset=offset)
    return _array
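The shape inference here is just integer division of the buffer length by descr.itemsize; a small usage sketch through the public np.rec interface, with the sample buffer built in memory:

import numpy as np

descr = np.dtype('f8,i4')                  # packed itemsize: 12 bytes
buf = np.zeros(5, dtype=descr).tobytes()   # 60 bytes of sample data
n_records = len(buf) // descr.itemsize     # 5
rec = np.rec.fromstring(buf, dtype=descr, shape=n_records)
print(n_records, rec.shape)                # 5 (5,)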
Example 13: array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
          names=None, titles=None, aligned=False, byteorder=None, copy=True):
    """Construct a record array from a wide-variety of objects.
    """
    if isinstance(obj, (type(None), str, file)) and (formats is None) \
            and (dtype is None):
        raise ValueError("Must define formats (or dtype) if object is "
                         "None, string, or an open file")

    kwds = {}
    if dtype is not None:
        dtype = sb.dtype(dtype)
    elif formats is not None:
        dtype = format_parser(formats, names, titles,
                              aligned, byteorder)._descr
    else:
        kwds = {'formats': formats,
                'names': names,
                'titles': titles,
                'aligned': aligned,
                'byteorder': byteorder
                }

    if obj is None:
        if shape is None:
            raise ValueError("Must define a shape if obj is None")
        return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
    elif isinstance(obj, str):
        return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
    elif isinstance(obj, (list, tuple)):
        if isinstance(obj[0], (tuple, list)):
            return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
        else:
            return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
    elif isinstance(obj, recarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        return new
    elif isinstance(obj, file) or isinstance(obj, StringIO.StringIO):
        return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
    elif isinstance(obj, ndarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        res = new.view(recarray)
        if issubclass(res.dtype.type, nt.void):
            res.dtype = sb.dtype((record, res.dtype))
        return res
    else:
        interface = getattr(obj, "__array_interface__", None)
        if interface is None or not isinstance(interface, dict):
            raise ValueError("Unknown input type")
        obj = sb.array(obj)
        if dtype is not None and (obj.dtype != dtype):
            obj = obj.view(dtype)
        res = obj.view(recarray)
        if issubclass(res.dtype.type, nt.void):
            res.dtype = sb.dtype((record, res.dtype))
        return res
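A short usage sketch of the public wrapper, exercising two of the branches above (a list of tuples is dispatched to fromrecords; a structured ndarray is viewed as a recarray with its dtype wrapped in the record type):

import numpy as np

# list of tuples with an explicit dtype -> fromrecords branch
r1 = np.rec.array([(1, 2.0), (3, 4.0)], dtype=[('a', 'i4'), ('b', 'f8')])
print(r1.a, r1.b)                         # [1 3] [2. 4.]

# plain structured ndarray -> viewed (and copied) as a recarray
plain = np.zeros(2, dtype=[('a', 'i4'), ('b', 'f8')])
r2 = np.rec.array(plain)
print(type(r2).__name__, r2.dtype.type)   # recarray <class 'numpy.record'>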
Example 14: fromtextfile
def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
                 dates_column=None, varnames=None, vartypes=None, dates=None):
    """Creates a multitimeseries from data stored in the file `fname`.

    :Parameters:
        - `fname` : file name/handle
          Handle of an opened file.
        - `delimitor` : Character *[None]*
          Alphanumeric character used to separate columns in the file.
          If None, any (group of) white-space string(s) will be used.
        - `commentchar` : String *['#']*
          Alphanumeric character used to mark the start of a comment.
        - `missingchar` : String *['']*
          String indicating missing data, used to create the masks.
        - `dates_column` : Integer *[None]*
          Position of the column storing dates. If None, a position will be
          estimated from the variable names.
        - `varnames` : Sequence *[None]*
          Sequence of the variable names. If None, a list will be created from
          the first non-empty line of the file.
        - `vartypes` : Sequence *[None]*
          Sequence of the variable dtypes. If None, the sequence will be
          estimated from the first non-commented line.

    Ultra simple: the varnames are in the header, on one line."""
    # Try to open the file ......................
    f = openfile(fname)
    # Get the first non-empty line as the varnames
    while True:
        line = f.readline()
        firstline = line[:line.find(commentchar)].strip()
        _varnames = firstline.split(delimitor)
        if len(_varnames) > 1:
            break
    if varnames is None:
        varnames = _varnames
    # Get the data ..............................
    _variables = MA.asarray([line.strip().split(delimitor) for line in f
                             if line[0] != commentchar and len(line) > 1])
    (nvars, nfields) = _variables.shape
    # Check if we need to get the dates..........
    if dates_column is None:
        dates_column = [i for (i, n) in enumerate(list(varnames))
                        if n.lower() in ["_dates", "dates"]]
    elif isinstance(dates_column, (int, float)):
        if dates_column > nfields:
            raise ValueError("Invalid column number: %i > %i" % (dates_column, nfields))
        dates_column = [dates_column]
    if len(dates_column) > 0:
        cols = list(range(nfields))
        for i in dates_column:
            cols.remove(i)
        newdates = date_array(_variables[:, dates_column[-1]])
        _variables = _variables[:, cols]
        varnames = [varnames[i] for i in cols]
        if vartypes is not None:
            vartypes = [vartypes[i] for i in cols]
        nfields -= len(dates_column)
    else:
        newdates = None
    # Try to guess the dtype ....................
    if vartypes is None:
        vartypes = _guessvartypes(_variables[0])
    else:
        vartypes = [numeric.dtype(v) for v in vartypes]
        if len(vartypes) != nfields:
            msg = "Attempting to set %i dtypes for %i fields!"
            msg += " Reverting to default."
            warnings.warn(msg % (len(vartypes), nfields))
            vartypes = _guessvartypes(_variables[0])
    # Construct the descriptor ..................
    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
    # Get the data and the mask .................
    # We just need a list of masked_arrays; it is easier to create it that way:
    _mask = (_variables.T == missingchar)
    _datalist = [masked_array(a, mask=m, dtype=t)
                 for (a, m, t) in zip(_variables.T, _mask, vartypes)]
    #
    newdates = __getdates(dates=dates, newdates=newdates, length=nvars,
                          freq=None, start_date=None)
    return MultiTimeSeries(_datalist, dates=newdates, dtype=mdescr)
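The explicit-vartypes branch simply runs each entry through numeric.dtype before building the descriptor; a minimal sketch of that conversion with illustrative column names:

import numpy.core.numeric as numeric

vartypes = [numeric.dtype(v) for v in ('i4', 'f8', 'S10')]
varnames = ['year', 'value', 'station']
mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
print(mdescr)     # [('year', dtype('int32')), ('value', dtype('float64')), ('station', dtype('S10'))]
print(numeric.dtype(mdescr).itemsize)   # 4 + 8 + 10 = 22 bytes per record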
Example 15: fromfile
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
             names=None, titles=None, aligned=False, byteorder=None):
    """Create an array from binary file data.

    If file is a string then that file is opened, else it is assumed
    to be a file object.

    >>> from tempfile import TemporaryFile
    >>> a = N.empty(10, dtype='f8,i4,a5')
    >>> a[5] = (0.5, 10, 'abcde')
    >>>
    >>> fd = TemporaryFile()
    >>> a = a.newbyteorder('<')
    >>> a.tofile(fd)
    >>>
    >>> fd.seek(0)
    >>> r = fromfile(fd, formats='f8,i4,a5', shape=10, byteorder='<')
    >>> print r[5]
    (0.5, 10, 'abcde')
    >>> r.shape
    (10,)
    """
    if (shape is None or shape == 0):
        shape = (-1,)
    elif isinstance(shape, (int, long)):
        shape = (shape,)
    name = 0
    if isinstance(fd, str):
        name = 1
        fd = open(fd, 'rb')
    if (offset > 0):
        fd.seek(offset, 1)
    size = get_remaining_size(fd)
    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr
    itemsize = descr.itemsize
    shapeprod = sb.array(shape).prod()
    shapesize = shapeprod * itemsize
    if shapesize < 0:
        shape = list(shape)
        shape[shape.index(-1)] = size // -shapesize
        shape = tuple(shape)
        shapeprod = sb.array(shape).prod()
    nbytes = shapeprod * itemsize
    if nbytes > size:
        raise ValueError(
            "Not enough bytes left in file for specified shape and type")
    # create the array
    if isinstance(fd, file):
        arr = np.fromfile(fd, dtype=descr, count=shape[0])
    else:
        read_size = np.dtype(descr).itemsize * shape[0]
        st = fd.read(read_size)
        arr = np.fromstring(st, dtype=descr, count=shape[0])
    # TODO: there was a problem with large arrays that is not fully understood,
    # but reading through a view is more efficient anyway
    # _array = recarray(shape, descr, arr.data)
    _array = arr.view(recarray)
    if name:
        fd.close()
    return _array
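The -1 entry in shape is resolved by dividing the remaining file size by descr.itemsize; a usage sketch with the public np.rec.fromfile, writing a small temporary file first so the record count can be inferred:

import numpy as np
from tempfile import TemporaryFile

a = np.zeros(10, dtype='f8,i4,a5')
a[5] = (0.5, 10, b'abcde')

fd = TemporaryFile()
a.tofile(fd)
fd.seek(0)

# shape=-1 asks the reader to derive the record count:
# 170 remaining bytes // 17-byte itemsize == 10 records
r = np.rec.fromfile(fd, dtype='f8,i4,a5', shape=-1)
print(r.shape, r[5])    # (10,) (0.5, 10, b'abcde')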