This article collects typical usage examples of the numpy.compat.asbytes function in Python. If you have been wondering what exactly asbytes does, how to use it, or where to see it in real code, the hand-picked function examples below should help.
Fifteen code examples of asbytes are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
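For orientation: on the NumPy versions these snippets target, numpy.compat.asbytes returns bytes input unchanged and encodes str to bytes (latin-1 on Python 3; on Python 2 it is simply an alias for str), making it the byte-string counterpart of asstr. A two-line illustration:

from numpy.compat import asbytes

asbytes('TRUE')    # b'TRUE'
asbytes(b'TRUE')   # bytes input is returned unchanged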
Example 1: str2bool
def str2bool(value):
"""
Tries to transform a string that is supposed to represent a boolean into a boolean.
Parameters
----------
value : str
The string that is transformed to a boolean.
Returns
-------
boolval : bool
The boolean representation of `value`.
Raises
------
ValueError
If the string is not 'True' or 'False' (case-insensitive).
Examples
--------
>>> np.lib._iotools.str2bool('TRUE')
True
>>> np.lib._iotools.str2bool('false')
False
"""
value = value.upper()
if value == asbytes('TRUE'):
return True
elif value == asbytes('FALSE'):
return False
else:
raise ValueError("Invalid boolean")
Example 2: nc_generator
def nc_generator(ncfile, input):
""" an iteration-based approach to nc_streamer above
input should be an iterable of numpy arrays with which to
fill the netcdf file
Examples
--------
>>> from itertools import chain
>>> import numpy
>>> nc = netcdf_file(None)
>>> # add attributes, dimensions and variables to the netcdf_file object
>>> def input():
>>> yield numpy.random(100, 100)
>>> def more_input():
>>> yield numpy.arange(10000).reshape(100, 100)
>>> pipeline = nc_generator(nc, chain(input, more_input))
>>> f = open('foo.nc', 'w')
>>> for block in pipeline:
>>> f.write(block)
"""
input = check_byteorder(input)
assert type(ncfile) == netcdf_file
count = 0
ncfile._calc_begins()
yield ncfile._header()
count += len(ncfile._header())
try:
if ncfile.variables and ncfile.non_recvars:
for name, var in ncfile.non_recvars.items():
end = var._begin + var._vsize if var.dimensions else var._begin
while count < end:
data = next(input)
bytes = data.tostring()
count += len(bytes)
# padding
if end - count < data.itemsize:
bytes += asbytes("0") * (end - count)
count = end
yield bytes
# Record variables... keep taking data until it stops coming (i.e. a StopIteration is raised)
if ncfile.variables and ncfile.recvars:
while True:
vars = ncfile.recvars.values()
while True:
for var in vars:
data = next(input)
bytes = data.tostring()
yield bytes
padding = len(bytes) % 4
if padding:
yield asbytes("0") * padding
count += len(bytes) + padding
except StopIteration:
pass
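For intuition, the classic netCDF format aligns each variable's data to 4-byte boundaries, which is what the b'0' padding above is for. A standalone sketch of rounding a chunk up to the next 4-byte boundary (an illustrative helper, not part of the generator):

from numpy.compat import asbytes

def pad_to_4(chunk):
    # pad a byte string with b'0' up to the next multiple of four bytes
    padding = -len(chunk) % 4
    return chunk + asbytes("0") * padding

pad_to_4(b"abcde")   # b'abcde000' (8 bytes)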
Example 3: test_space_delimiter
def test_space_delimiter(self):
"Test space delimiter"
strg = asbytes(" 1 2 3 4 5 # test")
test = LineSplitter(asbytes(" "))(strg)
assert_equal(test, asbytes_nested(["1", "2", "3", "4", "", "5"]))
test = LineSplitter(asbytes(" "))(strg)
assert_equal(test, asbytes_nested(["1 2 3 4", "5"]))
Example 4: test_bad_header
def test_bad_header():
# header of length less than 2 should fail
s = BytesIO()
assert_raises(ValueError, format.read_array_header_1_0, s)
s = BytesIO(asbytes('1'))
assert_raises(ValueError, format.read_array_header_1_0, s)
# header shorter than indicated size should fail
s = BytesIO(asbytes('\x01\x00'))
assert_raises(ValueError, format.read_array_header_1_0, s)
# headers without the exact keys required should fail
d = {"shape": (1, 2),
"descr": "x"}
s = BytesIO()
format.write_array_header_1_0(s, d)
assert_raises(ValueError, format.read_array_header_1_0, s)
d = {"shape": (1, 2),
"fortran_order": False,
"descr": "x",
"extrakey": -1}
s = BytesIO()
format.write_array_header_1_0(s, d)
assert_raises(ValueError, format.read_array_header_1_0, s)
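For contrast, a header dict with exactly the required keys ('shape', 'fortran_order', 'descr') round-trips cleanly; a small sketch against the same numpy.lib.format functions:

from io import BytesIO
from numpy.lib import format

s = BytesIO()
format.write_array_header_1_0(
    s, {'shape': (1, 2), 'fortran_order': False, 'descr': '<f8'})
s.seek(0)
shape, fortran_order, dtype = format.read_array_header_1_0(s)
# shape == (1, 2), fortran_order is False, dtype == dtype('float64')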
Example 5: _write_var_data
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tostring())
count = var.data.size * var.data.itemsize
self.fp.write(asbytes('0') * (var._vsize - count))
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
var.data.resize(shape)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tostring())
# Padding
count = rec.size * rec.itemsize
self.fp.write(asbytes('0') * (var._vsize - count))
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
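The non-record branch above writes the raw array bytes and then pads with b'0' characters (not NULs) up to the variable's allocated _vsize; a rough standalone sketch of that step (pack_var is an illustrative helper, not a method of the class):

import numpy as np
from numpy.compat import asbytes

def pack_var(data, vsize):
    # illustrative only: raw array bytes followed by b'0' padding to vsize
    payload = data.tostring()
    return payload + asbytes('0') * (vsize - len(payload))

len(pack_var(np.arange(3, dtype='>i4'), 16))   # 16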
Example 6: gen_for_simple
def gen_for_simple(ncfileobj):
''' Generator for example fileobj tests '''
yield assert_equal, ncfileobj.history, asbytes('Created for a test')
time = ncfileobj.variables['time']
yield assert_equal, time.units, asbytes('days since 2008-01-01')
yield assert_equal, time.shape, (N_EG_ELS,)
yield assert_equal, time[-1], N_EG_ELS-1
Example 7: test_method_array
def test_method_array(self):
r = np.rec.array(
asbytes('abcdefg') * 100,
formats='i2,a3,i4',
shape=3,
byteorder='big')
assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924))
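The expected tuple can be checked by hand: with formats 'i2,a3,i4' each record is 2 + 3 + 4 = 9 bytes, so record 1 starts at offset 9 of the repeated b'abcdefg' buffer and is decoded big-endian. A quick verification with the standard struct module:

import struct

buf = b'abcdefg' * 100
rec = buf[9:18]                      # record 1 (records are 9 bytes each)
struct.unpack('>h', rec[:2])[0]      # 25444, i.e. b'cd' as a big-endian int16
rec[2:5]                             # b'efg'
struct.unpack('>i', rec[5:9])[0]     # 1633837924, i.e. b'abcd' as a big-endian int32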
Example 8: writeout
def writeout(self, f=None):
"""write all the dump items and the summary out to file(s)
Parameters
----------
f : filename or filehandle
If specified, then all summary and object data will go in one file.
If None is specified, then type-specific files will be generated
in the dump_dir.
If a filehandle is specified, it must be a byte-mode file,
as numpy.savetxt is used and requires this.
"""
fall = None
# If specific file given then write everything to it
if hasattr(f, 'write'):
if 'b' not in f.mode:
raise RuntimeError("File stream must be in binary mode")
# write all to this stream
fall = f
fs = f
closefall = False
closefs = False
elif f:
# Assume f is a filename
fall = open(f, 'wb')
fs = fall
closefs = False
closefall = True
else:
self.create_dump_dir()
closefall = False
if self.dump_summary:
fs = open(self.summary_file, 'wb')
closefs = True
if self.dump_summary:
for ecs in self.evo_summary:
if ecs.idx == 0:
fs.write(asbytes("{}\n{}\n".format(
ecs.get_header_line(self.summary_sep),
ecs.get_value_line(self.summary_sep))))
else:
fs.write(asbytes("{}\n".format(
ecs.get_value_line(self.summary_sep))))
if closefs:
fs.close()
logger.info("Dynamics dump summary saved to {}".format(
self.summary_file))
for di in self.evo_dumps:
di.writeout(fall)
if closefall:
fall.close()
logger.info("Dynamics dump saved to {}".format(f))
else:
if fall:
logger.info("Dynamics dump saved to specified stream")
else:
logger.info("Dynamics dump saved to {}".format(self.dump_dir))
Example 9: test_int64_dtype
def test_int64_dtype(self):
"Check that int64 integer types can be specified"
converter = StringConverter(np.int64, default=0)
val = asbytes("-9223372036854775807")
assert_(converter(val) == -9223372036854775807)
val = asbytes("9223372036854775807")
assert_(converter(val) == 9223372036854775807)
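StringConverter is another private helper from np.lib._iotools; a short sketch of the same call pattern with a float converter and a default for missing values (behaviour as I understand it on the NumPy versions these examples target):

from numpy.compat import asbytes
from numpy.lib._iotools import StringConverter

conv = StringConverter(float, default=-999.0)
conv(asbytes("3.25"))   # 3.25
conv(asbytes(""))       # -999.0, the default is returned for a missing value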
Example 10: gen_for_simple
def gen_for_simple(ncfileobj):
""" Generator for example fileobj tests """
yield assert_equal, ncfileobj.history, asbytes("Created for a test")
time = ncfileobj.variables["time"]
yield assert_equal, time.units, asbytes("days since 2008-01-01")
yield assert_equal, time.shape, (N_EG_ELS,)
yield assert_equal, time[-1], N_EG_ELS - 1
Example 11: test_space_delimiter
def test_space_delimiter(self):
"Test space delimiter"
strg = asbytes(" 1 2 3 4 5 # test")
test = LineSplitter(asbytes(' '))(strg)
assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5']))
test = LineSplitter(asbytes('  '))(strg)
assert_equal(test, asbytes_nested(['1 2 3 4', '5']))
Example 12: test_recarray_returntypes
def test_recarray_returntypes(self):
qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
a = np.rec.array([('abc ', (1,1), 1, ('abcde', 'fgehi')),
('abc', (2,3), 1, ('abcde', 'jklmn'))],
dtype=[('foo', 'S4'),
('bar', [('A', int), ('B', int)]),
('baz', int), ('qux', qux_fields)])
assert_equal(type(a.foo), np.ndarray)
assert_equal(type(a['foo']), np.ndarray)
assert_equal(type(a.bar), np.recarray)
assert_equal(type(a['bar']), np.recarray)
assert_equal(a.bar.dtype.type, np.record)
assert_equal(type(a['qux']), np.recarray)
assert_equal(a.qux.dtype.type, np.record)
assert_equal(dict(a.qux.dtype.fields), qux_fields)
assert_equal(type(a.baz), np.ndarray)
assert_equal(type(a['baz']), np.ndarray)
assert_equal(type(a[0].bar), np.record)
assert_equal(type(a[0]['bar']), np.record)
assert_equal(a[0].bar.A, 1)
assert_equal(a[0].bar['A'], 1)
assert_equal(a[0]['bar'].A, 1)
assert_equal(a[0]['bar']['A'], 1)
assert_equal(a[0].qux.D, asbytes('fgehi'))
assert_equal(a[0].qux['D'], asbytes('fgehi'))
assert_equal(a[0]['qux'].D, asbytes('fgehi'))
assert_equal(a[0]['qux']['D'], asbytes('fgehi'))
Example 13: save_nparray_to_hdfs
def save_nparray_to_hdfs(fname, X, hdfs):
'''
An adaptation of numpy's savetxt function to enable saving numpy
arrays to HDFS as text files
'''
fmt = '%.18e'
delimiter = ' '
newline = '\n'
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
X = np.asarray(X)
if X.ndim == 1:
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
n_fmt_chars = fmt.count('%')
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
first = True
for row in X:
if first:
hdfs.create_file(fname, asbytes(format % tuple(row) + newline), overwrite=True)
first = False
else:
hdfs.appendfile(fname, asbytes(format % tuple(row) + newline))
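A usage sketch, assuming an HDFS client object exposing the create_file/appendfile methods used above (the client and target path here are hypothetical):

import numpy as np

X = np.arange(6.0).reshape(3, 2)
# hdfs = ...  # hypothetical client providing create_file() and appendfile()
save_nparray_to_hdfs('/tmp/demo.txt', X, hdfs)
# writes three lines of '%.18e'-formatted, space-delimited text, one row per line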
Example 14: test_tab_delimiter
def test_tab_delimiter(self):
"Test tab delimiter"
strg = asbytes(" 1\t 2\t 3\t 4\t 5 6")
test = LineSplitter(asbytes('\t'))(strg)
assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5 6']))
strg = asbytes(" 1 2\t 3 4\t 5 6")
test = LineSplitter(asbytes('\t'))(strg)
assert_equal(test, asbytes_nested(['1 2', '3 4', '5 6']))
Example 15: test_variable_fixed_width
def test_variable_fixed_width(self):
strg = asbytes(" 1 3 4 5 6# test")
test = LineSplitter((3, 6, 6, 3))(strg)
assert_equal(test, asbytes_nested(['1', '3', '4 5', '6']))
#
strg = asbytes(" 1 3 4 5 6# test")
test = LineSplitter((6, 6, 9))(strg)
assert_equal(test, asbytes_nested(['1', '3 4', '5 6']))
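For completeness, LineSplitter also accepts a single integer, which slices the line into equal fixed-width fields; a small sketch with the same private helper:

from numpy.compat import asbytes
from numpy.lib._iotools import LineSplitter

split = LineSplitter(3)              # every field is 3 characters wide
split(asbytes("  1  2  3  4"))       # [b'1', b'2', b'3', b'4']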