This article collects typical usage examples of the pyarrow.float32 function in Python. If you are wondering what exactly the float32 function does, how to call it, or what real-world uses look like, the curated examples here should help.
The following 15 code examples of the float32 function are shown, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code samples.
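Before the examples, a quick orientation: pa.float32() returns Arrow's single-precision floating-point DataType, which you can pass anywhere a type is expected (array construction, fields, schemas, casts). The snippets below are excerpts from larger test suites and projects, so they assume imports such as import pyarrow as pa, import numpy as np, import pandas as pd, and import pytest. The following minimal sketch (not taken from the examples) shows the basic calls:

# Minimal sketch of pa.float32() usage; not part of the examples below.
import pyarrow as pa

ty = pa.float32()                           # DataType for 32-bit floats
arr = pa.array([1.5, 2.5, None], type=ty)   # float32 array with a null
assert arr.type == pa.float32()
assert arr.to_pylist() == [1.5, 2.5, None]

field = pa.field('score', pa.float32())     # use the type in a field/schema
schema = pa.schema([field])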
Example 1: test_struct_array_field
def test_struct_array_field():
    ty = pa.struct([pa.field('x', pa.int16()),
                    pa.field('y', pa.float32())])
    a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)

    x0 = a.field(0)
    y0 = a.field(1)
    x1 = a.field(-2)
    y1 = a.field(-1)
    x2 = a.field('x')
    y2 = a.field('y')

    assert isinstance(x0, pa.lib.Int16Array)
    assert isinstance(y1, pa.lib.FloatArray)
    assert x0.equals(pa.array([1, 3, 5], type=pa.int16()))
    assert y0.equals(pa.array([2.5, 4.5, 6.5], type=pa.float32()))
    assert x0.equals(x1)
    assert x0.equals(x2)
    assert y0.equals(y1)
    assert y0.equals(y2)

    for invalid_index in [None, pa.int16()]:
        with pytest.raises(TypeError):
            a.field(invalid_index)

    for invalid_index in [3, -3]:
        with pytest.raises(IndexError):
            a.field(invalid_index)

    for invalid_name in ['z', '']:
        with pytest.raises(KeyError):
            a.field(invalid_name)
Example 2: test_convert_options
def test_convert_options():
    cls = ConvertOptions
    opts = cls()

    assert opts.check_utf8 is True
    opts.check_utf8 = False
    assert opts.check_utf8 is False

    assert opts.strings_can_be_null is False
    opts.strings_can_be_null = True
    assert opts.strings_can_be_null is True

    assert opts.column_types == {}
    # Pass column_types as mapping
    opts.column_types = {'b': pa.int16(), 'c': pa.float32()}
    assert opts.column_types == {'b': pa.int16(), 'c': pa.float32()}
    opts.column_types = {'v': 'int16', 'w': 'null'}
    assert opts.column_types == {'v': pa.int16(), 'w': pa.null()}
    # Pass column_types as schema
    schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
    opts.column_types = schema
    assert opts.column_types == {'a': pa.int32(), 'b': pa.string()}
    # Pass column_types as sequence
    opts.column_types = [('x', pa.binary())]
    assert opts.column_types == {'x': pa.binary()}
    with pytest.raises(TypeError, match='DataType expected'):
        opts.column_types = {'a': None}
    with pytest.raises(TypeError):
        opts.column_types = 0

    assert isinstance(opts.null_values, list)
    assert '' in opts.null_values
    assert 'N/A' in opts.null_values
    opts.null_values = ['xxx', 'yyy']
    assert opts.null_values == ['xxx', 'yyy']

    assert isinstance(opts.true_values, list)
    opts.true_values = ['xxx', 'yyy']
    assert opts.true_values == ['xxx', 'yyy']

    assert isinstance(opts.false_values, list)
    opts.false_values = ['xxx', 'yyy']
    assert opts.false_values == ['xxx', 'yyy']

    opts = cls(check_utf8=False, column_types={'a': pa.null()},
               null_values=['N', 'nn'], true_values=['T', 'tt'],
               false_values=['F', 'ff'], strings_can_be_null=True)
    assert opts.check_utf8 is False
    assert opts.column_types == {'a': pa.null()}
    assert opts.null_values == ['N', 'nn']
    assert opts.false_values == ['F', 'ff']
    assert opts.true_values == ['T', 'tt']
    assert opts.strings_can_be_null is True
Example 3: test_dictionary_type
def test_dictionary_type():
    ty0 = pa.dictionary(pa.int32(), pa.array(['a', 'b', 'c']))
    assert ty0.index_type == pa.int32()
    assert isinstance(ty0.dictionary, pa.Array)
    assert ty0.dictionary.to_pylist() == ['a', 'b', 'c']
    assert ty0.ordered is False

    ty1 = pa.dictionary(pa.float32(), pa.array([1.0, 2.0]), ordered=True)
    assert ty1.index_type == pa.float32()
    assert isinstance(ty1.dictionary, pa.Array)
    assert ty1.dictionary.to_pylist() == [1.0, 2.0]
    assert ty1.ordered is True
Example 4: test_column_flatten
def test_column_flatten():
    ty = pa.struct([pa.field('x', pa.int16()),
                    pa.field('y', pa.float32())])
    a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
    col = pa.Column.from_array('foo', a)
    x, y = col.flatten()
    assert x == pa.column('foo.x', pa.array([1, 3, 5], type=pa.int16()))
    assert y == pa.column('foo.y', pa.array([2.5, 4.5, 6.5],
                                            type=pa.float32()))

    # Empty column
    a = pa.array([], type=ty)
    col = pa.Column.from_array('foo', a)
    x, y = col.flatten()
    assert x == pa.column('foo.x', pa.array([], type=pa.int16()))
    assert y == pa.column('foo.y', pa.array([], type=pa.float32()))
Example 5: test_float32_integer_coerce_representable_range
def test_float32_integer_coerce_representable_range():
    f32 = np.float32
    valid_values = [f32(1.5), 1 << 24, -(1 << 24)]
    invalid_values = [f32(1.5), (1 << 24) + 1]
    invalid_values2 = [f32(1.5), -((1 << 24) + 1)]

    # it works
    pa.array(valid_values, type=pa.float32())

    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values, type=pa.float32())

    with pytest.raises(ValueError):
        pa.array(invalid_values2, type=pa.float32())
Example 6: test_struct_array_slice
def test_struct_array_slice():
    # ARROW-2311: slicing nested arrays needs special care
    ty = pa.struct([pa.field('a', pa.int8()),
                    pa.field('b', pa.float32())])
    arr = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
    assert arr[1:].to_pylist() == [{'a': 3, 'b': 4.5},
                                   {'a': 5, 'b': 6.5}]
Example 7: test_empty_cast
def test_empty_cast():
    types = [
        pa.null(),
        pa.bool_(),
        pa.int8(),
        pa.int16(),
        pa.int32(),
        pa.int64(),
        pa.uint8(),
        pa.uint16(),
        pa.uint32(),
        pa.uint64(),
        pa.float16(),
        pa.float32(),
        pa.float64(),
        pa.date32(),
        pa.date64(),
        pa.binary(),
        pa.binary(length=4),
        pa.string(),
    ]

    for (t1, t2) in itertools.product(types, types):
        try:
            # ARROW-4766: Ensure that supported types conversion don't
            # segfault on empty arrays of common types
            pa.array([], type=t1).cast(t2)
        except pa.lib.ArrowNotImplementedError:
            continue
Example 8: test_float_nulls
def test_float_nulls(self):
    num_values = 100

    null_mask = np.random.randint(0, 10, size=num_values) < 3
    dtypes = [('f4', pa.float32()), ('f8', pa.float64())]
    names = ['f4', 'f8']
    expected_cols = []

    arrays = []
    fields = []
    for name, arrow_dtype in dtypes:
        values = np.random.randn(num_values).astype(name)

        arr = pa.array(values, from_pandas=True, mask=null_mask)
        arrays.append(arr)
        fields.append(pa.field(name, arrow_dtype))
        values[null_mask] = np.nan

        expected_cols.append(values)

    ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
                            columns=names)

    table = pa.Table.from_arrays(arrays, names)
    assert table.schema.equals(pa.schema(fields))
    result = table.to_pandas()
    tm.assert_frame_equal(result, ex_frame)
Example 9: test_type_to_pandas_dtype
def test_type_to_pandas_dtype():
    M8_ns = np.dtype('datetime64[ns]')
    cases = [
        (pa.null(), np.float64),
        (pa.bool_(), np.bool_),
        (pa.int8(), np.int8),
        (pa.int16(), np.int16),
        (pa.int32(), np.int32),
        (pa.int64(), np.int64),
        (pa.uint8(), np.uint8),
        (pa.uint16(), np.uint16),
        (pa.uint32(), np.uint32),
        (pa.uint64(), np.uint64),
        (pa.float16(), np.float16),
        (pa.float32(), np.float32),
        (pa.float64(), np.float64),
        (pa.date32(), M8_ns),
        (pa.date64(), M8_ns),
        (pa.timestamp('ms'), M8_ns),
        (pa.binary(), np.object_),
        (pa.binary(12), np.object_),
        (pa.string(), np.object_),
        (pa.list_(pa.int8()), np.object_),
    ]
    for arrow_type, numpy_type in cases:
        assert arrow_type.to_pandas_dtype() == numpy_type
Example 10: test_orcfile_empty
def test_orcfile_empty():
    from pyarrow import orc

    f = orc.ORCFile(path_for_orc_example('TestOrcFile.emptyFile'))
    table = f.read()
    assert table.num_rows == 0

    schema = table.schema
    expected_schema = pa.schema([
        ('boolean1', pa.bool_()),
        ('byte1', pa.int8()),
        ('short1', pa.int16()),
        ('int1', pa.int32()),
        ('long1', pa.int64()),
        ('float1', pa.float32()),
        ('double1', pa.float64()),
        ('bytes1', pa.binary()),
        ('string1', pa.string()),
        ('middle', pa.struct([
            ('list', pa.list_(pa.struct([
                ('int1', pa.int32()),
                ('string1', pa.string()),
            ]))),
        ])),
        ('list', pa.list_(pa.struct([
            ('int1', pa.int32()),
            ('string1', pa.string()),
        ]))),
        ('map', pa.list_(pa.struct([
            ('key', pa.string()),
            ('value', pa.struct([
                ('int1', pa.int32()),
                ('string1', pa.string()),
            ])),
        ]))),
    ])
    assert schema == expected_schema
Example 11: test_is_integer
def test_is_integer():
    signed_ints = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
    unsigned_ints = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]

    for t in signed_ints + unsigned_ints:
        assert types.is_integer(t)

    for t in signed_ints:
        assert types.is_signed_integer(t)
        assert not types.is_unsigned_integer(t)

    for t in unsigned_ints:
        assert types.is_unsigned_integer(t)
        assert not types.is_signed_integer(t)

    assert not types.is_integer(pa.float32())
    assert not types.is_signed_integer(pa.float32())
Example 12: test_table_flatten
def test_table_flatten():
    ty1 = pa.struct([pa.field('x', pa.int16()),
                     pa.field('y', pa.float32())])
    ty2 = pa.struct([pa.field('nest', ty1)])
    a = pa.array([(1, 2.5), (3, 4.5)], type=ty1)
    b = pa.array([((11, 12.5),), ((13, 14.5),)], type=ty2)
    c = pa.array([False, True], type=pa.bool_())

    table = pa.Table.from_arrays([a, b, c], names=['a', 'b', 'c'])
    t2 = table.flatten()
    t2._validate()
    expected = pa.Table.from_arrays([
        pa.array([1, 3], type=pa.int16()),
        pa.array([2.5, 4.5], type=pa.float32()),
        pa.array([(11, 12.5), (13, 14.5)], type=ty1),
        c],
        names=['a.x', 'a.y', 'b.nest', 'c'])
    assert t2.equals(expected)
Example 13: test_mixed_sequence_errors
def test_mixed_sequence_errors():
    with pytest.raises(ValueError, match="tried to convert to boolean"):
        pa.array([True, 'foo'], type=pa.bool_())

    with pytest.raises(ValueError, match="tried to convert to float32"):
        pa.array([1.5, 'foo'], type=pa.float32())

    with pytest.raises(ValueError, match="tried to convert to double"):
        pa.array([1.5, 'foo'])
Example 14: dataframe_with_arrays
def dataframe_with_arrays(include_index=False):
    """
    Dataframe with columns of numpy arrays covering every possible primitive
    type.

    Returns
    -------
    df: pandas.DataFrame
    schema: pyarrow.Schema
        Arrow schema definition that is in line with the constructed df.
    """
    dtypes = [('i1', pa.int8()), ('i2', pa.int16()),
              ('i4', pa.int32()), ('i8', pa.int64()),
              ('u1', pa.uint8()), ('u2', pa.uint16()),
              ('u4', pa.uint32()), ('u8', pa.uint64()),
              ('f4', pa.float32()), ('f8', pa.float64())]

    arrays = OrderedDict()
    fields = []
    for dtype, arrow_dtype in dtypes:
        fields.append(pa.field(dtype, pa.list_(arrow_dtype)))
        arrays[dtype] = [
            np.arange(10, dtype=dtype),
            np.arange(5, dtype=dtype),
            None,
            np.arange(1, dtype=dtype)
        ]

    fields.append(pa.field('str', pa.list_(pa.string())))
    arrays['str'] = [
        np.array([u"1", u"ä"], dtype="object"),
        None,
        np.array([u"1"], dtype="object"),
        np.array([u"1", u"2", u"3"], dtype="object")
    ]

    fields.append(pa.field('datetime64', pa.list_(pa.timestamp('ms'))))
    arrays['datetime64'] = [
        np.array(['2007-07-13T01:23:34.123456789',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
        None,
        None,
        np.array(['2007-07-13T02',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
    ]

    if include_index:
        fields.append(pa.field('__index_level_0__', pa.int64()))

    df = pd.DataFrame(arrays)
    schema = pa.schema(fields)

    return df, schema
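The helper above builds a DataFrame whose columns hold numpy arrays (including an 'f4' column declared as pa.list_(pa.float32())) together with a matching Arrow schema. One possible way to consume it, sketched here with illustrative names and assuming a pyarrow version whose Table.from_pandas accepts a schema argument:

# Hypothetical usage of the helper above; not part of the original example.
df, schema = dataframe_with_arrays()
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
print(table.schema)   # the 'f4' column should appear as a list of float32 values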
Example 15: json_to_parquet
def json_to_parquet(data, output, schema):
    column_data = {}
    array_data = []

    for row in data:
        for column in schema.names:
            _col = column_data.get(column, [])
            _col.append(row.get(column))
            column_data[column] = _col

    for column in schema:
        _col = column_data.get(column.name)
        if isinstance(column.type, pa.lib.TimestampType):
            _converted_col = []
            for t in _col:
                try:
                    _converted_col.append(pd.to_datetime(t))
                except pd._libs.tslib.OutOfBoundsDatetime:
                    _converted_col.append(pd.Timestamp.max)
            array_data.append(pa.Array.from_pandas(pd.to_datetime(_converted_col),
                                                   type=pa.timestamp('ms')))
        # Float types are ambiguous for conversions, need to specify the exact type
        elif column.type.id == pa.float64().id:
            array_data.append(pa.array(_col, type=pa.float64()))
        elif column.type.id == pa.float32().id:
            # Python doesn't have a native float32 type
            # and PyArrow cannot cast float64 -> float32
            _col = pd.to_numeric(_col, downcast='float')
            array_data.append(pa.Array.from_pandas(_col, type=pa.float32()))
        elif column.type.id == pa.int64().id:
            array_data.append(pa.array([int(ele) for ele in _col],
                                       type=pa.int64()))
        else:
            array_data.append(pa.array(_col, type=column.type))

    data = pa.RecordBatch.from_arrays(array_data, schema.names)
    try:
        table = pa.Table.from_batches(data)
    except TypeError:
        table = pa.Table.from_batches([data])

    pq.write_table(table, output, compression='SNAPPY', coerce_timestamps='ms')
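For context, a sketch of how this converter might be invoked; the record contents, field names, and output path are made up for illustration, and the function itself assumes its module also does import pandas as pd and import pyarrow.parquet as pq:

# Hypothetical invocation; data, schema, and output path are illustrative only.
records = [
    {'price': 9.99, 'qty': 3, 'ts': '2019-01-01T00:00:00'},
    {'price': 1.25, 'qty': 7, 'ts': '2019-06-30T12:34:56'},
]
schema = pa.schema([
    ('price', pa.float32()),       # routed through the float32 branch above
    ('qty', pa.int64()),
    ('ts', pa.timestamp('ms')),
])
json_to_parquet(records, 'records.parquet', schema)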