This article collects typical usage examples of the Python method pyarrow.float32. If you are wondering what exactly pyarrow.float32 does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore further usage examples from the pyarrow module that this method belongs to.
Fifteen code examples of pyarrow.float32 are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
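Before the examples, here is a minimal self-contained sketch of what pyarrow.float32() actually returns and how it is typically used, assuming only a standard pyarrow installation:

import pyarrow as pa

# pa.float32() returns a DataType object describing 32-bit IEEE-754 floats.
t = pa.float32()
print(t)            # float
print(t.bit_width)  # 32

# The type object is passed wherever Arrow expects a type, e.g. when
# building an array or casting an existing one (inference defaults to float64).
arr = pa.array([1.5, 2.5, None], type=pa.float32())
print(arr.type == pa.float32())                       # True
print(pa.array([1.0, 2.0]).cast(pa.float32()).type)   # float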
Example 1: test_load_empty_table_arrow
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def test_load_empty_table_arrow(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = [(1, 1.1, 'a'), (2, 2.2, '2'), (3, 3.3, '3')]
df = pd.DataFrame(data, columns=list('abc')).astype(
{'a': 'int32', 'b': 'float32'}
)
table = pa.Table.from_pandas(df, preserve_index=False)
con.load_table("baz", table, method='arrow')
result = sorted(con.execute("select * from baz"))
self.check_empty_insert(result, data)
con.execute("drop table if exists baz;")
Example 2: array_chunked_nulls
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def array_chunked_nulls(request):
case_dict = {
"all": pa.chunked_array([pa.array([None] * 4) for _ in range(10)]),
"all_float": pa.chunked_array(
[pa.array([None] * 4, type=pa.float32()) for _ in range(10)]
),
"some_in_all_chunks": pa.chunked_array(
[pa.array(["a", "b", None] * 4), pa.array(["a", None, "b"] * 4)]
),
"only_in_some_chunk": pa.chunked_array(
[
pa.array(["a", "x"]),
pa.array(["a", "b", None] * 4),
pa.array(["a", "b"] * 4),
]
),
"none": pa.chunked_array([pa.array(["a", "b"] * 4) for _ in range(10)]),
}
return case_dict[request.param]
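Example 2 is a pytest fixture (the request argument implies a @pytest.fixture(params=...) decorator in the original module). The "all_float" case exists because a plain pa.array([None] * 4) gets the null type, whereas passing type=pa.float32() produces a float array whose values all happen to be null. A short sketch of the distinction:

import pyarrow as pa

untyped = pa.chunked_array([pa.array([None] * 4) for _ in range(10)])
typed = pa.chunked_array(
    [pa.array([None] * 4, type=pa.float32()) for _ in range(10)]
)

print(untyped.type)      # null
print(typed.type)        # float
print(typed.null_count)  # 40 -- every slot is null, but the type is float32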
Example 3: test_get_flattened_array_parent_indices
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def test_get_flattened_array_parent_indices(self, list_type_factory,
parent_indices_type):
indices = array_util.GetFlattenedArrayParentIndices(
pa.array([], type=list_type_factory(pa.int32())))
self.assertTrue(indices.equals(pa.array([], type=parent_indices_type)))
indices = array_util.GetFlattenedArrayParentIndices(
pa.array([[1.], [2.], [], [3., 4.]],
type=list_type_factory(pa.float32())))
self.assertTrue(
indices.equals(pa.array([0, 1, 3, 3], type=parent_indices_type)))
indices = array_util.GetFlattenedArrayParentIndices(
pa.array([[1.], [2.], [], [3., 4.]],
type=list_type_factory(pa.float32())).slice(1))
self.assertTrue(
indices.equals(pa.array([0, 2, 2], type=parent_indices_type)))
indices = array_util.GetFlattenedArrayParentIndices(
pa.array([list(range(1024))],
type=list_type_factory(pa.int64())))
self.assertTrue(
indices.equals(pa.array([0] * 1024, type=parent_indices_type)))
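GetFlattenedArrayParentIndices is from TFX-BSL's array_util: for each element of the flattened values it reports which parent list it belonged to. Recent pyarrow versions expose the same computation as a compute function, so a rough standalone equivalent (an approximation of the tested behavior, not the tested API itself) is:

import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([[1.], [2.], [], [3., 4.]], type=pa.list_(pa.float32()))

# One parent index per flattened value: 1. and 2. come from lists 0 and 1,
# list 2 is empty, and both 3. and 4. come from list 3.
print(pc.list_parent_indices(arr))  # [0, 1, 3, 3]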
Example 4: _get_numeric_byte_size_test_cases
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def _get_numeric_byte_size_test_cases():
result = []
for array_type, sizeof in [
(pa.int8(), 1),
(pa.uint8(), 1),
(pa.int16(), 2),
(pa.uint16(), 2),
(pa.int32(), 4),
(pa.uint32(), 4),
(pa.int64(), 8),
(pa.uint64(), 8),
(pa.float32(), 4),
(pa.float64(), 8),
]:
result.append(
dict(
testcase_name=str(array_type),
array=pa.array(range(9), type=array_type),
slice_offset=2,
slice_length=3,
expected_size=(_all_false_null_bitmap_size(2) + sizeof * 9),
expected_sliced_size=(_all_false_null_bitmap_size(1) + sizeof * 3)))
return result
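The sizeof values paired with each type above are just the fixed byte widths of the Arrow types, which pyarrow exposes directly (note that _all_false_null_bitmap_size is a helper defined elsewhere in the original test module and is not reproduced here). A quick cross-check of the widths:

import pyarrow as pa

# bit_width // 8 reproduces the sizeof column used in the test cases.
for t in [pa.int8(), pa.uint16(), pa.int32(), pa.float32(), pa.float64()]:
    print(t, t.bit_width // 8)  # 1, 2, 4, 4, 8 bytes respectively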
Example 5: _GetExpectedColumnValues
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def _GetExpectedColumnValues(tfxio):
if tfxio._can_produce_large_types:
list_factory = pa.large_list
bytes_type = pa.large_binary()
else:
list_factory = pa.list_
bytes_type = pa.binary()
return {
path.ColumnPath(["int_feature"]):
pa.array([[1], [2], [3]], type=list_factory(pa.int64())),
path.ColumnPath(["float_feature"]):
pa.array([[1, 2, 3, 4], [2, 3, 4, 5], None],
type=list_factory(pa.float32())),
path.ColumnPath([_SEQUENCE_COLUMN_NAME, "int_feature"]):
pa.array([[[1, 2], [3]], None, [[4]]],
list_factory(list_factory(pa.int64()))),
path.ColumnPath([_SEQUENCE_COLUMN_NAME, "string_feature"]):
pa.array([None, [[b"foo", b"bar"], []], [[b"baz"]]],
list_factory(list_factory(bytes_type)))
}
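Example 5 switches between pa.list_/pa.binary and the 64-bit-offset variants pa.large_list/pa.large_binary depending on what the TFXIO implementation can produce; the element types such as pa.float32() stay the same either way. A short illustration of the two list factories:

import pyarrow as pa

small = pa.array([[1.0, 2.0], None], type=pa.list_(pa.float32()))
large = pa.array([[1.0, 2.0], None], type=pa.large_list(pa.float32()))

# Same logical content; large_list uses 64-bit offsets to allow >2GB columns.
print(small.type)  # list<item: float>
print(large.type)  # large_list<item: float>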
Example 6: get_pyarrow_types
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def get_pyarrow_types():
return {
'bool': PA_BOOL,
'float32': PA_FLOAT32,
'float64': PA_FLOAT64,
'int8': PA_INT8,
'int16': PA_INT16,
'int32': PA_INT32,
'int64': PA_INT64,
'string': PA_STRING,
'timestamp': PA_TIMESTAMP,
'base64': PA_BINARY
}
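The PA_* names here are module-level constants from the surrounding project; they are presumably bound to pyarrow type instances. A plausible, hypothetical set of definitions consistent with the mapping would be:

import pyarrow as pa

# Hypothetical constants matching the keys above; the original module's
# actual definitions (e.g. the timestamp unit) may differ.
PA_BOOL = pa.bool_()
PA_FLOAT32 = pa.float32()
PA_FLOAT64 = pa.float64()
PA_INT8 = pa.int8()
PA_INT16 = pa.int16()
PA_INT32 = pa.int32()
PA_INT64 = pa.int64()
PA_STRING = pa.string()
PA_TIMESTAMP = pa.timestamp('ns')  # unit is a guess
PA_BINARY = pa.binary()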
Example 7: setUp
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def setUp(self):
self.sa_meta = sa.MetaData()
self.data = [
[17.124, 1.12, 3.14, 13.37],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[True, None, False, True],
['string 1', 'string 2', None, 'string 3'],
[datetime(2007, 7, 13, 1, 23, 34, 123456),
None,
datetime(2006, 1, 13, 12, 34, 56, 432539),
datetime(2010, 8, 13, 5, 46, 57, 437699), ],
["Test Text", "Some#More#Test# Text", "!@#$%%^&*&", None],
]
self.table = sa.Table(
'unit_test_table',
self.sa_meta,
sa.Column('real_col', sa.REAL),
sa.Column('bigint_col', sa.BIGINT),
sa.Column('int_col', sa.INTEGER),
sa.Column('smallint_col', sa.SMALLINT),
sa.Column('bool_col', sa.BOOLEAN),
sa.Column('str_col', sa.VARCHAR),
sa.Column('timestamp_col', sa.TIMESTAMP),
sa.Column('plaintext_col', sa.TEXT),
)
self.expected_datatypes = [
pa.float32(),
pa.int64(),
pa.int32(),
pa.int16(),
pa.bool_(),
pa.string(),
pa.timestamp('ns'),
pa.string(),
]
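The expectations in this setUp imply a SQLAlchemy-to-Arrow type mapping in the library under test: 4-byte SQL REAL becomes pa.float32(), while the other numeric columns keep their natural widths. A minimal sketch of the correspondence (an illustration, not the tested library's actual code):

import pyarrow as pa
import sqlalchemy as sa

SA_TO_ARROW = {
    sa.REAL: pa.float32(),      # 4-byte SQL REAL -> 32-bit float
    sa.BIGINT: pa.int64(),
    sa.INTEGER: pa.int32(),
    sa.SMALLINT: pa.int16(),
    sa.BOOLEAN: pa.bool_(),
    sa.VARCHAR: pa.string(),
    sa.TIMESTAMP: pa.timestamp('ns'),
    sa.TEXT: pa.string(),
}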
Example 8: test_dict_to_spark_row_field_validation_ndarrays
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def test_dict_to_spark_row_field_validation_ndarrays():
"""Test various validations done on data types when converting a dictionary to a spark row"""
TestSchema = Unischema('TestSchema', [
UnischemaField('tensor3d', np.float32, (10, 20, 30), NdarrayCodec(), False),
])
assert isinstance(dict_to_spark_row(TestSchema, {'tensor3d': np.zeros((10, 20, 30), dtype=np.float32)}), Row)
# Null value into not nullable field
with pytest.raises(ValueError):
isinstance(dict_to_spark_row(TestSchema, {'string_field': None}), Row)
# Wrong dimensions
with pytest.raises(ValueError):
isinstance(dict_to_spark_row(TestSchema, {'string_field': np.zeros((1, 2, 3), dtype=np.float32)}), Row)
Example 9: test_make_named_tuple
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def test_make_named_tuple():
TestSchema = Unischema('TestSchema', [
UnischemaField('string_scalar', np.string_, (), ScalarCodec(StringType()), True),
UnischemaField('int32_scalar', np.int32, (), ScalarCodec(ShortType()), False),
UnischemaField('uint8_scalar', np.uint8, (), ScalarCodec(ShortType()), False),
UnischemaField('int32_matrix', np.float32, (10, 20, 3), NdarrayCodec(), True),
UnischemaField('decimal_scalar', Decimal, (10, 20, 3), ScalarCodec(DecimalType(10, 9)), False),
])
TestSchema.make_namedtuple(string_scalar='abc', int32_scalar=10, uint8_scalar=20,
int32_matrix=np.int32((10, 20, 3)), decimal_scalar=Decimal(123) / Decimal(10))
TestSchema.make_namedtuple(string_scalar=None, int32_scalar=10, uint8_scalar=20,
int32_matrix=None, decimal_scalar=Decimal(123) / Decimal(10))
Example 10: test_arrow_schema_convertion
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def test_arrow_schema_convertion():
fields = [
pa.field('string', pa.string()),
pa.field('int8', pa.int8()),
pa.field('int16', pa.int16()),
pa.field('int32', pa.int32()),
pa.field('int64', pa.int64()),
pa.field('float', pa.float32()),
pa.field('double', pa.float64()),
pa.field('bool', pa.bool_(), False),
pa.field('fixed_size_binary', pa.binary(10)),
pa.field('variable_size_binary', pa.binary()),
pa.field('decimal', pa.decimal128(3, 4)),
pa.field('timestamp_s', pa.timestamp('s')),
pa.field('timestamp_ns', pa.timestamp('ns')),
pa.field('date_32', pa.date32()),
pa.field('date_64', pa.date64())
]
arrow_schema = pa.schema(fields)
mock_dataset = _mock_parquet_dataset([], arrow_schema)
unischema = Unischema.from_arrow_schema(mock_dataset)
for name in arrow_schema.names:
assert getattr(unischema, name).name == name
assert getattr(unischema, name).codec is None
if name == 'bool':
assert not getattr(unischema, name).nullable
else:
assert getattr(unischema, name).nullable
    # Test that the schema preserves field order
field_name_list = [f.name for f in fields]
assert list(unischema.fields.keys()) == field_name_list
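The pa.field(name, type, nullable) calls above use standard pyarrow; the third positional argument is nullable, which is why only the bool field is declared non-nullable. A standalone check of the two properties the assertions rely on, nullability and field order:

import pyarrow as pa

fields = [pa.field('float', pa.float32()), pa.field('bool', pa.bool_(), False)]
schema = pa.schema(fields)

print(schema.names)                   # ['float', 'bool'] -- order preserved
print(schema.field('bool').nullable)  # False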
Example 11: to_arrow_type
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
from distutils.version import LooseVersion
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt) +
"\nPlease install pyarrow >= 0.10.0 for BinaryType support.")
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) == TimestampType:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
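to_arrow_type covers the Spark-to-Arrow direction; pyarrow itself ships a NumPy-to-Arrow helper that agrees with it on the scalar float cases, which makes for a quick sanity check:

import numpy as np
import pyarrow as pa

# Spark's FloatType corresponds to numpy float32; both map to Arrow float32.
assert pa.from_numpy_dtype(np.float32) == pa.float32()
assert pa.from_numpy_dtype(np.float64) == pa.float64()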
Example 12: test_select_ipc_parametrized
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def test_select_ipc_parametrized(self, con, query, parameters):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc(query, parameters=parameters)
expected = pd.DataFrame(
{
"qty": np.array([100, 100], dtype=np.int32),
"price": np.array(
[35.13999938964844, 12.140000343322754], dtype=np.float32
),
}
)[['qty', 'price']]
tm.assert_frame_equal(result, expected)
c.execute('drop table if exists stocks;')
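The expected prices look odd (35.13999938964844 rather than 35.14) because the result column comes back as float32 and 35.14 is not exactly representable in 32 bits; reading the stored float32 value back at float64 precision exposes the rounding. A two-line demonstration:

import numpy as np

# Round-tripping through float32 yields exactly the values the test expects.
print(float(np.float32(35.14)))  # 35.13999938964844
print(float(np.float32(12.14)))  # 12.140000343322754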
Example 13: test_select_ipc_gpu
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def test_select_ipc_gpu(self, con, query, parameters):
from cudf.core.dataframe import DataFrame
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc_gpu("select qty, price from stocks")
assert isinstance(result, DataFrame)
dtypes = dict(qty=np.int32, price=np.float32)
expected = pd.DataFrame(
[[100, 35.14], [100, 12.14]], columns=['qty', 'price']
).astype(dtypes)
result = result.to_pandas()[['qty', 'price']] # column order
pd.testing.assert_frame_equal(result, expected)
c.execute('drop table if exists stocks;')
Example 14: test_load_infer
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def test_load_infer(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = pd.DataFrame(
{
'a': np.array([0, 1], dtype=np.int32),
'b': np.array([1.1, 2.2], dtype=np.float32),
'c': ['a', 'b'],
}
)
con.load_table("baz", data)
con.execute("drop table if exists baz;")
Example 15: test_load_table_creates
# Required import: import pyarrow [as alias]
# Or: from pyarrow import float32 [as alias]
def test_load_table_creates(self, con):
data = pd.DataFrame(
{
"boolean_": [True, False],
"smallint_cast": np.array([0, 1], dtype=np.int8),
"smallint_": np.array([0, 1], dtype=np.int16),
"int_": np.array([0, 1], dtype=np.int32),
"bigint_": np.array([0, 1], dtype=np.int64),
"float_": np.array([0, 1], dtype=np.float32),
"double_": np.array([0, 1], dtype=np.float64),
"varchar_": ["a", "b"],
"text_": ['a', 'b'],
"time_": [datetime.time(0, 11, 59), datetime.time(13)],
"timestamp_": [pd.Timestamp("2016"), pd.Timestamp("2017")],
"date_": [
datetime.date(2016, 1, 1),
datetime.date(2017, 1, 1),
],
},
columns=[
'boolean_',
'smallint_',
'int_',
'bigint_',
'float_',
'double_',
'varchar_',
'text_',
'time_',
'timestamp_',
'date_',
],
)
con.execute("drop table if exists test_load_table_creates;")
con.load_table("test_load_table_creates", data, create=True)
con.execute("drop table if exists test_load_table_creates;")