本文整理汇总了Python中pyarrow.timestamp方法的典型用法代码示例。如果您正苦于以下问题:Python pyarrow.timestamp方法的具体用法?Python pyarrow.timestamp怎么用?Python pyarrow.timestamp使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块pyarrow的用法示例。
在下文中一共展示了pyarrow.timestamp方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _check_series_localize_timestamps
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def _check_series_localize_timestamps(s, timezone):
    """
    Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.

    If the input series is not a timestamp series, then the same series is returned. If the input
    series is a timestamp series, then a converted series is returned.

    :param s: pandas.Series
    :param timezone: the timezone to convert. if None then use local timezone
    :return pandas.Series that have been converted to tz-naive
    """
    from pyspark.sql.utils import require_minimum_pandas_version
    require_minimum_pandas_version()

    from pandas.api.types import is_datetime64tz_dtype

    # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
    if not is_datetime64tz_dtype(s.dtype):
        # Not a tz-aware timestamp series: return it untouched.
        return s
    # Fall back to the local timezone when the caller did not supply one,
    # then shift into it and strip the tz info to make the series naive.
    target_tz = timezone or _get_local_timezone()
    return s.dt.tz_convert(target_tz).dt.tz_localize(None)
示例2: test_metadata_comes_from_db_columns
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_metadata_comes_from_db_columns(self):
    """Cached-result metadata must come from DB columns, not the Parquet file."""
    expected_columns = [
        Column("A", ColumnType.Number(format="{:,.2f}")),
        Column("B", ColumnType.Datetime()),
        Column("C", ColumnType.Text()),
    ]
    table = arrow_table(
        {
            "A": [1],
            "B": pa.array([datetime.datetime.now()], pa.timestamp("ns")),
            "C": ["x"],
        },
        columns=expected_columns,
    )
    cache_render_result(
        self.workflow, self.wf_module, self.delta.id, RenderResult(table)
    )
    # Delete from disk entirely, to prove we did not read.
    minio.remove(BUCKET, crr_parquet_key(self.wf_module.cached_render_result))
    # Load _new_ CachedRenderResult -- from DB columns, not memory
    fresh_wf_module = WfModule.objects.get(id=self.wf_module.id)
    cached_result = fresh_wf_module.cached_render_result
    self.assertEqual(
        cached_result.table_metadata, TableMetadata(1, expected_columns)
    )
示例3: test_list_prompting_error_concatenate_different_type
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_list_prompting_error_concatenate_different_type(self):
    """Cleaning a text and a datetime column as numbers reports one error per column."""
    table = arrow_table(
        {"A": ["1"], "B": pa.array([datetime.now()], pa.timestamp("ns"))}
    )
    context = self._render_context(input_table=table)
    number_only_list = ParamDType.List(
        inner_dtype=ParamDType.Column(column_types=frozenset({"number"}))
    )
    with self.assertRaises(PromptingError) as cm:
        clean_value(number_only_list, ["A", "B"], context)
    self.assertEqual(
        cm.exception.errors,
        [
            PromptingError.WrongColumnType(["A"], "text", frozenset({"number"})),
            PromptingError.WrongColumnType(["B"], "datetime", frozenset({"number"})),
        ],
    )
示例4: test_list_prompting_error_concatenate_different_type_to_text
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_list_prompting_error_concatenate_different_type_to_text(self):
    """Non-text columns cleaned as text are grouped into a single error."""
    table = arrow_table(
        {"A": [1], "B": pa.array([datetime.now()], pa.timestamp("ns"))}
    )
    context = self._render_context(input_table=table)
    text_only_list = ParamDType.List(
        inner_dtype=ParamDType.Column(column_types=frozenset({"text"}))
    )
    with self.assertRaises(PromptingError) as cm:
        clean_value(text_only_list, ["A", "B"], context)
    self.assertEqual(
        cm.exception.errors,
        [PromptingError.WrongColumnType(["A", "B"], None, frozenset({"text"}))],
    )
示例5: test_dict_prompting_error_concatenate_different_types
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_dict_prompting_error_concatenate_different_types(self):
    """Dict params collect per-column errors when columns have the wrong types."""
    table = arrow_table(
        {"A": ["1"], "B": pa.array([datetime.now()], pa.timestamp("ns"))}
    )
    context = self._render_context(input_table=table)
    number_column = ParamDType.Column(column_types=frozenset({"number"}))
    schema = ParamDType.Dict({"x": number_column, "y": number_column})
    with self.assertRaises(PromptingError) as cm:
        clean_value(schema, {"x": "A", "y": "B"}, context)
    self.assertEqual(
        cm.exception.errors,
        [
            PromptingError.WrongColumnType(["A"], "text", frozenset({"number"})),
            PromptingError.WrongColumnType(["B"], "datetime", frozenset({"number"})),
        ],
    )
示例6: test_clean_multichartseries_non_number_is_prompting_error
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_clean_multichartseries_non_number_is_prompting_error(self):
    """Multichartseries cleaning rejects non-number columns with typed errors."""
    table = arrow_table(
        {"A": ["a"], "B": pa.array([datetime.now()], pa.timestamp("ns"))}
    )
    context = self._render_context(input_table=table)
    series_value = [
        {"column": "A", "color": "#aaaaaa"},
        {"column": "B", "color": "#cccccc"},
    ]
    with self.assertRaises(PromptingError) as cm:
        clean_value(ParamDType.Multichartseries(), series_value, context)
    self.assertEqual(
        cm.exception.errors,
        [
            PromptingError.WrongColumnType(["A"], "text", frozenset({"number"})),
            PromptingError.WrongColumnType(["B"], "datetime", frozenset({"number"})),
        ],
    )
示例7: test_render_with_input_columns
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_render_with_input_columns(self):
    """render() must receive an input_columns dict describing every column."""

    def render(table, params, *, input_columns):
        self.assertEqual(
            input_columns,
            {
                "A": ptypes.RenderColumn("A", "text", None),
                "B": ptypes.RenderColumn("B", "number", "{:,.3f}"),
                "C": ptypes.RenderColumn("C", "datetime", None),
            },
        )

    table_data = {
        "A": ["x"],
        "B": [1],
        "C": pa.array([datetime.now()], pa.timestamp("ns")),
    }
    table_columns = [
        Column("A", ColumnType.Text()),
        Column("B", ColumnType.Number("{:,.3f}")),
        Column("C", ColumnType.Datetime()),
    ]
    with arrow_table_context(
        table_data, columns=table_columns, dir=self.basedir
    ) as arrow_table:
        self._test_render(render, arrow_table=arrow_table)
示例8: test_dataframe_datetime_column
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_dataframe_datetime_column(self):
    """A datetime64[ns] pandas column converts to a tz-naive ns Arrow column."""
    in_frame = pd.DataFrame(
        {"A": ["2019-09-17T21:21:00.123456Z", None]}, dtype="datetime64[ns]"
    )
    expected = arrow_table(
        {
            "A": pyarrow.array(
                [dt.fromisoformat("2019-09-17T21:21:00.123456"), None],
                type=pyarrow.timestamp(unit="ns", tz=None),
            )
        },
        [atypes.Column("A", atypes.ColumnType.Datetime())],
    )
    actual = dataframe_to_arrow_table(
        in_frame, [Column("A", ColumnType.DATETIME())], self.path
    )
    assert_arrow_table_equals(actual, expected)
示例9: test_arrow_datetime_column
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_arrow_datetime_column(self):
    """A tz-naive ns Arrow column converts back to a datetime64[ns] pandas column."""
    in_table = arrow_table(
        {
            "A": pyarrow.array(
                [dt.fromisoformat("2019-09-17T21:21:00.123456"), None],
                type=pyarrow.timestamp(unit="ns", tz=None),
            )
        },
        [atypes.Column("A", atypes.ColumnType.Datetime())],
    )
    dataframe, columns = arrow_table_to_dataframe(in_table)
    expected_frame = pd.DataFrame(
        {"A": ["2019-09-17T21:21:00.123456Z", None]}, dtype="datetime64[ns]"
    )
    assert_frame_equal(dataframe, expected_frame)
    self.assertEqual(columns, [Column("A", ColumnType.DATETIME())])
示例10: test_wf_module_render_null_datetime
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_wf_module_render_null_datetime(self):
    # Ran into problems 2019-09-06, when switching to Arrow
    timestamps = pa.array(
        [dt(2019, 1, 2, 3, 4, 5, 6007, None), None], pa.timestamp("ns")
    )
    cache_render_result(
        self.workflow,
        self.wf_module2,
        self.wf_module2.last_relevant_delta_id,
        RenderResult(arrow_table({"A": timestamps})),
    )
    response = self.client.get("/api/wfmodules/%d/render" % self.wf_module2.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # The null timestamp must serialize as JSON null, not crash the endpoint.
    self.assertEqual(
        json.loads(response.content)["rows"],
        [{"A": "2019-01-02T03:04:05.006007Z"}, {"A": None}],
    )
示例11: _parquet_bytes_to_dict
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def _parquet_bytes_to_dict(column: str, index_buffer: bytes):
    """Deserialize a Parquet index buffer into ``(index_dct, column_type)``."""
    # This can be done much more efficient but would take a lot more
    # time to implement so this will be only done on request.
    table = pq.read_table(pa.BufferReader(index_buffer))

    if ARROW_LARGER_EQ_0150:
        column_type = table.schema.field(column).type
    else:
        column_type = table.schema.field_by_name(column).type

    # `datetime.datetime` objects have a precision of up to microseconds only, so arrow
    # parses the type to `pa.timestamp("us")`. Since the
    # values are normalized to `numpy.datetime64[ns]` anyways, we do not care about this
    # and load the column type as `pa.timestamp("ns")`
    if column_type == pa.timestamp("us"):
        column_type = pa.timestamp("ns")

    df = _fix_pyarrow_07992_table(table).to_pandas()  # Could eventually be phased out
    keys = df[column].values
    partition_lists = (list(x) for x in df[_PARTITION_COLUMN_NAME].values)
    return dict(zip(keys, partition_lists)), column_type
示例12: test_serialization_normalization
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def test_serialization_normalization(key):
    """
    Check that index normalizes values consistently after serializing.
    This is helpful to ensure correct behavior for cases such as when
    key=`datetime.datetime(2018, 1, 1, 12, 30)`, as this would be parsed to
    `pa.timestamp("us")` during index creation, but stored as `pa.timestamp("ns")`.
    """
    original = ExplicitSecondaryIndex(
        column="col", index_dct={key: ["part_2", "part_4", "part_1"]}
    )
    # Round-trip through pickle, then compare normalization on both sides.
    restored = pickle.loads(pickle.dumps(original))
    assert original.normalize_value(original.dtype, key) == restored.normalize_value(
        restored.dtype, key
    )
示例13: get_pyarrow_types
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def get_pyarrow_types():
    """Return the mapping from type-name strings to pyarrow type constants."""
    type_map = {
        'bool': PA_BOOL,
        'float32': PA_FLOAT32,
        'float64': PA_FLOAT64,
        'int8': PA_INT8,
        'int16': PA_INT16,
        'int32': PA_INT32,
        'int64': PA_INT64,
        'string': PA_STRING,
        'timestamp': PA_TIMESTAMP,
        'base64': PA_BINARY,
    }
    return type_map
# pylint: disable=too-many-branches,too-many-statements
示例14: _pa_timestamp_ns
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def _pa_timestamp_ns():
    """Return Arrow's nanosecond-precision timestamp type.

    ``pa.timestamp`` is the only Arrow type factory that requires an
    argument, so it gets this zero-argument wrapper for uniform dispatch.
    """
    return pa.timestamp('ns')
示例15: setUp
# 需要导入模块: import pyarrow [as 别名]
# 或者: from pyarrow import timestamp [as 别名]
def setUp(self):
    """Build the SQLAlchemy table, sample rows, and expected Arrow types."""
    self.sa_meta = sa.MetaData()
    # One list per column, in the same order as column_specs below.
    self.data = [
        [17.124, 1.12, 3.14, 13.37],
        [1, 2, 3, 4],
        [1, 2, 3, 4],
        [1, 2, 3, 4],
        [True, None, False, True],
        ['string 1', 'string 2', None, 'string 3'],
        [datetime(2007, 7, 13, 1, 23, 34, 123456),
         None,
         datetime(2006, 1, 13, 12, 34, 56, 432539),
         datetime(2010, 8, 13, 5, 46, 57, 437699), ],
        ["Test Text", "Some#More#Test# Text", "!@#$%%^&*&", None],
    ]
    # (name, SQL type, expected Arrow type) for each column, kept in one place
    # so the table definition and the expectations cannot drift apart.
    column_specs = [
        ('real_col', sa.REAL, pa.float32()),
        ('bigint_col', sa.BIGINT, pa.int64()),
        ('int_col', sa.INTEGER, pa.int32()),
        ('smallint_col', sa.SMALLINT, pa.int16()),
        ('bool_col', sa.BOOLEAN, pa.bool_()),
        ('str_col', sa.VARCHAR, pa.string()),
        ('timestamp_col', sa.TIMESTAMP, pa.timestamp('ns')),
        ('plaintext_col', sa.TEXT, pa.string()),
    ]
    self.table = sa.Table(
        'unit_test_table',
        self.sa_meta,
        *(sa.Column(name, sa_type) for name, sa_type, _ in column_specs),
    )
    self.expected_datatypes = [arrow_type for _, _, arrow_type in column_specs]