This article collects typical usage examples of Python's numpy.timedelta64. If you have been wondering what exactly numpy.timedelta64 does, how to use it, or what real-world code built on it looks like, the curated samples here should help. You can also explore further usage examples from the numpy module where this method lives.
Below are 15 code examples of numpy.timedelta64, sorted by popularity by default. You can vote up the examples you like or find useful; your feedback helps surface better Python code samples.
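Before the examples, here is a minimal standalone sketch of the basics: numpy.timedelta64 is built from an integer count and a unit code, supports arithmetic with numpy.datetime64, and promotes mixed units to the finer one.

import numpy as np

one_day = np.timedelta64(1, 'D')
three_hours = np.timedelta64(3, 'h')
print(one_day + three_hours)                        # 27 hours (promoted to 'h')
print(np.datetime64('2014-01-01T12:00') + one_day)  # 2014-01-02T12:00
print(np.timedelta64(90, 's') / np.timedelta64(60, 's'))  # 1.5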
Example 1: test_maybe_apply_time_shift

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_maybe_apply_time_shift(data_loader, ds_with_time_bounds, ds_inst,
                                var_name, generate_file_set_args):
    ds = xr.decode_cf(ds_with_time_bounds)
    da = ds[var_name]

    result = data_loader._maybe_apply_time_shift(
        da.copy(), **generate_file_set_args)[TIME_STR]
    assert result.identical(da[TIME_STR])

    offset = data_loader._maybe_apply_time_shift(
        da.copy(), {'days': 1}, **generate_file_set_args)
    result = offset[TIME_STR]
    expected = da[TIME_STR] + np.timedelta64(1, 'D')
    expected[TIME_STR] = expected
    assert result.identical(expected)
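The shift above relies on numpy broadcasting a scalar timedelta64 over a datetime64 coordinate. A plain-numpy sketch of the same idea, with a hypothetical `times` array standing in for the fixture's time coordinate:

import numpy as np

times = np.arange(np.datetime64('2014-01-01'), np.datetime64('2014-01-04'))
print(times + np.timedelta64(1, 'D'))  # ['2014-01-02' '2014-01-03' '2014-01-04']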
Example 2: test_maybe_apply_time_shift_inst

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_maybe_apply_time_shift_inst(gfdl_data_loader, ds_inst, var_name,
                                     generate_file_set_args):
    ds_inst = xr.decode_cf(ds_inst)
    generate_file_set_args['dtype_in_time'] = 'inst'
    generate_file_set_args['intvl_in'] = '3hr'
    da = ds_inst[var_name]
    result = gfdl_data_loader._maybe_apply_time_shift(
        da.copy(), **generate_file_set_args)[TIME_STR]

    expected = da[TIME_STR] + np.timedelta64(-3, 'h')
    expected[TIME_STR] = expected
    assert result.identical(expected)

    generate_file_set_args['intvl_in'] = 'daily'
    da = ds_inst[var_name]
    result = gfdl_data_loader._maybe_apply_time_shift(
        da.copy(), **generate_file_set_args)[TIME_STR]

    expected = da[TIME_STR]
    expected[TIME_STR] = expected
    assert result.identical(expected)
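The instantaneous-data branch shifts backwards in time; a negative count and a subtracted timedelta64 are equivalent, as this small sketch shows:

import numpy as np

t = np.datetime64('2014-01-01T12:00')
print(t + np.timedelta64(-3, 'h'))  # 2014-01-01T09:00
print(t - np.timedelta64(3, 'h'))   # 2014-01-01T09:00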
Example 3: __init__

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def __init__(self, ts, granularity, start=None):
    # NOTE(sileht): The whole class assumes ts is ordered and has no
    # duplicate timestamps; it uses numpy.unique, which sorts the list,
    # but we always assume the order to be the same as the input's.
    self.granularity = granularity
    self.can_derive = isinstance(granularity, numpy.timedelta64)
    self.start = start
    if start is None:
        self._ts = ts
        self._ts_for_derive = ts
    else:
        self._ts = ts[numpy.searchsorted(ts['timestamps'], start):]
        if self.can_derive:
            start_derive = start - granularity
            self._ts_for_derive = ts[
                numpy.searchsorted(ts['timestamps'], start_derive):
            ]
    if self.can_derive:
        self.indexes = round_timestamp(self._ts['timestamps'], granularity)
    elif calendar.GROUPINGS.get(granularity):
        self.indexes = calendar.GROUPINGS.get(granularity)(
            self._ts['timestamps'])
    self.tstamps, self.counts = numpy.unique(self.indexes,
                                             return_counts=True)
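This constructor leans on numpy.searchsorted over a structured array's 'timestamps' field and on numpy.unique with return_counts for the grouping. A rough standalone sketch of those two steps, assuming a hypothetical structured array and a 60-second granularity (the flooring shown here is one way to do what round_timestamp does; the real helper may differ):

import numpy as np

ts = np.array([('2014-01-01T12:00:05', 1.0),
               ('2014-01-01T12:00:55', 2.0),
               ('2014-01-01T12:01:10', 3.0)],
              dtype=[('timestamps', 'datetime64[s]'), ('values', 'float64')])

start = np.datetime64('2014-01-01T12:00:30')
trimmed = ts[np.searchsorted(ts['timestamps'], start):]  # drops the first row

granularity = np.timedelta64(60, 's')
epoch = np.datetime64(0, 's')
# Floor each timestamp to the granularity, then count per group.
indexes = ts['timestamps'] - (ts['timestamps'] - epoch) % granularity
tstamps, counts = np.unique(indexes, return_counts=True)
print(tstamps)  # ['2014-01-01T12:00:00' '2014-01-01T12:01:00']
print(counts)   # [2 1]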
Example 4: truncate

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def truncate(self, oldest_point=None):
    """Truncate the time series up to oldest_point, excluded.

    :param oldest_point: Oldest point to keep from, this excluded.
                         Default is the aggregation timespan.
    :type oldest_point: numpy.datetime64 or numpy.timedelta64
    :return: The oldest point that could have been kept.
    """
    last = self.last
    if last is None:
        return
    if oldest_point is None:
        oldest_point = self.aggregation.timespan
        if oldest_point is None:
            return
    if isinstance(oldest_point, numpy.timedelta64):
        oldest_point = last - oldest_point
    index = numpy.searchsorted(self.ts['timestamps'], oldest_point,
                               side='right')
    self.ts = self.ts[index:]
    return oldest_point
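When oldest_point is a timedelta64, it is interpreted relative to the last timestamp. A rough plain-numpy sketch of the same cutoff arithmetic, with hypothetical minute-spaced timestamps: subtracting the timedelta64 from the last point gives the cutoff, and side='right' puts the cutoff itself on the removed side.

import numpy as np

timestamps = np.arange(np.datetime64('2014-01-01T12:00'),
                       np.datetime64('2014-01-01T12:10'),
                       np.timedelta64(1, 'm'))
cutoff = timestamps[-1] - np.timedelta64(5, 'm')           # 12:04
index = np.searchsorted(timestamps, cutoff, side='right')
print(timestamps[index:])                                  # 12:05 .. 12:09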
Example 5: test_corrupted_split

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_corrupted_split(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()

    aggregation = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(5, 'm'))

    with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize',
                    side_effect=carbonara.InvalidData()):
        results = self.storage._get_splits_and_unserialize({
            self.metric: {
                aggregation: [
                    carbonara.SplitKey(
                        numpy.datetime64(1387800000, 's'),
                        numpy.timedelta64(5, 'm'))
                ],
            },
        })[self.metric][aggregation]
    self.assertEqual(1, len(results))
    self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie)
    # Assert it's empty, since the split was corrupted
    self.assertEqual(0, len(results[0]))
    self.assertEqual(results[0].aggregation, aggregation)
Example 6: test_get_splits_and_unserialize

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_get_splits_and_unserialize(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()

    aggregation = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(5, 'm'))

    results = self.storage._get_splits_and_unserialize({
        self.metric: {
            aggregation: [
                carbonara.SplitKey(
                    numpy.datetime64(1387800000, 's'),
                    numpy.timedelta64(5, 'm')),
            ],
        },
    })[self.metric][aggregation]
    self.assertEqual(1, len(results))
    self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie)
    # Assert it's not empty, since nothing was corrupted here
    self.assertGreater(len(results[0]), 0)
    self.assertEqual(results[0].aggregation, aggregation)
Example 7: test_derived_hole

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_derived_hole(self):
    ts = carbonara.TimeSerie.from_data(
        [datetime.datetime(2014, 1, 1, 12, 0, 0),
         datetime.datetime(2014, 1, 1, 12, 0, 4),
         datetime.datetime(2014, 1, 1, 12, 1, 2),
         datetime.datetime(2014, 1, 1, 12, 1, 14),
         datetime.datetime(2014, 1, 1, 12, 1, 24),
         datetime.datetime(2014, 1, 1, 12, 3, 2),
         datetime.datetime(2014, 1, 1, 12, 3, 22),
         datetime.datetime(2014, 1, 1, 12, 3, 42),
         datetime.datetime(2014, 1, 1, 12, 4, 9)],
        [50, 55, 65, 66, 70, 105, 108, 200, 202])
    ts = self._resample(ts, numpy.timedelta64(60, 's'), 'last',
                        derived=True)

    self.assertEqual(4, len(ts))
    self.assertEqual(
        [(datetime64(2014, 1, 1, 12, 0, 0), 5),
         (datetime64(2014, 1, 1, 12, 1, 0), 4),
         (datetime64(2014, 1, 1, 12, 3, 0), 92),
         (datetime64(2014, 1, 1, 12, 4, 0), 2)],
        list(ts.fetch(
            from_timestamp=datetime64(2014, 1, 1, 12))))
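With derived=True the series is first differenced before resampling, so each bucket holds rates of change and 'last' picks the final difference in the bucket (e.g. the 92 for minute 12:03 is the last difference landing in that minute). A small sketch of that first step with numpy.diff, where each difference is attributed to the later sample's timestamp:

import numpy as np

timestamps = np.array(['2014-01-01T12:03:02', '2014-01-01T12:03:22',
                       '2014-01-01T12:03:42'], dtype='datetime64[s]')
values = np.array([105., 108., 200.])
print(np.diff(values))  # [ 3. 92.] -- 92 is the last diff in minute 12:03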
Example 8: _do_test_aggregation

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def _do_test_aggregation(self, name, v1, v2, v3):
    # NOTE(gordc): test data must have a group with an odd count
    # to properly test the 50pct case.
    ts = carbonara.TimeSerie.from_data(
        [datetime64(2014, 1, 1, 12, 0, 0),
         datetime64(2014, 1, 1, 12, 0, 10),
         datetime64(2014, 1, 1, 12, 0, 20),
         datetime64(2014, 1, 1, 12, 0, 30),
         datetime64(2014, 1, 1, 12, 0, 40),
         datetime64(2014, 1, 1, 12, 1, 0),
         datetime64(2014, 1, 1, 12, 1, 10),
         datetime64(2014, 1, 1, 12, 1, 20),
         datetime64(2014, 1, 1, 12, 1, 30),
         datetime64(2014, 1, 1, 12, 1, 40),
         datetime64(2014, 1, 1, 12, 1, 50),
         datetime64(2014, 1, 1, 12, 2, 0),
         datetime64(2014, 1, 1, 12, 2, 10)],
        [3, 5, 2, 3, 5, 8, 11, 22, 10, 42, 9, 4, 2])
    ts = self._resample(ts, numpy.timedelta64(60, 's'), name)

    self.assertEqual(3, len(ts))
    self.assertEqual(v1, ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
    self.assertEqual(v2, ts[datetime64(2014, 1, 1, 12, 1, 0)][1])
    self.assertEqual(v3, ts[datetime64(2014, 1, 1, 12, 2, 0)][1])
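Each of v1/v2/v3 is the named aggregate over one 60-second bucket; the first bucket holds [3, 5, 2, 3, 5], and the note about odd group sizes matters because the median of an odd-sized group is an actual sample. A quick sketch of what 'mean' and '50pct' would yield for that bucket:

import numpy as np

bucket = np.array([3., 5., 2., 3., 5.])  # values falling in minute 12:00
print(np.mean(bucket))            # 3.6
print(np.percentile(bucket, 50))  # 3.0 -- a real sample, since the count is odd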
Example 9: test_aggregation_std_with_unique

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_aggregation_std_with_unique(self):
    ts = carbonara.TimeSerie.from_data(
        [datetime64(2014, 1, 1, 12, 0, 0)], [3])
    ts = self._resample(ts, numpy.timedelta64(60, 's'), 'std')
    self.assertEqual(0, len(ts), ts.values)

    ts = carbonara.TimeSerie.from_data(
        [datetime64(2014, 1, 1, 12, 0, 0),
         datetime64(2014, 1, 1, 12, 0, 4),
         datetime64(2014, 1, 1, 12, 0, 9),
         datetime64(2014, 1, 1, 12, 1, 6)],
        [3, 6, 5, 9])
    ts = self._resample(ts, numpy.timedelta64(60, 's'), "std")

    self.assertEqual(1, len(ts))
    self.assertEqual(1.5275252316519465,
                     ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
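The expected value is the sample standard deviation (ddof=1) of the three points landing in minute 12:00, and the lone point in minute 12:01 is dropped because a sample std of a single value is undefined. Both facts are easy to verify directly:

import numpy as np

print(np.std([3., 6., 5.], ddof=1))  # 1.5275252316519465
print(np.std([9.], ddof=1))          # nan (with a RuntimeWarning): 0/0 degrees of freedom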
Example 10: test_serialize

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_serialize(self):
    ts = {'sampling': numpy.timedelta64(500, 'ms'), 'agg': 'mean'}
    tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
    tsb.set_values(numpy.array([
        (datetime64(2014, 1, 1, 12, 0, 0, 1234), 3),
        (datetime64(2014, 1, 1, 12, 0, 0, 321), 6),
        (datetime64(2014, 1, 1, 12, 1, 4, 234), 5),
        (datetime64(2014, 1, 1, 12, 1, 9, 32), 7),
        (datetime64(2014, 1, 1, 12, 2, 12, 532), 1)],
        dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
        before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=ts))

    key = ts['return'].get_split_key()
    o, s = ts['return'].serialize(key)
    self.assertEqual(ts['return'],
                     carbonara.AggregatedTimeSerie.unserialize(
                         s, key, ts['return'].aggregation))
Example 11: test_no_truncation

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_no_truncation(self):
    ts = {'sampling': numpy.timedelta64(60, 's'), 'agg': 'mean'}
    tsb = carbonara.BoundTimeSerie()

    for i in six.moves.range(1, 11):
        tsb.set_values(numpy.array([
            (datetime64(2014, 1, 1, 12, i, i), float(i))],
            dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
            before_truncate_callback=functools.partial(
                self._resample_and_merge, agg_dict=ts))
        tsb.set_values(numpy.array([
            (datetime64(2014, 1, 1, 12, i, i + 1), float(i + 1))],
            dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
            before_truncate_callback=functools.partial(
                self._resample_and_merge, agg_dict=ts))
        self.assertEqual(i, len(list(ts['return'].fetch())))
Example 12: test_split_key

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_split_key(self):
    self.assertEqual(
        numpy.datetime64("2014-10-07"),
        carbonara.SplitKey.from_timestamp_and_sampling(
            numpy.datetime64("2015-01-01T15:03"),
            numpy.timedelta64(3600, 's')))
    self.assertEqual(
        numpy.datetime64("2014-12-31 18:00"),
        carbonara.SplitKey.from_timestamp_and_sampling(
            numpy.datetime64("2015-01-01 15:03:58"),
            numpy.timedelta64(58, 's')))

    key = carbonara.SplitKey.from_timestamp_and_sampling(
        numpy.datetime64("2015-01-01 15:03"),
        numpy.timedelta64(3600, 's'))

    self.assertGreater(key, numpy.datetime64("1970"))
    self.assertGreaterEqual(key, numpy.datetime64("1970"))
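A split key is effectively the timestamp floored to sampling × POINTS_PER_SPLIT. Assuming POINTS_PER_SPLIT is 3600 (its value in carbonara at the time of writing), the first assertion's 2014-10-07 can be reproduced with plain timedelta64 arithmetic:

import numpy as np

POINTS_PER_SPLIT = 3600  # assumed; see carbonara.SplitKey.POINTS_PER_SPLIT
span = np.timedelta64(3600, 's') * POINTS_PER_SPLIT  # 150 days per split
t = np.datetime64('2015-01-01T15:03', 's')
epoch = np.datetime64(0, 's')
print(epoch + ((t - epoch) // span) * span)  # 2014-10-07T00:00:00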
Example 13: test_split

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_split(self):
    sampling = numpy.timedelta64(5, 's')
    points = 100000
    ts = carbonara.TimeSerie.from_data(
        timestamps=list(map(datetime.datetime.utcfromtimestamp,
                            six.moves.range(points))),
        values=list(six.moves.range(points)))
    agg = self._resample(ts, sampling, 'mean')

    grouped_points = list(agg.split())

    self.assertEqual(
        math.ceil((points / sampling.astype(float))
                  / carbonara.SplitKey.POINTS_PER_SPLIT),
        len(grouped_points))
    self.assertEqual("0.0",
                     str(carbonara.SplitKey(grouped_points[0][0], 0)))
    # 3600 points × 5 s = 5 hours per split
    self.assertEqual(datetime64(1970, 1, 1, 5),
                     grouped_points[1][0])
    self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT,
                     len(grouped_points[0][1]))
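The expected split count follows directly from the numbers: 100000 one-second points resampled at 5 s give 20000 aggregated points, and at (an assumed) 3600 points per split that is ceil(20000 / 3600) = 6 splits:

import math
import numpy as np

points = 100000
sampling = np.timedelta64(5, 's')
aggregated = points / sampling.astype(float)  # 20000.0 five-second buckets
print(math.ceil(aggregated / 3600))           # 6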
Example 14: test_aggregated_different_archive_no_overlap

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_aggregated_different_archive_no_overlap(self):
    tsc1 = {'sampling': numpy.timedelta64(60, 's'),
            'size': 50, 'agg': 'mean', "name": "all"}
    tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
    tsc2 = {'sampling': numpy.timedelta64(60, 's'),
            'size': 50, 'agg': 'mean', "name": "all"}
    tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])

    tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 46, 4), 4)],
                                dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
                    before_truncate_callback=functools.partial(
                        self._resample_and_merge, agg_dict=tsc1))
    tsb2.set_values(numpy.array([(datetime64(2014, 1, 1, 9, 1, 4), 4)],
                                dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
                    before_truncate_callback=functools.partial(
                        self._resample_and_merge, agg_dict=tsc2))

    dtfrom = datetime64(2014, 1, 1, 11, 0, 0)
    self.assertRaises(exceptions.UnAggregableTimeseries,
                      processor.aggregated,
                      [tsc1['return'], tsc2['return']],
                      from_timestamp=dtfrom,
                      operations=["aggregate", "mean", [
                          "metric", ["all", "mean"]]])
Example 15: test_aggregated_different_archive_no_overlap2

# Required imports: import numpy [as alias]
# Or: from numpy import timedelta64 [as alias]
def test_aggregated_different_archive_no_overlap2(self):
    tsc1 = {'sampling': numpy.timedelta64(60, 's'),
            'size': 50, 'agg': 'mean'}
    tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
    tsc2 = carbonara.AggregatedTimeSerie(
        carbonara.Aggregation('mean', numpy.timedelta64(60, 's'), None))

    tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)],
                                dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
                    before_truncate_callback=functools.partial(
                        self._resample_and_merge, agg_dict=tsc1))
    metric = mock.Mock(id=str(uuid.uuid4()))
    ref = processor.MetricReference(metric, "mean")
    self.assertRaises(exceptions.UnAggregableTimeseries,
                      processor.aggregated,
                      [tsc1['return'], (ref, tsc2)],
                      operations=["aggregate", "mean",
                                  ["metric", tsc1['return'][0].lookup_key,
                                   ref.lookup_key]])