This article collects typical usage examples of the Python function sentry.utils.dates.to_timestamp. If you have been wondering how to use to_timestamp, what it actually does, or where to find working calls to it, the hand-picked code samples below should help.
The sections below present 15 code examples of the to_timestamp function, sorted by popularity by default.
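Before diving in, a minimal sketch of the conversion itself may help. Judging from how the examples below use it (timezone-aware datetimes in, results wrapped in int(), to_datetime as the inverse), to_timestamp turns an aware datetime into Unix epoch seconds; the float return type and the exact value shown here are assumptions, not quotes from Sentry's source:

from datetime import datetime

import pytz
from sentry.utils.dates import to_datetime, to_timestamp

dt = datetime(2016, 8, 1, 0, 0, 0, tzinfo=pytz.utc)
ts = to_timestamp(dt)         # 1470009600.0 -- epoch seconds (float assumed)
assert to_datetime(ts) == dt  # to_datetime converts back the other way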
Example 1: test_get_optimal_rollup_series_offset_intervals
def test_get_optimal_rollup_series_offset_intervals(self, now):
    # This test is a funny one (notice it doesn't return a range that
    # includes the start position.) This occurs because the algorithm for
    # determining the series to be returned will attempt to return the same
    # duration of time as represented by the start and end timestamps, but
    # doesn't necessarily return data *from that specific interval* (the
    # end timestamp is always included.)
    now.return_value = datetime(2016, 8, 1, 0, 0, 15, tzinfo=pytz.utc)
    start = now() - timedelta(seconds=19)
    assert self.tsdb.get_optimal_rollup_series(start, rollup=10) == (
        10,
        [
            to_timestamp(datetime(2016, 8, 1, 0, 0, 0, tzinfo=pytz.utc)),
            to_timestamp(datetime(2016, 8, 1, 0, 0, 10, tzinfo=pytz.utc)),
        ]
    )

    now.return_value = datetime(2016, 8, 1, 0, 0, 30, tzinfo=pytz.utc)
    start = now() - timedelta(seconds=ONE_MINUTE - 1)
    assert self.tsdb.get_optimal_rollup_series(start, rollup=ONE_MINUTE) == (
        ONE_MINUTE,
        [to_timestamp(datetime(2016, 8, 1, 0, 0, 0, tzinfo=pytz.utc))]
    )

    now.return_value = datetime(2016, 8, 1, 12, tzinfo=pytz.utc)
    start = now() - timedelta(seconds=ONE_DAY - 1)
    assert self.tsdb.get_optimal_rollup_series(start, rollup=ONE_DAY) == (
        ONE_DAY,
        [to_timestamp(datetime(2016, 8, 1, 0, tzinfo=pytz.utc))]
    )
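The bucket arithmetic behind the first assertion can be reproduced with plain integer math. This is a hypothetical sketch of the behavior the comment describes, not Sentry's implementation:

rollup = 10
end = int(to_timestamp(datetime(2016, 8, 1, 0, 0, 15, tzinfo=pytz.utc)))  # 1470009615
last_bucket = (end // rollup) * rollup  # 1470009610 -> 00:00:10, always included
num_buckets = (19 // rollup) + 1        # a 19-second window spans two 10s buckets
series = [last_bucket - rollup * i for i in reversed(range(num_buckets))]
# -> [1470009600, 1470009610], i.e. 00:00:00 and 00:00:10, matching the assertion;
# the requested start (00:00:15 - 19s = 23:59:56) falls before the first bucket.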
Example 2: test_integration
def test_integration(self):
    Project.objects.all().delete()

    now = datetime(2016, 9, 12, tzinfo=pytz.utc)

    project = self.create_project(
        organization=self.organization,
        team=self.team,
        date_added=now - timedelta(days=90),
    )

    tsdb.incr(
        tsdb.models.project,
        project.id,
        now - timedelta(days=1),
    )

    member_set = set(project.team.member_set.all())

    with self.tasks(), \
            mock.patch.object(tsdb, 'get_earliest_timestamp') as get_earliest_timestamp:
        # Ensure ``get_earliest_timestamp`` is relative to the fixed
        # "current" timestamp -- this prevents filtering out data points
        # that would be considered expired relative to the *actual* current
        # timestamp.
        get_earliest_timestamp.return_value = to_timestamp(now - timedelta(days=60))

        prepare_reports(timestamp=to_timestamp(now))
        assert len(mail.outbox) == len(member_set) == 1

        message = mail.outbox[0]
        assert self.organization.name in message.subject
Example 3: test_get_optimal_rollup_series_aligned_intervals
def test_get_optimal_rollup_series_aligned_intervals(self, now):
    now.return_value = datetime(2016, 8, 1, tzinfo=pytz.utc)

    start = now() - timedelta(seconds=30)
    assert self.tsdb.get_optimal_rollup_series(start) == (
        10,
        [to_timestamp(start + timedelta(seconds=10) * i) for i in xrange(4)],
    )

    start = now() - timedelta(minutes=30)
    assert self.tsdb.get_optimal_rollup_series(start) == (
        ONE_MINUTE,
        [to_timestamp(start + timedelta(minutes=1) * i) for i in xrange(31)],
    )

    start = now() - timedelta(hours=5)
    assert self.tsdb.get_optimal_rollup_series(start) == (
        ONE_HOUR,
        [to_timestamp(start + timedelta(hours=1) * i) for i in xrange(6)],
    )

    start = now() - timedelta(days=7)
    assert self.tsdb.get_optimal_rollup_series(start) == (
        ONE_DAY,
        [to_timestamp(start + timedelta(hours=24) * i) for i in xrange(8)],
    )
Example 4: merge_distinct_counts
def merge_distinct_counts(self, model, destination, sources, timestamp=None):
    rollups = self.get_active_series(timestamp=timestamp)

    temporary_id = uuid.uuid1().hex

    def make_temporary_key(key):
        return '{}{}:{}'.format(self.prefix, temporary_id, key)

    data = {}
    for rollup, series in rollups.items():
        data[rollup] = {timestamp: [] for timestamp in series}

    with self.cluster.fanout() as client:
        for source in sources:
            c = client.target_key(source)
            for rollup, series in data.items():
                for timestamp, results in series.items():
                    key = self.make_key(
                        model,
                        rollup,
                        to_timestamp(timestamp),
                        source,
                    )
                    results.append(c.get(key))
                    c.delete(key)

    with self.cluster.fanout() as client:
        c = client.target_key(destination)

        temporary_key_sequence = itertools.count()

        for rollup, series in data.items():
            for timestamp, results in series.items():
                values = {}
                for result in results:
                    if result.value is None:
                        continue
                    k = make_temporary_key(next(temporary_key_sequence))
                    values[k] = result.value

                if values:
                    key = self.make_key(
                        model,
                        rollup,
                        to_timestamp(timestamp),
                        destination,
                    )
                    c.mset(values)
                    c.pfmerge(key, key, *values.keys())
                    c.delete(*values.keys())
                    c.expireat(
                        key,
                        self.calculate_expiry(
                            rollup,
                            self.rollups[rollup],
                            timestamp,
                        ),
                    )
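The pattern above reads each source's HyperLogLog state, rewrites it under throwaway keys on the destination's node, folds everything together with PFMERGE, and deletes the scaffolding. The Redis primitive itself behaves like this standalone redis-py sketch (made-up keys, not Sentry's keyspace):

import redis

r = redis.StrictRedis()
r.delete('dest', 'tmp:1', 'tmp:2')           # start from a clean slate
r.pfadd('tmp:1', 'user:a')
r.pfadd('tmp:2', 'user:b')
r.pfmerge('dest', 'dest', 'tmp:1', 'tmp:2')  # union of the distinct-count sketches
r.delete('tmp:1', 'tmp:2')                   # temporaries are only merge scaffolding
assert r.pfcount('dest') == 2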
Example 5: merge_frequencies
def merge_frequencies(self, model, destination, sources, timestamp=None):
    if not self.enable_frequency_sketches:
        return

    rollups = []
    for rollup, samples in self.rollups.items():
        _, series = self.get_optimal_rollup_series(
            to_datetime(self.get_earliest_timestamp(rollup, timestamp=timestamp)),
            end=None,
            rollup=rollup,
        )
        rollups.append((
            rollup,
            map(to_datetime, series),
        ))

    exports = defaultdict(list)

    for source in sources:
        for rollup, series in rollups:
            for timestamp in series:
                keys = self.make_frequency_table_keys(
                    model,
                    rollup,
                    to_timestamp(timestamp),
                    source,
                )
                arguments = ['EXPORT'] + list(self.DEFAULT_SKETCH_PARAMETERS)
                exports[source].extend([
                    (CountMinScript, keys, arguments),
                    ('DEL',) + tuple(keys),
                ])

    imports = []

    for source, results in self.cluster.execute_commands(exports).items():
        results = iter(results)
        for rollup, series in rollups:
            for timestamp in series:
                imports.append((
                    CountMinScript,
                    self.make_frequency_table_keys(
                        model,
                        rollup,
                        to_timestamp(timestamp),
                        destination,
                    ),
                    ['IMPORT'] + list(self.DEFAULT_SKETCH_PARAMETERS) + next(results).value,
                ))
                next(results)  # pop off the result of DEL

    self.cluster.execute_commands({
        destination: imports,
    })
Example 6: get_optimal_rollup
def get_optimal_rollup(self, start_timestamp, end_timestamp):
    """
    Identify the lowest granularity rollup available within the given time
    range.
    """
    num_seconds = int(to_timestamp(end_timestamp)) - int(to_timestamp(start_timestamp))

    # Return the finest-grained rollup whose retention (rollup * samples)
    # covers the entire requested range.
    for rollup, samples in self.rollups:
        if rollup * samples >= num_seconds:
            return rollup

    # Nothing covers the range, so fall back to the coarsest rollup.
    return self.rollups[-1][0]
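A short walk-through makes the selection rule concrete. Assuming self.rollups is ordered finest-first as (rollup_seconds, samples_retained) pairs -- the numbers below are illustrative, not Sentry's defaults:

rollups = [(10, 360), (3600, 24), (86400, 90)]
num_seconds = 7200  # a two-hour query window
# 10 * 360  = 3600   <  7200 -> 10s buckets don't retain enough history; skip
# 3600 * 24 = 86400  >= 7200 -> hourly buckets cover the window; returned
# If no rollup covered the window, the coarsest one (86400) would be the fallback.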
Example 7: test_integration
def test_integration(self, has_feature):
    Project.objects.all().delete()

    now = datetime(2016, 9, 12, tzinfo=pytz.utc)

    has_feature.side_effect = lambda name, *a, **k: {
        'organizations:reports:deliver': True,
        'organizations:reports:prepare': True,
    }.get(name, False)

    project = self.create_project(
        organization=self.organization,
        team=self.team,
    )

    tsdb.incr(
        tsdb.models.project,
        project.id,
        now - timedelta(days=1),
    )

    member_set = set(project.team.member_set.all())

    with self.tasks():
        prepare_reports(timestamp=to_timestamp(now))
        assert len(mail.outbox) == len(member_set) == 1

        message = mail.outbox[0]
        assert self.organization.name in message.subject
Example 8: record_frequency_multi
def record_frequency_multi(self, requests, timestamp=None):
    if timestamp is None:
        timestamp = timezone.now()

    ts = int(to_timestamp(timestamp))  # ``timestamp`` is not actually a timestamp :(

    commands = {}

    for model, request in requests:
        for key, items in request.iteritems():
            keys = []
            expirations = {}

            # Figure out all of the keys we need to be incrementing, as
            # well as their expiration policies.
            for rollup, max_values in self.rollups:
                chunk = self.make_frequency_table_keys(model, rollup, ts, key)
                keys.extend(chunk)

                expiry = self.calculate_expiry(rollup, max_values, timestamp)
                for k in chunk:
                    expirations[k] = expiry

            arguments = ['INCR'] + list(self.DEFAULT_SKETCH_PARAMETERS)
            for member, score in items.items():
                arguments.extend((score, member))

            # Since we're essentially merging dictionaries, we need to
            # append this to any value that already exists at the key.
            cmds = commands.setdefault(key, [])
            cmds.append((CountMinScript, keys, arguments))

            for k, t in expirations.items():
                cmds.append(('EXPIREAT', k, t))

    self.cluster.execute_commands(commands)
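The expected shape of requests can be read off the loops above: a list of (model, {key: {member: score}}) entries. A hypothetical call, with the model and identifiers made up for illustration:

backend.record_frequency_multi(
    [
        (tsdb.models.frequent_issues_by_project, {
            project.id: {
                group_a.id: 1.0,  # member -> score to add to the sketch
                group_b.id: 3.0,
            },
        }),
    ],
    timestamp=timezone.now(),
)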
Example 9: test_range_rollups
def test_range_rollups(self):
    # Daily
    daystart = self.now.replace(hour=0)  # day buckets start on day boundaries
    dts = [daystart + timedelta(days=i) for i in range(2)]
    assert self.db.get_range(
        TSDBModel.project,
        [self.proj1.id],
        dts[0], dts[-1],
        rollup=86400
    ) == {
        self.proj1.id: [
            (timestamp(dts[0]), 24),
            (timestamp(dts[1]), 0),
        ]
    }

    # Minutely
    dts = [self.now + timedelta(minutes=i) for i in range(120)]
    # Expect every 10th minute to have a 1, else 0
    expected = [(to_timestamp(d), int(i % 10 == 0)) for i, d in enumerate(dts)]
    assert self.db.get_range(
        TSDBModel.project,
        [self.proj1.id],
        dts[0], dts[-1],
        rollup=60
    ) == {
        self.proj1.id: expected
    }
Example 10: make_group_generator
def make_group_generator(random, project):
    epoch = to_timestamp(datetime(2016, 6, 1, 0, 0, 0, tzinfo=timezone.utc))
    for id in itertools.count(1):
        first_seen = epoch + random.randint(0, 60 * 60 * 24 * 30)
        last_seen = random.randint(first_seen, first_seen + (60 * 60 * 24 * 30))

        culprit = make_culprit(random)
        level = random.choice(LOG_LEVELS.keys())
        message = make_message(random)

        group = Group(
            id=id,
            project=project,
            culprit=culprit,
            level=level,
            message=message,
            first_seen=to_datetime(first_seen),
            last_seen=to_datetime(last_seen),
            status=random.choice((GroupStatus.UNRESOLVED, GroupStatus.RESOLVED, )),
            data={
                'type': 'default',
                'metadata': {
                    'title': message,
                },
            },
        )

        if random.random() < 0.8:
            group.data = make_group_metadata(random, group)

        yield group
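Because the generator takes the random instance as an argument, seeding it yields a reproducible stream of fake groups. A hypothetical usage:

import random

gen = make_group_generator(random.Random(42), project)  # same seed, same groups
groups = [next(gen) for _ in range(10)]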
Example 11: serialize
def serialize(self):
    return {
        'uuid': b64encode(self.uuid.bytes),
        'timestamp': to_timestamp(self.datetime),
        'type': self.type,
        'data': self.data,
    }
Example 12: zerofill
def zerofill(data, start, end, rollup):
    rv = []
    # Snap both bounds to rollup boundaries, padding by one bucket on each
    # side. (Floor division keeps the math integral on both Python 2 and 3.)
    start = ((int(to_timestamp(start)) // rollup) * rollup) - rollup
    end = ((int(to_timestamp(end)) // rollup) * rollup) + rollup
    i = 0
    for key in six.moves.xrange(start, end, rollup):
        try:
            if data[i][0] == key:
                rv.append(data[i])
                i += 1
                continue
        except IndexError:
            pass
        rv.append((key, []))
    return rv
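Traced on tiny illustrative inputs, zerofill pads one rollup-aligned bucket before the start, keeps the end's own bucket despite the exclusive range, and fills interior gaps with empty (key, []) pairs:

data = [(1470009600, ['a']), (1470009620, ['b'])]
zerofill(data, to_datetime(1470009600), to_datetime(1470009620), rollup=10)
# -> [(1470009590, []),    # padding bucket before the start
#     (1470009600, ['a']),
#     (1470009610, []),    # interior gap filled with an empty bucket
#     (1470009620, ['b'])]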
Example 13: get_range
def get_range(self, model, keys, start, end, rollup=None, environment_id=None):
    """
    To get a range of data for group ID=[1, 2, 3]:

    >>> now = timezone.now()
    >>> get_range(TimeSeriesModel.group, [1, 2, 3],
    ...           start=now - timedelta(days=1),
    ...           end=now)
    """
    self.validate_arguments([model], [environment_id])

    rollup, series = self.get_optimal_rollup_series(start, end, rollup)
    series = map(to_datetime, series)

    results = []
    cluster, _ = self.get_cluster(environment_id)
    with cluster.map() as client:
        for key in keys:
            for timestamp in series:
                hash_key, hash_field = self.make_counter_key(
                    model, rollup, timestamp, key, environment_id)
                results.append(
                    (to_timestamp(timestamp), key, client.hget(hash_key, hash_field)))

    results_by_key = defaultdict(dict)
    for epoch, key, count in results:
        results_by_key[key][epoch] = int(count.value or 0)

    for key, points in six.iteritems(results_by_key):
        results_by_key[key] = sorted(points.items())

    return dict(results_by_key)
Example 14: record_multi
def record_multi(self, items, timestamp=None, environment_id=None):
    """
    Record an occurrence of an item in a distinct counter.
    """
    self.validate_arguments([model for model, key, values in items], [environment_id])

    if timestamp is None:
        timestamp = timezone.now()

    ts = int(to_timestamp(timestamp))  # ``timestamp`` is not actually a timestamp :(

    for cluster, environment_ids in self.get_cluster_groups(set([None, environment_id])):
        with cluster.fanout() as client:
            for model, key, values in items:
                c = client.target_key(key)
                for rollup, max_values in six.iteritems(self.rollups):
                    for environment_id in environment_ids:
                        k = self.make_key(
                            model,
                            rollup,
                            ts,
                            key,
                            environment_id,
                        )
                        c.pfadd(k, *values)
                        c.expireat(
                            k,
                            self.calculate_expiry(
                                rollup,
                                max_values,
                                timestamp,
                            ),
                        )
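The items argument is a list of (model, key, values) triples, where values are the distinct members to add to the HyperLogLog counter. A hypothetical call, with the model and member names made up for illustration:

backend.record_multi(
    [
        (tsdb.models.users_affected_by_group, group.id, ['user:1', 'user:2']),
    ],
    timestamp=timezone.now(),
)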
Example 15: test_environment_request
def test_environment_request(self):
    now = parse_datetime('2018-03-09T01:00:00Z')
    project = self.create_project()
    env = self.create_environment(project=project, name="prod")
    dts = [now + timedelta(hours=i) for i in range(4)]

    with responses.RequestsMock() as rsps:
        def snuba_response(request):
            body = json.loads(request.body)
            assert body['aggregations'] == [['count()', None, 'aggregate']]
            assert body['project'] == [project.id]
            assert body['groupby'] == ['project_id', 'time']
            assert ['environment', 'IN', ['prod']] in body['conditions']
            return (200, {}, json.dumps({
                'data': [{'project_id': project.id, 'time': '2018-03-09T01:00:00Z', 'aggregate': 100}],
                'meta': [{'name': 'project_id'}, {'name': 'time'}, {'name': 'aggregate'}],
            }))

        rsps.add_callback(
            responses.POST,
            settings.SENTRY_SNUBA + '/query',
            callback=snuba_response)

        results = self.db.get_range(TSDBModel.project, [project.id],
                                    dts[0], dts[-1], environment_id=env.id, rollup=3600)
        assert results == {
            project.id: [
                (int(to_timestamp(d)), 100 if d == now else 0)
                for d in dts
            ]
        }