当前位置: 首页>>代码示例>>Python>>正文


Python dates.to_datetime函数代码示例

本文整理汇总了Python中sentry.utils.dates.to_datetime函数的典型用法代码示例。如果您正苦于以下问题:Python to_datetime函数的具体用法?Python to_datetime怎么用?Python to_datetime使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了to_datetime函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _parse_args

    def _parse_args(self, request, environment_id=None):
        """Build the start/end/rollup/environment kwargs from request query params.

        `until`/`since` are unix timestamps; `until` defaults to now (UTC) and
        `since` defaults to one day (minus a second) before the end.
        """
        resolution = request.GET.get('resolution')
        if resolution:
            resolution = self._parse_resolution(resolution)
            # Only rollups known to tsdb are accepted.
            assert resolution in tsdb.get_rollups()

        until = request.GET.get('until')
        end = to_datetime(float(until)) if until else datetime.utcnow().replace(tzinfo=utc)

        since = request.GET.get('since')
        if since:
            start = to_datetime(float(since))
            assert start <= end, 'start must be before or equal to end'
        else:
            start = end - timedelta(days=1, seconds=-1)

        return {
            'start': start,
            'end': end,
            'rollup': resolution,
            'environment_ids': environment_id and [environment_id],
        }
开发者ID:Kayle009,项目名称:sentry,代码行数:25,代码来源:base.py

示例2: test_clean_series_trims_extra

def test_clean_series_trims_extra():
    """Buckets past the end of the requested window are dropped."""
    interval = 60
    count = 5
    begin = to_datetime(0)
    finish = to_datetime(interval * count)
    # One extra trailing bucket beyond the window.
    points = [(interval * idx, idx) for idx in xrange(count + 1)]
    assert clean_series(begin, finish, interval, points) == points[:count]
开发者ID:alexm92,项目名称:sentry,代码行数:7,代码来源:test_reports.py

示例3: _parse_args

    def _parse_args(self, request):
        """Build the start/end/rollup kwargs from request query params.

        `until`/`since` are unix timestamps; `until` defaults to now (UTC) and
        `since` defaults to one day (minus a second) before the end.
        """
        resolution = request.GET.get('resolution')
        if resolution:
            resolution = self._parse_resolution(resolution)
            # Only resolutions configured in tsdb.rollups are accepted.
            assert any(r for r in tsdb.rollups if r[0] == resolution)

        until = request.GET.get('until')
        if until:
            end = to_datetime(float(until))
        else:
            end = datetime.utcnow().replace(tzinfo=utc)

        since = request.GET.get('since')
        if since:
            start = to_datetime(float(since))
            assert start <= end, 'start must be before or equal to end'
        else:
            start = end - timedelta(days=1, seconds=-1)

        return {'start': start, 'end': end, 'rollup': resolution}
开发者ID:ForkRepo,项目名称:sentry,代码行数:25,代码来源:base.py

示例4: make_group_generator

def make_group_generator(random, project):
    """Yield an unbounded stream of synthetic Group rows for *project*.

    Timestamps are spread over a ~30-day window starting 2016-06-01 UTC.
    """
    month_seconds = 60 * 60 * 24 * 30
    epoch = to_timestamp(datetime(2016, 6, 1, 0, 0, 0, tzinfo=timezone.utc))
    for group_id in itertools.count(1):
        first_seen = epoch + random.randint(0, month_seconds)
        last_seen = random.randint(first_seen, first_seen + month_seconds)

        culprit = make_culprit(random)
        level = random.choice(LOG_LEVELS.keys())
        message = make_message(random)

        group = Group(
            id=group_id,
            project=project,
            culprit=culprit,
            level=level,
            message=message,
            first_seen=to_datetime(first_seen),
            last_seen=to_datetime(last_seen),
            status=random.choice((GroupStatus.UNRESOLVED, GroupStatus.RESOLVED, )),
            data={
                'type': 'default',
                'metadata': {
                    'title': message,
                },
            }
        )

        # Most groups get richer, randomly generated metadata.
        if random.random() < 0.8:
            group.data = make_group_metadata(random, group)

        yield group
开发者ID:Kayle009,项目名称:sentry,代码行数:31,代码来源:mail.py

示例5: test_clean_series_rejects_offset_timestamp

def test_clean_series_rejects_offset_timestamp():
    """Timestamps not aligned to the rollup boundary raise AssertionError."""
    interval = 60
    count = 5
    begin = to_datetime(0)
    finish = to_datetime(interval * count)
    # 1.1 factor pushes each timestamp off its rollup boundary.
    misaligned = [(interval * (idx * 1.1), idx) for idx in xrange(count)]
    with pytest.raises(AssertionError):
        clean_series(begin, finish, interval, misaligned)
开发者ID:alexm92,项目名称:sentry,代码行数:8,代码来源:test_reports.py

示例6: get_data

    def get_data(self, model, keys, start, end, rollup=None, environment_ids=None,
                 aggregation='count()', group_on_model=True, group_on_time=False):
        """
        Normalizes all the TSDB parameters and sends a query to snuba.

        `model`: TSDBModel whose column mapping drives grouping/aggregation.
        `keys`: primary-model keys to query; if empty, snuba is skipped and an
            empty (then zerofilled) result is returned.
        `start`/`end`: requested window; snapped to rollup bucket boundaries.
        `rollup`: bucket width in seconds, or None to pick the optimal one.
        `environment_ids`: optional list added as an 'environment' filter.
        `aggregation`: snuba aggregation function name (default 'count()').
        `group_on_time`: whether to add a GROUP BY clause on the 'time' field.
        `group_on_model`: whether to add a GROUP BY clause on the primary model.
        """
        model_columns = self.model_columns.get(model)

        if model_columns is None:
            raise Exception(u"Unsupported TSDBModel: {}".format(model.name))

        model_group, model_aggregate = model_columns

        groupby = []
        if group_on_model and model_group is not None:
            groupby.append(model_group)
        if group_on_time:
            groupby.append('time')
        if aggregation == 'count()' and model_aggregate is not None:
            # Special case, because count has different semantics, we change:
            # `COUNT(model_aggregate)` to `COUNT() GROUP BY model_aggregate`
            groupby.append(model_aggregate)
            model_aggregate = None

        # Map the model's (group, aggregate) columns onto the provided keys,
        # dropping any pairing where either side is None.
        keys_map = dict(zip(model_columns, self.flatten_keys(keys)))
        keys_map = {k: v for k, v in six.iteritems(keys_map) if k is not None and v is not None}
        if environment_ids is not None:
            keys_map['environment'] = environment_ids

        aggregations = [[aggregation, model_aggregate, 'aggregate']]

        # For historical compatibility with bucket-counted TSDB implementations
        # we grab the original bucketed series and add the rollup time to the
        # timestamp of the last bucket to get the end time.
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        start = to_datetime(series[0])
        end = to_datetime(series[-1] + rollup)

        if keys:
            result = snuba.query(start, end, groupby, None, keys_map,
                                 aggregations, rollup, referrer='tsdb',
                                 is_grouprelease=(model == TSDBModel.frequent_releases_by_group))
        else:
            result = {}

        if group_on_time:
            keys_map['time'] = series

        # Fill missing buckets with zeros, then drop any keys that were not
        # actually requested.
        self.zerofill(result, groupby, keys_map)
        self.trim(result, groupby, keys)

        return result
开发者ID:Kayle009,项目名称:sentry,代码行数:54,代码来源:snuba.py

示例7: test_clean_series

def test_clean_series():
    """A fully aligned series inside the window is returned unchanged."""
    interval = 60
    count = 5
    begin = to_datetime(0)
    finish = to_datetime(interval * count)
    points = [(interval * idx, idx) for idx in xrange(count)]
    assert clean_series(begin, finish, interval, points) == points
开发者ID:alshopov,项目名称:sentry,代码行数:12,代码来源:test_reports.py

示例8: test_make_counter_key

    def test_make_counter_key(self):
        """Counter keys encode model, rollup bucket, key, and environment."""
        bucket = to_datetime(1368889980)
        make_key = self.db.make_counter_key

        # Integer key, no environment.
        assert make_key(TSDBModel.project, 1, bucket, 1, None) == ('ts:1:1368889980:1', 1)

        # String key is translated through get_model_key.
        assert make_key(TSDBModel.project, 1, bucket, 'foo', None) == (
            'ts:1:1368889980:46', self.db.get_model_key('foo'))

        # Environment id is appended as a '?e=' suffix.
        assert make_key(TSDBModel.project, 1, bucket, 1, 1) == ('ts:1:1368889980:1', '1?e=1')

        assert make_key(TSDBModel.project, 1, bucket, 'foo', 1) == (
            'ts:1:1368889980:46', self.db.get_model_key('foo') + '?e=1')
开发者ID:Kayle009,项目名称:sentry,代码行数:13,代码来源:test_redis.py

示例9: remove_invalid_values

 def remove_invalid_values(item):
     """Null out a (timestamp, value) pair that predates the cutoff or the project."""
     timestamp, value = item
     # Short-circuit keeps the to_datetime conversion off the common path,
     # matching the original if/elif ordering.
     if timestamp < earliest or to_datetime(timestamp) < project.date_added:
         value = None
     return (timestamp, value)
开发者ID:NuttasitBoonwat,项目名称:sentry,代码行数:7,代码来源:reports.py

示例10: get_recent_mentions

def get_recent_mentions(tenant):
    """Return up to MAX_RECENT recent mention records for *tenant*, with the
    referenced project/group/event rows resolved in bulk."""
    client = cluster.get_routing_client()
    key = get_key(tenant)
    # NOTE(review): the score cutoff multiplies RECENT_HOURS by 60 (minutes),
    # not 3600 (seconds) — confirm whether this window is intentional.
    ids = [x for x in client.zrangebyscore(
        key, time.time() - (RECENT_HOURS * 60), '+inf')][-MAX_RECENT:]

    # Fetch all serialized mention payloads in one pipelined round trip.
    with cluster.map() as map_client:
        items = [map_client.get('%s:%s' % (key, id)) for id in ids]
    items = [json.loads(x.value) for x in items if x.value is not None]

    # Bulk-load the referenced rows keyed by primary key; each falls back to
    # an empty dict when there are no items.
    projects = items and dict((x.id, x) for x in Project.objects.filter(
        pk__in=[x['project'] for x in items],
    )) or {}
    groups = items and dict((x.id, x) for x in Group.objects.filter(
        pk__in=[x['group'] for x in items],
    )) or {}
    events = items and dict((x.id, x) for x in Event.objects.filter(
        pk__in=[x['event'] for x in items if x['event'] is not None],
    )) or {}

    # Swap stored ids for the loaded objects; when the original event row is
    # gone, fall back to the group's latest event.
    for item in items:
        item['project'] = projects.get(item['project'])
        item['group'] = groups.get(item['group'])
        item['event'] = events.get(item['event'])
        if item['event'] is None and item['group'] is not None:
            item['event'] = item['group'].get_latest_event()
        item['last_mentioned'] = to_datetime(item['last_mentioned'])

    return items
开发者ID:leifcr,项目名称:sentry-hipchat-ac,代码行数:29,代码来源:mentions.py

示例11: test_hash_discarded_raised

    def test_hash_discarded_raised(self, mock_refund, mock_incr):
        """When EventManager.save raises HashDiscarded, the discard/blacklist
        tsdb counters are incremented with the original start time."""
        project = self.create_project()

        payload = {
            'project': project.id,
            'platform': 'NOTMATTLANG',
            'logentry': {
                'formatted': 'test',
            },
            'event_id': uuid.uuid4().hex,
            'extra': {
                'foo': 'bar'
            },
        }

        start = time()
        discarding_save = mock.Mock(side_effect=HashDiscarded)
        with mock.patch.object(EventManager, 'save', discarding_save):
            save_event(data=payload, start_time=start)
            mock_incr.assert_called_with(
                [
                    (tsdb.models.project_total_received_discarded, project.id),
                    (tsdb.models.project_total_blacklisted, project.id),
                    (tsdb.models.organization_total_blacklisted, project.organization_id),
                ],
                timestamp=to_datetime(start),
            )
开发者ID:Kayle009,项目名称:sentry,代码行数:27,代码来源:test_store.py

示例12: create_failed_event

def create_failed_event(cache_key, project_id, issues, event_id, start_time=None):
    """If processing failed we put the original data from the cache into a
    raw event.  Returns `True` if a failed event was inserted.

    `cache_key`: key of the original payload in the default cache.
    `issues`: list of processing-issue dicts (scope/object/type/data).
    `start_time`: timestamp used for the one-time notification activity.
    """
    reprocessing_active = ProjectOption.objects.get_value(
        project_id, 'sentry:reprocessing_active', REPROCESSING_DEFAULT
    )

    # The first time we encounter a failed event and the hint was cleared
    # we send a notification.
    sent_notification = ProjectOption.objects.get_value(
        project_id, 'sentry:sent_failed_event_hint', False
    )
    if not sent_notification:
        project = Project.objects.get_from_cache(id=project_id)
        Activity.objects.create(
            type=Activity.NEW_PROCESSING_ISSUES,
            project=project,
            datetime=to_datetime(start_time),
            data={'reprocessing_active': reprocessing_active,
                  'issues': issues},
        ).send_notification()
        # Persist the flag so the notification fires only once per project.
        ProjectOption.objects.set_value(project, 'sentry:sent_failed_event_hint', True)

    # If reprocessing is not active we bail now without creating the
    # processing issues
    if not reprocessing_active:
        return False

    # We need to get the original data here instead of passing the data in
    # from the last processing step because we do not want any
    # modifications to take place.
    delete_raw_event(project_id, event_id)
    data = default_cache.get(cache_key)
    if data is None:
        # Cached payload expired; record the miss and report the event as
        # handled so it is not retried.
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'raw'})
        error_logger.error('process.failed_raw.empty', extra={'cache_key': cache_key})
        return True

    from sentry.models import RawEvent, ProcessingIssue
    raw_event = RawEvent.objects.create(
        project_id=project_id,
        event_id=event_id,
        datetime=datetime.utcfromtimestamp(data['timestamp']).replace(tzinfo=timezone.utc),
        data=data
    )

    # Record one processing issue per reported problem against the raw event.
    for issue in issues:
        ProcessingIssue.objects.record_processing_issue(
            raw_event=raw_event,
            scope=issue['scope'],
            object=issue['object'],
            type=issue['type'],
            data=issue['data'],
        )

    default_cache.delete(cache_key)

    return True
开发者ID:alshopov,项目名称:sentry,代码行数:59,代码来源:store.py

示例13: get

    def get(self, request, group, environment):
        """Return release and stats details for *group* within the named
        environment.

        `environment` is the environment name from the URL; the literal string
        'none' maps to the empty-string environment.
        """
        try:
            environment = Environment.objects.get(
                project_id=group.project_id,
                # XXX(dcramer): we have no great way to pass the empty env
                name='' if environment == 'none' else environment,
            )
        except Environment.DoesNotExist:
            raise ResourceDoesNotExist

        # Earliest and latest releases that have affected this group within
        # the environment, ordered by when the group was first seen in them.
        first_release = GroupRelease.objects.filter(
            group_id=group.id,
            environment=environment.name,
        ).order_by('first_seen').first()

        last_release = GroupRelease.objects.filter(
            group_id=group.id,
            environment=environment.name,
        ).order_by('-first_seen').first()

        # the current release is the 'latest seen' release within the
        # environment even if it hasn't affected this issue
        current_release = GroupRelease.objects.filter(
            group_id=group.id,
            environment=environment.name,
            release_id=ReleaseEnvironment.objects.filter(
                project_id=group.project_id,
                environment_id=environment.id,
            ).order_by('-first_seen').values_list('release_id', flat=True).first(),
        ).first()

        last_seen = GroupRelease.objects.filter(
            group_id=group.id,
            environment=environment.name,
        ).order_by('-last_seen').values_list('last_seen', flat=True).first()

        # Optional upper bound for the stats serializers, supplied as a unix
        # timestamp in the 'until' query parameter.
        until = request.GET.get('until')
        if until:
            until = to_datetime(float(until))

        context = {
            'environment': serialize(
                environment, request.user, GroupEnvironmentWithStatsSerializer(
                    group=group,
                    until=until,
                )
            ),
            'firstRelease': serialize(first_release, request.user),
            'lastRelease': serialize(last_release, request.user),
            'currentRelease': serialize(
                current_release, request.user, GroupReleaseWithStatsSerializer(
                    until=until,
                )
            ),
            'lastSeen': last_seen,
            'firstSeen': first_release.first_seen if first_release else None,
        }
        return Response(context)
开发者ID:ForkRepo,项目名称:sentry,代码行数:58,代码来源:group_environment_details.py

示例14: _convert

 def _convert(x):
     """Normalize one raw breadcrumb mapping into the canonical field layout."""
     converted = dict(
         type=x['type'],
         timestamp=to_datetime(x['timestamp']),
         level=x.get('level', 'info'),
         message=x.get('message'),
         category=x.get('category'),
         # Coerce falsy payloads (e.g. {}) to None.
         data=x.get('data') or None,
         event_id=x.get('event_id'),
     )
     return converted
开发者ID:thierryxing,项目名称:sentry,代码行数:10,代码来源:breadcrumbs.py

示例15: merge_frequencies

    def merge_frequencies(self, model, destination, sources, timestamp=None):
        """Merge frequency (count-min sketch) data from *sources* into
        *destination* for every configured rollup.

        No-op unless frequency sketches are enabled.
        """
        if not self.enable_frequency_sketches:
            return

        # For each rollup, build the series of bucket datetimes reaching back
        # to the earliest retained timestamp.
        rollups = []
        for rollup, samples in self.rollups.items():
            _, series = self.get_optimal_rollup_series(
                to_datetime(self.get_earliest_timestamp(rollup, timestamp=timestamp)),
                end=None,
                rollup=rollup,
            )
            rollups.append((
                rollup,
                map(to_datetime, series),
            ))

        exports = defaultdict(list)

        # For every source key, queue an EXPORT of each sketch followed by a
        # DEL of the exported keys.
        for source in sources:
            for rollup, series in rollups:
                for timestamp in series:
                    keys = self.make_frequency_table_keys(
                        model,
                        rollup,
                        to_timestamp(timestamp),
                        source,
                    )
                    arguments = ['EXPORT'] + list(self.DEFAULT_SKETCH_PARAMETERS)
                    exports[source].extend([
                        (CountMinScript, keys, arguments),
                        ('DEL',) + tuple(keys),
                    ])

        imports = []

        # Results come back interleaved as (EXPORT value, DEL value) pairs per
        # bucket, in the same rollup/series order the exports were queued, so
        # the nested loops below must mirror the loops above exactly.
        for source, results in self.cluster.execute_commands(exports).items():
            results = iter(results)
            for rollup, series in rollups:
                for timestamp in series:
                    imports.append((
                        CountMinScript,
                        self.make_frequency_table_keys(
                            model,
                            rollup,
                            to_timestamp(timestamp),
                            destination,
                        ),
                        ['IMPORT'] + list(self.DEFAULT_SKETCH_PARAMETERS) + next(results).value,
                    ))
                    next(results)  # pop off the result of DEL

        self.cluster.execute_commands({
            destination: imports,
        })
开发者ID:duanshuaimin,项目名称:sentry,代码行数:54,代码来源:redis.py


注:本文中的sentry.utils.dates.to_datetime函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。