

Python buffer.incr Function Code Examples

This article collects typical usage examples of the Python function sentry.app.buffer.incr. If you are wondering what incr does, how to call it, or what real code that uses it looks like, the curated examples below should help.


A total of 15 code examples of the incr function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code examples.
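Before looking at the individual examples, the sketch below summarizes the calling convention they all share: buffer.incr takes a model class, a dict of counter columns to increment, a dict of filter fields that identify the target row, and an optional dict of extra fields applied when the buffered increments are flushed. The helper function name and the argument descriptions are illustrative assumptions inferred from these call sites, not Sentry's documented API.

# Minimal sketch of the buffer.incr calling convention, inferred from the
# examples below; the helper name and argument descriptions are assumptions.
from sentry.app import buffer
from sentry.models import Group

def bump_times_seen(group, last_seen):
    buffer.incr(
        Group,                     # model class whose row is updated
        {'times_seen': 1},         # counter columns and the amount to add
        {'id': group.id},          # filter fields identifying the target row
        {'last_seen': last_seen},  # extra (non-counter) fields set on flush
    )

Across the examples, the extra dict is used for fields such as last_seen and data that should be updated alongside the counter rather than accumulated.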

Example 1: add_tags

    def add_tags(self, group, tags):
        from sentry.models import TagValue, GroupTagValue

        project_id = group.project_id
        date = group.last_seen

        for tag_item in tags:
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            buffer.incr(TagValue, {
                'times_seen': 1,
            }, {
                'project_id': project_id,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
                'data': data,
            })

            buffer.incr(GroupTagValue, {
                'times_seen': 1,
            }, {
                'group_id': group.id,
                'project_id': project_id,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
            })
Author: JJediny, Project: sentry, Lines: 33, Source: group.py

Example 2: add_tags

    def add_tags(self, group, tags):
        from sentry.models import TagValue, GroupTagValue

        project_id = group.project_id
        date = group.last_seen

        for tag_item in tags:
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            buffer.incr(
                TagValue,
                {"times_seen": 1},
                {"project_id": project_id, "key": key, "value": value},
                {"last_seen": date, "data": data},
            )

            buffer.incr(
                GroupTagValue,
                {"times_seen": 1},
                {"group_id": group.id, "project_id": project_id, "key": key, "value": value},
                {"last_seen": date},
            )
Author: Qwiz, Project: sentry, Lines: 25, Source: group.py

Example 3: _process_existing_aggregate

    def _process_existing_aggregate(self, group, event, data, release):
        date = max(event.datetime, group.last_seen)
        extra = {
            'last_seen': date,
            'score': ScoreClause(group),
            'data': data['data'],
        }
        if event.message and event.message != group.message:
            extra['message'] = event.message
        if group.level != data['level']:
            extra['level'] = data['level']
        if group.culprit != data['culprit']:
            extra['culprit'] = data['culprit']

        is_regression = self._handle_regression(group, event, release)

        group.last_seen = extra['last_seen']

        update_kwargs = {
            'times_seen': 1,
        }

        buffer.incr(Group, update_kwargs, {
            'id': group.id,
        }, extra)

        return is_regression
Author: mvaled, Project: sentry, Lines: 27, Source: event_manager.py

Example 4: add_tags

    def add_tags(self, group, tags):
        from sentry.models import TagValue, GroupTagValue

        project = group.project
        date = group.last_seen

        tsdb_keys = []

        for tag_item in tags:
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            if not value:
                continue

            value = six.text_type(value)
            if len(value) > MAX_TAG_VALUE_LENGTH:
                continue

            tsdb_id = u'%s=%s' % (key, value)

            tsdb_keys.extend([
                (tsdb.models.project_tag_value, tsdb_id),
            ])

            buffer.incr(TagValue, {
                'times_seen': 1,
            }, {
                'project': project,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
                'data': data,
            })

            buffer.incr(GroupTagValue, {
                'times_seen': 1,
            }, {
                'group': group,
                'project': project,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
            })

        if tsdb_keys:
            tsdb.incr_multi(tsdb_keys)
Author: abensrhir, Project: sentry, Lines: 51, Source: manager.py

Example 5: _process_existing_aggregate

    def _process_existing_aggregate(self, group, event, data):
        date = max(event.datetime, group.last_seen)
        extra = {
            'last_seen': date,
            'score': ScoreClause(group),
        }
        if event.message and event.message != group.message:
            extra['message'] = event.message
        if group.level != data['level']:
            extra['level'] = data['level']
        if group.culprit != data['culprit']:
            extra['culprit'] = data['culprit']

        is_regression = False
        if group.is_resolved() and plugin_is_regression(group, event):
            is_regression = bool(Group.objects.filter(
                id=group.id,
                # ensure we cant update things if the status has been set to
                # muted
                status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED],
            ).exclude(
                # add to the regression window to account for races here
                active_at__gte=date - timedelta(seconds=5),
            ).update(
                active_at=date,
                # explicitly set last_seen here as ``is_resolved()`` looks
                # at the value
                last_seen=date,
                status=GroupStatus.UNRESOLVED
            ))
            group.active_at = date
            group.status = GroupStatus.UNRESOLVED

        group.last_seen = extra['last_seen']

        update_kwargs = {
            'times_seen': 1,
        }
        if event.time_spent:
            update_kwargs.update({
                'time_spent_total': event.time_spent,
                'time_spent_count': 1,
            })

        buffer.incr(Group, update_kwargs, {
            'id': group.id,
        }, extra)

        return is_regression
Author: haojiang1, Project: sentry, Lines: 49, Source: event_manager.py

Example 6: _process_existing_aggregate

    def _process_existing_aggregate(self, group, event, data):
        date = max(event.datetime, group.last_seen)

        extra = {
            'last_seen': date,
            'score': ScoreClause(group),
        }
        if event.message and event.message != group.message:
            extra['message'] = event.message
        if group.level != data['level']:
            extra['level'] = data['level']
        if group.culprit != data['culprit']:
            extra['culprit'] = data['culprit']

        if group.status == STATUS_RESOLVED or group.is_over_resolve_age():
            # Making things atomic
            is_regression = bool(Group.objects.filter(
                id=group.id,
                status=STATUS_RESOLVED,
            ).exclude(
                active_at__gte=date,
            ).update(active_at=date, status=STATUS_UNRESOLVED))

            transaction.commit_unless_managed(using=group._state.db)

            group.active_at = date
            group.status = STATUS_UNRESOLVED
        else:
            is_regression = False

        group.last_seen = extra['last_seen']

        update_kwargs = {
            'times_seen': 1,
        }
        if event.time_spent:
            update_kwargs.update({
                'time_spent_total': event.time_spent,
                'time_spent_count': 1,
            })

        buffer.incr(Group, update_kwargs, {
            'id': group.id,
        }, extra)

        return is_regression
Author: fourks, Project: sentry, Lines: 46, Source: event_manager.py

Example 7: _process_existing_aggregate

    def _process_existing_aggregate(self, group, event, data):
        date = max(event.datetime, group.last_seen)
        extra = {
            'last_seen': date,
            'score': ScoreClause(group),
        }
        if event.message and event.message != group.message:
            extra['message'] = event.message
        if group.level != data['level']:
            extra['level'] = data['level']
        if group.culprit != data['culprit']:
            extra['culprit'] = data['culprit']

        is_regression = False
        if group.is_resolved() and plugin_is_regression(group, event):
            is_regression = bool(Group.objects.filter(
                id=group.id,
            ).exclude(
                # add to the regression window to account for
                # races here
                active_at__gte=date - timedelta(seconds=5),
            ).update(active_at=date, status=STATUS_UNRESOLVED))

            transaction.commit_unless_managed(using=group._state.db)

            group.active_at = date
            group.status = STATUS_UNRESOLVED

        group.last_seen = extra['last_seen']

        update_kwargs = {
            'times_seen': 1,
        }
        if event.time_spent:
            update_kwargs.update({
                'time_spent_total': event.time_spent,
                'time_spent_count': 1,
            })

        buffer.incr(Group, update_kwargs, {
            'id': group.id,
        }, extra)

        return is_regression
Author: PostPCEra, Project: sentry, Lines: 44, Source: event_manager.py

Example 8: add_tags

    def add_tags(self, group, tags):
        from sentry.models import TagValue, GroupTagValue

        project_id = group.project_id
        date = group.last_seen

        tsdb_keys = []

        for tag_item in tags:
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            tsdb_id = u'%s=%s' % (key, value)

            tsdb_keys.extend([
                (tsdb.models.project_tag_value, tsdb_id),
            ])

            buffer.incr(TagValue, {
                'times_seen': 1,
            }, {
                'project_id': project_id,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
                'data': data,
            })

            buffer.incr(GroupTagValue, {
                'times_seen': 1,
            }, {
                'group_id': group.id,
                'project_id': project_id,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
            })

        if tsdb_keys:
            tsdb.incr_multi(tsdb_keys)
Author: Juraldinio, Project: sentry, Lines: 44, Source: group.py

Example 9: _process_existing_aggregate

    def _process_existing_aggregate(self, group, event, data, release):
        date = max(event.datetime, group.last_seen)
        extra = {"last_seen": date, "score": ScoreClause(group), "data": data["data"]}
        if event.message and event.message != group.message:
            extra["message"] = event.message
        if group.level != data["level"]:
            extra["level"] = data["level"]
        if group.culprit != data["culprit"]:
            extra["culprit"] = data["culprit"]

        is_regression = self._handle_regression(group, event, release)

        group.last_seen = extra["last_seen"]

        update_kwargs = {"times_seen": 1}

        buffer.incr(Group, update_kwargs, {"id": group.id}, extra)

        return is_regression
Author: yoachum, Project: sentry, Lines: 19, Source: event_manager.py

Example 10: add_tags

    def add_tags(self, group, tags):
        from sentry.models import TagValue, GroupTagValue

        project = group.project
        date = group.last_seen

        tsdb_keys = []

        for tag_item in tags:
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            if not value:
                continue

            value = six.text_type(value)
            if len(value) > MAX_TAG_VALUE_LENGTH:
                continue

            tsdb_id = u"%s=%s" % (key, value)

            tsdb_keys.extend([(tsdb.models.project_tag_value, tsdb_id)])

            buffer.incr(
                TagValue,
                {"times_seen": 1},
                {"project": project, "key": key, "value": value},
                {"last_seen": date, "data": data},
            )

            buffer.incr(
                GroupTagValue,
                {"times_seen": 1},
                {"group": group, "project": project, "key": key, "value": value},
                {"last_seen": date},
            )

        if tsdb_keys:
            tsdb.incr_multi(tsdb_keys)
Author: achun2080, Project: sentry, Lines: 41, Source: manager.py

Example 11: save


# ......... part of the code is omitted here .........
                    project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info('duplicate.found', extra={'event_id': event.id}, exc_info=True)
            return event

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters, timestamp=event.datetime)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]

        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {
                    group.id: {
                        grouprelease.id: 1,
                    },
                })
            )

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(
            project=project, event_id=event_id,
Author: mvaled, Project: sentry, Lines: 67, Source: event_manager.py

Example 12: save


# ......... part of the code is omitted here .........
            hashes = [checksum]
        else:
            hashes = map(md5_from_hash, get_hashes_for_event(event))

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        if release:
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            group_kwargs['first_release'] = release

            Activity.objects.create(
                type=Activity.RELEASE,
                project=project,
                ident=release,
                data={'version': release},
                datetime=date,
            )

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event,
            hashes=hashes,
            release=release,
            **group_kwargs
        )

        event.group = group
        event.group_id = group.id
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic():
                EventMapping.objects.create(
                    project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
            return event

        UserReport.objects.filter(
            project=project, event_id=event_id,
        ).update(group=group)

        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic():
                    event.save()
            except IntegrityError:
                self.logger.info('Duplicate Event found for event_id=%s', event_id)
                return event

        if event_user:
            tsdb.record_multi((
                (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
            ), timestamp=event.datetime)

        if is_new and release:
            buffer.incr(Release, {'new_groups': 1}, {
                'id': release.id,
            })

        safe_execute(Group.objects.add_tags, group, tags,
                     _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)

            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

        index_event.delay(event)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
Author: delkyd, Project: sentry, Lines: 101, Source: event_manager.py

Example 13: save


# ......... part of the code is omitted here .........
                "level": level,
                "last_seen": date,
                "first_seen": date,
                "data": {
                    "last_received": event.data.get("received") or float(event.datetime.strftime("%s")),
                    "type": event_type.key,
                    # we cache the events metadata on the group to ensure its
                    # accessible in the stream
                    "metadata": event_metadata,
                },
            }
        )

        if release:
            release = Release.get_or_create(project=project, version=release, date_added=date)

            group_kwargs["first_release"] = release

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event, hashes=hashes, release=release, **group_kwargs
        )

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic(using=router.db_for_write(EventMapping)):
                EventMapping.objects.create(project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info("Duplicate EventMapping found for event_id=%s", event_id, exc_info=True)
            return event

        if release:
            grouprelease = GroupRelease.get_or_create(
                group=group, release=release, environment=environment, datetime=date
            )

        tsdb.incr_multi([(tsdb.models.group, group.id), (tsdb.models.project, project.id)], timestamp=event.datetime)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
        ]
        if release:
            frequencies.append((tsdb.models.frequent_releases_by_groups, {group.id: {grouprelease.id: 1}}))

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(project=project, event_id=event_id).update(group=group)

        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info("Duplicate Event found for event_id=%s", event_id, exc_info=True)
                return event

            index_event_tags.delay(project_id=project.id, group_id=group.id, event_id=event.id, tags=tags)

        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
                    (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
                ),
                timestamp=event.datetime,
            )

        if is_new and release:
            buffer.incr(Release, {"new_groups": 1}, {"id": release.id})

        safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send(project=project, group=group, sender=Project)

            post_process_group.delay(
                group=group, event=event, is_new=is_new, is_sample=is_sample, is_regression=is_regression
            )
        else:
            self.logger.info("Raw event passed; skipping post process for event_id=%s", event_id)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
Author: yoachum, Project: sentry, Lines: 101, Source: event_manager.py

Example 14: save


# ......... part of the code is omitted here .........
        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'data': {
                'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
                'type': event_type.key,
                # we cache the events metadata on the group to ensure its
                # accessible in the stream
                'metadata': event_metadata,
            },
        })

        if release:
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            group_kwargs['first_release'] = release

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event,
            hashes=hashes,
            release=release,
            **group_kwargs
        )

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic(using=router.db_for_write(EventMapping)):
                EventMapping.objects.create(
                    project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info('Duplicate EventMapping found for event_id=%s', event_id,
                             exc_info=True)
            return event

        UserReport.objects.filter(
            project=project, event_id=event_id,
        ).update(group=group)

        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info('Duplicate Event found for event_id=%s', event_id,
                                 exc_info=True)
                return event

            index_event_tags.delay(
                project_id=project.id,
                group_id=group.id,
                event_id=event.id,
                tags=tags,
            )

        if event_user:
            tsdb.record_multi((
                (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
            ), timestamp=event.datetime)

        if is_new and release:
            buffer.incr(Release, {'new_groups': 1}, {
                'id': release.id,
            })

        safe_execute(Group.objects.add_tags, group, tags,
                     _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send(project=project, group=group, sender=Project)

            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
Author: Akashguharoy, Project: sentry, Lines: 101, Source: event_manager.py

Example 15: save


# ......... part of the code is omitted here .........
            if added_tags:
                tags.extend(added_tags)
        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        # Calculate the checksum from the first highest scoring interface
        if checksum:
            hashes = [checksum]
        elif fingerprint:
            hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
        else:
            hashes = map(md5_from_hash, get_hashes_for_event(event))

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        if release:
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            group_kwargs['first_release'] = release

            Activity.objects.create(
                type=Activity.RELEASE,
                project=project,
                ident=release,
                data={'version': release},
                datetime=date,
            )

        group, is_new, is_regression, is_sample = safe_execute(
            self._save_aggregate,
            event=event,
            hashes=hashes,
            **group_kwargs
        )

        using = group._state.db

        event.group = group

        try:
            with transaction.atomic():
                EventMapping.objects.create(
                    project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
            return event

        UserReport.objects.filter(
            project=project, event_id=event_id,
        ).update(group=group)

        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic():
                    event.save()
            except IntegrityError:
                self.logger.info('Duplicate Event found for event_id=%s', event_id)
                return event

        if is_new and release:
            buffer.incr(Release, {'new_groups': 1}, {
                'id': release.id,
            })

        safe_execute(Group.objects.add_tags, group, tags,
                     _with_transaction=False)

        if not raw:
            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

        index_event.delay(event)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
Author: GpadillaQ, Project: sentry, Lines: 101, Source: event_manager.py


Note: The sentry.app.buffer.incr examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not reproduce this article without permission.