本文整理汇总了 Python 中 sentry.signals.regression_signal.send_robust 函数的典型用法代码示例。如果您正苦于以下问题：Python send_robust 函数的具体用法？Python send_robust 怎么用？Python send_robust 使用的例子？那么恭喜您，这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了 send_robust 函数的 8 个代码示例，这些示例默认按受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞，您的评价将有助于系统推荐出更棒的 Python 代码示例。
示例1: save
#.........这里部分代码省略.........
frequencies = [
# (tsdb.models.frequent_projects_by_organization, {
# project.organization_id: {
# project.id: 1,
# },
# }),
# (tsdb.models.frequent_issues_by_project, {
# project.id: {
# group.id: 1,
# },
# })
(tsdb.models.frequent_environments_by_group, {
group.id: {
environment.id: 1,
},
})
]
if release:
frequencies.append(
(tsdb.models.frequent_releases_by_group, {
group.id: {
grouprelease.id: 1,
},
})
)
tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)
UserReport.objects.filter(
project=project,
event_id=event_id,
).update(group=group)
# save the event unless its been sampled
if not is_sample:
try:
with transaction.atomic(using=router.db_for_write(Event)):
event.save()
except IntegrityError:
self.logger.info(
'duplicate.found',
exc_info=True,
extra={
'event_uuid': event_id,
'project_id': project.id,
'group_id': group.id,
'model': Event.__name__,
}
)
return event
index_event_tags.delay(
organization_id=project.organization_id,
project_id=project.id,
group_id=group.id,
event_id=event.id,
tags=tags,
)
if event_user:
tsdb.record_multi(
(
(tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
(tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
),
timestamp=event.datetime
)
if is_new and release:
buffer.incr(
ReleaseProject, {'new_groups': 1}, {
'release_id': release.id,
'project_id': project.id,
}
)
safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)
if not raw:
if not project.first_event:
project.update(first_event=date)
first_event_received.send(project=project, group=group, sender=Project)
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
)
else:
self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event
示例2: save
#.........这里部分代码省略.........
}
event = Event(
project=project,
event_id=event_id,
data=data,
time_spent=time_spent,
datetime=date,
**kwargs
)
# Calculate the checksum from the first highest scoring interface
if checksum:
hashes = [checksum]
else:
hashes = get_hashes_for_event(event)
# TODO(dcramer): remove checksum usage
event.checksum = hashes[0]
group_kwargs = kwargs.copy()
group_kwargs.update({
'culprit': culprit,
'logger': logger_name,
'level': level,
'last_seen': date,
'first_seen': date,
'time_spent_total': time_spent or 0,
'time_spent_count': time_spent and 1 or 0,
})
tags = data['tags']
tags.append(('level', LOG_LEVELS[level]))
if logger_name:
tags.append(('logger', logger_name))
if server_name:
tags.append(('server_name', server_name))
if site:
tags.append(('site', site))
if release:
# TODO(dcramer): we should ensure we create Release objects
tags.append(('sentry:release', release))
for plugin in plugins.for_project(project):
added_tags = safe_execute(plugin.get_tags, event)
if added_tags:
tags.extend(added_tags)
result = safe_execute(
self._save_aggregate,
event=event,
tags=tags,
hashes=hashes,
**group_kwargs
)
if result is None:
return
group, is_new, is_regression, is_sample = result
using = group._state.db
event.group = group
# save the event unless its been sampled
if not is_sample:
sid = transaction.savepoint(using=using)
try:
event.save()
except IntegrityError:
transaction.savepoint_rollback(sid, using=using)
return event
transaction.savepoint_commit(sid, using=using)
sid = transaction.savepoint(using=using)
try:
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
transaction.savepoint_rollback(sid, using=using)
return event
transaction.savepoint_commit(sid, using=using)
transaction.commit_unless_managed(using=using)
if not raw:
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
)
index_event.delay(event)
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event
示例3: save
#.........这里部分代码省略.........
hashes = [checksum]
else:
hashes = map(md5_from_hash, get_hashes_for_event(event))
group_kwargs = kwargs.copy()
group_kwargs.update({
'culprit': culprit,
'logger': logger_name,
'level': level,
'last_seen': date,
'first_seen': date,
'time_spent_total': time_spent or 0,
'time_spent_count': time_spent and 1 or 0,
})
if release:
release = Release.get_or_create(
project=project,
version=release,
date_added=date,
)
group_kwargs['first_release'] = release
Activity.objects.create(
type=Activity.RELEASE,
project=project,
ident=release,
data={'version': release},
datetime=date,
)
group, is_new, is_regression, is_sample = self._save_aggregate(
event=event,
hashes=hashes,
release=release,
**group_kwargs
)
event.group = group
event.group_id = group.id
# store a reference to the group id to guarantee validation of isolation
event.data.bind_ref(event)
try:
with transaction.atomic():
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
return event
UserReport.objects.filter(
project=project, event_id=event_id,
).update(group=group)
# save the event unless its been sampled
if not is_sample:
try:
with transaction.atomic():
event.save()
except IntegrityError:
self.logger.info('Duplicate Event found for event_id=%s', event_id)
return event
if event_user:
tsdb.record_multi((
(tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
(tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
), timestamp=event.datetime)
if is_new and release:
buffer.incr(Release, {'new_groups': 1}, {
'id': release.id,
})
safe_execute(Group.objects.add_tags, group, tags,
_with_transaction=False)
if not raw:
if not project.first_event:
project.update(first_event=date)
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
)
else:
self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)
index_event.delay(event)
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event
示例4: save_data
#.........这里部分代码省略.........
'level': level,
'message': message,
'platform': platform,
'culprit': culprit or '',
'logger': logger_name,
}
event = Event(
project=project,
event_id=event_id,
data=data,
server_name=server_name,
site=site,
time_spent=time_spent,
datetime=date,
**kwargs
)
# Calculate the checksum from the first highest scoring interface
if not checksum:
checksum = get_checksum_from_event(event)
event.checksum = checksum
group_kwargs = kwargs.copy()
group_kwargs.update({
'last_seen': date,
'first_seen': date,
'time_spent_total': time_spent or 0,
'time_spent_count': time_spent and 1 or 0,
})
tags = data['tags']
tags.append(('level', LOG_LEVELS[level]))
if logger:
tags.append(('logger', logger_name))
if server_name:
tags.append(('server_name', server_name))
if site:
tags.append(('site', site))
for plugin in plugins.for_project(project):
added_tags = safe_execute(plugin.get_tags, event)
if added_tags:
tags.extend(added_tags)
try:
group, is_new, is_sample = self._create_group(
event=event,
tags=data['tags'],
**group_kwargs
)
except Exception as exc:
# TODO: should we mail admins when there are failures?
try:
logger.exception(u'Unable to process log entry: %s', exc)
except Exception as exc:
warnings.warn(u'Unable to process log entry: %s', exc)
return
using = group._state.db
event.group = group
# save the event unless its been sampled
if not is_sample:
sid = transaction.savepoint(using=using)
try:
event.save()
except IntegrityError:
transaction.savepoint_rollback(sid, using=using)
return event
transaction.savepoint_commit(sid, using=using)
sid = transaction.savepoint(using=using)
try:
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
transaction.savepoint_rollback(sid, using=using)
return event
transaction.savepoint_commit(sid, using=using)
transaction.commit_unless_managed(using=using)
if not raw:
send_group_processors(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample
)
if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
index_event.delay(event)
# TODO: move this to the queue
if is_new and not raw:
regression_signal.send_robust(sender=self.model, instance=group)
return event
示例5: save
#.........这里部分代码省略.........
if site:
tags.append(('site', site))
if release:
# TODO(dcramer): we should ensure we create Release objects
tags.append(('sentry:release', release))
for plugin in plugins.for_project(project, version=None):
added_tags = safe_execute(plugin.get_tags, event,
_with_transaction=False)
if added_tags:
tags.extend(added_tags)
# XXX(dcramer): we're relying on mutation of the data object to ensure
# this propagates into Event
data['tags'] = tags
# Calculate the checksum from the first highest scoring interface
if checksum:
hashes = [checksum]
else:
hashes = get_hashes_for_event(event)
group_kwargs = kwargs.copy()
group_kwargs.update({
'culprit': culprit,
'logger': logger_name,
'level': level,
'last_seen': date,
'first_seen': date,
'time_spent_total': time_spent or 0,
'time_spent_count': time_spent and 1 or 0,
})
if release:
group_kwargs['first_release'] = Release.get_or_create(
project=project,
version=release,
date_added=date,
)
Activity.objects.create(
type=Activity.RELEASE,
project=project,
ident=release,
data={'version': release},
datetime=date,
)
group, is_new, is_regression, is_sample = safe_execute(
self._save_aggregate,
event=event,
hashes=hashes,
**group_kwargs
)
using = group._state.db
event.group = group
try:
with transaction.atomic():
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
return event
UserReport.objects.filter(
project=project, event_id=event_id,
).update(group=group)
# save the event unless its been sampled
if not is_sample:
try:
with transaction.atomic():
event.save()
except IntegrityError:
self.logger.info('Duplicate Event found for event_id=%s', event_id)
return event
safe_execute(Group.objects.add_tags, group, tags,
_with_transaction=False)
if not raw:
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
)
else:
self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)
index_event.delay(event)
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event
示例6: maybe_delay
try:
maybe_delay(index_event, event)
except Exception, e:
transaction.rollback_unless_managed(using=group._state.db)
logger.exception(u'Error indexing document: %s', e)
if settings.SCRAPE_JAVASCRIPT_CONTEXT and event.platform == 'javascript' and not is_sample:
try:
maybe_delay(fetch_javascript_source, event)
except Exception, e:
transaction.rollback_unless_managed(using=group._state.db)
logger.exception(u'Error fetching javascript source: %s', e)
if is_new:
try:
regression_signal.send_robust(sender=self.model, instance=group)
except Exception, e:
transaction.rollback_unless_managed(using=group._state.db)
logger.exception(u'Error sending regression signal: %s', e)
send_group_processors(group=group, event=event, is_new=is_new, is_sample=is_sample)
return event
def _create_group(self, event, tags=None, **kwargs):
from sentry.models import ProjectCountByMinute, GroupCountByMinute
date = event.datetime
time_spent = event.time_spent
project = event.project
示例7: save
#.........这里部分代码省略.........
try:
with transaction.atomic(using=router.db_for_write(EventMapping)):
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
self.logger.info('Duplicate EventMapping found for event_id=%s', event_id,
exc_info=True)
return event
if release:
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=environment,
datetime=date,
)
tsdb.incr_multi([
(tsdb.models.group, group.id),
(tsdb.models.project, project.id),
], timestamp=event.datetime)
frequencies = [
(tsdb.models.frequent_projects_by_organization, {
project.organization_id: {
project.id: 1,
},
}),
(tsdb.models.frequent_issues_by_project, {
project.id: {
group.id: 1,
},
})
]
if release:
frequencies.append(
(tsdb.models.frequent_releases_by_groups, {
group.id: {
grouprelease.id: 1,
},
})
)
tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)
UserReport.objects.filter(
project=project, event_id=event_id,
).update(group=group)
# save the event unless its been sampled
if not is_sample:
try:
with transaction.atomic(using=router.db_for_write(Event)):
event.save()
except IntegrityError:
self.logger.info('Duplicate Event found for event_id=%s', event_id,
exc_info=True)
return event
index_event_tags.delay(
project_id=project.id,
group_id=group.id,
event_id=event.id,
tags=tags,
)
if event_user:
tsdb.record_multi((
(tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
(tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
), timestamp=event.datetime)
if is_new and release:
buffer.incr(Release, {'new_groups': 1}, {
'id': release.id,
})
safe_execute(Group.objects.add_tags, group, tags,
_with_transaction=False)
if not raw:
if not project.first_event:
project.update(first_event=date)
first_event_received.send(project=project, group=group, sender=Project)
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
)
else:
self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event
示例8: save
#.........这里部分代码省略.........
if interface.ephemeral:
data.pop(interface.get_path(), None)
# prioritize fingerprint over checksum as its likely the client defaulted
# a checksum whereas the fingerprint was explicit
if fingerprint:
hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
elif checksum:
hashes = [checksum]
else:
hashes = map(md5_from_hash, get_hashes_for_event(event))
# TODO(dcramer): temp workaround for complexity
data["message"] = message
event_type = eventtypes.get(data.get("type", "default"))(data)
group_kwargs = kwargs.copy()
group_kwargs.update(
{
"culprit": culprit,
"logger": logger_name,
"level": level,
"last_seen": date,
"first_seen": date,
"data": {
"last_received": event.data.get("received") or float(event.datetime.strftime("%s")),
"type": event_type.key,
# we cache the events metadata on the group to ensure its
# accessible in the stream
"metadata": event_type.get_metadata(),
},
}
)
# TODO(dcramer): temp workaround for complexity
del data["message"]
if release:
release = Release.get_or_create(project=project, version=release, date_added=date)
group_kwargs["first_release"] = release
group, is_new, is_regression, is_sample = self._save_aggregate(
event=event, hashes=hashes, release=release, **group_kwargs
)
event.group = group
# store a reference to the group id to guarantee validation of isolation
event.data.bind_ref(event)
try:
with transaction.atomic(using=router.db_for_write(EventMapping)):
EventMapping.objects.create(project=project, group=group, event_id=event_id)
except IntegrityError:
self.logger.info("Duplicate EventMapping found for event_id=%s", event_id, exc_info=True)
return event
UserReport.objects.filter(project=project, event_id=event_id).update(group=group)
# save the event unless its been sampled
if not is_sample:
try:
with transaction.atomic(using=router.db_for_write(Event)):
event.save()
except IntegrityError:
self.logger.info("Duplicate Event found for event_id=%s", event_id, exc_info=True)
return event
index_event_tags.delay(project_id=project.id, event_id=event.id, tags=tags)
if event_user:
tsdb.record_multi(
(
(tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
(tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
),
timestamp=event.datetime,
)
if is_new and release:
buffer.incr(Release, {"new_groups": 1}, {"id": release.id})
safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)
if not raw:
if not project.first_event:
project.update(first_event=date)
first_event_received.send(project=project, group=group, sender=Project)
post_process_group.delay(
group=group, event=event, is_new=is_new, is_sample=is_sample, is_regression=is_regression
)
else:
self.logger.info("Raw event passed; skipping post process for event_id=%s", event_id)
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event