本文整理汇总了Python中sentry.utils.db.get_db_engine函数的典型用法代码示例。如果您正苦于以下问题:Python get_db_engine函数的具体用法?Python get_db_engine怎么用?Python get_db_engine使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_db_engine函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_get_db_engine
def test_get_db_engine(self):
    """get_db_engine() should return only the last dotted component of the
    configured ENGINE path (e.g. 'blah.sqlite3' -> 'sqlite3').

    Fix: the original saved a copy of DATABASES but only restored it on the
    success path — a failing assertion left the mutated global settings in
    place and could corrupt later tests. Restore in a ``finally`` block.
    """
    from sentry.utils.db import get_db_engine
    _databases = getattr(django_settings, 'DATABASES', {}).copy()
    try:
        django_settings.DATABASES['default'] = {'ENGINE': 'blah.sqlite3'}
        self.assertEquals(get_db_engine(), 'sqlite3')
        django_settings.DATABASES['default'] = {'ENGINE': 'blah.mysql'}
        self.assertEquals(get_db_engine(), 'mysql')
    finally:
        # Always restore the original settings, even if an assertion failed.
        django_settings.DATABASES = _databases
示例2: get_priority_sort_expression
def get_priority_sort_expression(model):
    """Build the raw SQL expression used to rank groups by "priority".

    On PostgreSQL the score blends frequency (log of times_seen) with
    recency (last_seen cast to a unix timestamp); elsewhere we fall back
    to plain frequency.
    """
    engine = get_db_engine(router.db_for_read(model))
    table = get_sql_table(model)
    if 'postgres' not in engine:
        # TODO: This should be improved on other databases where possible.
        # (This doesn't work on some databases: SQLite for example doesn't
        # have a built-in logarithm function.)
        return u'{}.times_seen'.format(table)
    return u'log({table}.times_seen) * 600 + {table}.last_seen::abstime::int'.format(table=table)
示例3: get_sql_date_trunc
def get_sql_date_trunc(col, db='default', grouper='hour'):
    """Return backend-appropriate SQL truncating *col* to *grouper* granularity."""
    connection = connections[db]
    # Oracle uses its own grouper names; fall back to the default mapping
    # for groupers it does not override.
    default_method = DATE_TRUNC_GROUPERS['default'][grouper]
    # TODO: does extract work for sqlite?
    if get_db_engine(db).startswith('oracle'):
        method = DATE_TRUNC_GROUPERS['oracle'].get(grouper, default_method)
    else:
        method = default_method
    return connection.ops.date_trunc_sql(method, col)
示例4: as_sql
def as_sql(self, compiler, connection, function=None, template=None):
    """Render this score expression as (sql, params) for *connection*.

    PostgreSQL and MySQL get a frequency+recency formula; other backends
    fall back to a precomputed value.
    """
    engine = get_db_engine(getattr(connection, 'alias', 'default'))
    if engine.startswith('postgresql'):
        return ('log(times_seen) * 600 + last_seen::abstime::int', [])
    if engine.startswith('mysql'):
        return ('log(times_seen) * 600 + unix_timestamp(last_seen)', [])
    # XXX: if we cant do it atomically let's do it the best we can
    # NOTE(review): int(self) presumably relies on the expression object
    # defining __int__ — confirm against the class definition.
    return (int(self), [])
示例5: evaluate
def evaluate(self, node, qn, connection):
    """Return (sql, params) computing the group score on *connection*'s backend."""
    engine = get_db_engine(getattr(connection, 'alias', 'default'))
    # Backend-specific score formulas, checked in order by engine prefix.
    backend_sql = (
        ('postgresql', 'log(times_seen) * 600 + last_seen::abstime::int'),
        ('mysql', 'log(times_seen) * 600 + unix_timestamp(last_seen)'),
    )
    for prefix, expression in backend_sql:
        if engine.startswith(prefix):
            return (expression, [])
    # XXX: if we cant do it atomically let's do it the best we can
    # NOTE(review): int(self) presumably relies on __int__ on this
    # expression object — confirm against the class definition.
    return (int(self), [])
示例6: evaluate
def evaluate(self, node, qn, connection):
engine = get_db_engine(getattr(connection, "alias", "default"))
if engine.startswith("postgresql"):
sql = "log(times_seen) * 600 + last_seen::abstime::int"
elif engine.startswith("mysql"):
sql = "log(times_seen) * 600 + unix_timestamp(last_seen)"
else:
# XXX: if we cant do it atomicly let's do it the best we can
sql = self.group.get_score()
return (sql, [])
示例7: get_sql_date_trunc
def get_sql_date_trunc(col, db='default'):
    """Return SQL truncating *col* to the hour on the given database alias."""
    conn = connections[db]
    # Oracle spells the hour grouper differently ('hh24' vs 'hour').
    # TODO: does extract work for sqlite?
    grouper = 'hh24' if get_db_engine(db).startswith('oracle') else 'hour'
    return conn.ops.date_trunc_sql(grouper, col)
示例8: get_sql_date_trunc
def get_sql_date_trunc(col, db="default", grouper="hour"):
conn = connections[db]
engine = get_db_engine(db)
# TODO: does extract work for sqlite?
if engine.startswith("oracle"):
method = DATE_TRUNC_GROUPERS["oracle"].get(grouper, DATE_TRUNC_GROUPERS["default"][grouper])
if '"' not in col:
col = '"%s"' % col.upper()
else:
method = DATE_TRUNC_GROUPERS["default"][grouper]
return conn.ops.date_trunc_sql(method, col)
示例9: get_sort_clause
def get_sort_clause(sort_by):
    """Look up the SQL sort clause for *sort_by* on the default database."""
    engine = get_db_engine('default')
    # Engine-prefix -> clause-table dispatch, checked in order.
    prefix_tables = (
        ('sqlite', SQLITE_SORT_CLAUSES),
        ('mysql', MYSQL_SORT_CLAUSES),
        ('oracle', ORACLE_SORT_CLAUSES),
    )
    for prefix, clauses in prefix_tables:
        if engine.startswith(prefix):
            return clauses[sort_by]
    if engine in MSSQL_ENGINES:
        return MSSQL_SORT_CLAUSES[sort_by]
    return SORT_CLAUSES[sort_by]
示例10: get_accelerated
def get_accelerated(self, queryset=None, minutes=15):
    """Return a RawQuerySet of groups ordered by "acceleration": recent
    event volume relative to the preceding baseline window.

    :param queryset: base Group queryset to accelerate; defaults to ``self``.
    :param minutes: size of the recent window; must be at least
        ``settings.MINUTE_NORMALIZATION``.

    Fix: the baseline subquery previously read
    ``BETWEEN now() - %(min_time)s AND now() - %(min_time)s`` — both bounds
    used ``min_time``, so the window was effectively empty, and the computed
    ``max_time`` was never referenced. The lower bound now uses ``max_time``
    so the baseline covers the preceding ``minutes * 4`` span.
    """
    from sentry.models import MessageCountByMinute
    mcbm_tbl = MessageCountByMinute._meta.db_table
    if queryset is None:
        queryset = self
    assert minutes >= settings.MINUTE_NORMALIZATION
    engine = get_db_engine(queryset.db)
    # MySQL interval literals are unquoted; everything else quotes them.
    if engine.startswith('mysql'):
        minute_clause = "interval %s minute"
    else:
        minute_clause = "interval '%s minutes'"
    queryset = queryset.extra(
        where=["%s.date >= now() - %s" % (mcbm_tbl, minute_clause % (minutes, ))],
    ).annotate(x=Sum('messagecountbyminute__times_seen')).order_by('id')
    sql, params = queryset.query.get_compiler(queryset.db).as_sql()
    # Carve the compiled ORM SQL into pieces we can re-wrap below.
    after_select = str(sql).split('SELECT ', 1)[1]
    before_where, after_where = after_select.split(' WHERE ', 1)
    before_group, after_group = after_where.split(' GROUP BY ', 1)
    # Ensure we remove any ordering clause
    after_group = after_group.split(' ORDER BY ')[0]
    query = """
    SELECT (SUM(%(mcbm_tbl)s.times_seen) + 1.0) / (COALESCE(z.accel, 0) + 1.0) as accel,
           z.accel as prev_accel,
           %(before_where)s
    LEFT JOIN (SELECT a.group_id, SUM(a.times_seen) / 3.0 as accel
               FROM %(mcbm_tbl)s as a
               WHERE a.date BETWEEN now() - %(max_time)s
               AND now() - %(min_time)s
               GROUP BY a.group_id) as z
    ON z.group_id = %(mcbm_tbl)s.group_id
    WHERE %(before_group)s
    GROUP BY prev_accel, %(after_group)s
    HAVING SUM(%(mcbm_tbl)s.times_seen) > 0
    ORDER BY accel DESC
    """ % dict(
        mcbm_tbl=mcbm_tbl,
        before_where=before_where,
        before_group=before_group,
        after_group=after_group,
        min_time=minute_clause % (minutes + 1,),
        max_time=minute_clause % (minutes * 4,),
    )
    return RawQuerySet(self, query, params)
示例11: get
def get(self, request, team):
    """
    Return a list of the newest groups for a given team.
    The resulting query will find groups which have been seen since the
    cutoff date, and then sort those by score, returning the highest scoring
    groups first.
    """
    minutes = int(request.REQUEST.get('minutes', 15))
    limit = min(100, int(request.REQUEST.get('limit', 10)))

    projects = Project.objects.get_for_user(user=request.user, team=team)
    projects_by_id = dict((p.id, p) for p in projects)

    cutoff_dt = timezone.now() - timedelta(minutes=minutes)

    # sqlite lacks the functions backing the 'score' column, so sort by
    # raw frequency there instead.
    sort_value = 'times_seen' if get_db_engine('default') == 'sqlite' else 'score'

    queryset = Group.objects.filter(
        project__in=projects_by_id.keys(),
        status=GroupStatus.UNRESOLVED,
        active_at__gte=cutoff_dt,
    ).extra(
        select={'sort_value': sort_value},
    ).order_by('-{}'.format(sort_value), '-first_seen')
    group_list = list(queryset[:limit])

    # Prime each group's project cache to avoid per-group lookups during
    # serialization.
    for group in group_list:
        group._project_cache = projects_by_id.get(group.project_id)

    return Response(
        serialize(
            group_list,
            request.user,
            GroupSerializer(
                environment_func=self._get_environment_func(request, team.organization_id)
            )
        )
    )
示例12: get_group_tags
def get_group_tags(request, team, project, group_id, tag_name):
    """Return a JSON summary of the top values for one tag on one group,
    limited to the last seven days."""
    # XXX(dcramer): Consider this API deprecated as soon as it was implemented
    cutoff = timezone.now() - timedelta(days=7)

    # All non-raw queries below share the same group/key/recency filter.
    recent_values = GroupTagValue.objects.filter(
        group=group_id,
        key=tag_name,
        last_seen__gte=cutoff,
    )

    engine = get_db_engine('default')
    if 'postgres' not in engine:
        total = recent_values.aggregate(t=Sum('times_seen'))['t'] or 0
    else:
        # This doesnt guarantee percentage is accurate, but it does ensure
        # that the query has a maximum cost
        cursor = connections['default'].cursor()
        cursor.execute("""
            SELECT SUM(t)
            FROM (
                SELECT times_seen as t
                FROM sentry_messagefiltervalue
                WHERE group_id = %s
                AND key = %s
                AND last_seen > NOW() - INTERVAL '7 days'
                LIMIT 10000
            ) as a
        """, [group_id, tag_name])
        total = cursor.fetchone()[0] or 0

    unique_tags = recent_values.values_list(
        'value', 'times_seen',
    ).order_by('-times_seen')[:10]

    return json.dumps({
        'name': tag_name,
        'values': list(unique_tags),
        'total': total,
    })
示例13: has_trending
try:
if not filter_.is_set():
continue
event_list = filter_.get_query_set(event_list)
except Exception, e:
logger = logging.getLogger('sentry.filters')
logger.exception('Error processing filter %r: %s', cls, e)
sort = request.GET.get('sort')
if sort not in SORT_OPTIONS:
sort = settings.DEFAULT_SORT_OPTION
if sort.startswith('accel_') and not has_trending():
sort = settings.DEFAULT_SORT_OPTION
engine = get_db_engine('default')
if engine.startswith('sqlite'):
sort_clause = SQLITE_SORT_CLAUSES.get(sort)
elif engine.startswith('mysql'):
sort_clause = MYSQL_SORT_CLAUSES.get(sort)
else:
sort_clause = SORT_CLAUSES.get(sort)
if sort == 'tottime':
event_list = event_list.filter(time_spent_count__gt=0)
elif sort == 'avgtime':
event_list = event_list.filter(time_spent_count__gt=0)
elif sort.startswith('accel_'):
event_list = Group.objects.get_accelerated(event_list, minutes=int(sort.split('_', 1)[1]))
date_from = request.GET.get('df')
示例14: query
def query(self, project, query=None, status=None, tags=None,
          bookmarked_by=None, sort_by='date', date_filter='last_seen',
          date_from=None, date_to=None, cursor=None, limit=100):
    """Search groups within *project* and return a SearchResult.

    Filters are applied in order: message substring, status, bookmarks,
    tag key/value pairs, then date range; finally an engine-specific sort
    clause is attached and cursor-based pagination applied.
    """
    from sentry.models import Group
    queryset = Group.objects.filter(project=project)
    if query:
        # Simple substring match against the group message.
        queryset = queryset.filter(message__icontains=query)
    if status is not None:
        queryset = queryset.filter(status=status)
    if bookmarked_by:
        queryset = queryset.filter(
            bookmark_set__project=project,
            bookmark_set__user=bookmarked_by,
        )
    if tags:
        # Each key/value pair adds its own join, so every pair must match.
        for k, v in tags.iteritems():
            queryset = queryset.filter(**dict(
                grouptag__key=k,
                grouptag__value=v,
            ))
    if date_filter == 'first_seen':
        # NOTE(review): elif means only one bound is applied here even when
        # both date_from and date_to are given — confirm this is intended.
        if date_from:
            queryset = queryset.filter(first_seen__gte=date_from)
        elif date_to:
            queryset = queryset.filter(first_seen__lte=date_to)
    elif date_filter == 'last_seen':
        if date_from and date_to:
            # NOTE(review): the lower bound filters first_seen (not
            # last_seen) when both bounds are supplied — confirm intended.
            queryset = queryset.filter(
                first_seen__gte=date_from,
                last_seen__lte=date_to,
            )
        elif date_from:
            queryset = queryset.filter(last_seen__gte=date_from)
        elif date_to:
            queryset = queryset.filter(last_seen__lte=date_to)

    # Pick the engine-specific sort/score SQL clauses for sort_by.
    engine = get_db_engine('default')
    if engine.startswith('sqlite'):
        score_clause = SQLITE_SORT_CLAUSES.get(sort_by)
        filter_clause = SQLITE_SCORE_CLAUSES.get(sort_by)
    elif engine.startswith('mysql'):
        score_clause = MYSQL_SORT_CLAUSES.get(sort_by)
        filter_clause = MYSQL_SCORE_CLAUSES.get(sort_by)
    elif engine.startswith('oracle'):
        score_clause = ORACLE_SORT_CLAUSES.get(sort_by)
        filter_clause = ORACLE_SCORE_CLAUSES.get(sort_by)
    elif engine in MSSQL_ENGINES:
        score_clause = MSSQL_SORT_CLAUSES.get(sort_by)
        filter_clause = MSSQL_SCORE_CLAUSES.get(sort_by)
    else:
        score_clause = SORT_CLAUSES.get(sort_by)
        filter_clause = SCORE_CLAUSES.get(sort_by)

    if sort_by == 'tottime':
        # Time-based sorts only make sense for groups with timing data.
        queryset = queryset.filter(time_spent_count__gt=0)
    elif sort_by == 'avgtime':
        queryset = queryset.filter(time_spent_count__gt=0)

    if score_clause:
        # Expose the computed score as 'sort_value' for ordering below.
        queryset = queryset.extra(
            select={'sort_value': score_clause},
        )
        # HACK: don't sort by the same column twice
        if sort_by == 'date':
            queryset = queryset.order_by('-last_seen')
        else:
            queryset = queryset.order_by('-sort_value', '-last_seen')

    if cursor:
        # Cursor pagination: only rows scoring above the cursor value.
        queryset = queryset.extra(
            where=['%s > %%s' % filter_clause],
            params=[float(cursor)],
        )

    # HACK:
    return SearchResult(instances=list(queryset[:limit]))
示例15: _get_group_list
def _get_group_list(request, project):
filters = []
for cls in get_filters(Group, project):
try:
filters.append(cls(request, project))
except Exception as e:
logger = logging.getLogger('sentry.filters')
logger.exception('Error initializing filter %r: %s', cls, e)
event_list = Group.objects
if request.user.is_authenticated() and request.GET.get('bookmarks'):
event_list = event_list.filter(
bookmark_set__project=project,
bookmark_set__user=request.user,
)
else:
event_list = event_list.filter(project=project)
for filter_ in filters:
try:
if not filter_.is_set():
continue
event_list = filter_.get_query_set(event_list)
except Exception as e:
logger = logging.getLogger('sentry.filters')
logger.exception('Error processing filter %r: %s', cls, e)
date_from = request.GET.get('df')
time_from = request.GET.get('tf')
date_to = request.GET.get('dt')
time_to = request.GET.get('tt')
date_type = request.GET.get('date_type')
today = timezone.now()
# date format is Y-m-d
if any(x is not None for x in [date_from, time_from, date_to, time_to]):
date_from, date_to = parse_date(date_from, time_from), parse_date(date_to, time_to)
else:
date_from = today - datetime.timedelta(days=5)
date_to = None
if date_type == 'first_seen':
if date_from:
event_list = event_list.filter(first_seen__gte=date_from)
elif date_to:
event_list = event_list.filter(first_seen__lte=date_to)
else:
if date_from and date_to:
event_list = event_list.filter(
first_seen__gte=date_from,
last_seen__lte=date_to,
)
elif date_from:
event_list = event_list.filter(last_seen__gte=date_from)
elif date_to:
event_list = event_list.filter(last_seen__lte=date_to)
sort = request.GET.get('sort') or request.session.get('streamsort')
if sort not in SORT_OPTIONS:
sort = DEFAULT_SORT_OPTION
# Save last sort in session
if sort != request.session.get('streamsort'):
request.session['streamsort'] = sort
engine = get_db_engine('default')
if engine.startswith('sqlite'):
score_clause = SQLITE_SORT_CLAUSES.get(sort)
filter_clause = SQLITE_SCORE_CLAUSES.get(sort)
elif engine.startswith('mysql'):
score_clause = MYSQL_SORT_CLAUSES.get(sort)
filter_clause = MYSQL_SCORE_CLAUSES.get(sort)
elif engine.startswith('oracle'):
score_clause = ORACLE_SORT_CLAUSES.get(sort)
filter_clause = ORACLE_SCORE_CLAUSES.get(sort)
elif engine in ('django_pytds', 'sqlserver_ado', 'sql_server.pyodbc'):
score_clause = MSSQL_SORT_CLAUSES.get(sort)
filter_clause = MSSQL_SCORE_CLAUSES.get(sort)
else:
score_clause = SORT_CLAUSES.get(sort)
filter_clause = SCORE_CLAUSES.get(sort)
# IMPORTANT: All filters must already be applied once we reach this point
if sort == 'tottime':
event_list = event_list.filter(time_spent_count__gt=0)
elif sort == 'avgtime':
event_list = event_list.filter(time_spent_count__gt=0)
if score_clause:
event_list = event_list.extra(
select={'sort_value': score_clause},
)
# HACK: don't sort by the same column twice
if sort == 'date':
event_list = event_list.order_by('-last_seen')
else:
event_list = event_list.order_by('-sort_value', '-last_seen')
cursor = request.GET.get('cursor', request.GET.get('c'))
#.........这里部分代码省略.........