本文整理汇总了Python中fjord.feedback.models.ResponseMappingType类的典型用法代码示例。如果您正苦于以下问题:Python ResponseMappingType类的具体用法?Python ResponseMappingType怎么用?Python ResponseMappingType使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ResponseMappingType类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_index_chunk_task
def test_index_chunk_task(self):
    """index_chunk_task indexes the chunk and marks the record done."""
    # Make some responses; live indexing will index them as a side
    # effect.
    stored = [response(save=True) for _ in range(10)]

    # Since we want to exercise index_chunk_task itself, we need a
    # clean index to start with, so delete and recreate it.
    self.setup_indexes(empty=True)

    # Sanity check: the index is empty now.
    eq_(len(ResponseMappingType.search()), 0)

    # Build the record and the chunk, then push them through celery.
    batch_id = 'ou812'
    rec = record(batch_id=batch_id, save=True)
    chunk = (ResponseMappingType, [item.id for item in stored])
    index_chunk_task.delay(get_index(), batch_id, rec.id, chunk)

    ResponseMappingType.refresh_index()

    # Everything should be in the index now.
    eq_(len(ResponseMappingType.search()), 10)

    # The record should have been marked succeeded.
    rec = Record.objects.get(pk=rec.id)
    eq_(rec.status, Record.STATUS_SUCCESS)
示例2: test_purge
def test_purge(self):
    """purge_data removes old db rows but leaves index documents alone."""
    now = datetime.datetime.now()

    # Create one ResponseEmail and one ResponseContext per day for
    # the last 10 days.
    for day in range(10):
        created = now - datetime.timedelta(days=day)
        ResponseEmailFactory(opinion__created=created)
        ResponseContextFactory(opinion__created=created)

    # Since creating the objects and indexing them happens very
    # quickly in tests, we hit a race condition and the has_email
    # column ends up being false. So instead we just drop the index
    # and rebuild it.
    self.setup_indexes()

    # Make sure everything is in the db ...
    eq_(Response.objects.count(), 20)
    eq_(ResponseEmail.objects.count(), 10)
    eq_(ResponseContext.objects.count(), 10)

    # ... and in the index.
    resp_s = ResponseMappingType.search()
    eq_(resp_s.count(), 20)
    eq_(resp_s.filter(has_email=True).count(), 10)

    # Now purge everything older than 5 days and make sure things got
    # removed that should have gotten removed.
    # (Fix: the original computed an identical cutoff twice; once is
    # enough.)
    cutoff = now - datetime.timedelta(days=5)
    purge_data(cutoff=cutoff)
    self.refresh()

    eq_(Response.objects.count(), 20)
    eq_(ResponseEmail.objects.count(), 5)
    eq_(ResponseEmail.objects.filter(
        opinion__created__gte=cutoff).count(),
        5)
    eq_(ResponseContext.objects.count(), 5)
    eq_(ResponseContext.objects.filter(
        opinion__created__gte=cutoff).count(),
        5)

    # Everything should still be in the index, but the number of
    # things with has_email=True should go down.
    resp_s = ResponseMappingType.search()
    eq_(resp_s.count(), 20)
    eq_(resp_s.filter(has_email=True).count(), 5)
示例3: response_view
def response_view(request, responseid, template):
    """Show a single feedback response.

    Users with the analytics dashboard permission additionally get
    morelikethis results and related journal/Gengo records.

    """
    response = get_object_or_404(Response, id=responseid)

    user = request.user
    is_analyzer = (user.is_authenticated()
                   and user.has_perm('analytics.can_view_dashboard'))

    try:
        prod = Product.objects.get(db_name=response.product)
    except Product.DoesNotExist:
        prod = None
    # Responses for products hidden from the dashboard are only
    # visible to analyzers.
    if prod is not None and not prod.on_dashboard and not is_analyzer:
        return HttpResponseForbidden()

    mlt = None
    records = None
    errors = []

    if is_analyzer:
        try:
            # Reshaping converts the result to a list which forces it
            # to execute right now.
            mlt = ResponseMappingType.reshape(
                ResponseMappingType.morelikethis(response))
        except ElasticsearchException as exc:
            errors.append('Failed to do morelikethis: %s' % exc)

        records = [
            (u'Response records', Record.objects.records(response)),
        ]
        jobs = GengoJob.objects.filter(
            object_id=response.id,
            content_type=ContentType.objects.get_for_model(response)
        )
        for job in jobs:
            records.append(
                (u'Gengo job record {0}'.format(job.id), job.records)
            )

    # Massage the data here rather than handing the raw response
    # object straight to the template.
    return render(request, template, {
        'errors': errors,
        'response': response,
        'mlt': mlt,
        'records': records,
    })
示例4: response_view
def response_view(request, responseid, template):
    """Show a single feedback response with analyzer-only extras."""
    response = get_object_or_404(Response, id=responseid)

    mlt = None
    records = None

    user = request.user
    if user.is_authenticated() and user.has_perm(
            'analytics.can_view_dashboard'):
        mlt = ResponseMappingType.morelikethis(response)

        records = [
            (u'Response records', Record.objects.records(response)),
        ]
        jobs = GengoJob.objects.filter(
            object_id=response.id,
            content_type=ContentType.objects.get_for_model(response)
        )
        for job in jobs:
            records.append(
                (u'Gengo job record {0}'.format(job.id), job.records)
            )

    # Massage the data here instead of passing the response object
    # straight to the template.
    return render(request, template, {
        'response': response,
        'mlt': mlt,
        'records': records,
    })
示例5: timezone_view
def timezone_view(request):
    """Admin view showing times and timezones in data."""
    # Note: This is an admin page used once in a blue moon, so some
    # liberties are taken here (hand-indexing the response,
    # time.sleep, ...) that would be unacceptable on a page used more
    # often or visible to users. Rewrite this if either assumption
    # changes.
    from elasticutils.contrib.django import get_es
    from fjord.feedback.models import Response, ResponseMappingType
    from fjord.feedback.tests import ResponseFactory
    from fjord.search.index import get_index

    server_time = datetime.now()

    # Create a fresh response and remember its created time.
    resp = ResponseFactory.create()
    resp_time = resp.created

    # Index it by hand so we know it reached Elasticsearch; leaving
    # it to celery would make the timing unpredictable.
    doc = ResponseMappingType.extract_document(resp.id)
    ResponseMappingType.index(doc, resp.id)

    # Re-fetch the response from the db and grab created again.
    resp = Response.objects.get(id=resp.id)
    resp2_time = resp.created

    # Refresh, then sleep 5 seconds as a hand-wavey way of giving
    # Elasticsearch time to refresh the index.
    get_es().indices.refresh(get_index())
    time.sleep(5)

    es_time = ResponseMappingType.search().filter(id=resp.id)[0].created

    # Clean up the test response we created.
    resp.delete()

    return render(request, 'admin/timezone_view.html', {
        'server_time': server_time,
        'resp_time': resp_time,
        'resp2_time': resp2_time,
        'es_time': es_time
    })
示例6: analytics_duplicates
def analytics_duplicates(request):
    """Shows all duplicate descriptions over the last n days"""
    template = 'analytics/analyzer/duplicates.html'
    n = 14

    responses = (ResponseMappingType.search()
                 .filter(created__gte=datetime.now() - timedelta(days=n))
                 .values_dict('description', 'happy', 'created', 'locale',
                              'user_agent', 'id')
                 .order_by('created').everything())
    responses = ResponseMappingType.reshape(responses)

    total_count = len(responses)

    # Group the responses by description text.
    response_dupes = defaultdict(list)
    for resp in responses:
        response_dupes[resp['description']].append(resp)

    # Keep only descriptions that occurred more than once.
    response_dupes = [
        (key, val) for key, val in response_dupes.items()
        if len(val) > 1
    ]

    # Convert the dict into a list of tuples sorted by the number of
    # responses per tuple, largest number first.
    response_dupes = sorted(response_dupes, key=lambda item: len(item[1]),
                            reverse=True)

    # duplicate_count -> count
    # i.e. "how many responses had 2 duplicates?"
    summary_counts = defaultdict(int)
    for desc, responses in response_dupes:
        summary_counts[len(responses)] += 1
    summary_counts = sorted(summary_counts.items(), key=lambda item: item[0])

    return render(request, template, {
        # Fix: pass the n actually used for filtering above rather
        # than a second hard-coded 14.
        'n': n,
        'response_dupes': response_dupes,
        'render_time': datetime.now(),
        'summary_counts': summary_counts,
        'total_count': total_count,
    })
示例7: analytics_hourly_histogram
def analytics_hourly_histogram(request):
    """Shows an hourly histogram for the last 5 days of all responses"""
    template = 'analytics/analyzer/hourly_histogram.html'

    date_end = smart_date(request.GET.get('date_end', None), fallback=None)
    if date_end is None:
        date_end = date.today()
    date_start = date_end - timedelta(days=5)

    search = ResponseMappingType.search()
    filters = F(created__gte=date_start, created__lte=date_end)
    # Fix: .filter() returns a new search object; the original
    # discarded the return value, so the date restriction was only
    # applied through facet_filter below.
    search = search.filter(filters)

    hourly_histogram = search.facet_raw(
        hourly={
            'date_histogram': {
                'interval': 'hour',
                'field': 'created'
            },
            'facet_filter': search._process_filters(filters.filters)
        }).facet_counts()

    hourly_data = dict(
        (p['time'], p['count']) for p in hourly_histogram['hourly'])

    # zero_fill spacing is in milliseconds.
    hour = 60 * 60 * 1000.0
    zero_fill(date_start, date_end, [hourly_data], spacing=hour)

    # FIXME: This is goofy. After zero_fill, we end up with a bunch
    # of trailing zeros for reasons I don't really understand, so
    # instead of fixing that, I'm just going to remove them here.
    hourly_data = sorted(hourly_data.items())
    while hourly_data and hourly_data[-1][1] == 0:
        hourly_data.pop(-1)

    histogram = [
        {
            'label': 'Hourly',
            'name': 'hourly',
            'data': hourly_data
        },
    ]

    return render(request, template, {
        'histogram': histogram,
        'start_date': date_start,
        'end_date': date_end
    })
示例8: response_view
def response_view(request, responseid, template):
    """Show a single feedback response plus analyzer-only extras."""
    response = get_object_or_404(Response, id=responseid)

    mlt = None
    records = None
    errors = []

    user = request.user
    if user.is_authenticated() and user.has_perm(
            'analytics.can_view_dashboard'):
        try:
            # Reshaping converts the result to a list which forces it
            # to execute right now.
            mlt = ResponseMappingType.reshape(
                ResponseMappingType.morelikethis(response))
        except ElasticsearchException as exc:
            errors.append('Failed to do morelikethis: %s' % exc)

        records = [
            (u'Response records', Record.objects.records(response)),
        ]
        jobs = GengoJob.objects.filter(
            object_id=response.id,
            content_type=ContentType.objects.get_for_model(response)
        )
        for job in jobs:
            records.append(
                (u'Gengo job record {0}'.format(job.id), job.records)
            )

    # Massage the data for viewing here rather than passing the
    # response object straight to the template.
    return render(request, template, {
        'errors': errors,
        'response': response,
        'mlt': mlt,
        'records': records,
    })
示例9: response_view
def response_view(request, responseid, template):
    """Show one feedback response; dashboard users also get morelikethis."""
    response = get_object_or_404(Response, id=responseid)

    user = request.user
    can_view = (user.is_authenticated()
                and user.has_perm('analytics.can_view_dashboard'))
    mlt = ResponseMappingType.morelikethis(response) if can_view else None

    # Tweak the data here instead of handing the response object
    # straight to the template.
    return render(request, template, {
        'response': response,
        'mlt': mlt,
    })
示例10: dashboard
# NOTE(review): this example was scraped from an aggregator page; the
# indentation was stripped and the tail of the function is omitted, so
# the code below is not runnable as-is. Code kept byte-identical.
def dashboard(request):
# Front-page dashboard: builds an ES search from querystring params.
template = 'analytics/dashboard.html'
output_format = request.GET.get('format', None)
page = smart_int(request.GET.get('page', 1), 1)
# Note: If we add additional querystring fields, we need to add
# them to generate_dashboard_url.
search_happy = request.GET.get('happy', None)
search_platform = request.GET.get('platform', None)
search_locale = request.GET.get('locale', None)
search_product = request.GET.get('product', None)
search_version = request.GET.get('version', None)
search_query = request.GET.get('q', None)
search_date_start = smart_date(
request.GET.get('date_start', None), fallback=None)
search_date_end = smart_date(
request.GET.get('date_end', None), fallback=None)
search_bigram = request.GET.get('bigram', None)
selected = request.GET.get('selected', None)
filter_data = []
current_search = {'page': page}
search = ResponseMappingType.search()
f = F()
# If search happy is '0' or '1', set it to False or True, respectively.
search_happy = {'0': False, '1': True}.get(search_happy, None)
if search_happy in [False, True]:
f &= F(happy=search_happy)
current_search['happy'] = int(search_happy)
def unknown_to_empty(text):
"""Convert "Unknown" to "" to support old links"""
return u'' if text.lower() == u'unknown' else text
if search_platform is not None:
f &= F(platform=unknown_to_empty(search_platform))
current_search['platform'] = search_platform
if search_locale is not None:
f &= F(locale=unknown_to_empty(search_locale))
current_search['locale'] = search_locale
visible_products = [
prod.encode('utf-8')
for prod in Product.objects.public().values_list('db_name', flat=True)
]
# This covers the "unknown" product which is also visible.
visible_products.append('')
if search_product in visible_products:
f &= F(product=unknown_to_empty(search_product))
current_search['product'] = search_product
if search_version is not None:
# Note: We only filter on version if we're filtering on
# product.
f &= F(version=unknown_to_empty(search_version))
current_search['version'] = search_version
else:
f &= F(product__in=visible_products)
if search_date_start is None and search_date_end is None:
selected = '7d'
if search_date_end is None:
search_date_end = date.today()
if search_date_start is None:
search_date_start = search_date_end - timedelta(days=7)
# If the start and end dates are inverted, switch them into proper
# chronological order
search_date_start, search_date_end = sorted(
[search_date_start, search_date_end])
# Restrict the frontpage dashboard to only show the last 6 months
# of data
six_months_ago = date.today() - timedelta(days=180)
search_date_start = max(six_months_ago, search_date_start)
search_date_end = max(search_date_start, search_date_end)
current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
f &= F(created__lte=search_date_end)
current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
f &= F(created__gte=search_date_start)
if search_query:
current_search['q'] = search_query
search = search.query(description__sqs=search_query)
if search_bigram is not None:
f &= F(description_bigrams=search_bigram)
filter_data.append({
'display': _('Bigram'),
'name': 'bigram',
'options': [{
'count': 'all',
'name': search_bigram,
#......... rest of this example omitted by the aggregator .........
示例11: test_purge
def test_purge(self):
    """purge_data removes old rows, journals the purge, keeps index docs."""
    now = datetime.datetime.now()

    # Create 10 objs of each type--one for each day for the last 10
    # days. Note that this creates 30 Response objects.
    for day in range(10):
        created = now - datetime.timedelta(days=day)
        ResponseEmailFactory(opinion__created=created)
        ResponseContextFactory(opinion__created=created)
        ResponsePIFactory(opinion__created=created)

    # Since creating the objects and indexing them happens very
    # quickly in tests, we hit a race condition and the has_email
    # column ends up being false. So instead we just drop the index
    # and rebuild it.
    self.setup_indexes()

    # Make sure everything is in the db ...
    eq_(Response.objects.count(), 30)
    eq_(ResponseEmail.objects.count(), 10)
    eq_(ResponseContext.objects.count(), 10)
    eq_(ResponsePI.objects.count(), 10)

    # ... and in the index.
    resp_s = ResponseMappingType.search()
    eq_(resp_s.count(), 30)
    eq_(resp_s.filter(has_email=True).count(), 10)

    # Now purge everything older than 5 days and make sure things got
    # removed that should have gotten removed. Also check if there is
    # a journal entry for the purge operation.
    # (Fix: the original computed an identical cutoff twice; once is
    # enough.)
    cutoff = now - datetime.timedelta(days=5)
    purge_data(cutoff=cutoff)
    self.refresh()

    eq_(Response.objects.count(), 30)
    eq_(ResponseEmail.objects.count(), 5)
    eq_(ResponseEmail.objects.filter(
        opinion__created__gte=cutoff).count(),
        5)
    eq_(ResponseContext.objects.count(), 5)
    eq_(ResponseContext.objects.filter(
        opinion__created__gte=cutoff).count(),
        5)
    eq_(ResponsePI.objects.count(), 5)
    eq_(ResponsePI.objects.filter(
        opinion__created__gte=cutoff).count(),
        5)
    eq_(1,
        Record.objects.filter(action='purge_data').count())
    expected_msg = ('feedback_responseemail: 5, '
                    'feedback_responsecontext: 5, '
                    'feedback_responsepi: 5')
    eq_(expected_msg,
        Record.objects.get(action='purge_data').msg)

    # Everything should still be in the index, but the number of
    # things with has_email=True should go down.
    resp_s = ResponseMappingType.search()
    eq_(resp_s.count(), 30)
    eq_(resp_s.filter(has_email=True).count(), 5)
示例12: dashboard
# NOTE(review): this example was scraped from an aggregator page; the
# indentation was stripped and the tail of the function is omitted, so
# the code below is not runnable as-is. Code kept byte-identical.
def dashboard(request, template):
# Dashboard view: filters responses by querystring params and builds
# facet counts plus happy/sad histogram data.
page = smart_int(request.GET.get('page', 1), 1)
search_happy = request.GET.get('happy', None)
search_platform = request.GET.get('platform', None)
search_locale = request.GET.get('locale', None)
search_query = request.GET.get('q', None)
search_date_start = smart_datetime(request.GET.get('date_start', None),
fallback=None)
search_date_end = smart_datetime(request.GET.get('date_end', None),
fallback=None)
selected = request.GET.get('selected', None)
current_search = {'page': page}
search = ResponseMappingType.search()
f = F()
# If search happy is '0' or '1', set it to False or True, respectively.
search_happy = {'0': False, '1': True}.get(search_happy, None)
if search_happy in [False, True]:
f &= F(happy=search_happy)
current_search['happy'] = int(search_happy)
if search_platform:
f &= F(platform=search_platform)
current_search['platform'] = search_platform
if search_locale:
f &= F(locale=search_locale)
current_search['locale'] = search_locale
if search_date_start is None and search_date_end is None:
selected = '7d'
if search_date_end is None:
search_date_end = datetime.now()
if search_date_start is None:
search_date_start = search_date_end - timedelta(days=7)
current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
# Add one day, so that the search range includes the entire day.
end = search_date_end + timedelta(days=1)
# Note 'less than', not 'less than or equal', because of the added
# day above.
f &= F(created__lt=end)
current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
f &= F(created__gte=search_date_start)
if search_query:
fields = ['text', 'text_phrase', 'fuzzy']
# NOTE(review): the generator variable below shadows the filter
# object f inside the expression; it does not leak (genexp scope),
# but a different name would be clearer.
query = dict(('description__%s' % f, search_query) for f in fields)
search = search.query(or_=query)
current_search['q'] = search_query
search = search.filter(f).order_by('-created')
facets = search.facet('happy', 'platform', 'locale',
filtered=bool(f.filters))
# This loop does two things. First it maps 'T' -> True and 'F' ->
# False. This is probably something EU should be doing for
# us. Second, it restructures the data into a more convenient
# form.
counts = {'happy': {}, 'platform': {}, 'locale': {}}
for param, terms in facets.facet_counts().items():
for term in terms:
name = term['term']
if name == 'T':
name = True
elif name == 'F':
name = False
counts[param][name] = term['count']
filter_data = [
counts_to_options(counts['happy'].items(), name='happy',
display=_('Sentiment'),
display_map={True: _('Happy'), False: _('Sad')},
value_map={True: 1, False: 0}, checked=search_happy),
counts_to_options(counts['platform'].items(),
name='platform', display=_('Platform'), checked=search_platform),
counts_to_options(counts['locale'].items(),
name='locale', display=_('Locale'), checked=search_locale,
display_map=locale_name)
]
# Histogram data
happy_data = []
sad_data = []
histograms = search.facet_raw(
happy={
'date_histogram': {'interval': 'day', 'field': 'created'},
'facet_filter': (f & F(happy=True)).filters
},
sad={
'date_histogram': {'interval': 'day', 'field': 'created'},
'facet_filter': (f & F(happy=False)).filters
},
).facet_counts()
# p['time'] is number of milliseconds since the epoch. Which is
#......... rest of this example omitted by the aggregator .........
示例13: product_dashboard_firefox
# NOTE(review): this example was scraped from an aggregator page; the
# indentation was stripped and the tail of the function is omitted, so
# the code below is not runnable as-is. Code kept byte-identical.
def product_dashboard_firefox(request, prod):
# Per-product dashboard: totals histogram plus by-platform and
# by-version histograms for the selected date range.
# Note: Not localized because it's ultra-alpha.
template = 'analytics/product_dashboard_firefox.html'
current_search = {}
search_query = request.GET.get('q', None)
if search_query:
current_search['q'] = search_query
search_date_end = smart_date(
request.GET.get('date_end', None), fallback=None)
if search_date_end is None:
search_date_end = date.today()
current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
search_date_start = smart_date(
request.GET.get('date_start', None), fallback=None)
if search_date_start is None:
search_date_start = search_date_end - timedelta(days=7)
current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
histogram = generate_totals_histogram(
search_date_start, search_date_end, search_query, prod)
# FIXME: This is lame, but we need to make sure the item we're
# looking at is the totals.
assert histogram[1]['name'] == 'total'
totals_sum = sum([p[1] for p in histogram[1]['data']])
search = ResponseMappingType.search()
if search_query:
search = search.query(description__sqs=search_query)
base_f = F()
base_f &= F(product=prod.db_name)
base_f &= F(created__gte=search_date_start)
# NOTE(review): unlike the dashboard views this uses created__lt on
# the end date without adding a day, so the end date itself looks
# excluded here -- confirm that is intended.
base_f &= F(created__lt=search_date_end)
search = search.filter(base_f)
# Figure out the list of platforms and versions for this range.
plats_and_vers = (search
.facet('platform', 'version', size=50)
.facet_counts())
# Figure out the "by platform" histogram
platforms = [part['term'] for part in plats_and_vers['platform']]
platform_facet = {}
for plat in platforms:
plat_f = base_f & F(platform=plat)
platform_facet[plat if plat else 'unknown'] = {
'date_histogram': {'interval': 'day', 'field': 'created'},
'facet_filter': search._process_filters(plat_f.filters)
}
platform_counts = search.facet_raw(**platform_facet).facet_counts()
platforms_histogram = []
for key in platform_counts.keys():
data = dict((p['time'], p['count']) for p in platform_counts[key])
sum_counts = sum([p['count'] for p in platform_counts[key]])
if sum_counts < (totals_sum * 0.02):
# Skip platforms where the number of responses is less than
# 2% of the total.
continue
zero_fill(search_date_start, search_date_end, [data])
platforms_histogram.append({
'name': key,
'label': key,
'data': sorted(data.items()),
'lines': {'show': True, 'fill': False},
'points': {'show': True},
})
# Figure out the "by version" histogram
versions = [part['term'] for part in plats_and_vers['version']]
version_facet = {}
for vers in versions:
vers_f = base_f & F(version=vers)
version_facet['v' + vers if vers else 'unknown'] = {
'date_histogram': {'interval': 'day', 'field': 'created'},
'facet_filter': search._process_filters(vers_f.filters)
}
version_counts = search.facet_raw(**version_facet).facet_counts()
versions_histogram = []
for key in version_counts.keys():
data = dict((p['time'], p['count']) for p in version_counts[key])
sum_counts = sum([p['count'] for p in version_counts[key]])
if sum_counts < (totals_sum * 0.02):
# Skip versions where the number of responses is less than
# 2% of the total.
continue
zero_fill(search_date_start, search_date_end, [data])
versions_histogram.append({
'name': key,
'label': key,
#......... rest of this example omitted by the aggregator .........
示例14: dashboard
# NOTE(review): this example was scraped from an aggregator page; the
# indentation was stripped and the tail of the function is omitted, so
# the code below is not runnable as-is. Code kept byte-identical.
def dashboard(request, template):
# Dashboard view with atom/json feed output, pagination and
# navigation facets.
output_format = request.GET.get('format', None)
page = smart_int(request.GET.get('page', 1), 1)
# Note: If we add additional querystring fields, we need to add
# them to generate_dashboard_url.
search_happy = request.GET.get('happy', None)
search_platform = request.GET.get('platform', None)
search_locale = request.GET.get('locale', None)
search_product = request.GET.get('product', None)
search_version = request.GET.get('browser_version', None)
search_query = request.GET.get('q', None)
search_date_start = smart_datetime(request.GET.get('date_start', None),
fallback=None)
search_date_end = smart_datetime(request.GET.get('date_end', None),
fallback=None)
selected = request.GET.get('selected', None)
current_search = {'page': page}
search = ResponseMappingType.search()
f = F()
# If search happy is '0' or '1', set it to False or True, respectively.
search_happy = {'0': False, '1': True}.get(search_happy, None)
if search_happy in [False, True]:
f &= F(happy=search_happy)
current_search['happy'] = int(search_happy)
if search_platform:
f &= F(platform=search_platform)
current_search['platform'] = search_platform
if search_locale:
f &= F(locale=search_locale)
current_search['locale'] = search_locale
if search_product:
f &= F(product=search_product)
current_search['product'] = search_product
if search_version:
# Note: We only filter on version if we're filtering on
# product.
f &= F(browser_version=search_version)
current_search['browser_version'] = search_version
if search_date_start is None and search_date_end is None:
selected = '7d'
if search_date_end is None:
search_date_end = datetime.now()
if search_date_start is None:
search_date_start = search_date_end - timedelta(days=7)
current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
# Add one day, so that the search range includes the entire day.
end = search_date_end + timedelta(days=1)
# Note 'less than', not 'less than or equal', because of the added
# day above.
f &= F(created__lt=end)
current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
f &= F(created__gte=search_date_start)
if search_query:
current_search['q'] = search_query
es_query = generate_query_parsed('description', search_query)
search = search.query_raw(es_query)
search = search.filter(f).order_by('-created')
# If the user asked for a feed, give him/her a feed!
if output_format == 'atom':
return generate_atom_feed(request, search)
elif output_format == 'json':
return generate_json_feed(request, search)
# Search results and pagination
if page < 1:
page = 1
page_count = 20
# NOTE(review): 'end' is reused here, clobbering the date bound
# computed above; harmless since the date bound was already applied,
# but a different name would be clearer.
start = page_count * (page - 1)
end = start + page_count
search_count = search.count()
opinion_page = search[start:end]
# Navigation facet data
facets = search.facet(
'happy', 'platform', 'locale', 'product', 'browser_version',
filtered=bool(search._process_filters(f.filters)))
# This loop does two things. First it maps 'T' -> True and 'F' ->
# False. This is probably something EU should be doing for
# us. Second, it restructures the data into a more convenient
# form.
counts = {
'happy': {},
'platform': {},
'locale': {},
'product': {},
'browser_version': {}
#......... rest of this example omitted by the aggregator .........
示例15: analytics_search
# NOTE(review): this example was scraped from an aggregator page; the
# indentation was stripped and the tail of the function is omitted, so
# the code below is not runnable as-is. Code kept byte-identical.
def analytics_search(request):
# Analyzer search page: builds an ES filter from many querystring
# parameters.
template = 'analytics/analyzer/search.html'
output_format = request.GET.get('format', None)
page = smart_int(request.GET.get('page', 1), 1)
# Note: If we add additional querystring fields, we need to add
# them to generate_dashboard_url.
search_happy = request.GET.get('happy', None)
search_has_email = request.GET.get('has_email', None)
search_platform = request.GET.get('platform', None)
search_locale = request.GET.get('locale', None)
search_country = request.GET.get('country', None)
search_product = request.GET.get('product', None)
search_domain = request.GET.get('domain', None)
search_api = smart_int(request.GET.get('api', None), fallback=None)
search_version = request.GET.get('version', None)
search_query = request.GET.get('q', None)
search_date_start = smart_date(
request.GET.get('date_start', None), fallback=None)
search_date_end = smart_date(
request.GET.get('date_end', None), fallback=None)
search_bigram = request.GET.get('bigram', None)
search_source = request.GET.get('source', None)
search_campaign = request.GET.get('campaign', None)
search_organic = request.GET.get('organic', None)
selected = request.GET.get('selected', None)
filter_data = []
current_search = {'page': page}
search = ResponseMappingType.search()
f = F()
# If search happy is '0' or '1', set it to False or True, respectively.
search_happy = {'0': False, '1': True}.get(search_happy, None)
if search_happy in [False, True]:
f &= F(happy=search_happy)
current_search['happy'] = int(search_happy)
# If search has_email is '0' or '1', set it to False or True,
# respectively.
search_has_email = {'0': False, '1': True}.get(search_has_email, None)
if search_has_email in [False, True]:
f &= F(has_email=search_has_email)
current_search['has_email'] = int(search_has_email)
def unknown_to_empty(text):
"""Convert "Unknown" to "" to support old links"""
return u'' if text.lower() == u'unknown' else text
if search_platform is not None:
f &= F(platform=unknown_to_empty(search_platform))
current_search['platform'] = search_platform
if search_locale is not None:
f &= F(locale=unknown_to_empty(search_locale))
current_search['locale'] = search_locale
if search_product is not None:
f &= F(product=unknown_to_empty(search_product))
current_search['product'] = search_product
# Only show the version if there's a product.
if search_version is not None:
# Note: We only filter on version if we're filtering on
# product.
f &= F(version=unknown_to_empty(search_version))
current_search['version'] = search_version
# Only show the country if the product is Firefox OS.
if search_country is not None and search_product == 'Firefox OS':
f &= F(country=unknown_to_empty(search_country))
current_search['country'] = search_country
if search_domain is not None:
f &= F(url_domain=unknown_to_empty(search_domain))
current_search['domain'] = search_domain
if search_api is not None:
f &= F(api=search_api)
current_search['api'] = search_api
if search_date_start is None and search_date_end is None:
selected = '7d'
if search_date_end is None:
search_date_end = datetime.now()
if search_date_start is None:
search_date_start = search_date_end - timedelta(days=7)
current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
# Add one day, so that the search range includes the entire day.
end = search_date_end + timedelta(days=1)
# Note 'less than', not 'less than or equal', because of the added
# day above.
f &= F(created__lt=end)
current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
f &= F(created__gte=search_date_start)
if search_query:
current_search['q'] = search_query
search = search.query(description__sqs=search_query)
#......... rest of this example omitted by the aggregator .........