

Python ResponseMappingType.search Method Code Examples

This article collects typical usage examples of the Python method fjord.feedback.models.ResponseMappingType.search. If you are wondering what ResponseMappingType.search does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples for the containing class, fjord.feedback.models.ResponseMappingType.


The sections below show 13 code examples of the ResponseMappingType.search method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Python code examples.
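
Before working through the examples, the following minimal sketch (not drawn from any single example below) shows the pattern they all share: ResponseMappingType.search() returns a chainable search object that supports filter(), query(), facet calls, ordering, counting, and slicing. The F import path and the field names used here (happy, created, description) are assumptions inferred from the examples that follow.

from datetime import datetime, timedelta

from elasticutils import F  # import path assumed; the excerpts below omit their imports
from fjord.feedback.models import ResponseMappingType

# Start a search over the indexed feedback responses.
search = ResponseMappingType.search()

# Build a filter: happy responses created within the last 7 days.
f = F(happy=True) & F(created__gte=datetime.now() - timedelta(days=7))

# filter()/order_by() return new search objects, so reassign the result.
recent_happy = search.filter(f).order_by('-created')

# count() and slicing execute the search against Elasticsearch.
print(recent_happy.count())
for hit in recent_happy[:10]:
    print(hit.description)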

Example 1: test_index_chunk_task

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
    def test_index_chunk_task(self):
        responses = [response(save=True) for i in range(10)]

        # With live indexing, that'll create items in the index. Since
        # we want to test index_chunk_task, we need a clean index to
        # start with so we delete and recreate it.
        self.setup_indexes(empty=True)

        # Verify there's nothing in the index.
        eq_(len(ResponseMappingType.search()), 0)

        # Create the record and the chunk and then run it through
        # celery.
        batch_id = 'ou812'
        rec = record(batch_id=batch_id, save=True)

        chunk = (ResponseMappingType, [item.id for item in responses])
        index_chunk_task.delay(get_index(), batch_id, rec.id, chunk)

        ResponseMappingType.refresh_index()

        # Verify everything is in the index now.
        eq_(len(ResponseMappingType.search()), 10)

        # Verify the record was marked succeeded.
        rec = Record.objects.get(pk=rec.id)
        eq_(rec.status, Record.STATUS_SUCCESS)
Developer: adityaputra, Project: fjord, Lines of code: 29, Source: test_tasks.py

Example 2: test_purge

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
    def test_purge(self):
        now = datetime.datetime.now()
        cutoff = now - datetime.timedelta(days=5)

        # Create 10 ResponseEmail and 10 ResponseContext objs--one of
        # each for each day of the last 10 days.
        for i in range(10):
            ResponseEmailFactory(
                opinion__created=(now - datetime.timedelta(days=i))
            )
            ResponseContextFactory(
                opinion__created=(now - datetime.timedelta(days=i))
            )

        # Since creating the objects and indexing them happens very
        # quickly in tests, we hit a race condition and the has_email
        # column ends up being false. So instead we just drop the
        # index and rebuild it.
        self.setup_indexes()

        # Make sure everything is in the db
        eq_(Response.objects.count(), 20)
        eq_(ResponseEmail.objects.count(), 10)
        eq_(ResponseContext.objects.count(), 10)

        # Make sure everything is in the index
        resp_s = ResponseMappingType.search()
        eq_(resp_s.count(), 20)
        eq_(resp_s.filter(has_email=True).count(), 10)

        # Now purge everything older than 5 days and make sure things
        # got removed that should have gotten removed
        cutoff = now - datetime.timedelta(days=5)
        purge_data(cutoff=cutoff)

        self.refresh()

        eq_(Response.objects.count(), 20)
        eq_(ResponseEmail.objects.count(), 5)
        eq_(ResponseEmail.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        eq_(ResponseContext.objects.count(), 5)
        eq_(ResponseContext.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)

        # Everything should still be in the index, but the number of
        # things with has_email=True should go down
        resp_s = ResponseMappingType.search()
        eq_(resp_s.count(), 20)
        eq_(resp_s.filter(has_email=True).count(), 5)
Developer: DerekRies, Project: fjord, Lines of code: 54, Source: test_models.py

Example 3: analytics_hourly_histogram

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def analytics_hourly_histogram(request):
    """Shows an hourly histogram for the last 5 days of all responses"""
    template = 'analytics/analyzer/hourly_histogram.html'

    date_end = smart_date(request.GET.get('date_end', None), fallback=None)

    if date_end is None:
        date_end = date.today()

    date_start = date_end - timedelta(days=5)

    search = ResponseMappingType.search()
    filters = F(created__gte=date_start, created__lte=date_end)
    search.filter(filters)

    hourly_histogram = search.facet_raw(
        hourly={
            'date_histogram': {
                'interval': 'hour',
                'field': 'created'
            },
            'facet_filter': search._process_filters(filters.filters)
        }).facet_counts()

    hourly_data = dict(
        (p['time'], p['count']) for p in hourly_histogram['hourly'])

    hour = 60 * 60 * 1000.0
    zero_fill(date_start, date_end, [hourly_data], spacing=hour)

    # FIXME: This is goofy. After zero_fill, we end up with a bunch of
    # trailing zeros for reasons I don't really understand, so instead
    # of fixing that, I'm just going to remove them here.
    hourly_data = sorted(hourly_data.items())
    while hourly_data and hourly_data[-1][1] == 0:
        hourly_data.pop(-1)

    histogram = [
        {
            'label': 'Hourly',
            'name': 'hourly',
            'data': hourly_data
        },
    ]

    return render(request, template, {
        'histogram': histogram,
        'start_date': date_start,
        'end_date': date_end
    })
Developer: DerekRies, Project: fjord, Lines of code: 52, Source: analyzer_views.py

Example 4: timezone_view

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def timezone_view(request):
    """Admin view showing times and timezones in data."""
    # Note: This is an admin page that gets used once in a blue moon.
    # As such, I'm taking some liberties (hand-indexing the response,
    # time.sleep, etc) that I would never take if it was used more
    # often or was viewable by users. If these two assumptions ever
    # change, then this should be rewritten.

    from elasticutils.contrib.django import get_es

    from fjord.feedback.models import Response, ResponseMappingType
    from fjord.feedback.tests import ResponseFactory
    from fjord.search.index import get_index

    server_time = datetime.now()

    # Create a new response.
    resp = ResponseFactory.create()
    resp_time = resp.created

    # Index the response by hand so we know it gets to Elasticsearch. Otherwise
    # it gets done by celery and we don't know how long that'll take.
    doc = ResponseMappingType.extract_document(resp.id)
    ResponseMappingType.index(doc, resp.id)

    # Fetch the response from the db.
    resp = Response.objects.get(id=resp.id)
    resp2_time = resp.created

    # Refresh and sleep 5 seconds as a hand-wavey way to make sure
    # that Elasticsearch has had time to refresh the index.
    get_es().indices.refresh(get_index())
    time.sleep(5)

    es_time = ResponseMappingType.search().filter(id=resp.id)[0].created

    # Delete the test response we created.
    resp.delete()

    return render(request, 'admin/timezone_view.html', {
        'server_time': server_time,
        'resp_time': resp_time,
        'resp2_time': resp2_time,
        'es_time': es_time
    })
Developer: rlr, Project: fjord, Lines of code: 47, Source: admin.py

Example 5: analytics_duplicates

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def analytics_duplicates(request):
    """Shows all duplicate descriptions over the last n days"""
    template = 'analytics/analyzer/duplicates.html'

    n = 14

    responses = (ResponseMappingType.search()
                 .filter(created__gte=datetime.now() - timedelta(days=n))
                 .values_dict('description', 'happy', 'created', 'locale',
                              'user_agent', 'id')
                 .order_by('created').everything())

    responses = ResponseMappingType.reshape(responses)

    total_count = len(responses)

    response_dupes = {}
    for resp in responses:
        response_dupes.setdefault(resp['description'], []).append(resp)

    response_dupes = [
        (key, val) for key, val in response_dupes.items()
        if len(val) > 1
    ]

    # convert the dict into a list of tuples sorted by the number of
    # responses per tuple largest number first
    response_dupes = sorted(response_dupes, key=lambda item: len(item[1]) * -1)

    # duplicate_count -> count
    # i.e. "how many responses had 2 duplicates?"
    summary_counts = defaultdict(int)
    for desc, responses in response_dupes:
        summary_counts[len(responses)] = summary_counts[len(responses)] + 1
    summary_counts = sorted(summary_counts.items(), key=lambda item: item[0])

    return render(request, template, {
        'n': 14,
        'response_dupes': response_dupes,
        'render_time': datetime.now(),
        'summary_counts': summary_counts,
        'total_count': total_count,
    })
Developer: DerekRies, Project: fjord, Lines of code: 45, Source: analyzer_views.py

Example 6: analytics_occurrences

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def analytics_occurrences(request):
    template = 'analytics/analyzer/occurrences.html'

    first_facet_bi = None
    first_params = {}
    first_facet_total = 0

    second_facet_bi = None
    second_params = {}
    second_facet_total = 0

    if 'product' in request.GET:
        form = OccurrencesComparisonForm(request.GET)
        if form.is_valid():
            cleaned = form.cleaned_data

            # First item
            first_resp_s = (ResponseMappingType.search()
                            .filter(product=cleaned['product'])
                            .filter(locale__startswith='en'))

            first_params['product'] = cleaned['product']

            if cleaned['first_version']:
                first_resp_s = first_resp_s.filter(
                    version=cleaned['first_version'])
                first_params['version'] = cleaned['first_version']
            if cleaned['first_start_date']:
                first_resp_s = first_resp_s.filter(
                    created__gte=cleaned['first_start_date'])
                first_params['date_start'] = cleaned['first_start_date']
            if cleaned['first_end_date']:
                first_resp_s = first_resp_s.filter(
                    created__lte=cleaned['first_end_date'])
                first_params['date_end'] = cleaned['first_end_date']
            if cleaned['first_search_term']:
                first_resp_s = first_resp_s.query(
                    description__match=cleaned['first_search_term'])
                first_params['q'] = cleaned['first_search_term']

            if ('date_start' not in first_params
                    and 'date_end' not in first_params):

                # FIXME - If there's no start date, then we want
                # "everything" so we use a hard-coded 2013-01-01 date
                # here to hack that.
                #
                # Better way might be to change the dashboard to allow
                # for an "infinite" range, but there's no other use
                # case for that and the ranges are done in the ui--not
                # in the backend.
                first_params['date_start'] = '2013-01-01'

            # Have to do raw because we want a size > 10.
            first_resp_s = first_resp_s.facet('description_bigrams',
                                              size=30, filtered=True)
            first_resp_s = first_resp_s[0:0]

            first_facet_total = first_resp_s.count()
            first_facet = first_resp_s.facet_counts()

            first_facet_bi = first_facet['description_bigrams']
            first_facet_bi = sorted(
                first_facet_bi, key=lambda item: -item['count'])

            if (cleaned['second_version']
                    or cleaned['second_search_term']
                    or cleaned['second_start_date']):

                second_resp_s = (ResponseMappingType.search()
                                 .filter(product=cleaned['product'])
                                 .filter(locale__startswith='en'))

                second_params['product'] = cleaned['product']

                if cleaned['second_version']:
                    second_resp_s = second_resp_s.filter(
                        version=cleaned['second_version'])
                    second_params['version'] = cleaned['second_version']
                if cleaned['second_start_date']:
                    second_resp_s = second_resp_s.filter(
                        created__gte=cleaned['second_start_date'])
                    second_params['date_start'] = cleaned['second_start_date']
                if cleaned['second_end_date']:
                    second_resp_s = second_resp_s.filter(
                        created__lte=cleaned['second_end_date'])
                    second_params['date_end'] = cleaned['second_end_date']
                if form.cleaned_data['second_search_term']:
                    second_resp_s = second_resp_s.query(
                        description__match=cleaned['second_search_term'])
                    second_params['q'] = cleaned['second_search_term']

                if ('date_start' not in second_params
                        and 'date_end' not in second_params):

                    # FIXME - If there's no start date, then we want
                    # "everything" so we use a hard-coded 2013-01-01 date
                    # here to hack that.
                    #
                    # Better way might be to change the dashboard to allow
#......... (remaining code omitted) .........
Developer: DerekRies, Project: fjord, Lines of code: 103, Source: analyzer_views.py

Example 7: analytics_search

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def analytics_search(request):
    template = 'analytics/analyzer/search.html'

    output_format = request.GET.get('format', None)
    page = smart_int(request.GET.get('page', 1), 1)

    # Note: If we add additional querystring fields, we need to add
    # them to generate_dashboard_url.
    search_happy = request.GET.get('happy', None)
    search_has_email = request.GET.get('has_email', None)
    search_platform = request.GET.get('platform', None)
    search_locale = request.GET.get('locale', None)
    search_country = request.GET.get('country', None)
    search_product = request.GET.get('product', None)
    search_domain = request.GET.get('domain', None)
    search_api = smart_int(request.GET.get('api', None), fallback=None)
    search_version = request.GET.get('version', None)
    search_query = request.GET.get('q', None)
    search_date_start = smart_date(
        request.GET.get('date_start', None), fallback=None)
    search_date_end = smart_date(
        request.GET.get('date_end', None), fallback=None)
    search_bigram = request.GET.get('bigram', None)
    search_source = request.GET.get('source', None)
    search_campaign = request.GET.get('campaign', None)
    search_organic = request.GET.get('organic', None)
    selected = request.GET.get('selected', None)

    filter_data = []
    current_search = {'page': page}

    search = ResponseMappingType.search()
    f = F()
    # If search happy is '0' or '1', set it to False or True, respectively.
    search_happy = {'0': False, '1': True}.get(search_happy, None)
    if search_happy in [False, True]:
        f &= F(happy=search_happy)
        current_search['happy'] = int(search_happy)

    # If search has_email is '0' or '1', set it to False or True,
    # respectively.
    search_has_email = {'0': False, '1': True}.get(search_has_email, None)
    if search_has_email in [False, True]:
        f &= F(has_email=search_has_email)
        current_search['has_email'] = int(search_has_email)

    def unknown_to_empty(text):
        """Convert "Unknown" to "" to support old links"""
        return u'' if text.lower() == u'unknown' else text

    if search_platform is not None:
        f &= F(platform=unknown_to_empty(search_platform))
        current_search['platform'] = search_platform
    if search_locale is not None:
        f &= F(locale=unknown_to_empty(search_locale))
        current_search['locale'] = search_locale
    if search_product is not None:
        f &= F(product=unknown_to_empty(search_product))
        current_search['product'] = search_product

        # Only show the version if there's a product.
        if search_version is not None:
            # Note: We only filter on version if we're filtering on
            # product.
            f &= F(version=unknown_to_empty(search_version))
            current_search['version'] = search_version

        # Only show the country if the product is Firefox OS.
        if search_country is not None and search_product == 'Firefox OS':
            f &= F(country=unknown_to_empty(search_country))
            current_search['country'] = search_country
    if search_domain is not None:
        f &= F(url_domain=unknown_to_empty(search_domain))
        current_search['domain'] = search_domain
    if search_api is not None:
        f &= F(api=search_api)
        current_search['api'] = search_api

    if search_date_start is None and search_date_end is None:
        selected = '7d'

    if search_date_end is None:
        search_date_end = datetime.now()
    if search_date_start is None:
        search_date_start = search_date_end - timedelta(days=7)

    current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
    # Add one day, so that the search range includes the entire day.
    end = search_date_end + timedelta(days=1)
    # Note 'less than', not 'less than or equal', because of the added
    # day above.
    f &= F(created__lt=end)

    current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
    f &= F(created__gte=search_date_start)

    if search_query:
        current_search['q'] = search_query
        search = search.query(description__sqs=search_query)

#......... (remaining code omitted) .........
Developer: DerekRies, Project: fjord, Lines of code: 103, Source: analyzer_views.py

Example 8: dashboard

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def dashboard(request):
    template = 'analytics/dashboard.html'

    output_format = request.GET.get('format', None)
    page = smart_int(request.GET.get('page', 1), 1)

    # Note: If we add additional querystring fields, we need to add
    # them to generate_dashboard_url.
    search_happy = request.GET.get('happy', None)
    search_platform = request.GET.get('platform', None)
    search_locale = request.GET.get('locale', None)
    search_product = request.GET.get('product', None)
    search_version = request.GET.get('version', None)
    search_query = request.GET.get('q', None)
    search_date_start = smart_date(
        request.GET.get('date_start', None), fallback=None)
    search_date_end = smart_date(
        request.GET.get('date_end', None), fallback=None)
    search_bigram = request.GET.get('bigram', None)
    selected = request.GET.get('selected', None)

    filter_data = []
    current_search = {'page': page}

    search = ResponseMappingType.search()
    f = F()
    # If search happy is '0' or '1', set it to False or True, respectively.
    search_happy = {'0': False, '1': True}.get(search_happy, None)
    if search_happy in [False, True]:
        f &= F(happy=search_happy)
        current_search['happy'] = int(search_happy)

    def unknown_to_empty(text):
        """Convert "Unknown" to "" to support old links"""
        return u'' if text.lower() == u'unknown' else text

    if search_platform is not None:
        f &= F(platform=unknown_to_empty(search_platform))
        current_search['platform'] = search_platform
    if search_locale is not None:
        f &= F(locale=unknown_to_empty(search_locale))
        current_search['locale'] = search_locale

    visible_products = [
        prod.encode('utf-8')
        for prod in Product.objects.public().values_list('db_name', flat=True)
    ]

    # This covers the "unknown" product which is also visible.
    visible_products.append('')

    if search_product in visible_products:
        f &= F(product=unknown_to_empty(search_product))
        current_search['product'] = search_product

        if search_version is not None:
            # Note: We only filter on version if we're filtering on
            # product.
            f &= F(version=unknown_to_empty(search_version))
            current_search['version'] = search_version
    else:
        f &= F(product__in=visible_products)

    if search_date_start is None and search_date_end is None:
        selected = '7d'

    if search_date_end is None:
        search_date_end = date.today()
    if search_date_start is None:
        search_date_start = search_date_end - timedelta(days=7)

    # If the start and end dates are inverted, switch them into proper
    # chronological order
    search_date_start, search_date_end = sorted(
        [search_date_start, search_date_end])

    # Restrict the frontpage dashboard to only show the last 6 months
    # of data
    six_months_ago = date.today() - timedelta(days=180)
    search_date_start = max(six_months_ago, search_date_start)
    search_date_end = max(search_date_start, search_date_end)

    current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
    f &= F(created__lte=search_date_end)

    current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
    f &= F(created__gte=search_date_start)

    if search_query:
        current_search['q'] = search_query
        search = search.query(description__sqs=search_query)

    if search_bigram is not None:
        f &= F(description_bigrams=search_bigram)
        filter_data.append({
            'display': _('Bigram'),
            'name': 'bigram',
            'options': [{
                'count': 'all',
                'name': search_bigram,
#......... (remaining code omitted) .........
Developer: rlr, Project: fjord, Lines of code: 103, Source: views.py

Example 9: test_purge

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
    def test_purge(self):
        now = datetime.datetime.now()
        cutoff = now - datetime.timedelta(days=5)

        # Create 10 objs of each type--one for each day for the last
        # 10 days.
        for i in range(10):
            ResponseEmailFactory(
                opinion__created=(now - datetime.timedelta(days=i))
            )
            ResponseContextFactory(
                opinion__created=(now - datetime.timedelta(days=i))
            )
            ResponsePIFactory(
                opinion__created=(now - datetime.timedelta(days=i))
            )

        # Note that this creates 30 Response objects.

        # Since creating the objects and indexing them happens very
        # quickly in tests, we hit a race condition and the has_email
        # column ends up being false. So instead we just drop the
        # index and rebuild it.
        self.setup_indexes()

        # Make sure everything is in the db
        eq_(Response.objects.count(), 30)
        eq_(ResponseEmail.objects.count(), 10)
        eq_(ResponseContext.objects.count(), 10)
        eq_(ResponsePI.objects.count(), 10)

        # Make sure everything is in the index
        resp_s = ResponseMappingType.search()
        eq_(resp_s.count(), 30)
        eq_(resp_s.filter(has_email=True).count(), 10)

        # Now purge everything older than 5 days and make sure things
        # got removed that should have gotten removed. Also check if
        # there is a journal entry for the purge operation.
        cutoff = now - datetime.timedelta(days=5)
        purge_data(cutoff=cutoff)

        self.refresh()

        eq_(Response.objects.count(), 30)
        eq_(ResponseEmail.objects.count(), 5)
        eq_(ResponseEmail.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        eq_(ResponseContext.objects.count(), 5)
        eq_(ResponseContext.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        eq_(ResponsePI.objects.count(), 5)
        eq_(ResponsePI.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        eq_(1,
            Record.objects.filter(action='purge_data').count())
        expected_msg = ('feedback_responseemail: 5, '
                        'feedback_responsecontext: 5, '
                        'feedback_responsepi: 5')
        eq_(expected_msg,
            Record.objects.get(action='purge_data').msg)

        # Everything should still be in the index, but the number of
        # things with has_email=True should go down
        resp_s = ResponseMappingType.search()
        eq_(resp_s.count(), 30)
        eq_(resp_s.filter(has_email=True).count(), 5)
Developer: rlr, Project: fjord, Lines of code: 72, Source: test_models.py

Example 10: dashboard

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def dashboard(request, template):
    page = smart_int(request.GET.get('page', 1), 1)
    search_happy = request.GET.get('happy', None)
    search_platform = request.GET.get('platform', None)
    search_locale = request.GET.get('locale', None)
    search_query = request.GET.get('q', None)
    search_date_start = smart_datetime(request.GET.get('date_start', None),
                                       fallback=None)
    search_date_end = smart_datetime(request.GET.get('date_end', None),
                                     fallback=None)
    selected = request.GET.get('selected', None)

    current_search = {'page': page}

    search = ResponseMappingType.search()
    f = F()
    # If search happy is '0' or '1', set it to False or True, respectively.
    search_happy = {'0': False, '1': True}.get(search_happy, None)
    if search_happy in [False, True]:
        f &= F(happy=search_happy)
        current_search['happy'] = int(search_happy)
    if search_platform:
        f &= F(platform=search_platform)
        current_search['platform'] = search_platform
    if search_locale:
        f &= F(locale=search_locale)
        current_search['locale'] = search_locale

    if search_date_start is None and search_date_end is None:
        selected = '7d'

    if search_date_end is None:
        search_date_end = datetime.now()
    if search_date_start is None:
        search_date_start = search_date_end - timedelta(days=7)

    current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
    # Add one day, so that the search range includes the entire day.
    end = search_date_end + timedelta(days=1)
    # Note 'less than', not 'less than or equal', because of the added
    # day above.
    f &= F(created__lt=end)

    current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
    f &= F(created__gte=search_date_start)

    if search_query:
        fields = ['text', 'text_phrase', 'fuzzy']
        query = dict(('description__%s' % f, search_query) for f in fields)
        search = search.query(or_=query)
        current_search['q'] = search_query

    search = search.filter(f).order_by('-created')

    facets = search.facet('happy', 'platform', 'locale',
        filtered=bool(f.filters))

    # This loop does two things. First it maps 'T' -> True and 'F' ->
    # False.  This is probably something EU should be doing for
    # us. Second, it restructures the data into a more convenient
    # form.
    counts = {'happy': {}, 'platform': {}, 'locale': {}}
    for param, terms in facets.facet_counts().items():
        for term in terms:
            name = term['term']
            if name == 'T':
                name = True
            elif name == 'F':
                name = False

            counts[param][name] = term['count']

    filter_data = [
        counts_to_options(counts['happy'].items(), name='happy',
            display=_('Sentiment'),
            display_map={True: _('Happy'), False: _('Sad')},
            value_map={True: 1, False: 0}, checked=search_happy),
        counts_to_options(counts['platform'].items(),
            name='platform', display=_('Platform'), checked=search_platform),
        counts_to_options(counts['locale'].items(),
            name='locale', display=_('Locale'), checked=search_locale,
            display_map=locale_name)
    ]

    # Histogram data
    happy_data = []
    sad_data = []

    histograms = search.facet_raw(
        happy={
            'date_histogram': {'interval': 'day', 'field': 'created'},
            'facet_filter': (f & F(happy=True)).filters
        },
        sad={
            'date_histogram': {'interval': 'day', 'field': 'created'},
            'facet_filter': (f & F(happy=False)).filters
        },
    ).facet_counts()

    # p['time'] is number of milliseconds since the epoch. Which is
#......... (remaining code omitted) .........
Developer: bobsilverberg, Project: fjord, Lines of code: 103, Source: views.py

Example 11: product_dashboard_firefox

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def product_dashboard_firefox(request, prod):
    # Note: Not localized because it's ultra-alpha.
    template = 'analytics/product_dashboard_firefox.html'
    current_search = {}

    search_query = request.GET.get('q', None)
    if search_query:
        current_search['q'] = search_query

    search_date_end = smart_date(
        request.GET.get('date_end', None), fallback=None)
    if search_date_end is None:
        search_date_end = date.today()
    current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')

    search_date_start = smart_date(
        request.GET.get('date_start', None), fallback=None)
    if search_date_start is None:
        search_date_start = search_date_end - timedelta(days=7)
    current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')

    histogram = generate_totals_histogram(
        search_date_start, search_date_end, search_query, prod)

    # FIXME: This is lame, but we need to make sure the item we're
    # looking at is the totals.
    assert histogram[1]['name'] == 'total'
    totals_sum = sum([p[1] for p in histogram[1]['data']])

    search = ResponseMappingType.search()
    if search_query:
        search = search.query(description__sqs=search_query)

    base_f = F()
    base_f &= F(product=prod.db_name)
    base_f &= F(created__gte=search_date_start)
    base_f &= F(created__lt=search_date_end)

    search = search.filter(base_f)

    # Figure out the list of platforms and versions for this range.
    plats_and_vers = (search
                      .facet('platform', 'version', size=50)
                      .facet_counts())

    # Figure out the "by platform" histogram
    platforms = [part['term'] for part in plats_and_vers['platform']]
    platform_facet = {}
    for plat in platforms:
        plat_f = base_f & F(platform=plat)
        platform_facet[plat if plat else 'unknown'] = {
            'date_histogram': {'interval': 'day', 'field': 'created'},
            'facet_filter': search._process_filters(plat_f.filters)
        }

    platform_counts = search.facet_raw(**platform_facet).facet_counts()
    platforms_histogram = []
    for key in platform_counts.keys():
        data = dict((p['time'], p['count']) for p in platform_counts[key])

        sum_counts = sum([p['count'] for p in platform_counts[key]])
        if sum_counts < (totals_sum * 0.02):
            # Skip platforms where the number of responses is less than
            # 2% of the total.
            continue

        zero_fill(search_date_start, search_date_end, [data])
        platforms_histogram.append({
            'name': key,
            'label': key,
            'data': sorted(data.items()),
            'lines': {'show': True, 'fill': False},
            'points': {'show': True},
        })

    # Figure out the "by version" histogram
    versions = [part['term'] for part in plats_and_vers['version']]
    version_facet = {}
    for vers in versions:
        vers_f = base_f & F(version=vers)
        version_facet['v' + vers if vers else 'unknown'] = {
            'date_histogram': {'interval': 'day', 'field': 'created'},
            'facet_filter': search._process_filters(vers_f.filters)
        }

    version_counts = search.facet_raw(**version_facet).facet_counts()
    versions_histogram = []
    for key in version_counts.keys():
        data = dict((p['time'], p['count']) for p in version_counts[key])

        sum_counts = sum([p['count'] for p in version_counts[key]])
        if sum_counts < (totals_sum * 0.02):
            # Skip versions where the number of responses is less than
            # 2% of the total.
            continue

        zero_fill(search_date_start, search_date_end, [data])
        versions_histogram.append({
            'name': key,
            'label': key,
#......... (remaining code omitted) .........
Developer: gregglind, Project: fjord, Lines of code: 103, Source: views.py

Example 12: generate_totals_histogram

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def generate_totals_histogram(search_date_start, search_date_end,
                              search_query, prod):
    # Note: Not localized because it's ultra-alpha.
    search_date_start = search_date_start - timedelta(days=1)

    search = ResponseMappingType.search()

    if search_query:
        search = search.query(description__sqs=search_query)

    f = F()
    f &= F(product=prod.db_name)

    f &= F(created__gte=search_date_start)
    f &= F(created__lt=search_date_end)

    happy_f = f & F(happy=True)

    totals_histogram = search.facet_raw(
        total={
            'date_histogram': {'interval': 'day', 'field': 'created'},
            'facet_filter': search._process_filters(f.filters)
        },
        happy={
            'date_histogram': {'interval': 'day', 'field': 'created'},
            'facet_filter': search._process_filters(happy_f.filters)
        },
    ).facet_counts()

    totals_data = dict((p['time'], p['count'])
                       for p in totals_histogram['total'])
    zero_fill(search_date_start, search_date_end, [totals_data])
    totals_data = sorted(totals_data.items())

    happy_data = dict((p['time'], p['count'])
                      for p in totals_histogram['happy'])
    zero_fill(search_date_start, search_date_end, [happy_data])
    happy_data = sorted(happy_data.items())

    up_deltas = []
    down_deltas = []
    for i, hap in enumerate(happy_data):
        if i == 0:
            continue

        yesterday = 0
        today = 0

        # Figure out yesterday and today as a percent to one
        # significant digit.
        if happy_data[i-1][1] and totals_data[i-1][1]:
            yesterday = (
                int(happy_data[i-1][1] * 1.0
                    / totals_data[i-1][1] * 1000)
                / 10.0
            )

        if happy_data[i][1] and totals_data[i][1]:
            today = (
                int(happy_data[i][1] * 1.0
                    / totals_data[i][1] * 1000)
                / 10.0
            )

        if (today - yesterday) >= 0:
            up_deltas.append((happy_data[i][0], today - yesterday))
        else:
            down_deltas.append((happy_data[i][0], today - yesterday))

    # Nix the first total because it's not in our date range
    totals_data = totals_data[1:]

    histogram = [
        {
            'name': 'zero',
            'data': [(totals_data[0][0], 0), (totals_data[-1][0], 0)],
            'yaxis': 2,
            'lines': {'show': True, 'fill': False, 'lineWidth': 1,
                      'shadowSize': 0},
            'color': '#dddddd',
        },
        {
            'name': 'total',
            'label': 'Total # responses',
            'data': totals_data,
            'yaxis': 1,
            'lines': {'show': True, 'fill': False},
            'points': {'show': True},
            'color': '#3E72BF',
        },
        {
            'name': 'updeltas',
            'label': 'Percent change in sentiment upwards',
            'data': up_deltas,
            'yaxis': 2,
            'bars': {'show': True, 'lineWidth': 3},
            'points': {'show': True},
            'color': '#55E744',
        },
        {
#......... (remaining code omitted) .........
Developer: gregglind, Project: fjord, Lines of code: 103, Source: views.py

Example 13: dashboard

# Required import: from fjord.feedback.models import ResponseMappingType [as alias]
# Or: from fjord.feedback.models.ResponseMappingType import search [as alias]
def dashboard(request, template):
    output_format = request.GET.get('format', None)
    page = smart_int(request.GET.get('page', 1), 1)

    # Note: If we add additional querystring fields, we need to add
    # them to generate_dashboard_url.
    search_happy = request.GET.get('happy', None)
    search_platform = request.GET.get('platform', None)
    search_locale = request.GET.get('locale', None)
    search_product = request.GET.get('product', None)
    search_version = request.GET.get('browser_version', None)
    search_query = request.GET.get('q', None)
    search_date_start = smart_datetime(request.GET.get('date_start', None),
                                       fallback=None)
    search_date_end = smart_datetime(request.GET.get('date_end', None),
                                     fallback=None)
    selected = request.GET.get('selected', None)

    current_search = {'page': page}

    search = ResponseMappingType.search()
    f = F()
    # If search happy is '0' or '1', set it to False or True, respectively.
    search_happy = {'0': False, '1': True}.get(search_happy, None)
    if search_happy in [False, True]:
        f &= F(happy=search_happy)
        current_search['happy'] = int(search_happy)
    if search_platform:
        f &= F(platform=search_platform)
        current_search['platform'] = search_platform
    if search_locale:
        f &= F(locale=search_locale)
        current_search['locale'] = search_locale
    if search_product:
        f &= F(product=search_product)
        current_search['product'] = search_product

        if search_version:
            # Note: We only filter on version if we're filtering on
            # product.
            f &= F(browser_version=search_version)
            current_search['browser_version'] = search_version

    if search_date_start is None and search_date_end is None:
        selected = '7d'

    if search_date_end is None:
        search_date_end = datetime.now()
    if search_date_start is None:
        search_date_start = search_date_end - timedelta(days=7)

    current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
    # Add one day, so that the search range includes the entire day.
    end = search_date_end + timedelta(days=1)
    # Note 'less than', not 'less than or equal', because of the added
    # day above.
    f &= F(created__lt=end)

    current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
    f &= F(created__gte=search_date_start)

    if search_query:
        current_search['q'] = search_query
        es_query = generate_query_parsed('description', search_query)
        search = search.query_raw(es_query)

    search = search.filter(f).order_by('-created')

    # If the user asked for a feed, give him/her a feed!
    if output_format == 'atom':
        return generate_atom_feed(request, search)

    elif output_format == 'json':
        return generate_json_feed(request, search)

    # Search results and pagination
    if page < 1:
        page = 1
    page_count = 20
    start = page_count * (page - 1)
    end = start + page_count

    search_count = search.count()
    opinion_page = search[start:end]

    # Navigation facet data
    facets = search.facet(
        'happy', 'platform', 'locale', 'product', 'browser_version',
        filtered=bool(search._process_filters(f.filters)))

    # This loop does two things. First it maps 'T' -> True and 'F' ->
    # False.  This is probably something EU should be doing for
    # us. Second, it restructures the data into a more convenient
    # form.
    counts = {
        'happy': {},
        'platform': {},
        'locale': {},
        'product': {},
        'browser_version': {}
#......... (remaining code omitted) .........
Developer: senicar, Project: fjord, Lines of code: 103, Source: views.py


Note: The fjord.feedback.models.ResponseMappingType.search examples in this article were compiled by 纯净天空 from open-source code hosting and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not republish without permission.