本文整理汇总了Python中elasticsearch_dsl.A属性的典型用法代码示例。如果您正苦于以下问题:Python elasticsearch_dsl.A属性的具体用法?Python elasticsearch_dsl.A怎么用?Python elasticsearch_dsl.A使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类elasticsearch_dsl
的用法示例。
在下文中一共展示了elasticsearch_dsl.A属性的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _get_markets_with_dsl
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def _get_markets_with_dsl(self, from_date, to_date):
    """Aggregate distinct trading pairs (base/quote asset ids) for fill-order
    operations in the given time window, using a composite aggregation.

    NOTE(review): this function is incomplete — it executes the search but
    never processes ``response`` or returns a value (see the trailing TODO).
    """
    # TODO: This could be fixed now as ES has closed the issue:
    # https://github.com/elastic/elasticsearch-dsl-py/issues/963
    s = Search(using='operations', index="bitshares-*")
    # Aggregation-only query: no hits needed.
    s = s.extra(size=0)
    # operation_type=4 filters to fill-order operations within the date range.
    s = s.query('bool', filter = [
        Q('term', operation_type=4),
        Q("range", block_data__block_time={'gte': from_date, 'lte': to_date})
    ])
    # Composite-aggregation sources: one bucket key per (base, quote) pair.
    sources = [
        { 'base': A('terms', field='operation_history.op_object.fill_price.base.asset_id.keyword') },
        { 'quote': A('terms', field='operation_history.op_object.fill_price.quote.asset_id.keyword') }
    ]
    # Bug here as 'sources' does not support a list.
    a = A('composite', sources=sources, size=10000).metric('volume', 'sum', field='operation_history.op_object.fill_price.quote.amount')
    s.aggs.bucket('pairs', a)
    response = s.execute()
    # TODO...
示例2: get_daily_volume
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def get_daily_volume(self, from_date, to_date):
    """Return per-day traded volume between *from_date* and *to_date*.

    Only fill-order operations (operation_type=4) priced in the configured
    core asset are counted. Each list element has the shape
    ``{'date': 'yyyy-MM-dd', 'volume': <sum of quote amounts>}``.
    """
    # Restrict to fill orders, the requested window, and the core asset.
    filters = [
        Q('term', operation_type=4),
        Q('range', block_data__block_time={'gte': from_date, 'lte': to_date}),
        Q('term', operation_history__op_object__fill_price__quote__asset_id__keyword=config.CORE_ASSET_ID)
    ]
    # Aggregation-only search: no document hits are needed.
    search = Search(using='operations', index="bitshares-*").extra(size=0)
    search = search.query('bool', filter=filters)
    # One histogram bucket per day, each carrying a summed 'volume' metric.
    histogram = A('date_histogram', field='block_data.block_time', interval='1d', format='yyyy-MM-dd')
    histogram.metric('volume', 'sum', field='operation_history.op_object.fill_price.quote.amount')
    search.aggs.bucket('volume_over_time', histogram)
    result = search.execute()
    return [
        {'date': bucket.key_as_string, 'volume': bucket.volume.value}
        for bucket in result.aggregations.volume_over_time.buckets
    ]
示例3: get_elasticsearch_index_samples
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def get_elasticsearch_index_samples(elasticsearch_index, dataset_type=Sample.DATASET_TYPE_VARIANT_CALLS):
    """Return ``(sample_ids, index_metadata)`` for the given ES index.

    The sample ids come from a terms aggregation over the dataset-type's
    sample-id field (up to 10000 distinct values).
    """
    client = get_es_client()
    metadata = get_index_metadata(elasticsearch_index, client).get(elasticsearch_index)
    # Aggregation-only query: we only need the distinct sample-id terms.
    search = elasticsearch_dsl.Search(using=client, index=elasticsearch_index).params(size=0)
    terms = elasticsearch_dsl.A('terms', field=SAMPLE_FIELDS_MAP[dataset_type], size=10000)
    search.aggs.bucket('sample_ids', terms)
    buckets = search.execute().aggregations.sample_ids.buckets
    return [bucket['key'] for bucket in buckets], metadata
示例4: get_most_loaned_documents
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def get_most_loaned_documents(from_date, to_date, bucket_size):
    """Return aggregation of document_pids with most loans."""
    loan_search = current_circulation.loan_search_cls()
    # Both active and completed loans count towards the totals.
    loan_states = (
        current_app.config["CIRCULATION_STATES_LOAN_ACTIVE"]
        + current_app.config["CIRCULATION_STATES_LOAN_COMPLETED"]
    )
    # Empty-string dates are treated as open-ended range bounds.
    search = loan_search.query(
        "bool",
        must=[
            Q("terms", state=loan_states),
            Q("range", start_date=dict(gte=from_date or None, lte=to_date or None)),
        ],
    )
    # Top document_pids by loan count, each with the summed extension count.
    top_documents = A("terms", field="document_pid", size=bucket_size)
    top_documents = top_documents.metric("extensions", "sum", field="extension_count")
    search.aggs.bucket("most_loaned_documents", top_documents)
    # No need for the loan hits
    return search[:0]
示例5: get_aggregation
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def get_aggregation(self):
    """Build a nested aggregation over the configured field with
    ``min_start`` / ``max_end`` sub-metrics on its start/end sub-fields."""
    nested_field = self._params['field']
    agg = A('nested', path=nested_field)
    agg.metric('min_start', 'min', field=nested_field + '.start')
    agg.metric('max_end', 'max', field=nested_field + '.end')
    return agg
示例6: as_request_parser_kwargs
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def as_request_parser_kwargs(self):
    """Return request-parser kwargs: the validator callable plus help text."""
    help_text = _('A date range expressed as start-end '
                  'where both dates are in iso format '
                  '(ie. YYYY-MM-DD-YYYY-MM-DD)')
    return {'type': self.validate_parameter, 'help': help_text}
示例7: test_aggregation
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def test_aggregation(self):
    """The facet should produce a plain terms aggregation on the field."""
    expected_agg = A({'terms': {'field': 'boolean'}})
    actual_agg = self.facet.get_aggregation()
    assert actual_agg == expected_agg
示例8: test_get_aggregation
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def test_get_aggregation(self):
    """The facet should nest on the field and add min/max sub-aggregations."""
    expected_agg = A({
        'nested': {'path': 'some_field'},
        'aggs': {
            'min_start': {'min': {'field': 'some_field.start'}},
            'max_end': {'max': {'field': 'some_field.end'}},
        },
    })
    actual_agg = self.facet.get_aggregation()
    assert actual_agg == expected_agg
示例9: add_default_aggregation
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def add_default_aggregation(s):
    """Attach the default 'urls' terms aggregation (top request URLs) to *s*."""
    url_terms = A("terms", field="request.url.keyword", size=args.size)
    s.aggs.bucket("urls", url_terms)
示例10: get_statistics
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def get_statistics(index, pattern=None):
    """Return cardinality statistics for a set of event-log fields.

    For each field of interest a cardinality aggregation is run, and the
    resulting distinct-value counts are returned keyed by a short name,
    plus the total hit count under ``'total'``.

    :param index: elasticsearch index (or index pattern) to query
    :param pattern: unused; kept for backward compatibility with callers
    """
    conn = connections.get_connection()
    # Field to aggregate -> short stat name in the result dict.
    fields = {
        'computer_name.keyword': 'computers',
        'strings_parsed.source_user_name.keyword': 'srcuser',
        'strings_parsed.target_user_name.keyword': 'dstuser',
        'strings_parsed.target_machine_name.keyword': 'dstsrvname',
        'strings_parsed.target_machine_ip.keyword': 'dstsrvip',
    }
    s = Search(using=conn, index=index)
    for field, stat_name in fields.items():
        s.aggs.bucket(stat_name, A('cardinality', field=field))
    resp = s.execute()
    # Each cardinality aggregation yields {'value': <distinct count>}.
    stats = {name: agg['value'] for name, agg in resp.aggregations.to_dict().items()}
    stats['total'] = resp['hits']['total']
    return stats
示例11: test_scan_aggs_exhausts_all_files
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def test_scan_aggs_exhausts_all_files(data_client):
    """scan_aggs with a single terms source should yield every file bucket."""
    search = Search(index='flat-git')
    buckets = list(scan_aggs(search, {'files': A('terms', field='files')}))
    assert len(buckets) == 26
示例12: test_scan_aggs_with_multiple_aggs
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def test_scan_aggs_with_multiple_aggs(data_client):
    """scan_aggs should accept a list of sources (DSL object or raw dict)."""
    search = Search(index='flat-git')
    sources = [
        {'files': A('terms', field='files')},
        {'months': {'date_histogram': {'field': 'committed_date', 'interval': 'month'}}},
    ]
    buckets = list(scan_aggs(search, sources))
    assert len(buckets) == 47
示例13: _get_aggregation
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def _get_aggregation(self, **extra):
    """Build the terms aggregation for this facet's field.

    ``self.kwargs`` and then ``extra`` may override any option,
    including ``field`` itself (later keys win).
    """
    options = {'field': self.field, **self.kwargs, **extra}
    return A('terms', **options)
示例14: apply
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def apply(self, search, **extra):
    """Register this facet's aggregation on *search* inside a 'global'
    bucket (so it ignores the query's filters) and return the search."""
    unfiltered = A('global')
    unfiltered[self.field] = self._get_aggregation(**extra)
    search.aggs[self.field] = unfiltered
    return search
示例15: handle_composition_aggregation
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import A [as 别名]
def handle_composition_aggregation(search: Search, aggregation_dict: dict, after: dict):
    """Attach a composite aggregation described by *aggregation_dict* to a search.

    :param search: raw search body (dict) to rebuild the Search from
    :param aggregation_dict: must contain ``"sources"`` (list of dicts with
        ``bucket_name``, ``agg_type`` and ``field``) and ``"bucket_name"``;
        ``"size"`` is optional (defaults to 10)
    :param after: composite-aggregation cursor from a previous page, or a
        falsy value for the first page
    :returns: the Search with the composite aggregation attached
    """
    s = Search().from_dict(search).using(Elasticsearch(es_url))
    sources = [
        {source["bucket_name"]: A(source["agg_type"], field="{}.keyword".format(source["field"]))}
        for source in aggregation_dict["sources"]
    ]
    composite_kwargs = {
        "size": aggregation_dict.get("size", 10),
        "sources": sources,
    }
    # Only pass 'after' when resuming pagination; the two previous
    # duplicated branches differed solely in this keyword.
    if after:
        composite_kwargs["after"] = after
    s.aggs.bucket(aggregation_dict["bucket_name"], "composite", **composite_kwargs)
    return s