本文整理汇总了Python中elasticsearch_dsl.Q属性的典型用法代码示例。如果您正苦于以下问题:Python elasticsearch_dsl.Q属性的具体用法?Python elasticsearch_dsl.Q怎么用?Python elasticsearch_dsl.Q使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类 elasticsearch_dsl 的用法示例。
在下文中一共展示了elasticsearch_dsl.Q属性的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: filter_by_frequency
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def filter_by_frequency(self, frequencies):
    """Add population-frequency filters to this search.

    :param frequencies: mapping of population name to a dict with optional
        'af' (allele frequency), 'ac' (allele count) and 'hh'
        (hom/hemi count) thresholds.
    """
    combined = Q()
    for population, thresholds in sorted(frequencies.items()):
        af_threshold = thresholds.get('af')
        if af_threshold is not None:
            # Prefer the first filter-AF field that is actually present in
            # any of the searched indices; fall back to the plain AF field.
            field = POPULATIONS[population]['AF']
            for candidate in POPULATIONS[population]['filter_AF']:
                if any(candidate in index_metadata['fields']
                       for index_metadata in self.index_metadata.values()):
                    field = candidate
                    break
            combined &= _pop_freq_filter(field, af_threshold)
        elif thresholds.get('ac') is not None:
            combined &= _pop_freq_filter(POPULATIONS[population]['AC'], thresholds['ac'])
        if thresholds.get('hh') is not None:
            combined &= _pop_freq_filter(POPULATIONS[population]['Hom'], thresholds['hh'])
            combined &= _pop_freq_filter(POPULATIONS[population]['Hemi'], thresholds['hh'])
    self.filter(combined)
示例2: _pathogenicity_filter
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def _pathogenicity_filter(pathogenicity):
    """Build an Elasticsearch query for clinvar/hgmd pathogenicity filters.

    :param pathogenicity: dict with optional 'clinvar' and 'hgmd' filter lists.
    :return: a Q combining both filter groups with OR, or None if neither
        group was requested.
    """
    combined = None

    clinvar_choices = pathogenicity.get('clinvar', [])
    if clinvar_choices:
        significance_terms = set()
        for choice in clinvar_choices:
            significance_terms.update(CLINVAR_SIGNFICANCE_MAP.get(choice, []))
        combined = Q('terms', clinvar_clinical_significance=sorted(significance_terms))

    hgmd_choices = pathogenicity.get('hgmd', [])
    if hgmd_choices:
        class_terms = set()
        for choice in hgmd_choices:
            class_terms.update(HGMD_CLASS_MAP.get(choice, []))
        hgmd_query = Q('terms', hgmd_class=sorted(class_terms))
        combined = combined | hgmd_query if combined is not None else hgmd_query

    return combined
示例3: searchport
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def searchport(port, protocol='tcp', state='open', neg=False):
    """Build a query matching (or, with `neg`, excluding) records that have
    the given port in the given protocol/state.

    Be aware that a host with many filtered or closed ports may report only
    a summary rather than each port, so non-open states can miss matches;
    open ports are always found reliably.
    """
    if port == "host":
        # Sentinel port -1 marks host-level (portless) records.
        query = Q("nested", path="ports", query=Q("match", ports__port=-1))
    elif state == "open":
        query = Q("match", **{"openports.%s.ports" % protocol: port})
    else:
        port_match = (Q("match", ports__port=port) &
                      Q("match", ports__protocol=protocol) &
                      Q("match", ports__state_state=state))
        query = Q("nested", path="ports", query=port_match)
    return ~query if neg else query
示例4: search_filter_record_permissions
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def search_filter_record_permissions():
    """Filter list of results by `_access` and `restricted` fields."""
    # Backoffice users (and non-request contexts) see everything.
    if not has_request_context() or backoffice_permission().allows(g.identity):
        return Q()
    # A record is public when `restricted` is False or missing entirely.
    restricted_missing = ~Q("exists", field="restricted")
    is_public = restricted_missing | Q("term", restricted=False)
    record_filter = is_public
    if current_app.config.get("ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED"):
        # When `_access.read` is present it takes precedence over
        # `restricted`: check the user's provides against it. Otherwise
        # fall back to the public/restricted check above.
        has_access_read = Q("exists", field="_access.read")
        provides = _get_user_provides()
        readable_by_user = has_access_read & Q(
            "terms", **{"_access.read": provides}
        )
        record_filter = readable_by_user | (
            ~has_access_read & ~is_public
        )
    return Q("bool", filter=[record_filter])
示例5: get_value_filter
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def get_value_filter(self, filter_value):
    """Return a range query for the named range bucket.

    Local fix until the upstream PR is merged:
    https://github.com/elastic/elasticsearch-dsl-py/pull/473
    """
    self.validate_parameter(filter_value)
    lower, upper = self._ranges[filter_value]
    bounds = {}
    # gte/lt (rather than gte/lte) keeps adjacent ranges non-overlapping.
    if lower is not None:
        bounds['gte'] = lower
    if upper is not None:
        bounds['lt'] = upper
    return Q('range', **{self._params['field']: bounds})
示例6: query_missing
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def query_missing(s, field, name, methods=None, responsecodes=None, invert=False):
    """Query documents where `field` does NOT match `name` (or does match,
    when `invert` is set), optionally restricted by request methods and by
    response status codes or code ranges like "400-499".
    """
    # Main query: negated match unless inverted.
    match = Q("match", **{field: name})
    s.query = match if invert else ~match

    # Optional request-method filter.
    if methods:
        s = s.filter("terms", **{'request.method': methods})

    # Optional response-code filters; "A-B" selects an inclusive range.
    if responsecodes:
        for code in responsecodes:
            bounds = code.split("-")
            if len(bounds) == 2:
                s = s.filter("range", **{
                    'response.status': {"gte": int(bounds[0]), "lte": int(bounds[1])}
                })
            else:
                s = s.filter("term", **{'response.status': code})

    print_debug(s.to_dict())
    return s
示例7: query_vals
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def query_vals(s, field, name, values, invert):
    """Query nested field values and aggregate them per URL.

    If `values` is given, match (or, with `invert`, exclude) documents whose
    nested field value matches the wildcard pattern; otherwise match all.
    """
    if values:
        wildcard = Q("nested", path=field,
                     query=Q("wildcard", **{field + ".value.keyword": values}))
        s.query = ~wildcard if invert else wildcard
    else:
        s.query = Q()

    # Aggregation pipeline:
    #  1. descend into the nested field (response.headers/request.parameters)
    #  2. keep only entries with the requested name
    #  3. bucket the distinct values
    #  4. jump back into the main document
    #  5. bucket the URLs per value
    nested_bucket = s.aggs.bucket("field", "nested", path=field)
    name_filtered = nested_bucket.bucket(
        "valuefilter", "filter", Q("match", **{field + ".name": name}))
    per_value = name_filtered.bucket(
        "values", "terms", field=field + ".value.keyword", size=args.size)
    per_value.bucket("main", "reverse_nested")\
        .bucket("urls", "terms", field="request.url.keyword", size=args.size)
    return s
示例8: get_trade_history
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def get_trade_history(size=10, from_date='2015-10-10', to_date='now', sort_by='-operation_id_num',
                      search_after=None, base="1.3.0", quote="1.3.121"):
    """Fetch maker-side fill-order operations (type 4) for a market pair.

    :param search_after: comma-separated sort values for cursor pagination.
    :return: list of hit dicts from the bitshares indices.
    """
    s = Search(using=es, index="bitshares-*").extra(size=size)
    if search_after:  # non-empty cursor string
        s = s.extra(search_after=search_after.split(','))
    market_filter = (
        Q("match", operation_type=4) &
        Q("match", operation_history__op_object__is_maker=True) &
        Q("match", operation_history__op_object__fill_price__base__asset_id=base) &
        Q("match", operation_history__op_object__fill_price__quote__asset_id=quote)
    )
    date_range = Q("range", block_data__block_time={'gte': from_date, 'lte': to_date})
    s.query = market_filter & date_range
    s = s.sort(*sort_by.split(','))
    response = s.execute()
    verify_es_response(response)
    return [hit.to_dict() for hit in response]
示例9: test_dsl_search
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def test_dsl_search(self):
    """dsl_search accepts raw elasticsearch-dsl query objects."""
    french_on_skillman = ElasticsearchQ(
        'bool',
        must=[
            ElasticsearchQ('match', street='Skillman Ave'),
            ElasticsearchQ('match', categories='French'),
        ],
    )
    self.assertCountEqual(
        Restaurant.objects.dsl_search(french_on_skillman), [self.soleil])

    skillman_in_11377 = ElasticsearchQ(
        'bool',
        must=[
            ElasticsearchQ('match', street='Skillman Ave'),
            ElasticsearchQ('match', zip_code='11377'),
        ],
    )
    self.assertCountEqual(
        Restaurant.objects.dsl_search(skillman_in_11377), [self.tj, self.soleil])

    # A bare Term query also works.
    self.assertCountEqual(
        Restaurant.objects.dsl_search(Term(email='alcove@example.org')),
        [self.alcove])
示例10: test_dsl_search_no_limit
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def test_dsl_search_no_limit(self):
    """With limit=None, dsl_search returns every match, sorted."""
    # Duplicate tj and soleil so there are four matches on Skillman Ave.
    for restaurant in (self.tj, self.soleil):
        restaurant.pk = None
        restaurant.save()
    results = Restaurant.objects.dsl_search(
        ElasticsearchQ('match', street='skillman'),
        sort=True,
        limit=None)
    self.assertEqual(len(results), 4)
    expected_names = [self.soleil.name, self.soleil.name, self.tj.name, self.tj.name]
    self.assertEqual([r.name for r in results], expected_names)
示例11: process
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def process(self, items_block):
    """Process a DataFrame to compute Onion.

    :param items_block: items to be processed; expects a pandas DataFrame.
    :return: ProcessResults with the processed count and enriched DataFrame.
    """
    logger.debug("{} Authors to process: {}".format(self.__log_prefix, len(items_block)))
    df_onion = Onion(items_block).enrich(
        member_column=ESOnionConnector.AUTHOR_UUID,
        events_column=ESOnionConnector.CONTRIBUTIONS)
    # Store the quarter as a string (e.g. "2018Q2").
    df_onion['quarter'] = df_onion[ESOnionConnector.TIMEFRAME].map(
        lambda ts: str(pandas.Period(ts, 'Q')))
    # Enrichment metadata columns.
    df_onion['metadata__enriched_on'] = datetime.utcnow().isoformat()
    df_onion['data_source'] = self.data_source
    df_onion['grimoire_creation_date'] = df_onion[ESOnionConnector.TIMEFRAME]
    logger.debug("{} Final new events: {}".format(self.__log_prefix, len(df_onion)))
    return self.ProcessResults(processed=len(df_onion), out_items=df_onion)
示例12: get_pictures
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def get_pictures(client):
    """Collect TSE candidate-picture lookup info for every indexed politician.

    :param client: Elasticsearch client for the 'politicians' index.
    :return: list of dicts with doc_id, url, sg_ue and sq_candidato.
    """
    hits = (
        Search(using=client, index='politicians')
        .query(Q('bool', must=[Q('match', ano_eleicao=ANO_ELEICAO)]))
        .source(['sq_candidato', 'sg_ue'])
        .scan()
    )
    base_url = 'http://divulgacandcontas.tse.jus.br/divulga/rest/v1/candidatura/buscar'
    data = []
    for hit in hits:
        url = f'{base_url}/{ANO_ELEICAO}/{hit.sg_ue}/2022802018/candidato/{hit.sq_candidato}'
        data.append({
            'doc_id': hit.meta.id,
            'url': url,
            'sg_ue': hit.sg_ue,
            'sq_candidato': hit.sq_candidato,
        })
    return data
示例13: about
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def about(request):
    """Information about the current site, its goals, and what content is loaded"""
    # Per-provider hit counts, cached to avoid hitting the search engine
    # on every page view.
    providers = cache.get_or_set(CACHE_STATS_NAME, [], CACHE_STATS_DURATION)
    if not providers:
        for provider_name in sorted(settings.PROVIDERS.keys()):
            response = Search().query(Q('term', provider=provider_name)).execute()
            if response.hits.total > 0:
                data = settings.PROVIDERS[provider_name]
                data.update({'hits': intcomma(response.hits.total)})
                providers.append(data)
        # Grand total across all providers.
        response = Search().execute()
        providers.append({
            'display_name': 'Total',
            'hits': intcomma(response.hits.total),
        })
        cache.set(CACHE_STATS_NAME, providers)
    return render(request, "about.html", {'providers': providers})
示例14: correct_orphan_records
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def correct_orphan_records(self, provider='europeana', end=None):
    """[#185] Delete records from the search engine which aren't found in the database.

    :param provider: provider whose records are checked (default 'europeana').
    :param end: unused; kept for interface compatibility.
    """
    s = Search()
    q = Q('term', provider=provider)
    s = s.query(q)
    response = s.execute()
    total = response.hits.total
    # A file extracted from the production database listing all of the
    # europeana identifiers.
    identifier_file = '/tmp/europeana-identifiers.json'
    # Use a context manager so the file handle is closed (the original
    # json.load(open(...)) leaked it).
    with open(identifier_file) as fh:
        db_identifiers = set(json.load(fh))
    total_in_db = len(db_identifiers)
    log.info("Using search engine instance %s", settings.ELASTICSEARCH_URL)
    log.info("Total records: %d (search engine), %d (database) [diff=%d]",
             total, total_in_db, total - total_in_db)
    deleted_count = 0
    for r in s.scan():
        if r.identifier not in db_identifiers:
            img = search.Image.get(id=r.identifier)
            log.debug("Going to delete image %s", img)
            # Bug fix: the orphan was counted and logged as deleted but
            # never actually removed from the search engine.
            img.delete()
            deleted_count += 1
    log.info("Deleted %d from search engine", deleted_count)
示例15: correct_license_capitalization
# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Q [as 别名]
def correct_license_capitalization(self, provider='europeana', end=None):
    """[#186] Correct license capitalization"""
    s = Search().query(Q('term', provider=provider))
    response = s.execute()
    total = response.hits.total
    log.info("Using search engine instance %s", settings.ELASTICSEARCH_URL)
    modified = 0
    scanned = 0
    for record in s.scan():
        if not record.license.islower():
            img = search.Image.get(id=record.identifier)
            log.debug("[%d] Changing license %s to %s",
                      scanned, img.license, img.license.lower())
            img.update(license=img.license.lower())
            modified += 1
        scanned += 1
    log.info("Modified %d records in search engine", modified)