This article collects typical code examples of elasticsearch_dsl.Nested in Python. If you have been wondering what elasticsearch_dsl.Nested is for and how to use it, the curated examples below may help; you can also explore the elasticsearch_dsl package, where this field class is defined, in more depth.
Five code examples of elasticsearch_dsl.Nested are shown below, sorted by popularity by default.
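Before the examples, a minimal sketch of what Nested is for, assuming elasticsearch_dsl 6.x or later (the Post and Comment classes and the index name are made up for illustration): a Nested field stores a list of objects so that each object is matched as its own sub-document.

from elasticsearch_dsl import Document, InnerDoc, Nested, Text, Keyword

class Comment(InnerDoc):
    author = Text()
    body = Text()

class Post(Document):
    title = Text(fields={'raw': Keyword()})
    # Nested keeps each comment as its own hidden sub-document, so a
    # query can match author and body within the *same* comment.
    comments = Nested(Comment)

    class Index:
        name = 'posts'  # hypothetical index name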
Example 1: get_field_sort

# Required import: import elasticsearch_dsl [as alias]
# or: from elasticsearch_dsl import Nested [as alias]
def get_field_sort(self, field_name):
    """
    Given a field name, return the field name that should be used for
    sorting. If the mapping defines a .raw sub-field, that is used;
    otherwise the field name itself is used if index=not_analyzed.
    Returns None for fields that cannot be sorted on.
    """
    if field_name.endswith('.raw'):
        return field_name
    if field_name in self.sort_fields:
        return self.sort_fields[field_name]
    if field_name in self.document._doc_type.mapping:
        dsl_field = self.document._doc_type.mapping[field_name]
        # Object/Nested fields have no single value to sort on.
        if isinstance(dsl_field, (dsl.Object, dsl.Nested)):
            return None
        # Non-string fields (dates, numbers, ...) sort as-is.
        # (dsl.String and index='not_analyzed' are pre-5.x API.)
        if not isinstance(dsl_field, dsl.String):
            return field_name
        # Analyzed strings need a not-analyzed sub-field to sort on.
        if 'raw' in dsl_field.fields:
            return '%s.raw' % field_name
        elif getattr(dsl_field, 'index', None) == 'not_analyzed':
            return field_name
    return None
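As a follow-up, a hedged sketch (not from the original project) of the mapping shape get_field_sort expects, written with the modern Text/Keyword equivalents of the pre-5.x String API; the index name 'posts' is made up:

from elasticsearch_dsl import Mapping, Text, Keyword, Search

# 'title' itself is analyzed (not directly sortable), while the
# 'title.raw' sub-field is a keyword and therefore sortable.
m = Mapping()
m.field('title', Text(fields={'raw': Keyword()}))

# get_field_sort('title') would resolve to 'title.raw', so sorting
# looks like this:
s = Search(index='posts').sort('title.raw')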
Example 2: test_properties_can_iterate_over_all_the_fields

# Required import: import elasticsearch_dsl [as alias]
# or: from elasticsearch_dsl import Nested [as alias]
from elasticsearch_dsl import Keyword, Nested, Text, mapping

def test_properties_can_iterate_over_all_the_fields():
    m = mapping.Mapping()
    m.field('f1', 'text', test_attr='f1', fields={'f2': Keyword(test_attr='f2')})
    m.field('f3', Nested(test_attr='f3', properties={
        'f4': Text(test_attr='f4')}))

    # _collect_fields walks multi-fields and nested properties alike.
    assert {'f1', 'f2', 'f3', 'f4'} == {f.test_attr for f in m.properties._collect_fields()}
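To see the structure that this iteration flattens, it can help to serialize a similar mapping; a small sketch, assuming elasticsearch_dsl 7.x (where Mapping takes no doc-type argument):

import json
from elasticsearch_dsl import Keyword, Nested, Text, mapping

m = mapping.Mapping()
m.field('f1', 'text', fields={'f2': Keyword()})
m.field('f3', Nested(properties={'f4': Text()}))

# 'f2' appears under f1's multi-field 'fields' and 'f4' under f3's
# nested 'properties', yet the flat iteration in the test sees all four.
print(json.dumps(m.to_dict(), indent=2, sort_keys=True))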
Example 3: test_resolve_nested

# Required import: import elasticsearch_dsl [as alias]
# or: from elasticsearch_dsl import Nested [as alias]
from elasticsearch_dsl import Keyword, Nested, mapping

def test_resolve_nested():
    m = mapping.Mapping()
    m.field('n1', 'nested', properties={'n2': Nested(properties={'k1': Keyword()})})
    m.field('k2', 'keyword')

    # A doubly-nested path yields both enclosing nested paths, outermost first.
    nested, field = m.resolve_nested('n1.n2.k1')
    assert nested == ['n1', 'n1.n2']
    assert isinstance(field, Keyword)

    # A top-level keyword field has no enclosing nested paths.
    nested, field = m.resolve_nested('k2')
    assert nested == []
    assert isinstance(field, Keyword)
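The paths returned by resolve_nested are exactly the 'path' values a nested query needs; a sketch (not part of the original test) of wrapping a leaf query, innermost path first:

from elasticsearch_dsl import Q

# Hypothetical leaf query on the doubly-nested keyword field.
query = Q('term', **{'n1.n2.k1': 'some-value'})
# Wrap from the innermost nested path ('n1.n2') outwards ('n1').
for path in reversed(['n1', 'n1.n2']):
    query = Q('nested', path=path, query=query)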
Example 4: get_field_highlight

# Required import: import elasticsearch_dsl [as alias]
# or: from elasticsearch_dsl import Nested [as alias]
def get_field_highlight(self, field_name):
    """
    Return the field name (or wildcard pattern) to request highlighting
    for, or None if the field is not in the mapping.
    """
    if field_name in self.highlight_fields:
        return self.highlight_fields[field_name]
    if field_name in self.document._doc_type.mapping:
        dsl_field = self.document._doc_type.mapping[field_name]
        # Object/Nested fields highlight all of their sub-fields.
        if isinstance(dsl_field, (dsl.Object, dsl.Nested)):
            return '%s.*' % field_name
        return field_name
    return None
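A hedged usage sketch of the wildcard pattern this returns for an Object/Nested field (the index name and the match query are made up):

from elasticsearch_dsl import Search

s = Search(index='posts').query('match', title='elasticsearch')
# 'comments.*' asks Elasticsearch to highlight every sub-field of the
# 'comments' field, matching get_field_highlight's output above.
s = s.highlight('comments.*')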
Example 5: test_mapping_can_collect_all_analyzers_and_normalizers

# Required import: import elasticsearch_dsl [as alias]
# or: from elasticsearch_dsl import Nested [as alias]
import json

from elasticsearch_dsl import Keyword, Nested, Text, analysis, mapping

def test_mapping_can_collect_all_analyzers_and_normalizers():
    a1 = analysis.analyzer('my_analyzer1',
        tokenizer='keyword',
        filter=['lowercase', analysis.token_filter('my_filter1', 'stop', stopwords=['a', 'b'])],
    )
    a2 = analysis.analyzer('english')
    a3 = analysis.analyzer('unknown_custom')
    a4 = analysis.analyzer('my_analyzer2',
        tokenizer=analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
        filter=[analysis.token_filter('my_filter2', 'stop', stopwords=['c', 'd'])],
    )
    a5 = analysis.analyzer('my_analyzer3', tokenizer='keyword')
    n1 = analysis.normalizer('my_normalizer1', filter=['lowercase'])
    n2 = analysis.normalizer('my_normalizer2',
        filter=['my_filter1', 'my_filter2', analysis.token_filter('my_filter3', 'stop', stopwords=['e', 'f'])],
    )
    n3 = analysis.normalizer('unknown_custom')

    m = mapping.Mapping()
    m.field('title', 'text', analyzer=a1,
        fields={
            'english': Text(analyzer=a2),
            'unknown': Keyword(search_analyzer=a3),
        }
    )
    m.field('comments', Nested(properties={'author': Text(analyzer=a4)}))
    m.field('normalized_title', 'keyword', normalizer=n1)
    m.field('normalized_comment', 'keyword', normalizer=n2)
    m.field('unknown', 'keyword', normalizer=n3)
    m.meta('_all', analyzer=a5)

    # Analyzers/normalizers created with only a name ('english',
    # 'unknown_custom') are treated as built-in references and are not
    # collected; fully-defined custom ones are, along with the filters
    # and tokenizers they declare inline.
    assert {
        'analyzer': {
            'my_analyzer1': {'filter': ['lowercase', 'my_filter1'], 'tokenizer': 'keyword', 'type': 'custom'},
            'my_analyzer2': {'filter': ['my_filter2'], 'tokenizer': 'trigram', 'type': 'custom'},
            'my_analyzer3': {'tokenizer': 'keyword', 'type': 'custom'},
        },
        'normalizer': {
            'my_normalizer1': {'filter': ['lowercase'], 'type': 'custom'},
            'my_normalizer2': {'filter': ['my_filter1', 'my_filter2', 'my_filter3'], 'type': 'custom'},
        },
        'filter': {
            'my_filter1': {'stopwords': ['a', 'b'], 'type': 'stop'},
            'my_filter2': {'stopwords': ['c', 'd'], 'type': 'stop'},
            'my_filter3': {'stopwords': ['e', 'f'], 'type': 'stop'},
        },
        'tokenizer': {
            'trigram': {'max_gram': 3, 'min_gram': 3, 'type': 'nGram'},
        },
    } == m._collect_analysis()

    # The mapping must serialize to plain JSON-compatible types.
    assert json.loads(json.dumps(m.to_dict())) == m.to_dict()
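To round out the example, a sketch of where the collected analysis ends up, assuming elasticsearch_dsl 7.x ('blog-posts' is a made-up index name): attaching a mapping to an Index merges the analysis into the index settings of the create-index body.

from elasticsearch_dsl import Index, Mapping, analysis

# A tiny stand-in mapping (the test's 'm' would work the same way).
m2 = Mapping()
m2.field('title', 'text', analyzer=analysis.analyzer(
    'my_analyzer1', tokenizer='keyword', filter=['lowercase']))

idx = Index('blog-posts')
idx.mapping(m2)
# to_dict() shows the create-index body: the field mappings plus
# settings={'analysis': ...} gathered via _collect_analysis().
print(idx.to_dict())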