This article collects typical usage examples of elasticsearch_dsl.Text in Python. If you are unsure what elasticsearch_dsl.Text does or how to use it, the curated examples below may help; you can also explore the elasticsearch_dsl module that the class belongs to for further usage.
The following presents 8 code examples of elasticsearch_dsl.Text, sorted by popularity by default.
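Before the examples, here is a minimal, self-contained sketch of declaring a Text field and creating its mapping. Everything in it is illustrative (the Article class, the articles index, the localhost connection), and it uses the newer inner Index declaration style; the examples below use the older Meta convention.

from elasticsearch_dsl import Document, Keyword, Text, connections

# Assumption: an Elasticsearch node is reachable at localhost:9200.
connections.create_connection(hosts=['localhost:9200'])

class Article(Document):
    # Analyzed full-text field; the 'raw' multi-field keeps the exact
    # string for sorting and aggregations.
    title = Text(fields={'raw': Keyword()})

    class Index:
        name = 'articles'

Article.init()                       # create the index and its mapping
Article(title='Hello world').save()  # index one document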
Example 1: create_doctype
# Required module: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import Text [as alias]
from elasticsearch_dsl import Document, Keyword, Text

def create_doctype(index_name, similarity):
    # Choose the similarity per field; 'default' falls back to the ES default.
    if similarity == 'default':
        wiki_content_field = Text()
        qb_content_field = Text()
    else:
        wiki_content_field = Text(similarity=similarity)
        qb_content_field = Text(similarity=similarity)

    class Answer(Document):
        page = Text(fields={'raw': Keyword()})
        wiki_content = wiki_content_field
        qb_content = qb_content_field

        class Meta:
            index = index_name

    return Answer
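A plausible way to use this factory (the index name, similarity value, and field values are illustrative, and this assumes an elasticsearch_dsl version where Meta.index is honored by init()):

Answer = create_doctype('qb_answers', 'BM25')
Answer.init()  # create the index with the similarity-aware mapping
Answer(page='Tokyo', wiki_content='Tokyo is the capital of Japan.').save()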
Example 2: test_multiple_doc_types_will_combine_mappings
# Required module: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import Text [as alias]
from elasticsearch_dsl import Document, Index, Text

def test_multiple_doc_types_will_combine_mappings():
    class User(Document):
        username = Text()

    i = Index('i')
    i.document(Post)  # Post is defined elsewhere in the test suite
    i.document(User)
    assert {
        'mappings': {
            'properties': {
                'title': {'type': 'text'},
                'username': {'type': 'text'},
                'published_from': {'type': 'date'}
            }
        }
    } == i.to_dict()
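The test relies on a Post document defined elsewhere in the test suite. Judging from the expected mapping, it presumably looks something like the following reconstruction:

from elasticsearch_dsl import Date, Document, Text

class Post(Document):
    title = Text()
    published_from = Date()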
Example 3: _query
# Required module: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import Text [as alias]
from elasticsearch_dsl import Integer, Keyword, Q, Text

def _query(self, quantity, o, value, nested=None):
    field = self._field(quantity, nested=nested)
    # _cmp_operators (defined elsewhere in the source module) maps
    # comparison operators such as '<' to range keys such as 'lt'.
    if o in _cmp_operators:
        return Q("range", **{field: {_cmp_operators[o]: value}})

    # Text fields get full-text matching; exact-value fields get term queries.
    if quantity.elastic_mapping_type == Text:
        query_type = "match"
    elif quantity.elastic_mapping_type in [Keyword, Integer]:
        query_type = "term"
    else:
        raise NotImplementedError("Quantity has unsupported ES field type")

    if o in ["=", ""]:
        return Q(query_type, **{field: value})
    if o == "!=":
        return ~Q(query_type, **{field: value})  # pylint: disable=invalid-unary-operand-type
    raise Exception("Unknown operator %s" % o)
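For reference, the Q shortcut used above builds query objects like these (a standalone illustration; the field names are made up):

from elasticsearch_dsl import Q

Q('match', wiki_content='hamilton').to_dict()
# -> {'match': {'wiki_content': 'hamilton'}}

Q('range', year={'gte': 2000}).to_dict()
# -> {'range': {'year': {'gte': 2000}}}

(~Q('term', status=404)).to_dict()
# -> {'bool': {'must_not': [{'term': {'status': 404}}]}}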
Example 4: setup_collection
# Required module: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import Text [as alias]
from elasticsearch_dsl import Date, DocType, Integer, Text

def setup_collection(self):
    class WebLink(DocType):
        url = Text()
        html = Text()
        headers = Text()
        status = Integer()
        created = Date()

        class Meta:
            index = self.database_name
            doc_type = self.collection_name

    return WebLink
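DocType is the older name for what newer elasticsearch_dsl releases call Document; the pattern otherwise matches Example 1. A minimal usage sketch, assuming the method lives on a crawler-style object (the crawler variable and the field values are hypothetical):

WebLink = crawler.setup_collection()  # 'crawler' is hypothetical
WebLink.init()                        # create the index and mapping
WebLink(url='https://example.com', status=200).save()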
Example 5: setup_collection
# 需要導入模塊: import elasticsearch_dsl [as 別名]
# 或者: from elasticsearch_dsl import Text [as 別名]
def setup_collection(self):
class WebLinkExtracted(DocType):
url = Text()
body = Text()
headers = Text()
status = Integer()
created = Date()
class Meta:
index = self.database_name
doc_type = self.collection_name
return WebLinkExtracted
Example 6: test_properties_can_iterate_over_all_the_fields
# Required module: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import Text [as alias]
from elasticsearch_dsl import Keyword, Nested, Text, mapping

def test_properties_can_iterate_over_all_the_fields():
    m = mapping.Mapping()
    m.field('f1', 'text', test_attr='f1', fields={'f2': Keyword(test_attr='f2')})
    m.field('f3', Nested(test_attr='f3', properties={
        'f4': Text(test_attr='f4')}))

    assert {'f1', 'f2', 'f3', 'f4'} == {f.test_attr for f in m.properties._collect_fields()}
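The trick here is that elasticsearch_dsl field classes pass unrecognized keyword arguments straight through into the serialized mapping, so every field can be tagged with an arbitrary test_attr; the internal _collect_fields() walker then recovers the tags from top-level fields (f1, f3), multi-fields (f2), and nested sub-properties (f4) alike.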
Example 7: test_mapping_can_collect_multiple_analyzers
# Required module: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import Text [as alias]
from elasticsearch_dsl import Keyword, Text, analysis, mapping

def test_mapping_can_collect_multiple_analyzers():
    a1 = analysis.analyzer(
        'my_analyzer1',
        tokenizer='keyword',
        filter=['lowercase', analysis.token_filter('my_filter1', 'stop', stopwords=['a', 'b'])],
    )
    a2 = analysis.analyzer(
        'my_analyzer2',
        tokenizer=analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
        filter=[analysis.token_filter('my_filter2', 'stop', stopwords=['c', 'd'])],
    )
    m = mapping.Mapping()
    m.field('title', 'text', analyzer=a1, search_analyzer=a2)
    m.field(
        'text', 'text', analyzer=a1,
        fields={
            'english': Text(analyzer=a1),
            'unknown': Keyword(analyzer=a1, search_analyzer=a2),
        }
    )
    assert {
        'analyzer': {
            'my_analyzer1': {'filter': ['lowercase', 'my_filter1'],
                             'tokenizer': 'keyword',
                             'type': 'custom'},
            'my_analyzer2': {'filter': ['my_filter2'],
                             'tokenizer': 'trigram',
                             'type': 'custom'}},
        'filter': {
            'my_filter1': {'stopwords': ['a', 'b'], 'type': 'stop'},
            'my_filter2': {'stopwords': ['c', 'd'], 'type': 'stop'}},
        'tokenizer': {'trigram': {'max_gram': 3, 'min_gram': 3, 'type': 'nGram'}}
    } == m._collect_analysis()
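In normal use you would not call _collect_analysis() directly; attaching the mapping to an Index (or calling Document.init()) is what writes the collected analyzers, filters, and tokenizers into the index settings. Note also that nGram is the legacy camel-case spelling; recent Elasticsearch releases expect ngram.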
Example 8: test_mapping_can_collect_all_analyzers_and_normalizers
# Required module: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import Text [as alias]
import json

from elasticsearch_dsl import Keyword, Nested, Text, analysis, mapping

def test_mapping_can_collect_all_analyzers_and_normalizers():
    a1 = analysis.analyzer(
        'my_analyzer1',
        tokenizer='keyword',
        filter=['lowercase', analysis.token_filter('my_filter1', 'stop', stopwords=['a', 'b'])],
    )
    a2 = analysis.analyzer('english')
    a3 = analysis.analyzer('unknown_custom')
    a4 = analysis.analyzer(
        'my_analyzer2',
        tokenizer=analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
        filter=[analysis.token_filter('my_filter2', 'stop', stopwords=['c', 'd'])],
    )
    a5 = analysis.analyzer('my_analyzer3', tokenizer='keyword')
    n1 = analysis.normalizer('my_normalizer1', filter=['lowercase'])
    n2 = analysis.normalizer(
        'my_normalizer2',
        filter=['my_filter1', 'my_filter2', analysis.token_filter('my_filter3', 'stop', stopwords=['e', 'f'])],
    )
    n3 = analysis.normalizer('unknown_custom')

    m = mapping.Mapping()
    m.field('title', 'text', analyzer=a1,
        fields={
            'english': Text(analyzer=a2),
            'unknown': Keyword(search_analyzer=a3),
        }
    )
    m.field('comments', Nested(properties={
        'author': Text(analyzer=a4)
    }))
    m.field('normalized_title', 'keyword', normalizer=n1)
    m.field('normalized_comment', 'keyword', normalizer=n2)
    m.field('unknown', 'keyword', normalizer=n3)
    m.meta('_all', analyzer=a5)

    assert {
        'analyzer': {
            'my_analyzer1': {'filter': ['lowercase', 'my_filter1'], 'tokenizer': 'keyword', 'type': 'custom'},
            'my_analyzer2': {'filter': ['my_filter2'], 'tokenizer': 'trigram', 'type': 'custom'},
            'my_analyzer3': {'tokenizer': 'keyword', 'type': 'custom'},
        },
        'normalizer': {
            'my_normalizer1': {'filter': ['lowercase'], 'type': 'custom'},
            'my_normalizer2': {'filter': ['my_filter1', 'my_filter2', 'my_filter3'], 'type': 'custom'},
        },
        'filter': {
            'my_filter1': {'stopwords': ['a', 'b'], 'type': 'stop'},
            'my_filter2': {'stopwords': ['c', 'd'], 'type': 'stop'},
            'my_filter3': {'stopwords': ['e', 'f'], 'type': 'stop'},
        },
        'tokenizer': {
            'trigram': {'max_gram': 3, 'min_gram': 3, 'type': 'nGram'},
        }
    } == m._collect_analysis()
    assert json.loads(json.dumps(m.to_dict())) == m.to_dict()
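The closing assert verifies that m.to_dict() survives a JSON round trip, i.e. that the generated mapping contains only plain JSON-serializable values rather than live analyzer or normalizer objects.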