当前位置: 首页>>代码示例>>Python>>正文


Python elasticsearch_dsl.Text方法代码示例

本文整理汇总了Python中elasticsearch_dsl.Text方法的典型用法代码示例。如果您正苦于以下问题:Python elasticsearch_dsl.Text方法的具体用法?Python elasticsearch_dsl.Text怎么用?Python elasticsearch_dsl.Text使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在elasticsearch_dsl的用法示例。


在下文中一共展示了elasticsearch_dsl.Text方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: create_doctype

# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Text [as 别名]
def create_doctype(index_name, similarity):
    """Build an ``Answer`` Document class bound to *index_name*.

    When *similarity* is the literal string ``'default'`` the two content
    fields are created without an explicit similarity; any other value is
    forwarded as the ``similarity`` option of the Text fields.
    """
    # Collect Text() kwargs once instead of duplicating the field creation.
    text_kwargs = {} if similarity == 'default' else {'similarity': similarity}

    class Answer(Document):
        # 'page' keeps an exact-match sub-field reachable as page.raw
        page = Text(fields={'raw': Keyword()})
        wiki_content = Text(**text_kwargs)
        qb_content = Text(**text_kwargs)

        class Meta:
            index = index_name

    return Answer
开发者ID:Pinafore,项目名称:qb,代码行数:19,代码来源:elasticsearch.py

示例2: test_multiple_doc_types_will_combine_mappings

# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Text [as 别名]
def test_multiple_doc_types_will_combine_mappings():
    """Registering two Document classes on one Index merges their mappings."""
    class User(Document):
        username = Text()

    index = Index('i')
    index.document(Post)
    index.document(User)

    # Fields contributed by both documents must appear in the combined mapping.
    expected = {
        'mappings': {
            'properties': {
                'title': {'type': 'text'},
                'username': {'type': 'text'},
                'published_from': {'type': 'date'},
            }
        }
    }
    assert index.to_dict() == expected
开发者ID:elastic,项目名称:elasticsearch-dsl-py,代码行数:18,代码来源:test_index.py

示例3: _query

# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Text [as 别名]
def _query(self, quantity, o, value, nested=None):
        """Translate one (quantity, operator, value) filter into an ES Q object.

        Range operators become ``range`` queries; equality-style operators
        become ``match`` (for Text fields) or ``term`` (Keyword/Integer)
        queries, negated for ``!=``.
        """
        field = self._field(quantity, nested=nested)

        # Comparison operators map straight onto an ES range query.
        if o in _cmp_operators:
            return Q("range", **{field: {_cmp_operators[o]: value}})

        mapping_type = quantity.elastic_mapping_type
        if mapping_type == Text:
            query_type = "match"
        elif mapping_type in (Keyword, Integer):
            query_type = "term"
        else:
            raise NotImplementedError("Quantity has unsupported ES field type")

        if o in ("=", ""):
            return Q(query_type, **{field: value})

        if o == "!=":
            # pylint: disable=invalid-unary-operand-type
            return ~Q(query_type, **{field: value})

        raise Exception("Unknown operator %s" % o)
开发者ID:Materials-Consortia,项目名称:optimade-python-tools,代码行数:23,代码来源:elasticsearch.py

示例4: setup_collection

# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Text [as 别名]
def setup_collection(self):
        """Return a DocType subclass describing a crawled web page record."""
        # Bind the instance settings locally so the Meta body reads cleanly.
        index_name = self.database_name
        doc_type_name = self.collection_name

        class WebLink(DocType):
            url = Text()
            html = Text()
            headers = Text()
            status = Integer()
            created = Date()

            class Meta:
                index = index_name
                doc_type = doc_type_name

        return WebLink
开发者ID:invanalabs,项目名称:invana-bot,代码行数:15,代码来源:elasticsearch.py

示例5: setup_collection

# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Text [as 别名]
def setup_collection(self):
        """Return a DocType subclass for pages with extracted body content."""
        # Bind the instance settings locally so the Meta body reads cleanly.
        index_name = self.database_name
        doc_type_name = self.collection_name

        class WebLinkExtracted(DocType):
            url = Text()
            body = Text()
            headers = Text()
            status = Integer()
            created = Date()

            class Meta:
                index = index_name
                doc_type = doc_type_name

        return WebLinkExtracted
开发者ID:invanalabs,项目名称:invana-bot,代码行数:15,代码来源:elasticsearch.py

示例6: test_properties_can_iterate_over_all_the_fields

# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Text [as 别名]
def test_properties_can_iterate_over_all_the_fields():
    """_collect_fields walks top-level fields, multi-fields and nested props."""
    m = mapping.Mapping()
    m.field('f1', 'text', test_attr='f1', fields={'f2': Keyword(test_attr='f2')})
    m.field('f3', Nested(test_attr='f3', properties={'f4': Text(test_attr='f4')}))

    collected = {field.test_attr for field in m.properties._collect_fields()}
    assert collected == {'f1', 'f2', 'f3', 'f4'}
开发者ID:elastic,项目名称:elasticsearch-dsl-py,代码行数:9,代码来源:test_mapping.py

示例7: test_mapping_can_collect_multiple_analyzers

# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Text [as 别名]
def test_mapping_can_collect_multiple_analyzers():
    """Analyzers used as analyzer/search_analyzer, including on sub-fields,
    are all gathered by _collect_analysis() together with their filters and
    tokenizers."""
    index_analyzer = analysis.analyzer(
        'my_analyzer1',
        tokenizer='keyword',
        filter=[
            'lowercase',
            analysis.token_filter('my_filter1', 'stop', stopwords=['a', 'b']),
        ],
    )
    search_analyzer = analysis.analyzer(
        'my_analyzer2',
        tokenizer=analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
        filter=[analysis.token_filter('my_filter2', 'stop', stopwords=['c', 'd'])],
    )

    m = mapping.Mapping()
    m.field('title', 'text', analyzer=index_analyzer, search_analyzer=search_analyzer)
    m.field(
        'text', 'text', analyzer=index_analyzer,
        fields={
            'english': Text(analyzer=index_analyzer),
            'unknown': Keyword(analyzer=index_analyzer, search_analyzer=search_analyzer),
        },
    )

    expected = {
        'analyzer': {
            'my_analyzer1': {
                'filter': ['lowercase', 'my_filter1'],
                'tokenizer': 'keyword',
                'type': 'custom',
            },
            'my_analyzer2': {
                'filter': ['my_filter2'],
                'tokenizer': 'trigram',
                'type': 'custom',
            },
        },
        'filter': {
            'my_filter1': {'stopwords': ['a', 'b'], 'type': 'stop'},
            'my_filter2': {'stopwords': ['c', 'd'], 'type': 'stop'},
        },
        'tokenizer': {'trigram': {'max_gram': 3, 'min_gram': 3, 'type': 'nGram'}},
    }
    assert m._collect_analysis() == expected
开发者ID:elastic,项目名称:elasticsearch-dsl-py,代码行数:35,代码来源:test_mapping.py

示例8: test_mapping_can_collect_all_analyzers_and_normalizers

# 需要导入模块: import elasticsearch_dsl [as 别名]
# 或者: from elasticsearch_dsl import Text [as 别名]
def test_mapping_can_collect_all_analyzers_and_normalizers():
    """_collect_analysis gathers custom analyzers, normalizers, filters and
    tokenizers from fields, multi-fields, nested properties and the ``_all``
    meta field; analyzers/normalizers declared by name only, with no custom
    components, contribute nothing to the output."""
    kw_stop_analyzer = analysis.analyzer('my_analyzer1',
        tokenizer='keyword',
        filter=['lowercase', analysis.token_filter('my_filter1', 'stop', stopwords=['a', 'b'])],
    )
    # Name-only definitions: expected to be absent from the collected output.
    english_analyzer = analysis.analyzer('english')
    opaque_analyzer = analysis.analyzer('unknown_custom')
    trigram_analyzer = analysis.analyzer('my_analyzer2',
        tokenizer=analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
        filter=[analysis.token_filter('my_filter2', 'stop', stopwords=['c', 'd'])],
    )
    meta_analyzer = analysis.analyzer('my_analyzer3', tokenizer='keyword')
    lowercase_normalizer = analysis.normalizer('my_normalizer1',
        filter=['lowercase']
    )
    combined_normalizer = analysis.normalizer('my_normalizer2',
        filter=['my_filter1', 'my_filter2',
                analysis.token_filter('my_filter3', 'stop', stopwords=['e', 'f'])]
    )
    opaque_normalizer = analysis.normalizer('unknown_custom')

    m = mapping.Mapping()
    m.field('title', 'text', analyzer=kw_stop_analyzer,
        fields={
            'english': Text(analyzer=english_analyzer),
            'unknown': Keyword(search_analyzer=opaque_analyzer),
        }
    )
    m.field('comments', Nested(properties={
        'author': Text(analyzer=trigram_analyzer)
    }))
    m.field('normalized_title', 'keyword', normalizer=lowercase_normalizer)
    m.field('normalized_comment', 'keyword', normalizer=combined_normalizer)
    m.field('unknown', 'keyword', normalizer=opaque_normalizer)
    m.meta('_all', analyzer=meta_analyzer)

    expected = {
        'analyzer': {
            'my_analyzer1': {'filter': ['lowercase', 'my_filter1'], 'tokenizer': 'keyword', 'type': 'custom'},
            'my_analyzer2': {'filter': ['my_filter2'], 'tokenizer': 'trigram', 'type': 'custom'},
            'my_analyzer3': {'tokenizer': 'keyword', 'type': 'custom'},
        },
        'normalizer': {
            'my_normalizer1': {'filter': ['lowercase'], 'type': 'custom'},
            'my_normalizer2': {'filter': ['my_filter1', 'my_filter2', 'my_filter3'], 'type': 'custom'},
        },
        'filter': {
            'my_filter1': {'stopwords': ['a', 'b'], 'type': 'stop'},
            'my_filter2': {'stopwords': ['c', 'd'], 'type': 'stop'},
            'my_filter3': {'stopwords': ['e', 'f'], 'type': 'stop'},
        },
        'tokenizer': {
            'trigram': {'max_gram': 3, 'min_gram': 3, 'type': 'nGram'},
        }
    }
    assert m._collect_analysis() == expected

    # The generated mapping must survive a JSON round-trip unchanged.
    assert json.loads(json.dumps(m.to_dict())) == m.to_dict()
开发者ID:elastic,项目名称:elasticsearch-dsl-py,代码行数:58,代码来源:test_mapping.py


注:本文中的elasticsearch_dsl.Text方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。