This article collects typical usage examples of Python's elasticsearch_dsl.DocType. If you have been wondering what elasticsearch_dsl.DocType is for, how it is used, and what real code that uses it looks like, the curated examples below should help. You can also explore the elasticsearch_dsl module that the class belongs to for further usage.
Six code examples of elasticsearch_dsl.DocType are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
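Before diving into the individual examples, here is a minimal sketch of what a DocType-based model typically looks like: define a subclass with field objects, call init() to create the index mapping, then save documents. The host, the index name 'blog', and the fields here are illustrative assumptions, not taken from the examples below.

from elasticsearch_dsl import DocType, Text, Date
from elasticsearch_dsl.connections import connections

# Register a default connection (a local node is assumed here).
connections.create_connection(hosts=['localhost'])

class Post(DocType):
    title = Text()
    published = Date()

    class Meta:
        index = 'blog'

Post.init()  # create the index and mapping if they do not exist yet
Post(title='Hello', published='2018-01-01').save()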
Example 1: _find_field_template
# Required import: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import DocType [as alias]
def _find_field_template(self, field_name):
    """
    Find, cache, and return the template instance to use for the given field name.
    """
    search_templates = []
    if field_name in self.field_templates:
        search_templates.append(self.field_templates[field_name])
    for _cls in inspect.getmro(self.document):
        if issubclass(_cls, dsl.DocType):
            search_templates.append('seeker/%s/%s.html' % (_cls._doc_type.name, field_name))
    search_templates.append('seeker/column.html')
    template = loader.select_template(search_templates)
    existing_templates = list(set(self._field_templates.values()))
    for existing_template in existing_templates:
        # If an equivalent template object already exists, re-use it instead of the new instance.
        if template.template.name == existing_template.template.name:
            template = existing_template
            break
    self._field_templates.update({field_name: template})
    return template
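To make the fallback order concrete: for a hypothetical field named 'title' on a document class whose _doc_type.name is 'book' (and with no entry in field_templates), the candidate list built above would be roughly the following sketch; loader.select_template() then returns the first candidate that actually exists.

search_templates = [
    'seeker/book/title.html',  # per-document-type, per-field template
    'seeker/column.html',      # generic fallback column template
]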
Example 2: restore_tokens
# Required import: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import DocType [as alias]
from elasticsearch_dsl import DocType, String, Date, Boolean, Index
from elasticsearch_dsl.connections import connections

# ES_NODES, INDEX_NAME and BACKUP_INDEX_NAME are module-level constants defined elsewhere.
def restore_tokens():
    connections.create_connection(hosts=ES_NODES)
    Index(INDEX_NAME).delete()

    class Token(DocType):
        username = String()
        token = String()
        expires = Date()
        read = Boolean()
        write = Boolean()
        revoked = Boolean()
        acl = String()
        groups = String()
        admin = Boolean()
        last_activity_at = Date()

        class Meta:
            index = INDEX_NAME

    Token.init()

    reindex_results = connections.get_connection().reindex(
        body={"source": {"index": BACKUP_INDEX_NAME}, "dest": {"index": INDEX_NAME}},
        request_timeout=3600)

    if reindex_results.get('created') + reindex_results.get('updated') == reindex_results.get('total'):
        return 'Tokens restored to previous schema successfully!'
    else:
        return 'Tokens did not restore from backup properly'
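The function above assumes a backup index already exists. A complementary backup step might look like the following sketch, reusing the same ES_NODES, INDEX_NAME and BACKUP_INDEX_NAME constants; the function name backup_tokens is hypothetical and not part of the original code.

from elasticsearch_dsl.connections import connections

def backup_tokens():
    # Copy the live token index into the backup index before the schema is changed.
    connections.create_connection(hosts=ES_NODES)
    return connections.get_connection().reindex(
        body={"source": {"index": INDEX_NAME}, "dest": {"index": BACKUP_INDEX_NAME}},
        request_timeout=3600)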
Example 3: setup_collection
# Required import: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import DocType [as alias]
def setup_collection(self):
    class WebLink(DocType):
        url = Text()
        html = Text()
        headers = Text()
        status = Integer()
        created = Date()

        class Meta:
            index = self.database_name
            doc_type = self.collection_name

    return WebLink
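Once the class has been created, indexing a crawled page is just attribute assignment plus save(). A hypothetical usage sketch, assuming 'store' is an instance of the surrounding class and a default Elasticsearch connection is already registered:

from datetime import datetime

WebLink = store.setup_collection()
link = WebLink(url='https://example.com', html='<html>...</html>',
               headers='{}', status=200, created=datetime.utcnow())
link.save()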
Example 4: setup_collection
# Required import: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import DocType [as alias]
def setup_collection(self):
    class WebLinkExtracted(DocType):
        url = Text()
        body = Text()
        headers = Text()
        status = Integer()
        created = Date()

        class Meta:
            index = self.database_name
            doc_type = self.collection_name

    return WebLinkExtracted
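The extracted documents can then be queried back with elasticsearch_dsl's Search API. Again a sketch, under the assumption that 'store' is an instance of the surrounding class and a default connection exists:

from elasticsearch_dsl import Search

s = Search(index=store.database_name).query('match', status=200)
for hit in s[:10]:  # the first ten successfully fetched links
    print(hit.url, hit.status)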
Example 5: test_elasticsearch_origin
# Required import: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import DocType [as alias]
def test_elasticsearch_origin(sdc_builder, sdc_executor, elasticsearch):
    """Test for the Elasticsearch origin stage. We do so by putting data in via the Elasticsearch client and
    reading it back via an Elasticsearch origin pipeline. To assert, we snapshot the pipeline.

    The pipeline looks like:

    Elasticsearch origin pipeline:
        es_origin >> trash
    """
    es_index = get_random_string(string.ascii_letters, 10).lower()  # Elasticsearch index names must be lower case
    es_doc_id = get_random_string(string.ascii_letters, 10)
    raw_str = 'Hello World!'

    builder = sdc_builder.get_pipeline_builder()
    es_origin = builder.add_stage('Elasticsearch', type='origin')
    es_origin.set_attributes(index=es_index, query="{'query': {'match_all': {}}}")
    trash = builder.add_stage('Trash')
    es_origin >> trash

    es_origin_pipeline = builder.build(title='ES origin pipeline').configure_for_environment(elasticsearch)
    sdc_executor.add_pipeline(es_origin_pipeline)

    try:
        # Put data into Elasticsearch
        elasticsearch.connect()
        doc_type = DocType(meta={'id': es_doc_id, 'index': es_index})
        doc_type.body = raw_str
        doc_type.save()  # save document to Elasticsearch

        index = Index(es_index)
        assert index.refresh()  # refresh the index, making all operations available for search

        # Run the pipeline and assert
        snapshot = sdc_executor.capture_snapshot(es_origin_pipeline, start_pipeline=True).snapshot
        # No need to stop the pipeline - the ES origin shuts off once data is read from Elasticsearch
        snapshot_data = snapshot[es_origin.instance_name].output[0].field

        # assert ES metadata
        assert snapshot_data['_index'].value == es_index and snapshot_data['_id'].value == es_doc_id
        # assert ES data
        assert snapshot_data['_source']['body'].value == raw_str
    finally:
        # Clean up test data in ES
        idx = Index(es_index)
        idx.delete()
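If the indexed document needs to be verified independently of the pipeline snapshot, elasticsearch_dsl's Search API can serve as an optional pre-flight check. This is a sketch and not part of the original test:

from elasticsearch_dsl import Search

hits = Search(index=es_index).query('match_all').execute()
assert any(hit.meta.id == es_doc_id for hit in hits)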
Example 6: test_offset_upgrade
# Required import: import elasticsearch_dsl [as alias]
# Or: from elasticsearch_dsl import DocType [as alias]
def test_offset_upgrade(sdc_builder, sdc_executor, elasticsearch):
    """Ensure that when upgrading from an older offset format (which can be generated either by SCH or by upgrading
    a pre-multithreaded pipeline) we properly upgrade the offset and the pipeline does not re-read everything
    from the source.
    """
    es_index = get_random_string(string.ascii_letters, 10).lower()
    es_doc_id = get_random_string(string.ascii_letters, 10)
    raw_str = 'Hello World!'

    builder = sdc_builder.get_pipeline_builder()
    es_origin = builder.add_stage('Elasticsearch', type='origin')
    es_origin.set_attributes(index=es_index, query="{'query': {'match_all': {}}}")
    trash = builder.add_stage('Trash')
    es_origin >> trash

    pipeline = builder.build().configure_for_environment(elasticsearch)
    sdc_executor.add_pipeline(pipeline)

    # We hard-code the offset to the pre-migration (pre-multithreaded) format, forcing the origin to upgrade it
    offset = {
        'offsets': {
            '$com.streamsets.datacollector.pollsource.offset$': None,
        },
        'version': 2
    }
    sdc_executor.api_client.update_pipeline_committed_offsets(pipeline.id, body=offset)

    try:
        # Put data into Elasticsearch
        elasticsearch.connect()
        doc_type = DocType(meta={'id': es_doc_id, 'index': es_index})
        doc_type.body = raw_str
        doc_type.save()  # save document to Elasticsearch

        index = Index(es_index)
        assert index.refresh()  # refresh the index, making all operations available for search

        # Run the pipeline and assert
        snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
        # No need to stop the pipeline - the ES origin shuts off once data is read from Elasticsearch
        snapshot_data = snapshot[es_origin.instance_name].output[0].field

        # assert ES metadata
        assert snapshot_data['_index'] == es_index and snapshot_data['_id'] == es_doc_id
        # assert ES data
        assert snapshot_data['_source']['body'] == raw_str

        # Now validate that the offset no longer contains the poll-source key
        offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
        assert offset is not None
        assert '$com.streamsets.datacollector.pollsource.offset$' not in offset['offsets']
    finally:
        # Clean up test data in ES
        idx = Index(es_index)
        idx.delete()