本文整理汇总了Python中elasticsearch_dsl.connections.connections.get_connection方法的典型用法代码示例。如果您正苦于以下问题:Python connections.get_connection方法的具体用法?Python connections.get_connection怎么用?Python connections.get_connection使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类elasticsearch_dsl.connections.connections
的用法示例。
在下文中一共展示了connections.get_connection方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: push
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def push(self):
    """Push built documents to ElasticSearch.

    Ensures the connection and mapping exist first; no-op when the push
    queue is empty. Documents are sent in one bulk request with
    ``refresh=True`` so they become searchable immediately, then the
    queue is cleared.
    """
    self._refresh_connection()
    self.create_mapping()
    if not self.push_queue:
        logger.debug("No documents to push, skipping push.")
        return
    logger.debug(
        "Found %s documents to push to Elasticsearch.", len(self.push_queue)
    )
    bulk(
        connections.get_connection(),
        (doc.to_dict(True) for doc in self.push_queue),
        refresh=True,
    )
    self.push_queue = []
    # Fixed typo in the log message ("builded" -> "built").
    logger.debug("Finished pushing built documents to Elasticsearch server.")
示例2: clear
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def clear(cls, index=None, using=None):
    """
    Delete every document of this type from its Elasticsearch index.
    """
    # Resolve the connection alias and index name, falling back to
    # the doc type's settings and then the project-wide defaults.
    conn_alias = using or cls._doc_type.using or 'default'
    target_index = index or cls._doc_type.index or getattr(settings, 'SEEKER_INDEX', 'seeker')
    es = connections.get_connection(conn_alias)
    if not es.indices.exists_type(index=target_index, doc_type=cls._doc_type.name):
        return
    # Scan all matching documents and delete them through the bulk API.
    match_all = {'query': {'match_all': {}}}
    actions = (
        {
            '_op_type': 'delete',
            '_index': target_index,
            '_type': cls._doc_type.name,
            '_id': hit['_id'],
        }
        for hit in scan(es, index=target_index, doc_type=cls._doc_type.name, query=match_all)
    )
    bulk(es, actions)
    es.indices.refresh(index=target_index)
示例3: index
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def index(obj, index=None, using=None):
    """
    Shortcut to index a Django object in every document type mapped to its model class.
    """
    from django.contrib.contenttypes.models import ContentType
    model = ContentType.objects.get_for_model(obj).model_class()
    for document in model_documents.get(model, []):
        # Skip document types whose queryset does not contain this object.
        if not document.queryset().filter(pk=obj.pk).exists():
            continue
        conn_alias = using or document._doc_type.using or 'default'
        target_index = index or document._doc_type.index or getattr(settings, 'SEEKER_INDEX', 'seeker')
        payload = document.serialize(obj)
        # A serialized '_id' (if present) becomes the ES document id.
        doc_id = payload.pop('_id', None)
        connections.get_connection(conn_alias).index(
            index=target_index,
            doc_type=document._doc_type.name,
            body=payload,
            id=doc_id,
            refresh=True
        )
示例4: delete
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def delete(obj, index=None, using=None):
    """
    Shortcut to remove a Django object from every ES document type mapped to its model class.
    """
    from django.contrib.contenttypes.models import ContentType
    model = ContentType.objects.get_for_model(obj).model_class()
    for document in model_documents.get(model, []):
        conn_alias = using or document._doc_type.using or 'default'
        target_index = index or document._doc_type.index or getattr(settings, 'SEEKER_INDEX', 'seeker')
        es = connections.get_connection(conn_alias)
        try:
            es.delete(
                index=target_index,
                doc_type=document._doc_type.name,
                id=document.get_id(obj),
                refresh=True
            )
        except NotFoundError:
            # If this object wasn't indexed for some reason (maybe not in the document's queryset), no big deal.
            pass
示例5: reindex
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def reindex(doc_class, index, using, options):
    """
    Index all the things, using ElasticSearch's bulk API for speed.
    """
    def iter_actions():
        # Merge the bulk-action header into each serialized document.
        header = {'_index': index, '_type': doc_class._doc_type.name}
        for doc in doc_class.documents(cursor=options['cursor']):
            action = dict(header)
            action.update(doc)
            yield action

    conn = connections.get_connection(using)
    if options['quiet']:
        stream = iter_actions()
    else:
        # Wrap the generator in a progress bar for interactive runs.
        stream = progress(iter_actions(), count=doc_class.count(), label=doc_class.__name__)
    bulk(conn, stream)
    conn.indices.refresh(index=index)
示例6: handle
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def handle(self, *args, **options):
    """Load bulk actions from a JSON file and index them.

    Requires the ``filename`` option; when ``index`` is given it
    overrides each action's ``_index``. Every touched index is
    refreshed afterwards so the data is immediately searchable.
    """
    if not options['filename']:
        raise CommandError('Please specify a file (-f) to read data from')
    refresh_indices = set()
    def get_actions():
        # Use a context manager so the file handle is closed
        # deterministically (the original `json.load(open(...))` leaked it).
        with open(options['filename'], 'rb') as fp:
            items = json.load(fp)
        for data in items:
            if options['index']:
                data['_index'] = options['index']
            refresh_indices.add(data['_index'])
            yield data
    es = connections.get_connection()
    bulk(es, get_actions())
    for index in refresh_indices:
        es.indices.refresh(index=index)
示例7: flush
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def flush(request, domain):
    # Should use the delete-by-query plugin
    # http://blog.appliedinformaticsinc.com/how-to-delete-elasticsearch-data-records-by-dsl-query/ # NOQA
    # Or the new API
    # https://www.elastic.co/guide/en/elasticsearch/reference/5.1/docs-delete-by-query.html # NOQA
    # Perhaps we can use
    # connections.get_connection().delete_by_query ?!?!
    assert domain
    started = time.time()
    # Collect the ids of every title indexed for this domain...
    matching = TitleDoc.search().filter('term', domain=domain.name)
    doc_ids = {hit._id for hit in matching.scan()}
    # ...then delete them one at a time (no delete-by-query here).
    for doc_id in doc_ids:
        TitleDoc.get(id=doc_id).delete()
    elapsed = time.time() - started
    # NOTE(review): 'messsage' typo kept as-is — clients may rely on this key.
    return http.JsonResponse({
        'messsage': 'OK',
        'took': elapsed,
    })
示例8: restore_tokens
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def restore_tokens():
    # Restore the tokens index from its backup index via a server-side reindex.
    connections.create_connection(hosts=ES_NODES)
    # Drop the current tokens index before recreating it with the old schema.
    Index(INDEX_NAME).delete()
    # Legacy schema definition (pre-6.x elasticsearch_dsl DocType/String API).
    class Token(DocType):
        username = String()
        token = String()
        expires = Date()
        read = Boolean()
        write = Boolean()
        revoked = Boolean()
        acl = String()
        groups = String()
        admin = Boolean()
        last_activity_at = Date()
        class Meta:
            index = INDEX_NAME
    # Create the index with the mapping declared above.
    Token.init()
    # Server-side copy from the backup index; long timeout for large indices.
    reindex_results = connections.get_connection().reindex(body={"source": {"index": BACKUP_INDEX_NAME}, "dest": {"index": INDEX_NAME}}, request_timeout=3600)
    # Success only when every source document was either created or updated.
    # NOTE(review): missing keys would make `.get()` return None and raise a
    # TypeError on `+` — assumes the reindex response always contains
    # 'created', 'updated' and 'total'.
    if reindex_results.get('created') + reindex_results.get('updated') == reindex_results.get('total'):
        return ('Tokens restored to previous schema successfully!')
    else:
        return ('Tokens did not restore from backup properly')
示例9: store
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def store():
    """Pytest fixture: yield an Elasticsearch-backed ``Store`` with clean indices.

    Best-effort deletes the indicator/token indices before the test runs
    and again afterwards; failures (e.g. indices not present yet) are
    deliberately ignored.
    """
    try:
        connections.get_connection().indices.delete(index='indicators-*')
        connections.get_connection().indices.delete(index='tokens')
    # Removed the unused `as e` binding; the swallow itself is intentional
    # best-effort cleanup (indices may simply not exist).
    except Exception:
        pass
    with Store(store_type='elasticsearch', nodes='127.0.0.1:9200') as s:
        s._load_plugin(nodes='127.0.0.1:9200')
        yield s
    try:
        assert connections.get_connection().indices.delete(index='indicators-*')
        assert connections.get_connection().indices.delete(index='tokens')
    except Exception:
        pass
示例10: setUpClass
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def setUpClass(cls):
    """Build a fresh test index, load the sample catalog and generate the dumps."""
    super(ViewTests, cls).setUpClass()
    es = connections.get_connection()
    # Start from a clean slate: drop any leftover index, then recreate it.
    if es.indices.exists(cls.index):
        es.indices.delete(cls.index)
    es.indices.create(cls.index, body=INDEX_CREATION_BODY)
    cls.catalog_id = 'csv_dump_test_catalog'
    sample_path = os.path.join(samples_dir, 'distribution_daily_periodicity.json')
    index_catalog(cls.catalog_id, sample_path, cls.index)
    cls.task = GenerateDumpTask()
    cls.task.save()
    # Generate the global dump first, then the per-catalog one.
    DumpGenerator(cls.task).generate()
    DumpGenerator(cls.task, cls.catalog_id).generate()
示例11: __init__
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def __init__(self, node: Node, task: IndexMetadataTask, index: str):
    """Prepare the metadata indexer for *node*, creating the ES index if needed.

    Reads the node's ``data.json`` and caches its theme taxonomy.
    Raises ``ValueError`` when the themes cannot be read.
    """
    self.node = node
    self.task = task
    self.index_name = index
    self.elastic: Elasticsearch = connections.get_connection()
    if not self.elastic.indices.exists(self.index_name):
        init_index(self.index_name)
    self.fields_meta = {}
    self.init_fields_meta_cache()
    try:
        data_json = DataJson(node.catalog_url)
        themes = data_json.get('themeTaxonomy', [])
        self.themes = self.get_themes(themes)
    except Exception as exc:
        # Chain the original exception so the root cause isn't lost.
        raise ValueError("Error de lectura de los themes del catálogo") from exc
示例12: destroy
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def destroy(self):
    """Destroy an index."""
    self._refresh_connection()
    # Drop any pending documents along with the index itself.
    self.push_queue = []
    target = self.document_class()._get_index()
    # ignore=404: deleting an index that doesn't exist is not an error.
    connections.get_connection().indices.delete(target, ignore=404)
    self._mapping_created = False
示例13: setUp
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def setUp(self):
    """Populate the test index with 1000 documents via the bulk helper."""
    super(TestsWithData, self).setUp()
    self.docs = [self.TestDoc(title='doc-' + str(n)) for n in range(1000)]
    payload = [doc.to_dict(include_meta=True) for doc in self.docs]
    ok_count, failures = bulk(connections.get_connection(), actions=payload, refresh=True)
    # Every action must have been acknowledged, with no per-item errors.
    self.assertEqual(ok_count, len(payload))
    self.assertEqual(len(failures), 0)
示例14: get_logout_event
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def get_logout_event(index, logonid, timestamp, maxtstamp, screen):
    """
    Look for the logoff event belonging to the given logon id or a shutdown event.

    Returns the newest matching event within the time window, or None
    when neither a logoff nor a shutdown event is found.
    """
    conn = connections.get_connection()
    # workaround to fix time precision issues
    timestamp = timestamp - 999

    def first_hit(must_clauses):
        # Newest event matching the clauses within [timestamp, maxtstamp],
        # or None when the search returns no hits.
        s = Search(using=conn, index=index).query(
            Q('bool', must=must_clauses)
        ).filter(
            'range', datetime={'gte': timestamp, 'lte': maxtstamp}
        ).sort('-datetime')
        res = s.execute()
        try:
            return res[0]
        # Narrowed from a bare `except:` — only an empty result set is expected.
        except IndexError:
            return None

    evt = first_hit([
        Q('match', data_type='windows:evtx:record'),
        Q('match', xml_string=logonid),
        get_dsl_logoff_query(screen),
    ])
    if evt is None:
        # No logoff found for this logon id — fall back to a shutdown event.
        evt = first_hit([Q('match', event_identifier=config.EVENT_SHUTDOWN)])
    return evt
示例15: get_last_shutdown
# 需要导入模块: from elasticsearch_dsl.connections import connections [as 别名]
# 或者: from elasticsearch_dsl.connections.connections import get_connection [as 别名]
def get_last_shutdown(index, maxtstamp, pattern):
    """
    Look for the last shutdown event.

    Returns a dict mapping each computer name to its most recent
    shutdown hit (before ``maxtstamp``), or None when nothing matched.
    An optional query_string ``pattern`` further narrows the search.
    """
    conn = connections.get_connection()
    must = [
        Q('match', data_type='windows:evtx:record'),
        Q('match', event_identifier=config.EVENT_SHUTDOWN)
    ]
    if pattern:
        must.append(Q('query_string', query=pattern, analyze_wildcard=True))
    # [0:0] -> size 0: only the aggregation buckets are needed, not the hits.
    s = Search(using=conn, index=index).query(Q('bool', must=must)).filter(
        'range', datetime={'lte': maxtstamp}).sort('-datetime')[0:0]
    # One most-recent shutdown hit per computer name.
    s.aggs.bucket('computer', 'terms', field='computer_name.keyword').bucket('shutdown', 'top_hits', size=1)
    res = s.execute()
    ret = {}
    for item in res.aggregations['computer']['buckets']:
        ret[item['key']] = item['shutdown']['hits']['hits'][0]
    # Idiomatic empty-check (was `if len(ret.keys()) == 0`).
    return ret or None