本文整理汇总了Python中arches.app.search.elasticsearch_dsl_builder.Query.delete方法的典型用法代码示例。如果您正苦于以下问题:Python Query.delete方法的具体用法?Python Query.delete怎么用?Python Query.delete使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 arches.app.search.elasticsearch_dsl_builder.Query 的用法示例。
在下文中一共展示了Query.delete方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: delete_index
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def delete_index(self):
    """Remove this concept's label documents from the 'concept_labels'
    index and drop its terms from the search engine."""
    search_engine = SearchEngineFactory().create()
    label_query = Query(search_engine, start=0, limit=10000)
    label_query.add_query(
        Match(field='conceptid', query=self.conceptid, type='phrase')
    )
    label_query.delete(index='concept_labels')
    search_engine.delete_terms(self.id)
示例2: reverse_func
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def reverse_func(apps, schema_editor):
    """Reverse migration step: reload the base ontology, strip namespace URIs
    from node ontology classes and edge ontology properties (keeping only the
    fragment after the last '/'), remove the base Arches concept from the
    Elasticsearch 'strings' index, and delete the 'identifier' value type.
    """
    extensions = [os.path.join(settings.ONTOLOGY_PATH, x) for x in settings.ONTOLOGY_EXT]
    management.call_command('load_ontology', source=os.path.join(settings.ONTOLOGY_PATH, settings.ONTOLOGY_BASE),
        version=settings.ONTOLOGY_BASE_VERSION, ontology_name=settings.ONTOLOGY_BASE_NAME, id=settings.ONTOLOGY_BASE_ID, extensions=','.join(extensions), verbosity=0)
    Node = apps.get_model("models", "Node")
    Edge = apps.get_model("models", "Edge")
    for node in Node.objects.all():
        # keep only the URI fragment after the final '/'
        node.ontologyclass = str(node.ontologyclass).split('/')[-1]
        node.save()
    for edge in Edge.objects.all():
        edge.ontologyproperty = str(edge.ontologyproperty).split('/')[-1]
        edge.save()
    # remove index for base Arches concept
    se = SearchEngineFactory().create()
    query = Query(se, start=0, limit=10000)
    query.add_query(Term(field='conceptid', term='00000000-0000-0000-0000-000000000001'))
    query.delete(index='strings', doc_type='concept')
    try:
        DValueType = apps.get_model("models", "DValueType")
        DValueType.objects.get(valuetype='identifier').delete()
    except Exception:
        # best-effort cleanup: the 'identifier' value type may already be gone.
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        pass
示例3: test_bulk_delete
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def test_bulk_delete(self):
    """
    Test bulk deleting of documents in Elasticsearch
    """
    se = SearchEngineFactory().create()
    # se.create_index(index='test')
    # seed ten prefLabel docs (ids 0-9) and ten altLabel docs (ids 100-109)
    for doc_num in range(10):
        pref_doc = {
            'id': doc_num,
            'type': 'prefLabel',
            'value': 'test pref label',
        }
        se.index_data(index='test', doc_type='test', body=pref_doc, idfield='id', refresh=True)
        alt_doc = {
            'id': doc_num + 100,
            'type': 'altLabel',
            'value': 'test alt label',
        }
        se.index_data(index='test', doc_type='test', body=alt_doc, idfield='id', refresh=True)
    # bulk-delete every altLabel document; only the prefLabels should remain
    delete_query = Query(se, start=0, limit=100)
    delete_query.add_query(Match(field='type', query='altLabel'))
    delete_query.delete(index='test', refresh=True)
    self.assertEqual(se.es.count(index='test', doc_type='test')['count'], 10)
示例4: delete_concept_values_index
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def delete_concept_values_index(concepts_to_delete):
    """Remove indexed concept documents for every concept in
    *concepts_to_delete* (a mapping whose values are concept objects
    exposing an ``id`` attribute).
    """
    se = SearchEngineFactory().create()
    # .values() instead of the Python-2-only .itervalues(): identical iteration
    # behavior on Python 2, and keeps the code working on Python 3
    for concept in concepts_to_delete.values():
        query = Query(se, start=0, limit=10000)
        term = Term(field='conceptid', term=concept.id)
        query.add_query(term)
        query.delete(index='strings', doc_type='concept')
示例5: index_resources
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def index_resources(clear_index=True, batch_size=settings.BULK_IMPORT_BATCH_SIZE):
    """
    Indexes all resources from the database
    Keyword Arguments:
    clear_index -- set to True to remove all the resources from the index before the reindexing operation
    batch_size -- the number of records to index as a group, the larger the number to more memory required
    """
    search_engine = SearchEngineFactory().create()
    if clear_index:
        # wipe all existing term documents before rebuilding the index
        Query(se=search_engine).delete(index='strings', doc_type='term')
    graph_ids = (models.GraphModel.objects
                 .filter(isresource=True)
                 .exclude(graphid=settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID)
                 .values_list('graphid', flat=True))
    index_resources_by_type(graph_ids, clear_index=clear_index, batch_size=batch_size)
示例6: index_resources_by_type
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def index_resources_by_type(resource_types, clear_index=True, batch_size=settings.BULK_IMPORT_BATCH_SIZE):
    """
    Indexes all resources of a given type(s)
    Arguments:
    resource_types -- array of graph ids that represent resource types
    Keyword Arguments:
    clear_index -- set to True to remove all the resources of the types passed in from the index before the reindexing operation
    batch_size -- the number of records to index as a group, the larger the number to more memory required
    """
    se = SearchEngineFactory().create()
    datatype_factory = DataTypeFactory()
    # map of nodeid (stringified) -> datatype for every node in the system,
    # built once so each resource's document generation can look types up cheaply
    node_datatypes = {str(nodeid): datatype for nodeid, datatype in models.Node.objects.values_list('nodeid', 'datatype')}
    for resource_type in resource_types:
        start = datetime.now()
        resources = Resource.objects.filter(graph_id=str(resource_type))
        graph_name = models.GraphModel.objects.get(graphid=str(resource_type)).name
        print "Indexing resource type '{0}'".format(graph_name)
        result_summary = {'database':len(resources), 'indexed':0}
        if clear_index:
            # drop every existing document of this resource type before reindexing
            q = Query(se=se)
            q.delete(index='resource', doc_type=str(resource_type))
        # two bulk indexers: one for resource documents, one for their search terms
        with se.BulkIndexer(batch_size=batch_size, refresh=True) as doc_indexer:
            with se.BulkIndexer(batch_size=batch_size, refresh=True) as term_indexer:
                for resource in resources:
                    document, terms = resource.get_documents_to_index(fetchTiles=True, datatype_factory=datatype_factory, node_datatypes=node_datatypes)
                    doc_indexer.add(index='resource', doc_type=document['graph_id'], id=document['resourceinstanceid'], data=document)
                    for term in terms:
                        term_indexer.add(index='strings', doc_type='term', id=term['_id'], data=term['_source'])
        # compare database row count vs. indexed count to report success or failure
        result_summary['indexed'] = se.count(index='resource', doc_type=str(resource_type))
        status = 'Passed' if result_summary['database'] == result_summary['indexed'] else 'Failed'
        print "Status: {0}, Resource Type: {1}, In Database: {2}, Indexed: {3}, Took: {4} seconds".format(status, graph_name, result_summary['database'], result_summary['indexed'], (datetime.now()-start).seconds)
示例7: index_resource_relations
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def index_resource_relations(clear_index=True, batch_size=settings.BULK_IMPORT_BATCH_SIZE):
    """
    Indexes all resource to resource relation records
    Keyword Arguments:
    clear_index -- set to True to remove all the resources from the index before the reindexing operation
    batch_size -- the number of records to index as a group, the larger the number to more memory required
    """
    start = datetime.now()
    print "Indexing resource to resource relations"
    cursor = connection.cursor()
    se = SearchEngineFactory().create()
    if clear_index:
        # remove every document in the relations index before rebuilding it
        q = Query(se=se)
        q.delete(index='resource_relations')
    with se.BulkIndexer(batch_size=batch_size, refresh=True) as resource_relations_indexer:
        # pull every cross-resource relation row straight from the database
        sql = """
        SELECT resourcexid, resourceinstanceidfrom, notes, relationshiptype, resourceinstanceidto
        FROM public.resource_x_resource;
        """
        cursor.execute(sql)
        for resource_relation in cursor.fetchall():
            # column order matches the SELECT list above
            doc = {
                'resourcexid': resource_relation[0],
                'resourceinstanceidfrom': resource_relation[1],
                'notes': resource_relation[2],
                'relationshiptype': resource_relation[3],
                'resourceinstanceidto': resource_relation[4]
            }
            resource_relations_indexer.add(index='resource_relations', doc_type='all', id=doc['resourcexid'], data=doc)
    # cursor.rowcount still reflects the SELECT above; compare with indexed count
    index_count = se.count(index='resource_relations')
    print "Status: {0}, In Database: {1}, Indexed: {2}, Took: {3} seconds".format('Passed' if cursor.rowcount == index_count else 'Failed', cursor.rowcount, index_count, (datetime.now()-start).seconds)
示例8: clear_resources
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def clear_resources():
    """Removes all resource instances from your db and elasticsearch resource index"""
    se = SearchEngineFactory().create()
    # a Query with no sub-queries added acts as match-all, so these
    # delete calls wipe each index in its entirety
    match_all_query = Query(se)
    match_all_query.delete(index='strings', doc_type='term')
    match_all_query.delete(index='resource')
    match_all_query.delete(index='resource_relations')
    print 'deleting', Resource.objects.count(), 'resources'
    cursor = connection.cursor()
    # CASCADE also truncates tables with foreign keys into resource_instances
    cursor.execute("TRUNCATE public.resource_instances CASCADE;" )
    print Resource.objects.count(), 'resources remaining'
    print 'deleting', models.ResourceXResource.objects.count(), 'resource relationships'
    cursor.execute("TRUNCATE public.resource_x_resource CASCADE;" )
    print models.ResourceXResource.objects.count(), 'resource relationships remaining'
示例9: delete_index
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def delete_index(self):
    """Delete every indexed concept document in the 'strings' index whose
    'id' field matches this object's id."""
    engine = SearchEngineFactory().create()
    removal_query = Query(engine, start=0, limit=10000)
    removal_query.add_query(Term(field='id', term=self.id))
    removal_query.delete(index='strings', doc_type='concept')
示例10: index_concepts
# 需要导入模块: from arches.app.search.elasticsearch_dsl_builder import Query [as 别名]
# 或者: from arches.app.search.elasticsearch_dsl_builder.Query import delete [as 别名]
def index_concepts(clear_index=True, batch_size=settings.BULK_IMPORT_BATCH_SIZE):
    """
    Indexes all concepts from the database
    Keyword Arguments:
    clear_index -- set to True to remove all the concepts from the index before the reindexing operation
    batch_size -- the number of records to index as a group, the larger the number to more memory required
    """
    start = datetime.now()
    print "Indexing concepts"
    cursor = connection.cursor()
    se = SearchEngineFactory().create()
    if clear_index:
        # wipe all existing concept documents before rebuilding the index
        q = Query(se=se)
        q.delete(index='strings', doc_type='concept')
    with se.BulkIndexer(batch_size=batch_size, refresh=True) as concept_indexer:
        concept_strings = []  # NOTE(review): never read below — looks like dead code
        # index the labels of Collections and ConceptSchemes; each acts as its own top concept
        for conceptValue in models.Value.objects.filter(Q(concept__nodetype='Collection') | Q(concept__nodetype='ConceptScheme'), valuetype__category ='label'):
            doc = {
                'category': 'label',
                'conceptid': conceptValue.concept_id,
                'language': conceptValue.language_id,
                'value': conceptValue.value,
                'type': conceptValue.valuetype_id,
                'id': conceptValue.valueid,
                'top_concept': conceptValue.concept_id
            }
            concept_indexer.add(index='strings', doc_type='concept', id=doc['id'], data=doc)
        # build a quoted, comma-separated list of label value types for the SQL below
        valueTypes = []
        valueTypes2=[]  # NOTE(review): populated but never read — candidate for removal
        for valuetype in models.DValueType.objects.filter(category='label').values_list('valuetype', flat=True):
            valueTypes2.append("%s" % valuetype)
            valueTypes.append("'%s'" % valuetype)
        valueTypes = ",".join(valueTypes)
        # for every top concept, recursively walk its narrower descendants and index each label
        for conceptValue in models.Relation.objects.filter(relationtype='hasTopConcept'):
            topConcept = conceptValue.conceptto_id
            # NOTE(review): SQL assembled with str.format; inputs come from this
            # database (concept ids / value types), but parameterized queries would be safer
            sql = """
            WITH RECURSIVE children_inclusive AS (
                SELECT d.conceptidfrom, d.conceptidto, c.*, 1 AS depth ---|NonRecursive Part
                FROM relations d
                JOIN values c ON(c.conceptid = d.conceptidto)
                JOIN values c2 ON(c2.conceptid = d.conceptidfrom)
                WHERE d.conceptidto = '{0}'
                and c2.valuetype = 'prefLabel'
                and c.valuetype in ({1})
                and (d.relationtype = 'narrower' or d.relationtype = 'hasTopConcept')
                UNION
                SELECT d.conceptidfrom, d.conceptidto, v.*, depth+1 ---|RecursivePart
                FROM relations d
                JOIN children_inclusive b ON(b.conceptidto = d.conceptidfrom)
                JOIN values v ON(v.conceptid = d.conceptidto)
                JOIN values v2 ON(v2.conceptid = d.conceptidfrom)
                WHERE v2.valuetype = 'prefLabel'
                and v.valuetype in ({1})
                and (d.relationtype = 'narrower' or d.relationtype = 'hasTopConcept')
            ) SELECT valueid, value, conceptid, languageid, valuetype FROM children_inclusive ORDER BY depth;
            """.format(topConcept, valueTypes)
            cursor.execute(sql)
            for conceptValue in cursor.fetchall():
                # column order: valueid, value, conceptid, languageid, valuetype
                doc = {
                    'category': 'label',
                    'conceptid': conceptValue[2],
                    'language': conceptValue[3],
                    'value': conceptValue[1],
                    'type': conceptValue[4],
                    'id': conceptValue[0],
                    'top_concept': topConcept
                }
                concept_indexer.add(index='strings', doc_type='concept', id=doc['id'], data=doc)
    # compare the database's label count with the indexed count to report pass/fail
    cursor.execute("SELECT count(*) from values WHERE valuetype in ({0})".format(valueTypes))
    concept_count_in_db = cursor.fetchone()[0]
    index_count = se.count(index='strings', doc_type='concept')
    print "Status: {0}, In Database: {1}, Indexed: {2}, Took: {3} seconds".format('Passed' if concept_count_in_db == index_count else 'Failed', concept_count_in_db, index_count, (datetime.now()-start).seconds)