This article compiles typical usage examples of the IndexWriter.rollback method from the Python module org.apache.lucene.index (PyLucene). If you are wondering what IndexWriter.rollback does, how to use it, or want concrete examples, the curated code samples below may help. In Lucene, rollback closes the IndexWriter and discards every change made since the last commit. You can also explore further usage examples of the containing class, org.apache.lucene.index.IndexWriter.
The following presents 2 code examples of IndexWriter.rollback, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
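Before the examples, here is a minimal sketch of what rollback actually does. It is not taken from the examples below: the index path "/tmp/rollback-demo" and the "title" field are hypothetical, and PyLucene 4.x is assumed.

import lucene
from java.io import File
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.document import Document, Field
from org.apache.lucene.index import DirectoryReader, IndexWriter, IndexWriterConfig
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.util import Version

lucene.initVM()
direc = SimpleFSDirectory(File("/tmp/rollback-demo"))
config = IndexWriterConfig(Version.LUCENE_CURRENT, StandardAnalyzer(Version.LUCENE_CURRENT))
writer = IndexWriter(direc, config)
writer.commit()                  # establish an initial commit point
doc = Document()
doc.add(Field("title", "draft", Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(doc)          # added but not yet committed
writer.rollback()                # discards the uncommitted document and closes the writer

reader = DirectoryReader.open(direc)
print(reader.numDocs())          # prints 0: the rolled-back document never became visible
reader.close()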
Example 1: rollback
# Required import: from org.apache.lucene.index import IndexWriter [as alias]
# Or: from org.apache.lucene.index.IndexWriter import rollback [as alias]
# The example also relies on these PyLucene imports (the JVM must already
# have been started with lucene.initVM()):
from java.io import File
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.index import IndexWriterConfig
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.util import Version

def rollback(collection_name):
    # INDEX_DIR_DEFAULT is a module-level constant defined elsewhere
    if collection_name != "DEFAULT":
        INDEX_DIR = collection_name
    else:
        INDEX_DIR = INDEX_DIR_DEFAULT
    direc = SimpleFSDirectory(File(INDEX_DIR))
    analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
    # set up the writer configuration
    config = IndexWriterConfig(Version.LUCENE_CURRENT, analyzer)
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND)
    writer = IndexWriter(direc, config)
    # discard any uncommitted changes in this collection's index
    writer.rollback()
    writer.close()
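Note that rollback() already closes the writer, so the trailing writer.close() in this example is effectively redundant (closing an already-closed writer is a harmless no-op). A hypothetical call, assuming an index directory named "articles" exists:

rollback("articles")   # discard uncommitted changes in the "articles" index
rollback("DEFAULT")    # operate on INDEX_DIR_DEFAULT instead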
Example 2: update
# Required import: from org.apache.lucene.index import IndexWriter [as alias]
# Or: from org.apache.lucene.index.IndexWriter import rollback [as alias]
#.........part of the code omitted here.........
    # add the newly modified document
    doc = Document()
    # index fields by primary key
    for primary_key in primary_keys_map:
        try:
            field = Field(primary_key, data[primary_key], Field.Store.NO, Field.Index.ANALYZED)
            doc.add(field)
        except Exception:
            # the modified document is missing a primary-key field
            # primary_keys_map.pop(collection_name)
            return 101
    # compress the data using snappy if compression is on
    if to_be_compressed_input:
        data_string = snappy.compress(str(json.dumps(data)))
    else:
        data_string = json.dumps(data)
    field = Field("$DATA$", data_string, Field.Store.YES, Field.Index.ANALYZED)
    doc.add(field)
    writer.addDocument(doc)
    tofind_primary_keyvalue_pairs = {}
    tofind_nonprimary_keyvalue_pairs = {}
    # separate the primary keys from the non-primary keys
    for key in tofind_keyvalue_pairs.keys():
        if key in primary_keys_map:
            tofind_primary_keyvalue_pairs[key] = tofind_keyvalue_pairs[key]
        else:
            tofind_nonprimary_keyvalue_pairs[key] = tofind_keyvalue_pairs[key]
    # filter documents on the primary keys first, via a Lucene boolean query
    if len(tofind_primary_keyvalue_pairs) > 0:
        query = BooleanQuery()
        for key in tofind_primary_keyvalue_pairs.keys():
            temp = QueryParser(Version.LUCENE_CURRENT, key, analyzer).parse(tofind_primary_keyvalue_pairs[key])
            query.add(BooleanClause(temp, BooleanClause.Occur.MUST))
        hits = searcher.search(query, MAX_RESULTS).scoreDocs
        for hit in hits:
            doc = searcher.doc(hit.doc)
            if to_be_compressed_input:
                data = snappy.uncompress(doc.get("$DATA$"))
            else:
                data = doc.get("$DATA$")
            # non-primary-key filtering (without having to load all the
            # primary-key-filtered values into main memory)
            if len(tofind_nonprimary_keyvalue_pairs) > 0:
                entry = json.loads(data)
                satisfied = True
                for key in tofind_nonprimary_keyvalue_pairs.keys():
                    if entry.get(key) != tofind_nonprimary_keyvalue_pairs[key]:
                        satisfied = False
                        break
                if satisfied:
                    if rewrite(data) != 106:
                        no_of_documents_modified += 1
                    else:
                        writer.rollback()
                        return 106
            else:
                if rewrite(data) != 106:
                    no_of_documents_modified += 1
                else:
                    writer.rollback()
                    return 106
    else:
        # no primary keys to filter on: scan every document in the index
        for i in range(0, ireader.numDocs()):
            doc = searcher.doc(i)
            if to_be_compressed_input:
                data = snappy.uncompress(doc.get("$DATA$"))
            else:
                data = doc.get("$DATA$")
            # non-primary-key filtering (same as above)
            if len(tofind_nonprimary_keyvalue_pairs) > 0:
                entry = json.loads(data)
                satisfied = True
                for key in tofind_nonprimary_keyvalue_pairs.keys():
                    if entry.get(key) != tofind_nonprimary_keyvalue_pairs[key]:
                        satisfied = False
                        break
                if satisfied:
                    if rewrite(data) != 106:
                        no_of_documents_modified += 1
                    else:
                        writer.rollback()
                        return 106
            else:
                if rewrite(data) != 106:
                    no_of_documents_modified += 1
                else:
                    writer.rollback()
                    return 106
    ireader.close()
    if commit:
        writer.commit()
    writer.close()
    return str(no_of_documents_modified) + " have been modified"
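The update example commits only when the caller requests it and calls writer.rollback() as soon as any single rewrite fails, so the whole batch either becomes visible or is discarded. Below is a minimal sketch of that commit-or-rollback pattern in isolation; the index path "/tmp/demo-index" and the "id" field are hypothetical, and PyLucene 4.x is assumed.

import lucene
from java.io import File
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.document import Document, Field
from org.apache.lucene.index import IndexWriter, IndexWriterConfig
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.util import Version

lucene.initVM()
direc = SimpleFSDirectory(File("/tmp/demo-index"))
config = IndexWriterConfig(Version.LUCENE_CURRENT, StandardAnalyzer(Version.LUCENE_CURRENT))
writer = IndexWriter(direc, config)
try:
    doc = Document()
    doc.add(Field("id", "42", Field.Store.YES, Field.Index.ANALYZED))
    writer.addDocument(doc)
    writer.commit()        # make the batch durable and visible to new readers
    writer.close()
except Exception:
    writer.rollback()      # discard everything since the last commit; also closes the writer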