This article collects typical usage examples of the IndexWriter.updateDocument method from org.apache.lucene.index in Java. If you have been wondering how exactly to use IndexWriter.updateDocument, what it does, or what real calls to it look like, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.lucene.index.IndexWriter.
Below are 12 code examples of IndexWriter.updateDocument, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
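Before diving into the examples, here is a minimal, self-contained sketch of the basic pattern they all share: updateDocument(Term, Document) acts as an atomic upsert, deleting any existing documents that contain the given term and then adding the new document. The index path and the "id"/"body" field names below are illustrative assumptions for this sketch, not taken from any of the examples that follow.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class UpdateDocumentSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative index location; adjust to your environment.
        Directory dir = FSDirectory.open(Paths.get("/tmp/lucene-index"));
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
        config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        try (IndexWriter writer = new IndexWriter(dir, config)) {
            Document doc = new Document();
            // "id" serves as the unique key field in this sketch.
            doc.add(new StringField("id", "doc-1", Field.Store.YES));
            doc.add(new TextField("body", "hello lucene", Field.Store.NO));
            // Deletes any document whose "id" term is "doc-1", then adds doc.
            writer.updateDocument(new Term("id", "doc-1"), doc);
            writer.commit();
        }
        dir.close();
    }
}

Running the sketch twice leaves a single document in the index, because the second run replaces the document matched by the "id" term instead of adding a duplicate.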
Example 1: indexDoc
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream stream = Files.newInputStream(file)) {
        Document doc = new Document();
        Field pathField = new StringField("path", file.toString(), Field.Store.YES);
        doc.add(pathField);
        doc.add(new LongPoint("modified", lastModified));
        doc.add(new TextField("contents", new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))));
        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            System.out.println("adding " + file);
            writer.addDocument(doc);
        } else {
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), doc);
        }
    }
}
Example 2: update
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
private static void update(final Term uid, final List<ParseContext.Document> docs, final IndexWriter indexWriter) throws IOException {
    if (docs.size() > 1) {
        indexWriter.updateDocuments(uid, docs);
    } else {
        indexWriter.updateDocument(uid, docs.get(0));
    }
}
Example 3: writeEmptyTermVector
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields fields = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(fields, null, flags, fields);
    outResponse.setExists(true);
    dr.close();
    dir.close();
}
Example 4: writeStandardTermVector
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    d.add(new Field("title", "the1 quick brown fox jumps over the1 lazy dog", type));
    d.add(new Field("desc", "the1 quick brown fox jumps over the1 lazy dog", type));
    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields termVectors = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(termVectors, null, flags, termVectors);
    dr.close();
    dir.close();
}
Example 5: indexDoc
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
private static void indexDoc(IndexWriter writer, FileBean t) throws Exception {
    Document doc = new Document();
    if (t.getContent() != null) {
        doc.add(new TextField(LuceneConstants.PATH, t.getFilepath(), Field.Store.YES));
        doc.add(new StringField(LuceneConstants.MODIFIED, UtilsTool.getDateStrByLastModified(t.getLastModified()), Field.Store.YES));
        doc.add(new TextField(LuceneConstants.CONTENT, t.getContent(), CommonConstants.IS_OPEN_CONTEXT ? Field.Store.YES : Field.Store.NO));
        // System.out.println("added to document:" + t.getFilepath());
        if (writer.getConfig().getOpenMode() == IndexWriterConfig.OpenMode.CREATE) {
            writer.addDocument(doc);
        } else {
            writer.updateDocument(new Term(LuceneConstants.PATH, t.getFilepath()), doc);
        }
    }
}
Example 6: indexDoc
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream stream = Files.newInputStream(file)) {
        Document doc = new Document();
        Field pathField = new StringField("path", file.toString(), Field.Store.YES);
        doc.add(pathField);
        doc.add(new LongPoint("modified", lastModified));
        doc.add(new TextField("contents", new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))));
        if (writer.getConfig().getOpenMode() == IndexWriterConfig.OpenMode.CREATE) {
            System.out.println("adding " + file);
            writer.addDocument(doc);
        } else {
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), doc);
        }
    }
}
Example 7: updateIndex
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
/**
 * Update the blog index
 *
 * @param user
 * @throws Exception
 */
public void updateIndex(UUser user) throws Exception {
    IndexWriter writer = getWriter();
    Document doc = new Document();
    doc.add(new StringField("userid", String.valueOf(user.getId()), Field.Store.YES));
    doc.add(new TextField("username", user.getUsername(), Field.Store.YES));
    doc.add(new TextField("description", user.getDescription(), Field.Store.YES));
    writer.updateDocument(new Term("userid", String.valueOf(user.getId())), doc);
    writer.close();
}
Example 8: testClearAllEntityIdentity
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
public void testClearAllEntityIdentity() throws Exception {
    IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY);
    AtomicBoolean indexShard = new AtomicBoolean(true);
    ShardRequestCache requestCacheStats = new ShardRequestCache();
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
    writer.addDocument(newDoc(0, "foo"));
    DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    TermQueryBuilder termQuery = new TermQueryBuilder("id", "0");
    TestEntity entity = new TestEntity(requestCacheStats, indexShard);
    Loader loader = new Loader(reader, 0);
    writer.updateDocument(new Term("id", "0"), newDoc(0, "bar"));
    DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard);
    Loader secondLoader = new Loader(secondReader, 0);
    writer.updateDocument(new Term("id", "0"), newDoc(0, "baz"));
    DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    AtomicBoolean differentIdentity = new AtomicBoolean(true);
    TestEntity thirddEntity = new TestEntity(requestCacheStats, differentIdentity);
    Loader thirdLoader = new Loader(thirdReader, 0);
    BytesReference value1 = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes());
    assertEquals("foo", value1.streamInput().readString());
    BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termQuery.buildAsBytes());
    assertEquals("bar", value2.streamInput().readString());
    logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize());
    BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termQuery.buildAsBytes());
    assertEquals("baz", value3.streamInput().readString());
    assertEquals(3, cache.count());
    final long hitCount = requestCacheStats.stats().getHitCount();
    // clear everything for the indexShard identity even though it's still open
    cache.clear(randomFrom(entity, secondEntity));
    cache.cleanCache();
    assertEquals(1, cache.count());
    // the third entry has not been cleared since it has a different identity
    value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termQuery.buildAsBytes());
    assertEquals(hitCount + 1, requestCacheStats.stats().getHitCount());
    assertEquals("baz", value3.streamInput().readString());
    IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache);
}
Example 9: indexDocuments
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
/**
 * Given a queue of documents to index, index them by popping the queue limited to 1000 items.
 * This method must be synchronized as we have not added any logic to deal with multiple threads writing to the
 * index.
 */
public synchronized void indexDocuments(List<ContentDTO> contentDTOList) throws IOException {
    Analyzer analyzer = new StandardAnalyzer();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
    indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter writer = new IndexWriter(this.indexDirectory, indexWriterConfig);
    try {
        for (ContentDTO contentDTO : contentDTOList) {
            Document doc = new Document();
            // Path is the primary key for documents
            String primaryKey = Values.EMPTY_STRING + contentDTO.getId();
            Field pathField = new StringField(Values.PATH, primaryKey, Field.Store.YES);
            doc.add(pathField);
            StringBuffer stringBuffer = new StringBuffer();
            stringBuffer.append(contentDTO.getTitle().toLowerCase()).append(" ");
            stringBuffer.append(contentDTO.getDescription().toLowerCase()).append(" ");
            stringBuffer.append(contentDTO.getDescription().toLowerCase()).append(" ");
            stringBuffer.append(contentDTO.getTopic().replace("/", " ").replace("_", " ")).append(" ");
            stringBuffer.append(contentDTO.getUrl().replaceAll("\\W+", " ")).append(" ");
            doc.add(new TextField(Values.CONTENT, stringBuffer.toString(), Field.Store.NO));
            doc.add(new TextField(Values.TITLE, contentDTO.getTitle().toLowerCase(), Field.Store.NO));
            doc.add(new TextField(Values.DESCRIPTION, contentDTO.getDescription().toLowerCase(), Field.Store.NO));
            doc.add(new TextField("topics", contentDTO.getTopic().replace("/", " ").replace("_", " "), Field.Store.NO));
            doc.add(new TextField(Values.URL, contentDTO.getUrl().replaceAll("\\W+", " "), Field.Store.NO));
            writer.updateDocument(new Term(Values.PATH, primaryKey), doc);
        }
    } finally {
        writer.close();
    }
}
Example 10: update
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
public void update(IndexWriter indexWriter) throws CorruptIndexException, IOException {
    indexWriter.updateDocument(GLOBAL_PROPERTIES_TERM, doc);
}
Example 11: updateEvent
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
public void updateEvent(Event event) throws IOException {
    Directory index = FSDirectory.open(Paths.get(indexDirectoryPath));
    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), fieldAnalyzerLookup);
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
    indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter indexWriter = new IndexWriter(index, indexWriterConfig);
    Document d = eventToDocument(event);
    indexWriter.updateDocument(new Term("id", event.getId()), d);
    indexWriter.commit();
    indexWriter.close();
    index.close();
}
Example 12: addOrUpdateNote
import org.apache.lucene.index.IndexWriter; // import the package/class this method depends on
public void addOrUpdateNote(Note note, String noteHtmlContents) throws IOException {
    Directory index = FSDirectory.open(Paths.get(indexDirectoryPath));
    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), fieldAnalyzerLookup);
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
    indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter indexWriter = new IndexWriter(index, indexWriterConfig);
    Document d = noteToDocument(note, noteHtmlContents);
    if (!DirectoryReader.indexExists(index)) {
        indexWriter.addDocument(d);
    } else {
        IndexReader indexReader = DirectoryReader.open(index);
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        TopDocs existingDocuments = indexSearcher.search(new TermQuery(new Term("id", note.getId())), 1);
        if (existingDocuments.totalHits == 0) {
            indexWriter.addDocument(d);
        } else {
            indexWriter.updateDocument(new Term("id", note.getId()), d);
        }
    }
    indexWriter.commit();
    indexWriter.close();
    index.close();
}