

Java IndexWriter Class Code Examples

This article collects typical usage examples of the Java class org.apache.lucene.index.IndexWriter. If you are wondering what the IndexWriter class is for, how to use it, or what real-world IndexWriter code looks like, the curated class examples below may help.


The IndexWriter class belongs to the org.apache.lucene.index package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
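Before turning to the individual examples, the following minimal sketch shows the typical IndexWriter lifecycle: open a Directory, build an IndexWriterConfig, create the writer, add a document, commit, and close. It is an illustrative sketch only; the StandardAnalyzer choice, the "contents" field name, and the /tmp/example-index path are placeholders and are not taken from any of the projects listed below.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class IndexWriterBasics {
    public static void main(String[] args) throws IOException {
        // Open (or create) an index in a local directory; the path is a placeholder.
        Directory directory = FSDirectory.open(Paths.get("/tmp/example-index"));

        // Configure the writer: analyzer plus open mode (create a new index or append to an existing one).
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer())
                .setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);

        // try-with-resources closes the writer; in Lucene 5.x and later, close() also commits pending changes by default.
        try (IndexWriter writer = new IndexWriter(directory, config)) {
            Document doc = new Document();
            doc.add(new TextField("contents", "hello lucene", Field.Store.YES));
            writer.addDocument(doc);
            writer.commit(); // make the new document visible to newly opened readers
        }
    }
}

Most of the examples below follow this same open-add-commit-close pattern, differing mainly in the Directory implementation (RAMDirectory, FSDirectory, a custom store) and in the IndexWriterConfig options they set.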

Example 1: indexDoc

import org.apache.lucene.index.IndexWriter; // import the required package/class
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
	try (InputStream stream = Files.newInputStream(file)) {
		Document doc = new Document();
		Field pathField = new StringField("path", file.toString(), Field.Store.YES);
		doc.add(pathField);
		doc.add(new LongPoint("modified", lastModified));
		doc.add(new TextField("contents", new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))));

		if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
			System.out.println("adding " + file);
			writer.addDocument(doc);
		} else {
			System.out.println("updating " + file);
			writer.updateDocument(new Term("path", file.toString()), doc);
		}
	}
}
 
Developer: PacktPublishing, Project: Java-Data-Science-Cookbook, Lines: 18, Source: IndexFiles.java

Example 2: testRamDirectory

import org.apache.lucene.index.IndexWriter; // import the required package/class
public void testRamDirectory() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(IndexWriterConfig
            .OpenMode.CREATE);
    RAMDirectory ramDirectory = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(ramDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    long end = System.currentTimeMillis();
    log.error("RamDirectory consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(ramDirectory));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RamDirectory search consumes {}ms!", (end - start));
}
 
Developer: shijiebei2009, Project: RedisDirectory, Lines: 26, Source: TestLucene.java

Example 3: testMMapDirectory

import org.apache.lucene.index.IndexWriter; // import the required package/class
public void testMMapDirectory() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(IndexWriterConfig
            .OpenMode.CREATE);
    FSDirectory open = FSDirectory.open(Paths.get("E:/testlucene"));
    IndexWriter indexWriter = new IndexWriter(open, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    long end = System.currentTimeMillis();
    log.error("MMapDirectory consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(open));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("MMapDirectory search consumes {}ms!", (end - start));
}
 
Developer: shijiebei2009, Project: RedisDirectory, Lines: 26, Source: TestLucene.java

Example 4: testSimpleNumericOps

import org.apache.lucene.index.IndexWriter; // import the required package/class
public void testSimpleNumericOps() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    document.add(new LegacyIntField("test", 2, LegacyIntField.TYPE_STORED));
    indexWriter.addDocument(document);

    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
    Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
    IndexableField f = doc.getField("test");
    assertThat(f.stringValue(), equalTo("2"));

    BytesRefBuilder bytes = new BytesRefBuilder();
    LegacyNumericUtils.intToPrefixCoded(2, 0, bytes);
    topDocs = searcher.search(new TermQuery(new Term("test", bytes.get())), 1);
    doc = searcher.doc(topDocs.scoreDocs[0].doc);
    f = doc.getField("test");
    assertThat(f.stringValue(), equalTo("2"));

    indexWriter.close();
}
 
Developer: justor, Project: elasticsearch_my, Lines: 26, Source: SimpleLuceneTests.java

Example 5: update

import org.apache.lucene.index.IndexWriter; // import the required package/class
public void update() throws CorruptIndexException, IOException, ParserConfigurationException, SAXException {
	try (IndexWriter indexWriter = openIndexWriter(indexDir)) {
		SAXParser parser = createParser();
		PubMedIndexDOMBuilderHandler handler = new PubMedIndexDOMBuilderHandler(XMLUtils.docBuilder, indexWriter, meshPaths, openLicenses);
		PubmedIndexProperties properties = new PubmedIndexProperties(indexWriter);
		fileFilter.properties = properties;
		SourceStream source = new CollectionSourceStream("UTF-8", sources);
		for (InputStream is : Iterators.loop(source.getInputStreams())) {
			String streamName = source.getStreamName(is);
			String filename = getFilename(streamName);
			PubMedIndexUtils.log("parsing and indexing: %s", filename);
			handler.resetCounts();
			handler.setSource(filename);
			parser.parse(is, handler);
			properties.addIndexedFile(filename);
			properties.update(indexWriter);
			indexWriter.commit();
			PubMedIndexUtils.log("citations updated: %d", handler.getUpdatedCitationsCount());
			PubMedIndexUtils.log("citations deleted: %d", handler.getDeletedCitationsCount());
		}
	}
}
 
Developer: Bibliome, Project: bibliome-java-utils, Lines: 23, Source: PubMedIndexUpdater.java

Example 6: acquireFSLockForPaths

import org.apache.lucene.index.IndexWriter; // import the required package/class
/**
 * Acquires, then releases, all {@code write.lock} files in the given
 * shard paths. The "write.lock" file is assumed to be under the shard
 * path's "index" directory as used by Elasticsearch.
 *
 * @throws LockObtainFailedException if any of the locks could not be acquired
 */
public static void acquireFSLockForPaths(Settings indexSettings, Path... shardPaths) throws IOException {
    Lock[] locks = new Lock[shardPaths.length];
    Directory[] dirs = new Directory[shardPaths.length];
    try {
        for (int i = 0; i < shardPaths.length; i++) {
            // resolve the directory the shard actually lives in
            Path p = shardPaths[i].resolve("index");
            // open a directory (will be immediately closed) on the shard's location
            dirs[i] = new SimpleFSDirectory(p, FsDirectoryService.buildLockFactory(indexSettings));
            // create a lock for the "write.lock" file
            try {
                locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME);
            } catch (IOException ex) {
                throw new LockObtainFailedException("unable to acquire " +
                        IndexWriter.WRITE_LOCK_NAME + " for " + p);
            }
        }
    } finally {
        IOUtils.closeWhileHandlingException(locks);
        IOUtils.closeWhileHandlingException(dirs);
    }
}
 
Developer: baidu, Project: Elasticsearch, Lines: 30, Source: NodeEnvironment.java

Example 7: snapshotStoreMetadata

import org.apache.lucene.index.IndexWriter; // import the required package/class
/**
 * gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call in all lifecycle of the index shard,
 * without having to worry about the current state of the engine and concurrent flushes.
 *
 * @throws org.apache.lucene.index.IndexNotFoundException     if no index is found in the current directory
 * @throws CorruptIndexException      if the lucene index is corrupted. This can be caused by a checksum mismatch or an
 *                                    unexpected exception when opening the index or reading the segments file.
 * @throws IndexFormatTooOldException if the lucene index is too old to be opened.
 * @throws IndexFormatTooNewException if the lucene index is too new to be opened.
 * @throws FileNotFoundException      if one or more files referenced by a commit are not present.
 * @throws NoSuchFileException        if one or more files referenced by a commit are not present.
 */
public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
    IndexCommit indexCommit = null;
    store.incRef();
    try {
        synchronized (mutex) {
            // if the engine is not running, we can access the store directly, but we need to make sure no one starts
            // the engine on us. If the engine is running, we can get a snapshot via the deletion policy which is initialized.
            // That can be done out of mutex, since the engine can be closed half way.
            Engine engine = getEngineOrNull();
            if (engine == null) {
                try (Lock ignored = store.directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
                    return store.getMetadata(null);
                }
            }
        }
        indexCommit = deletionPolicy.snapshot();
        return store.getMetadata(indexCommit);
    } finally {
        store.decRef();
        if (indexCommit != null) {
            deletionPolicy.release(indexCommit);
        }
    }
}
 
Developer: justor, Project: elasticsearch_my, Lines: 37, Source: IndexShard.java

Example 8: indexDocs

import org.apache.lucene.index.IndexWriter; // import the required package/class
static void indexDocs(final IndexWriter writer, Path path) throws IOException {
    if (Files.isDirectory(path)) {
        Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                try {
                    indexDoc(writer, file, attrs.lastModifiedTime().toMillis());
                } catch (IOException ignore) {
                    // don't index files that can't be read.
                }
                return FileVisitResult.CONTINUE;
            }
        });
    } else {
        indexDoc(writer, path, Files.getLastModifiedTime(path).toMillis());
    }
}
 
Developer: felayman, Project: elasticsearch-full, Lines: 18, Source: IndexFiles.java

Example 9: cleanLuceneIndex

import org.apache.lucene.index.IndexWriter; // import the required package/class
/**
 * This method removes all lucene files from the given directory. It will first try to delete all commit points / segments
 * files to ensure broken commits or corrupted indices will not be opened in the future. If any of the segment files can't be deleted
 * this operation fails.
 */
public static void cleanLuceneIndex(Directory directory) throws IOException {
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (final String file : directory.listAll()) {
            if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                directory.deleteFile(file); // remove all segment_N files
            }
        }
    }
    try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
            .setMergePolicy(NoMergePolicy.INSTANCE) // no merges
            .setCommitOnClose(false) // no commits
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append...
    {
        // do nothing and close; this will kick off IndexFileDeleter, which will remove all pending files
    }
}
 
Developer: justor, Project: elasticsearch_my, Lines: 22, Source: Lucene.java

Example 10: randomOfType

import org.apache.lucene.index.IndexWriter; // import the required package/class
private Object randomOfType(SortField.Type type) {
    switch (type) {
    case CUSTOM:
        throw new UnsupportedOperationException();
    case DOC:
        return between(0, IndexWriter.MAX_DOCS);
    case DOUBLE:
        return randomDouble();
    case FLOAT:
        return randomFloat();
    case INT:
        return randomInt();
    case LONG:
        return randomLong();
    case REWRITEABLE:
        throw new UnsupportedOperationException();
    case SCORE:
        return randomFloat();
    case STRING:
        return new BytesRef(randomAsciiOfLength(5));
    case STRING_VAL:
        return new BytesRef(randomAsciiOfLength(5));
    default:
        throw new UnsupportedOperationException("Unkown SortField.Type: " + type);
    }
}
 
Developer: justor, Project: elasticsearch_my, Lines: 27, Source: InternalTopHitsTests.java

Example 11: testBuildWordScorer

import org.apache.lucene.index.IndexWriter; // import the required package/class
/**
 * Test the WordScorer emitted by the smoothing model
 */
public void testBuildWordScorer() throws IOException {
    SmoothingModel testModel = createTestModel();
    Map<String, Analyzer> mapping = new HashMap<>();
    mapping.put("field", new WhitespaceAnalyzer());
    PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping);
    IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(wrapper));
    Document doc = new Document();
    doc.add(new Field("field", "someText", TextField.TYPE_NOT_STORED));
    writer.addDocument(doc);
    DirectoryReader ir = DirectoryReader.open(writer);

    WordScorer wordScorer = testModel.buildWordScorerFactory().newScorer(ir, MultiFields.getTerms(ir, "field"), "field", 0.9d,
            BytesRefs.toBytesRef(" "));
    assertWordScorer(wordScorer, testModel);
}
 
Developer: justor, Project: elasticsearch_my, Lines: 19, Source: SmoothingModelTestCase.java

Example 12: testGetParentIdNoParentField

import org.apache.lucene.index.IndexWriter; // import the required package/class
public void testGetParentIdNoParentField() throws Exception {
    ParentFieldMapper fieldMapper = createParentFieldMapper();
    Directory directory = newDirectory();
    IndexWriter indexWriter = new IndexWriter(directory, newIndexWriterConfig());
    Document document = new Document();
    document.add(new SortedDocValuesField("different_field", new BytesRef("1")));
    indexWriter.addDocument(document);
    indexWriter.close();

    IndexReader indexReader = DirectoryReader.open(directory);
    String id = ParentFieldSubFetchPhase.getParentId(fieldMapper, indexReader.leaves().get(0).reader(), 0);
    assertNull(id);

    indexReader.close();
    directory.close();
}
 
Developer: justor, Project: elasticsearch_my, Lines: 17, Source: ParentFieldSubFetchPhaseTests.java

Example 13: testSingleValued

import org.apache.lucene.index.IndexWriter; // import the required package/class
public void testSingleValued() throws IOException {
    Directory dir = newDirectory();
    // we need the default codec to check for singletons
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null).setCodec(TestUtil.getDefaultCodec()));
    Document doc = new Document();
    for (IndexableField f : NumberFieldMapper.NumberType.HALF_FLOAT.createFields("half_float", 3f, false, true, false)) {
        doc.add(f);
    }
    w.addDocument(doc);
    final DirectoryReader dirReader = DirectoryReader.open(w);
    LeafReader reader = getOnlyLeafReader(dirReader);
    SortedNumericDoubleValues values = new SortedNumericDVIndexFieldData.SortedNumericHalfFloatFieldData(
            reader, "half_float").getDoubleValues();
    assertNotNull(FieldData.unwrapSingleton(values));
    values.setDocument(0);
    assertEquals(1, values.count());
    assertEquals(3f, values.valueAt(0), 0f);
    IOUtils.close(dirReader, w, dir);
}
 
Developer: justor, Project: elasticsearch_my, Lines: 20, Source: HalfFloatFielddataTests.java

Example 14: createInternalEngine

import org.apache.lucene.index.IndexWriter; // import the required package/class
public static InternalEngine createInternalEngine(@Nullable final IndexWriterFactory indexWriterFactory,
                                                  @Nullable final Supplier<SequenceNumbersService> sequenceNumbersServiceSupplier,
                                                  final EngineConfig config) {
    return new InternalEngine(config) {
            @Override
            IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException {
                return (indexWriterFactory != null) ?
                    indexWriterFactory.createWriter(directory, iwc) :
                    super.createWriter(directory, iwc);
            }

            @Override
            public SequenceNumbersService seqNoService() {
                return (sequenceNumbersServiceSupplier != null) ? sequenceNumbersServiceSupplier.get() : super.seqNoService();
            }
        };
}
 
Developer: justor, Project: elasticsearch_my, Lines: 18, Source: InternalEngineTests.java

Example 15: main

import org.apache.lucene.index.IndexWriter; // import the required package/class
/**
 * Main entry point. 
 * 
 * @param args the command line arguments.
 * @throws IOException in case of I/O failure.
 * @throws ParseException in case of Query parse exception.
 */
public static void main(String[] args) throws IOException, ParseException {
	// 1. Creates a directory reference. This is where index datafiles will be created.
	Directory directory = FSDirectory.open(new File("/tmp").toPath());
	
	// 2. Creates an IndexWriter
	try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig())) {
		
		// 3. Add some data
		indexSomeData(writer);
		
		// 4. Search
		search(directory);			
		
		writer.deleteAll();
	} 
}
 
Developer: agazzarini, Project: as-full-text-search-server, Lines: 24, Source: LuceneBasicFlowExample.java


Note: The org.apache.lucene.index.IndexWriter class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. When redistributing or using the code, please follow the license of the corresponding project; do not reproduce this compilation without permission.