

Java DirectoryReader.listCommits Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.index.DirectoryReader.listCommits. If you are wondering what DirectoryReader.listCommits does and how to use it, the curated examples here should help. You can also explore further usage examples of the containing class, org.apache.lucene.index.DirectoryReader.


Two code examples of the DirectoryReader.listCommits method are shown below, ordered by popularity.
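Before the full examples, here is a minimal, self-contained sketch of the method itself (the index path is a placeholder; point it at any existing Lucene index). DirectoryReader.listCommits returns every commit point still present in the directory, sorted oldest to newest, and throws IndexNotFoundException when the directory contains no index. Note that with Lucene's default deletion policy only the most recent commit is retained, so the returned list usually has a single entry.

import java.nio.file.Paths;
import java.util.List;
import java.util.Map;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ListCommitsDemo {
    public static void main(String[] args) throws Exception {
        // placeholder path: any directory containing a Lucene index
        try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"))) {
            // all commit points in the directory, oldest first
            List<IndexCommit> commits = DirectoryReader.listCommits(dir);
            IndexCommit latest = commits.get(commits.size() - 1);
            // the per-commit key/value map that both examples below rely on
            Map<String, String> userData = latest.getUserData();
            System.out.println("commits found  : " + commits.size());
            System.out.println("latest userData: " + userData);
        }
    }
}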

Example 1: execute

import org.apache.lucene.index.DirectoryReader; // import the class providing the method
@Override
protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
    boolean batch = options.has(batchMode);

    Path translogPath = getTranslogPath(options);
    Path idxLocation = translogPath.getParent().resolve("index");

    if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) {
        throw new ElasticsearchException("translog directory [" + translogPath + "], must exist and be a directory");
    }

    if (Files.exists(idxLocation) == false || Files.isDirectory(idxLocation) == false) {
        throw new ElasticsearchException("unable to find a shard at [" + idxLocation + "], which must exist and be a directory");
    }

    // Hold the lock open for the duration of the tool running
    try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
            Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        Set<Path> translogFiles;
        try {
            terminal.println("Checking existing translog files");
            translogFiles = filesInDirectory(translogPath);
        } catch (IOException e) {
            terminal.println("encountered IOException while listing directory, aborting...");
            throw new ElasticsearchException("failed to find existing translog files", e);
        }

        // Warn about ES being stopped and files being deleted
        warnAboutDeletingFiles(terminal, translogFiles, batch);

        List<IndexCommit> commits;
        try {
            terminal.println("Reading translog UUID information from Lucene commit from shard at [" + idxLocation + "]");
            commits = DirectoryReader.listCommits(dir);
        } catch (IndexNotFoundException infe) {
            throw new ElasticsearchException("unable to find a valid shard at [" + idxLocation + "]", infe);
        }

        // Retrieve the generation and UUID from the existing data
        Map<String, String> commitData = commits.get(commits.size() - 1).getUserData();
        String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY);
        String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
        if (translogGeneration == null || translogUUID == null) {
            throw new ElasticsearchException("shard must have a valid translog generation and UUID but got: [{}] and: [{}]",
                    translogGeneration, translogUUID);
        }
        terminal.println("Translog Generation: " + translogGeneration);
        terminal.println("Translog UUID      : " + translogUUID);

        Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME);
        Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME);
        Path tempEmptyTranslog = translogPath.resolve("temp-" + Translog.TRANSLOG_FILE_PREFIX +
                        translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
        Path realEmptyTranslog = translogPath.resolve(Translog.TRANSLOG_FILE_PREFIX +
                        translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);

        // Write empty checkpoint and translog to empty files
        long gen = Long.parseLong(translogGeneration);
        int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID);
        writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen);

        terminal.println("Removing existing translog files");
        IOUtils.rm(translogFiles.toArray(new Path[]{}));

        terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]");
        Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE);
        terminal.println("Creating new empty translog at [" + realEmptyTranslog + "]");
        Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE);

        // Fsync the translog directory after rename
        IOUtils.fsync(translogPath, true);

    } catch (LockObtainFailedException lofe) {
        throw new ElasticsearchException("Failed to lock shard's directory at [" + idxLocation + "], is Elasticsearch still running?");
    }

    terminal.println("Done.");
}
 
Developer ID: justor, Project: elasticsearch_my, Lines of code: 79, Source: TruncateTranslogCommand.java
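Two details of this command's design are worth noting. The new checkpoint and translog are first written to temporary files and only then moved into place with StandardCopyOption.ATOMIC_MOVE, followed by an fsync of the translog directory, so a crash partway through can never leave a half-written file under the real checkpoint name. And the Lucene write lock is held for the entire run, which fails fast with LockObtainFailedException if an Elasticsearch node is still using the shard.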

Example 2: testConcurrentWritesAndCommits

import org.apache.lucene.index.DirectoryReader; // import the class providing the method
public void testConcurrentWritesAndCommits() throws Exception {
    try (Store store = createStore();
         InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(),
                                                                 new SnapshotDeletionPolicy(NoDeletionPolicy.INSTANCE),
                                                                 IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {

        final int numIndexingThreads = scaledRandomIntBetween(3, 6);
        final int numDocsPerThread = randomIntBetween(500, 1000);
        final CyclicBarrier barrier = new CyclicBarrier(numIndexingThreads + 1);
        final List<Thread> indexingThreads = new ArrayList<>();
        // create N indexing threads to index documents simultaneously
        for (int threadNum = 0; threadNum < numIndexingThreads; threadNum++) {
            final int threadIdx = threadNum;
            Thread indexingThread = new Thread(() -> {
                try {
                    barrier.await(); // wait for all threads to start at the same time
                    // index random number of docs
                    for (int i = 0; i < numDocsPerThread; i++) {
                        final String id = "thread" + threadIdx + "#" + i;
                        ParsedDocument doc = testParsedDocument(id, "test", null, testDocument(), B_1, null);
                        engine.index(indexForDoc(doc));
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            indexingThreads.add(indexingThread);
        }

        // start the indexing threads
        for (Thread thread : indexingThreads) {
            thread.start();
        }
        barrier.await(); // wait for indexing threads to all be ready to start

        // create random commit points while the indexing threads are running
        boolean doneIndexing;
        do {
            doneIndexing = indexingThreads.stream().filter(Thread::isAlive).count() == 0;
            engine.flush(); // each flush commits, creating a new commit point
        } while (doneIndexing == false);

        // now, verify all the commits have the correct docs according to the user commit data
        long prevLocalCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED;
        long prevMaxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
        for (IndexCommit commit : DirectoryReader.listCommits(store.directory())) {
            Map<String, String> userData = commit.getUserData();
            long localCheckpoint = userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) ?
                                       Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) :
                                       SequenceNumbersService.NO_OPS_PERFORMED;
            long maxSeqNo = userData.containsKey(SequenceNumbers.MAX_SEQ_NO) ?
                                Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)) :
                                SequenceNumbersService.UNASSIGNED_SEQ_NO;
            // local checkpoint and max seq no shouldn't go backwards
            assertThat(localCheckpoint, greaterThanOrEqualTo(prevLocalCheckpoint));
            assertThat(maxSeqNo, greaterThanOrEqualTo(prevMaxSeqNo));
            try (IndexReader reader = DirectoryReader.open(commit)) {
                FieldStats stats = SeqNoFieldMapper.SeqNoDefaults.FIELD_TYPE.stats(reader);
                final long highestSeqNo;
                if (stats != null) {
                    highestSeqNo = (long) stats.getMaxValue();
                } else {
                    highestSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
                }
                // make sure localCheckpoint <= highest seq no found <= maxSeqNo
                assertThat(highestSeqNo, greaterThanOrEqualTo(localCheckpoint));
                assertThat(highestSeqNo, lessThanOrEqualTo(maxSeqNo));
                // make sure all sequence numbers up to and including the local checkpoint are in the index
                FixedBitSet seqNosBitSet = getSeqNosSet(reader, highestSeqNo);
                for (int i = 0; i <= localCheckpoint; i++) {
                    assertTrue("local checkpoint [" + localCheckpoint + "], _seq_no [" + i + "] should be indexed",
                               seqNosBitSet.get(i));
                }
            }
            prevLocalCheckpoint = localCheckpoint;
            prevMaxSeqNo = maxSeqNo;
        }
    }
}
 
Developer ID: justor, Project: elasticsearch_my, Lines of code: 80, Source: InternalEngineTests.java
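Example 2 can iterate over the full commit history only because the engine is configured with new SnapshotDeletionPolicy(NoDeletionPolicy.INSTANCE), which stops Lucene from deleting older commit points. The following standalone sketch (plain Lucene, no Elasticsearch involved; the path and field names are illustrative) shows the same effect: with NoDeletionPolicy, every commit() leaves a commit point behind for DirectoryReader.listCommits to find.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ListAllCommits {
    public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/demo-index"))) {
            IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer())
                    .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); // keep every commit
            try (IndexWriter writer = new IndexWriter(dir, config)) {
                for (int i = 0; i < 3; i++) {
                    Document doc = new Document();
                    doc.add(new StringField("id", "doc-" + i, Field.Store.YES));
                    writer.addDocument(doc);
                    writer.commit(); // each commit() creates a new commit point
                }
            }
            // all three commit points survive, so listCommits returns them all
            for (IndexCommit commit : DirectoryReader.listCommits(dir)) {
                System.out.println("generation=" + commit.getGeneration()
                        + " segments=" + commit.getSegmentsFileName());
            }
        }
    }
}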


Note: the org.apache.lucene.index.DirectoryReader.listCommits method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.