

Java AtomicReader.document Method Code Examples

This article collects typical code examples of the Java method org.apache.lucene.index.AtomicReader.document. If you are wondering what AtomicReader.document does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.lucene.index.AtomicReader.


Below are 3 code examples of the AtomicReader.document method, sorted by popularity by default.
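
Before the merge-oriented examples, here is a minimal usage sketch of the method itself: it opens an existing Lucene 4.x index, walks the atomic leaf readers, and loads every live document with AtomicReader.document. The index path "index-dir" and the stored field name "id" are placeholder assumptions, not part of the examples below.

import java.io.File;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Bits;

public class AtomicReaderDocumentDemo {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(new File("index-dir")); // placeholder index path
    DirectoryReader topReader = DirectoryReader.open(dir);
    try {
      for (AtomicReaderContext ctx : topReader.leaves()) {
        AtomicReader reader = ctx.reader();
        final Bits liveDocs = reader.getLiveDocs();
        for (int i = 0; i < reader.maxDoc(); i++) {
          if (liveDocs != null && !liveDocs.get(i)) {
            continue; // skip deleted documents, as the merge examples below do
          }
          Document doc = reader.document(i); // load all stored fields of doc i
          System.out.println(doc.get("id")); // "id" is a placeholder stored field
        }
      }
    } finally {
      topReader.close();
    }
  }
}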

Example 1: merge

import org.apache.lucene.index.AtomicReader; // import the package/class this method depends on
/** Merges in the stored fields from the readers in 
 *  <code>mergeState</code>. The default implementation skips
 *  over deleted documents, and uses {@link #startDocument()},
 *  {@link #writeField(FieldInfo, IndexableField)}, and {@link #finish(FieldInfos, int)},
 *  returning the number of documents that were written.
 *  Implementations can override this method for more sophisticated
 *  merging (bulk-byte copying, etc). */
public int merge(MergeState mergeState) throws IOException {
  int docCount = 0;
  for (AtomicReader reader : mergeState.readers) {
    final int maxDoc = reader.maxDoc();
    final Bits liveDocs = reader.getLiveDocs();
    for (int i = 0; i < maxDoc; i++) {
      if (liveDocs != null && !liveDocs.get(i)) {
        // skip deleted docs
        continue;
      }
      // TODO: this could be more efficient using
      // FieldVisitor instead of loading/writing entire
      // doc; ie we just have to renumber the field number
      // on the fly?
      // NOTE: it's very important to first assign to doc then pass it to
      // fieldsWriter.addDocument; see LUCENE-1282
      Document doc = reader.document(i);
      addDocument(doc, mergeState.fieldInfos);
      docCount++;
      mergeState.checkAbort.work(300);
    }
  }
  finish(mergeState.fieldInfos, docCount);
  return docCount;
}
 
Developer: lamsfoundation, Project: lams, Lines: 33, Source file: StoredFieldsWriter.java
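
The TODO in the loop above points at the visitor-based overload of the same method, which loads only selected stored fields instead of materializing the whole document. A minimal sketch of that variant, assuming hypothetical field names "title" and "body":

import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.index.AtomicReader;

// Loads only the named stored fields; all other stored fields are skipped.
static Document loadSelectedFields(AtomicReader reader, int docID) throws IOException {
  // "title" and "body" are placeholder field names
  DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor("title", "body");
  reader.document(docID, visitor); // visitor-based overload of AtomicReader.document
  return visitor.getDocument();
}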

Example 2: copyFieldsNoDeletions

import org.apache.lucene.index.AtomicReader; // import the package/class this method depends on
private int copyFieldsNoDeletions(MergeState mergeState, final AtomicReader reader,
                                  final Lucene40StoredFieldsReader matchingFieldsReader, int rawDocLengths[])
  throws IOException {
  final int maxDoc = reader.maxDoc();
  int docCount = 0;
  if (matchingFieldsReader != null) {
    // We can bulk-copy because the fieldInfos are "congruent"
    while (docCount < maxDoc) {
      int len = Math.min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
      IndexInput stream = matchingFieldsReader.rawDocs(rawDocLengths, docCount, len);
      addRawDocuments(stream, rawDocLengths, len);
      docCount += len;
      mergeState.checkAbort.work(300 * len);
    }
  } else {
    for (; docCount < maxDoc; docCount++) {
      // NOTE: it's very important to first assign to doc then pass it to
      // fieldsWriter.addDocument; see LUCENE-1282
      Document doc = reader.document(docCount);
      addDocument(doc, mergeState.fieldInfos);
      mergeState.checkAbort.work(300);
    }
  }
  return docCount;
}
 
Developer: lamsfoundation, Project: lams, Lines: 26, Source file: Lucene40StoredFieldsWriter.java
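
The bulk branch above copies up to MAX_RAW_MERGE_DOCS documents at a time as raw bytes through rawDocs/addRawDocuments, sidestepping a field-by-field decode and re-encode; only the fallback branch goes through AtomicReader.document one document at a time. Note that both branches credit checkAbort.work with 300 units per document, so a long-running merge stays abortable at the same granularity either way.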

Example 3: merge

import org.apache.lucene.index.AtomicReader; // import the package/class this method depends on
@Override
public int merge(MergeState mergeState) throws IOException {
  int docCount = 0;
  int idx = 0;

  for (AtomicReader reader : mergeState.readers) {
    final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
    CompressingStoredFieldsReader matchingFieldsReader = null;
    if (matchingSegmentReader != null) {
      final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
      // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
      if (fieldsReader != null && fieldsReader instanceof CompressingStoredFieldsReader) {
        matchingFieldsReader = (CompressingStoredFieldsReader) fieldsReader;
      }
    }

    final int maxDoc = reader.maxDoc();
    final Bits liveDocs = reader.getLiveDocs();

    if (matchingFieldsReader == null
        || matchingFieldsReader.getVersion() != VERSION_CURRENT // means reader version is not the same as the writer version
        || matchingFieldsReader.getCompressionMode() != compressionMode
        || matchingFieldsReader.getChunkSize() != chunkSize) { // the way data is decompressed depends on the chunk size
      // naive merge...
      for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
        Document doc = reader.document(i);
        addDocument(doc, mergeState.fieldInfos);
        ++docCount;
        mergeState.checkAbort.work(300);
      }
    } else {
      int docID = nextLiveDoc(0, liveDocs, maxDoc);
      if (docID < maxDoc) {
        // not all docs were deleted
        final ChunkIterator it = matchingFieldsReader.chunkIterator(docID);
        int[] startOffsets = new int[0];
        do {
          // go to the next chunk that contains docID
          it.next(docID);
          // transform lengths into offsets
          if (startOffsets.length < it.chunkDocs) {
            startOffsets = new int[ArrayUtil.oversize(it.chunkDocs, 4)];
          }
          for (int i = 1; i < it.chunkDocs; ++i) {
            startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
          }

          // decompress
          it.decompress();
          if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
            throw new CorruptIndexException("Corrupted: expected chunk size=" + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1]) + ", got " + it.bytes.length);
          }
          // copy non-deleted docs
          for (; docID < it.docBase + it.chunkDocs; docID = nextLiveDoc(docID + 1, liveDocs, maxDoc)) {
            final int diff = docID - it.docBase;
            startDocument();
            bufferedDocs.writeBytes(it.bytes.bytes, it.bytes.offset + startOffsets[diff], it.lengths[diff]);
            numStoredFieldsInDoc = it.numStoredFields[diff];
            finishDocument();
            ++docCount;
            mergeState.checkAbort.work(300);
          }
        } while (docID < maxDoc);

        it.checkIntegrity();
      }
    }
  }
  finish(mergeState.fieldInfos, docCount);
  return docCount;
}
 
Developer: lamsfoundation, Project: lams, Lines: 72, Source file: CompressingStoredFieldsWriter.java
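
Example 3 applies the same bulk-versus-naive split to compressed stored fields: when reader and writer agree on version, compression mode, and chunk size, each chunk is decompressed once, the per-document lengths are turned into offsets, and the byte slice of every live document is copied straight into the writer's buffer. Any mismatch forces the naive loop over AtomicReader.document, since the existing chunks could not be reused as-is.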


Note: The org.apache.lucene.index.AtomicReader.document method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code, and do not republish this article without permission.