This article collects typical usage examples of the Java method org.apache.lucene.index.IndexReader.leaves. If you are wondering what IndexReader.leaves does, how to call it, or what real-world usage looks like, the curated method examples below may help. You can also explore further usage examples of its containing class, org.apache.lucene.index.IndexReader.
Below are 9 code examples of IndexReader.leaves, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
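Before the examples, here is a minimal self-contained sketch of the basic pattern they all build on: leaves() returns one LeafReaderContext per segment, and per-segment work goes through context.reader(). The index path "index" is an assumption for illustration.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.FSDirectory;

public class LeavesDemo {
    public static void main(String[] args) throws IOException {
        // "index" is a hypothetical path to an existing Lucene index.
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("index")))) {
            for (LeafReaderContext context : reader.leaves()) {
                // docBase is this leaf's offset into the composite doc id space.
                System.out.println("segment ord=" + context.ord
                        + " docBase=" + context.docBase
                        + " maxDoc=" + context.reader().maxDoc());
            }
        }
    }
}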
Example 1: rewrite
import org.apache.lucene.index.IndexReader; // import the package/class the method depends on
@Override
public Query rewrite(IndexReader reader) throws IOException {
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
boolean hasPayloads = false;
for (LeafReaderContext context : reader.leaves()) {
final Terms terms = context.reader().terms(term.field());
if (terms != null) {
if (terms.hasPayloads()) {
hasPayloads = true;
break;
}
}
}
// if the term does not exist we could return a MatchNoDocsQuery, but this would break the unified highlighter,
// which rewrites queries with an empty reader.
if (hasPayloads == false) {
return new TermQuery(term);
}
return this;
}
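A hedged usage note: callers normally do not invoke rewrite(reader) directly; the searcher drives the rewrite loop until a query returns itself. A caller-side sketch, where payloadQuery and searcher are assumed to exist:

// Hypothetical call site: IndexSearcher.rewrite() repeatedly calls
// Query.rewrite(reader) until the query rewrites to itself.
Query executable = searcher.rewrite(payloadQuery);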
Example 2: getSeqNosSet
import org.apache.lucene.index.IndexReader; // import the package/class the method depends on
private static FixedBitSet getSeqNosSet(final IndexReader reader, final long highestSeqNo) throws IOException {
// _seq_no are stored as doc values for the time being, so this is how we get them
// (as opposed to using an IndexSearcher or IndexReader)
final FixedBitSet bitSet = new FixedBitSet((int) highestSeqNo + 1);
final List<LeafReaderContext> leaves = reader.leaves();
if (leaves.isEmpty()) {
return bitSet;
}
for (int i = 0; i < leaves.size(); i++) {
final LeafReader leaf = leaves.get(i).reader();
final NumericDocValues values = leaf.getNumericDocValues(SeqNoFieldMapper.NAME);
if (values == null) {
continue;
}
final Bits bits = leaf.getLiveDocs();
for (int docID = 0; docID < leaf.maxDoc(); docID++) {
if (bits == null || bits.get(docID)) {
final long seqNo = values.get(docID);
assertFalse("should not have more than one document with the same seq_no[" + seqNo + "]", bitSet.get((int) seqNo));
bitSet.set((int) seqNo);
}
}
}
return bitSet;
}
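A minimal sketch of how a test might consume this helper; maxSeqNo and the assertion message are assumptions:

// Hypothetical usage: every sequence number in [0, maxSeqNo] should be marked
// exactly once, so the resulting bit set should be dense.
FixedBitSet seqNos = getSeqNosSet(reader, maxSeqNo);
assertEquals("expected a dense set of seq_nos", maxSeqNo + 1, seqNos.cardinality());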
Example 3: doPostCollection
import org.apache.lucene.index.IndexReader; // import the package/class the method depends on
@Override
protected void doPostCollection() throws IOException {
IndexReader indexReader = context().searcher().getIndexReader();
for (LeafReaderContext ctx : indexReader.leaves()) {
Scorer childDocsScorer = childFilter.scorer(ctx);
if (childDocsScorer == null) {
continue;
}
DocIdSetIterator childDocsIter = childDocsScorer.iterator();
final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx);
final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);
// Set the scorer, since we now replay only the child docIds
sub.setScorer(new ConstantScoreScorer(null, 1f, childDocsIter));
final Bits liveDocs = ctx.reader().getLiveDocs();
for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter.nextDoc()) {
if (liveDocs != null && liveDocs.get(docId) == false) {
continue;
}
long globalOrdinal = globalOrdinals.getOrd(docId);
if (globalOrdinal != -1) {
long bucketOrd = parentOrdToBuckets.get(globalOrdinal);
if (bucketOrd != -1) {
collectBucket(sub, docId, bucketOrd);
if (multipleBucketsPerParentOrd) {
long[] otherBucketOrds = parentOrdToOtherBuckets.get(globalOrdinal);
if (otherBucketOrds != null) {
for (long otherBucketOrd : otherBucketOrds) {
collectBucket(sub, docId, otherBucketOrd);
}
}
}
}
}
}
}
}
Example 4: completionStats
import org.apache.lucene.index.IndexReader; // import the package/class the method depends on
/**
* Returns total in-heap bytes used by all suggesters. This method has CPU cost <code>O(numIndexedFields)</code>.
*
* @param fieldNamePatterns if non-null, any completion field name matching any of these patterns will break out its in-heap bytes
* separately in the returned {@link CompletionStats}
*/
public static CompletionStats completionStats(IndexReader indexReader, String ... fieldNamePatterns) {
long sizeInBytes = 0;
ObjectLongHashMap<String> completionFields = null;
if (fieldNamePatterns != null && fieldNamePatterns.length > 0) {
completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length);
}
for (LeafReaderContext atomicReaderContext : indexReader.leaves()) {
LeafReader atomicReader = atomicReaderContext.reader();
try {
Fields fields = atomicReader.fields();
for (String fieldName : fields) {
Terms terms = fields.terms(fieldName);
if (terms instanceof CompletionTerms) {
// TODO: currently we load up the suggester for reporting its size
long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed();
if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, fieldName)) {
completionFields.addTo(fieldName, fstSize);
}
sizeInBytes += fstSize;
}
}
} catch (IOException ioe) {
throw new ElasticsearchException(ioe);
}
}
return new CompletionStats(sizeInBytes, completionFields == null ? null : new FieldMemoryStats(completionFields));
}
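A hedged call-site sketch; the "suggest*" pattern and the getter name are assumptions based on the surrounding code:

// Hypothetical usage: total suggester heap across the index, breaking out
// completion fields whose names match "suggest*".
CompletionStats stats = completionStats(indexReader, "suggest*");
long totalBytes = stats.getSizeInBytes();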
Example 5: GlobalFieldData
import org.apache.lucene.index.IndexReader; // import the package/class the method depends on
GlobalFieldData(IndexReader reader, AtomicParentChildFieldData[] fielddata, long ramBytesUsed, Map<String, OrdinalMapAndAtomicFieldData> ordinalMapPerType) {
this.coreCacheKey = reader.getCoreCacheKey();
this.leaves = reader.leaves();
this.ramBytesUsed = ramBytesUsed;
this.fielddata = fielddata;
this.ordinalMapPerType = ordinalMapPerType;
}
Example 6: getPrefixTerms
import org.apache.lucene.index.IndexReader; // import the package/class the method depends on
private void getPrefixTerms(ObjectHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
// SlowCompositeReaderWrapper could be used... but this would merge all terms from each segment into one terms
// instance, which is very expensive. Therefore I think it is better to iterate over each leaf individually.
List<LeafReaderContext> leaves = reader.leaves();
for (LeafReaderContext leaf : leaves) {
Terms _terms = leaf.reader().terms(field);
if (_terms == null) {
continue;
}
TermsEnum termsEnum = _terms.iterator();
TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(prefix.bytes());
if (TermsEnum.SeekStatus.END == seekStatus) {
continue;
}
for (BytesRef term = termsEnum.term(); term != null; term = termsEnum.next()) {
if (!StringHelper.startsWith(term, prefix.bytes())) {
break;
}
terms.add(new Term(field, BytesRef.deepCopyOf(term)));
if (terms.size() >= maxExpansions) {
return;
}
}
}
}
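A minimal usage sketch; the field "body", the prefix "lu", and the hppc ObjectHashSet come from assumptions, not the original source:

// Hypothetical usage: collect up to maxExpansions terms starting with "lu".
ObjectHashSet<Term> expanded = new ObjectHashSet<>();
getPrefixTerms(expanded, new Term("body", "lu"), reader);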
Example 7: matchCount
import org.apache.lucene.index.IndexReader; // import the package/class the method depends on
private static int matchCount(BitSetProducer producer, IndexReader reader) throws IOException {
int count = 0;
for (LeafReaderContext ctx : reader.leaves()) {
final BitSet bitSet = producer.getBitSet(ctx);
if (bitSet != null) {
count += bitSet.cardinality();
}
}
return count;
}
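A hedged usage sketch; the "type:parent" term is an assumption:

// Hypothetical usage: count documents across all segments that match the
// cached parent filter.
BitSetProducer parents = new QueryBitSetProducer(new TermQuery(new Term("type", "parent")));
int parentCount = matchCount(parents, reader);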
Example 8: doPostCollection
import org.apache.lucene.index.IndexReader; // import the package/class the method depends on
@Override
protected void doPostCollection() throws IOException {
IndexReader indexReader = context().searchContext().searcher().getIndexReader();
for (LeafReaderContext ctx : indexReader.leaves()) {
Scorer childDocsScorer = childFilter.scorer(ctx);
if (childDocsScorer == null) {
continue;
}
DocIdSetIterator childDocsIter = childDocsScorer.iterator();
final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx);
final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);
// Set the scorer, since we now replay only the child docIds
sub.setScorer(ConstantScorer.create(childDocsIter, null, 1f));
final Bits liveDocs = ctx.reader().getLiveDocs();
for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter.nextDoc()) {
if (liveDocs != null && liveDocs.get(docId) == false) {
continue;
}
long globalOrdinal = globalOrdinals.getOrd(docId);
if (globalOrdinal != -1) {
long bucketOrd = parentOrdToBuckets.get(globalOrdinal);
if (bucketOrd != -1) {
collectBucket(sub, docId, bucketOrd);
if (multipleBucketsPerParentOrd) {
long[] otherBucketOrds = parentOrdToOtherBuckets.get(globalOrdinal);
if (otherBucketOrds != null) {
for (long otherBucketOrd : otherBucketOrds) {
collectBucket(sub, docId, otherBucketOrd);
}
}
}
}
}
}
}
}
Example 9: testNestedChildrenFilter
import org.apache.lucene.index.IndexReader; // import the package/class the method depends on
public void testNestedChildrenFilter() throws Exception {
int numParentDocs = scaledRandomIntBetween(0, 32);
int maxChildDocsPerParent = scaledRandomIntBetween(8, 16);
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
for (int i = 0; i < numParentDocs; i++) {
int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent);
List<Document> docs = new ArrayList<>(numChildDocs + 1);
for (int j = 0; j < numChildDocs; j++) {
Document childDoc = new Document();
childDoc.add(new StringField("type", "child", Field.Store.NO));
docs.add(childDoc);
}
Document parentDoc = new Document();
parentDoc.add(new StringField("type", "parent", Field.Store.NO));
parentDoc.add(new LegacyIntField("num_child_docs", numChildDocs, Field.Store.YES));
docs.add(parentDoc);
writer.addDocuments(docs);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent")));
Query childFilter = new TermQuery(new Term("type", "child"));
int checkedParents = 0;
final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")), false);
for (LeafReaderContext leaf : reader.leaves()) {
DocIdSetIterator parents = parentsWeight.scorer(leaf).iterator();
for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS ; parentDoc = parents.nextDoc()) {
int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue().intValue();
hitContext.reset(null, leaf, parentDoc, searcher);
NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter, hitContext);
TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector);
assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs));
checkedParents++;
}
}
assertThat(checkedParents, equalTo(numParentDocs));
reader.close();
dir.close();
}