This article collects typical usage examples of the Java method org.apache.lucene.index.LeafReaderContext.reader. If you are unsure what LeafReaderContext.reader does, how to call it, or what it looks like in practice, the curated examples below should help. You can also look at the enclosing class, org.apache.lucene.index.LeafReaderContext, for further usage examples.
The following shows 15 code examples of LeafReaderContext.reader, roughly ordered by popularity.
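Before the numbered examples, here is a minimal, self-contained sketch of the pattern they all share: get one LeafReaderContext per segment from a top-level reader's leaves() and call reader() on it to obtain the per-segment LeafReader. The index path and class name below are placeholders, and the sketch assumes an existing Lucene index at that location.

import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class LeafReaderContextReaderSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical index location; any existing Lucene index works here.
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
             DirectoryReader topReader = DirectoryReader.open(dir)) {
            // A composite reader exposes one LeafReaderContext per segment;
            // reader() returns the LeafReader backing that segment.
            for (LeafReaderContext ctx : topReader.leaves()) {
                LeafReader leaf = ctx.reader();
                System.out.println("segment ord=" + ctx.ord
                        + ", docBase=" + ctx.docBase
                        + ", maxDoc=" + leaf.maxDoc()
                        + ", liveDocs=" + leaf.numDocs());
            }
        }
    }
}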
Example 1: load
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
@Override
public AtomicNumericFieldData load(LeafReaderContext context) {
    final LeafReader reader = context.reader();
    final String field = fieldName;
    switch (numericType) {
        case HALF_FLOAT:
            return new SortedNumericHalfFloatFieldData(reader, field);
        case FLOAT:
            return new SortedNumericFloatFieldData(reader, field);
        case DOUBLE:
            return new SortedNumericDoubleFieldData(reader, field);
        default:
            return new SortedNumericLongFieldData(reader, field, numericType);
    }
}
Example 2: loadDirect
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
@Override
public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
    LeafReader reader = context.reader();
    Terms terms = reader.terms(getFieldNames().indexName());
    AtomicGeoPointFieldData data = null;
    // TODO: Use an actual estimator to estimate before loading.
    NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker(CircuitBreaker.FIELDDATA));
    if (terms == null) {
        data = AbstractAtomicGeoPointFieldData.empty(reader.maxDoc());
        estimator.afterLoad(null, data.ramBytesUsed());
        return data;
    }
    return (Version.indexCreated(indexSettings).before(Version.V_2_2_0)) ?
            loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
}
Example 3: testSimple
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
/**
 * test version lookup actually works
 */
public void testSimple() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87));
    writer.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(writer);
    LeafReaderContext segment = reader.leaves().get(0);
    PerThreadIDAndVersionLookup lookup = new PerThreadIDAndVersionLookup(segment.reader());
    // found doc
    DocIdAndVersion result = lookup.lookup(new BytesRef("6"), null, segment);
    assertNotNull(result);
    assertEquals(87, result.version);
    assertEquals(0, result.docId);
    // not found doc
    assertNull(lookup.lookup(new BytesRef("7"), null, segment));
    // deleted doc
    assertNull(lookup.lookup(new BytesRef("6"), new Bits.MatchNoBits(1), segment));
    reader.close();
    writer.close();
    dir.close();
}
Example 4: completionStats
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
/**
 * Returns total in-heap bytes used by all suggesters. This method has CPU cost <code>O(numIndexedFields)</code>.
 *
 * @param fieldNamePatterns if non-null, any completion field name matching any of these patterns will break out its in-heap bytes
 *                          separately in the returned {@link CompletionStats}
 */
public static CompletionStats completionStats(IndexReader indexReader, String... fieldNamePatterns) {
    long sizeInBytes = 0;
    ObjectLongHashMap<String> completionFields = null;
    if (fieldNamePatterns != null && fieldNamePatterns.length > 0) {
        completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length);
    }
    for (LeafReaderContext atomicReaderContext : indexReader.leaves()) {
        LeafReader atomicReader = atomicReaderContext.reader();
        try {
            Fields fields = atomicReader.fields();
            for (String fieldName : fields) {
                Terms terms = fields.terms(fieldName);
                if (terms instanceof CompletionTerms) {
                    // TODO: currently we load up the suggester for reporting its size
                    long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed();
                    if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, fieldName)) {
                        completionFields.addTo(fieldName, fstSize);
                    }
                    sizeInBytes += fstSize;
                }
            }
        } catch (IOException ioe) {
            throw new ElasticsearchException(ioe);
        }
    }
    return new CompletionStats(sizeInBytes, completionFields == null ? null : new FieldMemoryStats(completionFields));
}
Example 5: setSegmentAndDocument
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
public void setSegmentAndDocument(LeafReaderContext context, int docId) {
    if (this.reader == context.reader() && this.docId == docId) {
        // if we are called with the same document, don't invalidate source
        return;
    }
    this.reader = context.reader();
    this.source = null;
    this.sourceAsBytes = null;
    this.docId = docId;
}
Example 6: load
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
@Override
public AtomicGeoPointFieldData load(LeafReaderContext context) {
    try {
        LeafReader reader = context.reader();
        FieldInfo info = reader.getFieldInfos().fieldInfo(fieldName);
        if (info != null) {
            checkCompatible(info);
        }
        return new LatLonPointDVAtomicFieldData(DocValues.getSortedNumeric(reader, fieldName));
    } catch (IOException e) {
        throw new IllegalStateException("Cannot load doc values", e);
    }
}
Example 7: testTwoDocuments
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
/**
 * test version lookup with two documents matching the ID
 */
public void testTwoDocuments() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87));
    writer.addDocument(doc);
    writer.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(writer);
    LeafReaderContext segment = reader.leaves().get(0);
    PerThreadIDAndVersionLookup lookup = new PerThreadIDAndVersionLookup(segment.reader());
    // return the last doc when there are duplicates
    DocIdAndVersion result = lookup.lookup(new BytesRef("6"), null, segment);
    assertNotNull(result);
    assertEquals(87, result.version);
    assertEquals(1, result.docId);
    // delete the first doc only
    FixedBitSet live = new FixedBitSet(2);
    live.set(1);
    result = lookup.lookup(new BytesRef("6"), live, segment);
    assertNotNull(result);
    assertEquals(87, result.version);
    assertEquals(1, result.docId);
    // delete the second doc only
    live.clear(1);
    live.set(0);
    result = lookup.lookup(new BytesRef("6"), live, segment);
    assertNotNull(result);
    assertEquals(87, result.version);
    assertEquals(0, result.docId);
    // delete both docs
    assertNull(lookup.lookup(new BytesRef("6"), new Bits.MatchNoBits(2), segment));
    reader.close();
    writer.close();
    dir.close();
}
Example 8: load
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
@Override
public AtomicNumericFieldData load(LeafReaderContext context) {
    final LeafReader reader = context.reader();
    final String field = fieldNames.indexName();
    switch (numericType) {
        case FLOAT:
            return new SortedNumericFloatFieldData(reader, field);
        case DOUBLE:
            return new SortedNumericDoubleFieldData(reader, field);
        default:
            return new SortedNumericLongFieldData(reader, field);
    }
}
Example 9: load
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
@Override
public AtomicOrdinalsFieldData load(LeafReaderContext context) {
    return new SortedSetDVBytesAtomicFieldData(context.reader(), fieldNames.indexName());
}
Example 10: LeafIndexLookup
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
public LeafIndexLookup(LeafReaderContext ctx) {
    reader = ctx.reader();
    parentReader = ReaderUtil.getTopLevelContext(ctx).reader();
    indexSearcher = new IndexSearcher(parentReader);
    indexSearcher.setQueryCache(null);
}
Example 11: doSetNextReader
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
    reader = context.reader();
    uidValues = uidFieldData.load(context).getBytesValues();
}
Example 12: load
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
@Override
public BinaryDVAtomicFieldData load(LeafReaderContext context) {
    return new BinaryDVAtomicFieldData(context.reader(), fieldName);
}
Example 13: testSingleValueAllSet
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
public void testSingleValueAllSet() throws Exception {
    fillSingleValueAllSet();
    IndexFieldData indexFieldData = getForField("value");
    List<LeafReaderContext> readerContexts = refreshReader();
    for (LeafReaderContext readerContext : readerContexts) {
        AtomicFieldData fieldData = indexFieldData.load(readerContext);
        assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));

        SortedBinaryDocValues bytesValues = fieldData.getBytesValues();

        bytesValues.setDocument(0);
        assertThat(bytesValues.count(), equalTo(1));
        assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(two())));
        bytesValues.setDocument(1);
        assertThat(bytesValues.count(), equalTo(1));
        assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(one())));
        bytesValues.setDocument(2);
        assertThat(bytesValues.count(), equalTo(1));
        assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(three())));

        assertValues(bytesValues, 0, two());
        assertValues(bytesValues, 1, one());
        assertValues(bytesValues, 2, three());

        IndexSearcher searcher = new IndexSearcher(readerContext.reader());
        TopFieldDocs topDocs;

        topDocs = searcher.search(new MatchAllDocsQuery(), 10,
                new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null))));
        assertThat(topDocs.totalHits, equalTo(3));
        assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
        assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one()));
        assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
        assertThat(toString(((FieldDoc) topDocs.scoreDocs[1]).fields[0]), equalTo(two()));
        assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
        assertThat(toString(((FieldDoc) topDocs.scoreDocs[2]).fields[0]), equalTo(three()));

        topDocs = searcher.search(new MatchAllDocsQuery(), 10,
                new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true)));
        assertThat(topDocs.totalHits, equalTo(3));
        assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
        assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
        assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
    }
}
Example 14: doSetNextReader
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
    super.doSetNextReader(context);
    collector.getLeafCollector(context);
    currentReader = context.reader();
}
Example 15: setNextReader
import org.apache.lucene.index.LeafReaderContext; // import for the package/class this method depends on
public void setNextReader(LeafReaderContext context) throws IOException {
    currentReader = context.reader();
    for (LuceneCollectorExpression expr : collectorExpressions) {
        expr.setNextReader(context);
    }
}