This article collects typical usage examples of the Java class org.apache.lucene.index.LeafReader. If you are wondering what LeafReader is for, how to use it, and what working code looks like, the curated examples below should help.
The LeafReader class belongs to the org.apache.lucene.index package. 15 code examples are presented below, ordered by popularity.
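Before the examples, here is a minimal, self-contained sketch of how a LeafReader is typically obtained (the index path is a placeholder; the calls shown are the standard Lucene 5.x/6.x API used throughout the examples below): a DirectoryReader is opened over an index, and each per-segment LeafReaderContext exposes its LeafReader via reader().

import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

// Open an index and visit each segment's LeafReader.
try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
     DirectoryReader reader = DirectoryReader.open(dir)) {
    for (LeafReaderContext context : reader.leaves()) {
        LeafReader leaf = context.reader();
        System.out.println("segment has " + leaf.maxDoc() + " documents");
    }
}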
Example 1: load
import org.apache.lucene.index.LeafReader; // import the required package/class
@Override
public AtomicNumericFieldData load(LeafReaderContext context) {
    final LeafReader reader = context.reader();
    final String field = fieldName;
    switch (numericType) {
        case HALF_FLOAT:
            return new SortedNumericHalfFloatFieldData(reader, field);
        case FLOAT:
            return new SortedNumericFloatFieldData(reader, field);
        case DOUBLE:
            return new SortedNumericDoubleFieldData(reader, field);
        default:
            return new SortedNumericLongFieldData(reader, field, numericType);
    }
}
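Note how the loader dispatches on the field's numeric type: HALF_FLOAT, FLOAT, and DOUBLE each get a dedicated doc-values wrapper over the segment's LeafReader, while all integral types fall through to SortedNumericLongFieldData, which also receives the numericType so it can decode the values correctly.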
Example 2: estimateStringFieldData
import org.apache.lucene.index.LeafReader; // import the required package/class
/**
 * @return the estimate for loading the entire term set into field data, or 0 if unavailable
 */
public long estimateStringFieldData() {
    try {
        LeafReader reader = context.reader();
        Terms terms = reader.terms(getFieldName());
        Fields fields = reader.fields();
        final Terms fieldTerms = fields.terms(getFieldName());
        if (fieldTerms instanceof FieldReader) {
            final Stats stats = ((FieldReader) fieldTerms).getStats();
            long totalTermBytes = stats.totalTermBytes;
            if (logger.isTraceEnabled()) {
                logger.trace("totalTermBytes: {}, terms.size(): {}, terms.getSumDocFreq(): {}",
                    totalTermBytes, terms.size(), terms.getSumDocFreq());
            }
            long totalBytes = totalTermBytes + (2 * terms.size()) + (4 * terms.getSumDocFreq());
            return totalBytes;
        }
    } catch (Exception e) {
        logger.warn("Unable to estimate memory overhead", e);
    }
    return 0;
}
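The returned figure is a heuristic rather than an exact measurement: the constants appear to budget roughly 2 bytes of per-term overhead on top of the raw term bytes, plus 4 bytes for each term-document pairing (the sum of document frequencies). If the field's terms are not backed by a FieldReader, or any step throws, the method falls back to 0 to signal that no estimate is available.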
Example 3: filter
import org.apache.lucene.index.LeafReader; // import the required package/class
protected TermsEnum filter(Terms terms, TermsEnum iterator, LeafReader reader) throws IOException {
    if (iterator == null) {
        return null;
    }
    int docCount = terms.getDocCount();
    if (docCount == -1) {
        docCount = reader.maxDoc();
    }
    if (docCount >= minSegmentSize) {
        final int minFreq = minFrequency > 1.0
            ? (int) minFrequency
            : (int) (docCount * minFrequency);
        final int maxFreq = maxFrequency > 1.0
            ? (int) maxFrequency
            : (int) (docCount * maxFrequency);
        if (minFreq > 1 || maxFreq < docCount) {
            iterator = new FrequencyFilter(iterator, minFreq, maxFreq);
        }
    }
    return iterator;
}
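Note that minFrequency and maxFrequency do double duty: values greater than 1.0 are treated as absolute document counts, while values up to 1.0 are interpreted as fractions of the segment's document count. The FrequencyFilter wrapper is only installed when the resulting bounds would actually exclude something.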
Example 4: PerThreadIDAndVersionLookup
import org.apache.lucene.index.LeafReader; // import the required package/class
/**
 * Initialize lookup for the provided segment
 */
PerThreadIDAndVersionLookup(LeafReader reader) throws IOException {
    TermsEnum termsEnum = null;
    NumericDocValues versions = null;
    Fields fields = reader.fields();
    if (fields != null) {
        Terms terms = fields.terms(UidFieldMapper.NAME);
        if (terms != null) {
            termsEnum = terms.iterator();
            assert termsEnum != null;
            versions = reader.getNumericDocValues(VersionFieldMapper.NAME);
            assert versions != null;
        }
    }
    this.versions = versions;
    this.termsEnum = termsEnum;
}
Example 5: testSingleValued
import org.apache.lucene.index.LeafReader; // import the required package/class
public void testSingleValued() throws IOException {
    Directory dir = newDirectory();
    // we need the default codec to check for singletons
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null).setCodec(TestUtil.getDefaultCodec()));
    Document doc = new Document();
    for (IndexableField f : NumberFieldMapper.NumberType.HALF_FLOAT.createFields("half_float", 3f, false, true, false)) {
        doc.add(f);
    }
    w.addDocument(doc);
    final DirectoryReader dirReader = DirectoryReader.open(w);
    LeafReader reader = getOnlyLeafReader(dirReader);
    SortedNumericDoubleValues values = new SortedNumericDVIndexFieldData.SortedNumericHalfFloatFieldData(
        reader, "half_float").getDoubleValues();
    assertNotNull(FieldData.unwrapSingleton(values));
    values.setDocument(0);
    assertEquals(1, values.count());
    assertEquals(3f, values.valueAt(0), 0f);
    IOUtils.close(dirReader, w, dir);
}
Example 6: getSeqNosSet
import org.apache.lucene.index.LeafReader; // import the required package/class
private static FixedBitSet getSeqNosSet(final IndexReader reader, final long highestSeqNo) throws IOException {
    // _seq_no are stored as doc values for the time being, so this is how we get them
    // (as opposed to using an IndexSearcher or IndexReader)
    final FixedBitSet bitSet = new FixedBitSet((int) highestSeqNo + 1);
    final List<LeafReaderContext> leaves = reader.leaves();
    if (leaves.isEmpty()) {
        return bitSet;
    }
    for (int i = 0; i < leaves.size(); i++) {
        final LeafReader leaf = leaves.get(i).reader();
        final NumericDocValues values = leaf.getNumericDocValues(SeqNoFieldMapper.NAME);
        if (values == null) {
            continue;
        }
        final Bits bits = leaf.getLiveDocs();
        for (int docID = 0; docID < leaf.maxDoc(); docID++) {
            if (bits == null || bits.get(docID)) {
                final long seqNo = values.get(docID);
                assertFalse("should not have more than one document with the same seq_no[" + seqNo + "]", bitSet.get((int) seqNo));
                bitSet.set((int) seqNo);
            }
        }
    }
    return bitSet;
}
Example 7: testAddingAClosedReader
import org.apache.lucene.index.LeafReader; // import the required package/class
public void testAddingAClosedReader() throws Exception {
    LeafReader reader;
    try (Directory dir = newDirectory();
         RandomIndexWriter writer = new RandomIndexWriter(random(), dir)) {
        writer.addDocument(new Document());
        try (DirectoryReader dirReader = ElasticsearchDirectoryReader.wrap(writer.getReader(), new ShardId("index1", "_na_", 1))) {
            reader = dirReader.leaves().get(0).reader();
        }
    }
    ShardCoreKeyMap map = new ShardCoreKeyMap();
    try {
        map.add(reader);
        fail("Expected AlreadyClosedException");
    } catch (AlreadyClosedException e) {
        // What we wanted
    }
    assertEquals(0, map.size());
}
Example 8: loadDirect
import org.apache.lucene.index.LeafReader; // import the required package/class
@Override
public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
    LeafReader reader = context.reader();
    Terms terms = reader.terms(getFieldNames().indexName());
    AtomicGeoPointFieldData data = null;
    // TODO: Use an actual estimator to estimate before loading.
    NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker(CircuitBreaker.FIELDDATA));
    if (terms == null) {
        data = AbstractAtomicGeoPointFieldData.empty(reader.maxDoc());
        estimator.afterLoad(null, data.ramBytesUsed());
        return data;
    }
    return (Version.indexCreated(indexSettings).before(Version.V_2_2_0)) ?
        loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
}
Example 9: PerThreadIDAndVersionLookup
import org.apache.lucene.index.LeafReader; // import the required package/class
/**
 * Initialize lookup for the provided segment
 */
public PerThreadIDAndVersionLookup(LeafReader reader) throws IOException {
    TermsEnum termsEnum = null;
    NumericDocValues versions = null;
    boolean hasPayloads = false;
    Fields fields = reader.fields();
    if (fields != null) {
        Terms terms = fields.terms(UidFieldMapper.NAME);
        if (terms != null) {
            hasPayloads = terms.hasPayloads();
            termsEnum = terms.iterator();
            assert termsEnum != null;
            versions = reader.getNumericDocValues(VersionFieldMapper.NAME);
        }
    }
    this.versions = versions;
    this.termsEnum = termsEnum;
    this.hasPayloads = hasPayloads;
}
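This variant differs from Example 4 in that it also records terms.hasPayloads() and drops the assertion that version doc values exist; this lets callers distinguish segments whose _uid terms carry payloads (which older Elasticsearch versions used to encode version information) from segments that store versions as numeric doc values.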
Example 10: buildEntryValue
import org.apache.lucene.index.LeafReader; // import the required package/class
private NamedList<Object> buildEntryValue(long count, Term t, List<Entry<LeafReader, Bits>> leaves) throws IOException {
    NamedList<Object> entry = new NamedList<>();
    entry.add("count", count);
    int i = -1;
    for (Entry<LeafReader, Bits> e : leaves) {
        PostingsEnum postings = e.getKey().postings(t, PostingsEnum.PAYLOADS);
        if (postings == null) {
            // the term does not occur in this segment
            continue;
        }
        Bits liveDocs = e.getValue();
        while (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            if (liveDocs != null && !liveDocs.get(postings.docID())) {
                // skip deleted documents
                continue;
            }
            i++;
            NamedList<Object> documentEntry = new NamedList<>();
            entry.add("doc" + i, documentEntry);
            for (int j = 0; j < postings.freq(); j++) {
                postings.nextPosition();
                String extra = postings.getPayload().utf8ToString();
                documentEntry.add("position" + j, extra);
            }
        }
    }
    return entry;
}
Example 11: LocalEnv
import org.apache.lucene.index.LeafReader; // import the required package/class
public LocalEnv(int offset, int limit, int startTermIndex, int adjust, int targetIdx, int nTerms, Predicate<BytesRef> termFilter,
        int mincount, int[] counts, CharsRefBuilder charsRef, boolean extend, SortedSetDocValues si,
        SolrIndexSearcher searcher, List<Entry<LeafReader, Bits>> leaves, String fieldName, T ft, NamedList res) {
    super(offset, limit, targetIdx, mincount, fieldName, ft, res);
    if (startTermIndex == -1) {
        // weird case where missing is counted at counts[0].
        this.startTermOrd = 0;
        this.endTermOrd = nTerms - 1;
    } else if (startTermIndex >= 0) {
        this.startTermOrd = startTermIndex;
        this.endTermOrd = startTermIndex + nTerms;
    } else {
        throw new IllegalStateException();
    }
    this.startTermIndex = startTermIndex;
    this.adjust = adjust;
    this.nTerms = nTerms;
    this.termFilter = termFilter;
    this.counts = counts;
    this.charsRef = charsRef;
    this.extend = extend;
    this.si = si;
    this.searcher = searcher;
    this.leaves = leaves;
}
Example 12: getInsanityWrapper
import org.apache.lucene.index.LeafReader; // import the required package/class
private Collector getInsanityWrapper(final String field, Collector collector) {
    SchemaField sf = searcher.getSchema().getFieldOrNull(field);
    if (sf != null && !sf.hasDocValues() && !sf.multiValued() && sf.getType().getNumberType() != null) {
        // it's a single-valued numeric field: we must currently create insanity :(
        // there isn't a GroupedFacetCollector that works on numerics right now...
        return new FilterCollector(collector) {
            @Override
            public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
                LeafReader insane = Insanity.wrapInsanity(context.reader(), field);
                return in.getLeafCollector(insane.getContext());
            }
        };
    } else {
        return collector;
    }
}
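Insanity.wrapInsanity is a Solr helper that wraps the segment's LeafReader so the single-valued numeric field can be read through a doc-values view even though no doc values were indexed (the field is un-inverted on the fly); the delegate collector then receives the wrapped reader's context and proceeds as if doc values existed. As the comment suggests, this is a memory-costly workaround rather than a recommended pattern.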
Example 13: shouldGetEmptyDistinctTrackFieldValuesWhenTermsNull
import org.apache.lucene.index.LeafReader; // import the required package/class
@Test
public void shouldGetEmptyDistinctTrackFieldValuesWhenTermsNull() throws Exception {
    IndexSearcher mockIndexSearcher = mock(IndexSearcher.class);
    when(mockTrackManager.acquire()).thenReturn(mockIndexSearcher);
    IndexReader mockIndexReader = mock(IndexReader.class);
    when(mockIndexSearcher.getIndexReader()).thenReturn(mockIndexReader);
    LeafReaderContext mockLeafReaderContext = mock(LeafReaderContext.class);
    List<LeafReaderContext> mockLeafReaderContexts = Arrays.asList(mockLeafReaderContext);
    when(mockIndexReader.leaves()).thenReturn(mockLeafReaderContexts);
    LeafReader mockLeafReader = mock(LeafReader.class);
    when(mockLeafReaderContext.reader()).thenReturn(mockLeafReader);
    when(mockLeafReader.terms(anyString())).thenReturn(null);
    List<String> result = spySearchManager.getDistinctTrackFieldValues(TrackField.ALBUMID);
    assertThat("Result should be empty", result.isEmpty(), equalTo(true));
}
Example 14: get
import org.apache.lucene.index.LeafReader; // import the required package/class
public static int[] get(LeafReaderContext context, String keyName) throws IOException {
    LeafReader reader = context.reader();
    NumericDocValues ndv = reader.getNumericDocValues(keyName);
    if (ndv == null) {
        return null;
    }
    CacheValue cacheValue = safeGet(reader, keyName);
    int[] keyValues = cacheValue.keyValues;
    if (!cacheValue.newValue) {
        return keyValues;
    }
    for (int i = 0; i < reader.maxDoc(); i++) {
        keyValues[i] = (int) ndv.get(i);
    }
    return keyValues;
}
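The pattern here is a per-segment cache: safeGet (a helper not shown in this snippet) presumably returns a CacheValue holding an int[] sized to the reader's maxDoc, with newValue flagging whether the array still needs to be populated. Only on a cache miss are the numeric doc values copied eagerly into the array, one entry per document.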
Example 15: readDocValues
import org.apache.lucene.index.LeafReader; // import the required package/class
private Object readDocValues(String field, DocValuesType docValType, LeafReader atomicReader) throws IOException {
    Object docVals = null;
    if (docValType == DocValuesType.NUMERIC) {
        docVals = atomicReader.getNumericDocValues(field);
    } else if (docValType == DocValuesType.BINARY) {
        docVals = atomicReader.getBinaryDocValues(field);
    } else if (docValType == DocValuesType.SORTED) {
        docVals = atomicReader.getSortedDocValues(field);
    } else if (docValType == DocValuesType.SORTED_NUMERIC) {
        docVals = atomicReader.getSortedNumericDocValues(field);
    } else if (docValType == DocValuesType.SORTED_SET) {
        docVals = atomicReader.getSortedSetDocValues(field);
    }
    return docVals;
}
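Since readDocValues only dispatches on a DocValuesType supplied by the caller, a natural companion is to look that type up from the segment's FieldInfos. The following is a minimal sketch of such a caller; readDocValuesForField is a hypothetical name introduced here for illustration, while the Lucene calls (getFieldInfos, fieldInfo, getDocValuesType) are real API.

import java.io.IOException;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;

private Object readDocValuesForField(String field, LeafReader atomicReader) throws IOException {
    // Discover the field's doc-values type from the segment metadata.
    FieldInfo info = atomicReader.getFieldInfos().fieldInfo(field);
    if (info == null || info.getDocValuesType() == DocValuesType.NONE) {
        return null; // unknown field, or no doc values in this segment
    }
    return readDocValues(field, info.getDocValuesType(), atomicReader);
}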