This article collects typical usage examples of the Java method org.apache.lucene.index.LeafReader.maxDoc. If you are unsure what LeafReader.maxDoc does or how to call it, the curated code samples below should help; you can also explore further usage of the enclosing class, org.apache.lucene.index.LeafReader.
The following presents 7 code examples of LeafReader.maxDoc, ordered by popularity by default.
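Before the individual examples, here is a minimal sketch (not taken from any of them; the helper name is illustrative) of what maxDoc() means in practice: it is one greater than the largest document ID in a segment and it counts deleted documents, so a full segment scan is usually paired with getLiveDocs():

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.util.Bits;

// Hedged sketch: count the live documents of one segment by iterating the
// docID range [0, maxDoc()) and skipping deletions via getLiveDocs().
static int countLiveDocs(LeafReader reader) {
    Bits liveDocs = reader.getLiveDocs(); // null when the segment has no deletions
    int count = 0;
    for (int docID = 0; docID < reader.maxDoc(); docID++) {
        if (liveDocs == null || liveDocs.get(docID)) {
            count++;
        }
    }
    return count; // should equal reader.numDocs()
}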
Example 1: filter
import org.apache.lucene.index.LeafReader; // import the package/class the method depends on
protected TermsEnum filter(Terms terms, TermsEnum iterator, LeafReader reader) throws IOException {
    if (iterator == null) {
        return null;
    }
    int docCount = terms.getDocCount();
    if (docCount == -1) {
        docCount = reader.maxDoc();
    }
    if (docCount >= minSegmentSize) {
        final int minFreq = minFrequency > 1.0
            ? (int) minFrequency
            : (int) (docCount * minFrequency);
        final int maxFreq = maxFrequency > 1.0
            ? (int) maxFrequency
            : (int) (docCount * maxFrequency);
        if (minFreq > 1 || maxFreq < docCount) {
            iterator = new FrequencyFilter(iterator, minFreq, maxFreq);
        }
    }
    return iterator;
}
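The fields minSegmentSize, minFrequency, and maxFrequency, as well as FrequencyFilter, are defined outside this snippet. The threshold rule that is visible above can be isolated into a small helper, under the assumption (read off the snippet, not confirmed elsewhere) that values greater than 1.0 are absolute document counts while values up to 1.0 are fractions of the segment's document count:

// Hedged helper mirroring the snippet's convention; docCount is the segment's
// document count, falling back to reader.maxDoc() when Terms reports -1.
static int resolveFrequencyThreshold(double frequency, int docCount) {
    return frequency > 1.0 ? (int) frequency : (int) (docCount * frequency);
}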
Example 2: getSeqNosSet
import org.apache.lucene.index.LeafReader; // import the package/class the method depends on
private static FixedBitSet getSeqNosSet(final IndexReader reader, final long highestSeqNo) throws IOException {
    // _seq_no are stored as doc values for the time being, so this is how we get them
    // (as opposed to using an IndexSearcher or IndexReader)
    final FixedBitSet bitSet = new FixedBitSet((int) highestSeqNo + 1);
    final List<LeafReaderContext> leaves = reader.leaves();
    if (leaves.isEmpty()) {
        return bitSet;
    }

    for (int i = 0; i < leaves.size(); i++) {
        final LeafReader leaf = leaves.get(i).reader();
        final NumericDocValues values = leaf.getNumericDocValues(SeqNoFieldMapper.NAME);
        if (values == null) {
            continue;
        }
        final Bits bits = leaf.getLiveDocs();
        for (int docID = 0; docID < leaf.maxDoc(); docID++) {
            if (bits == null || bits.get(docID)) {
                final long seqNo = values.get(docID);
                assertFalse("should not have more than one document with the same seq_no[" + seqNo + "]",
                        bitSet.get((int) seqNo));
                bitSet.set((int) seqNo);
            }
        }
    }
    return bitSet;
}
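A hypothetical follow-up in the same test class (the helper name and assertion text are illustrative) would check that no sequence number up to highestSeqNo is missing from the returned bit set:

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.FixedBitSet;
import static org.junit.Assert.assertEquals;

// Assumed usage: every seq_no in [0, highestSeqNo] should have been indexed
// exactly once, so the cardinality must equal highestSeqNo + 1.
static void assertNoMissingSeqNos(IndexReader reader, long highestSeqNo) throws IOException {
    FixedBitSet seqNos = getSeqNosSet(reader, highestSeqNo);
    assertEquals("every seq_no up to " + highestSeqNo + " should be present",
            highestSeqNo + 1, seqNos.cardinality());
}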
Example 3: get
import org.apache.lucene.index.LeafReader; // import the package/class the method depends on
public static int[] get(LeafReaderContext context, String keyName) throws IOException {
    LeafReader reader = context.reader();
    NumericDocValues ndv = reader.getNumericDocValues(keyName);
    if (ndv == null) {
        return null;
    }
    CacheValue cacheValue = safeGet(reader, keyName);
    int[] keyValues = cacheValue.keyValues;
    if (!cacheValue.newValue) {
        return keyValues;
    }
    for (int i = 0; i < reader.maxDoc(); i++) {
        keyValues[i] = (int) ndv.get(i);
    }
    return keyValues;
}
Example 4: build
import org.apache.lucene.index.LeafReader; // import the package/class the method depends on
/**
 * Returns a per-segment DocIdSet containing the matching docs for the specified slice.
 */
private DocIdSet build(LeafReader reader) throws IOException {
    final DocIdSetBuilder builder = new DocIdSetBuilder(reader.maxDoc());
    final Terms terms = reader.terms(getField());
    final TermsEnum te = terms.iterator();
    PostingsEnum docsEnum = null;
    for (BytesRef term = te.next(); term != null; term = te.next()) {
        int hashCode = term.hashCode();
        if (contains(hashCode)) {
            docsEnum = te.postings(docsEnum, PostingsEnum.NONE);
            builder.add(docsEnum);
        }
    }
    return builder.build();
}
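A hypothetical consumer in the same class (the method name is made up) would walk the returned per-segment DocIdSet with the standard iterator pattern:

import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;

// Assumed usage: count (or otherwise process) the docs of one segment that
// fall into this slice. DocIdSet.iterator() may legally return null.
private int countSliceMatches(LeafReader reader) throws IOException {
    DocIdSet docs = build(reader);
    DocIdSetIterator it = docs.iterator();
    int count = 0;
    if (it != null) {
        for (int docID = it.nextDoc(); docID != DocIdSetIterator.NO_MORE_DOCS; docID = it.nextDoc()) {
            count++; // process docID here
        }
    }
    return count;
}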
Example 5: safeGet
import org.apache.lucene.index.LeafReader; // import the package/class the method depends on
private static synchronized CacheValue safeGet(LeafReader reader, String keyName) {
    Map<String, CacheValue> fieldCache = cache.get(reader.getCoreCacheKey());
    if (fieldCache == null) {
        fieldCache = new HashMap<String, CacheValue>();
        KeyValuesCache.cache.put(reader.getCoreCacheKey(), fieldCache);
    }
    CacheValue cacheValue = fieldCache.get(keyName);
    if (cacheValue == null) {
        int[] keyValues = new int[reader.maxDoc()];
        fieldCache.put(keyName, new CacheValue(keyValues, false));
        cacheValue = new CacheValue(keyValues, true);
    }
    return cacheValue;
}
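Neither the cache map nor the CacheValue holder appears in these snippets; a minimal sketch of what Examples 3 and 5 seem to assume looks like this (all names are guesses). A production version would more likely key a concurrent or weak map on getCoreCacheKey() so entries disappear when segments are closed:

import java.util.HashMap;
import java.util.Map;

// Assumed shape of the cache used by get() and safeGet() above.
private static final Map<Object, Map<String, CacheValue>> cache =
        new HashMap<Object, Map<String, CacheValue>>();

static class CacheValue {
    final int[] keyValues;   // one slot per docID, sized to reader.maxDoc()
    final boolean newValue;  // true when the caller still has to fill the array

    CacheValue(int[] keyValues, boolean newValue) {
        this.keyValues = keyValues;
        this.newValue = newValue;
    }
}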
Example 6: getValues
import org.apache.lucene.index.LeafReader; // import the package/class the method depends on
@Override
public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
    final LeafReader reader = readerContext.reader();
    final BinaryDocValues docValues = reader.getBinaryDocValues(fieldName);
    if (docValues == null)
        return null;
    return new DoubleDocValues(this) {
        public BytesRef scratch = null;
        // TODO why is the value being requested twice per doc?!
        int lastDoc = -1;
        double lastVal;

        @Override
        public double doubleVal(int doc) {
            if (doc == lastDoc)
                return lastVal;
            // sanity check; shouldn't be necessary
            if (doc < 0 || doc >= reader.maxDoc())
                throw new IllegalStateException("Bad doc " + doc + " for reader " + reader);
            BytesRef bytes = null;
            try { // shouldn't be necessary
                scratch = docValues.get(doc);
                bytes = scratch;
            } catch (ArrayIndexOutOfBoundsException e) {
                if (log.isErrorEnabled())
                    log.error("DocValues index corruption for docid " + doc + " reader " + reader); // don't log 'e'
            }
            if (bytes != null)
                lastVal = MultiPointEncoding.calcDistance(point, bytes, ctx);
            else
                lastVal = 1; // 1 degree away, ~111.2 km
            lastDoc = doc;
            return lastVal;
        }
    };
}
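A hypothetical harness (names assumed, not part of the source) that evaluates a ValueSource like the one above over every live document of a single segment:

import java.io.IOException;
import java.util.HashMap;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.util.Bits;

// Assumed usage: pull FunctionValues for one segment and print the double
// value of every live doc; an empty HashMap stands in for the shared context.
static void dumpValues(ValueSource source, LeafReaderContext leafContext) throws IOException {
    LeafReader reader = leafContext.reader();
    FunctionValues values = source.getValues(new HashMap<Object, Object>(), leafContext);
    if (values == null) {
        return; // the snippet above returns null when the field has no doc values
    }
    Bits liveDocs = reader.getLiveDocs();
    for (int docID = 0; docID < reader.maxDoc(); docID++) {
        if (liveDocs == null || liveDocs.get(docID)) {
            System.out.println(docID + " -> " + values.doubleVal(docID));
        }
    }
}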
Example 7: lookupDocIdByPK
import org.apache.lucene.index.LeafReader; // import the package/class the method depends on
protected int[] lookupDocIdByPK(final IndexSearcher searcher, final String... ids) throws IOException {
    final List<LeafReaderContext> subReaders = searcher.getIndexReader().leaves();
    final TermsEnum[] termsEnums = new TermsEnum[subReaders.size()];
    final PostingsEnum[] docsEnums = new PostingsEnum[subReaders.size()];
    for (int subIDX = 0; subIDX < subReaders.size(); subIDX++) {
        termsEnums[subIDX] = subReaders.get(subIDX).reader().fields().terms("id").iterator();
    }

    int[] results = new int[ids.length];
    for (int i = 0; i < results.length; i++) {
        results[i] = -1;
    }

    // for each id given
    for (int idx = 0; idx < ids.length; idx++) {
        int base = 0;
        final BytesRef id = new BytesRef(ids[idx]);
        // for each leaf reader...
        for (int subIDX = 0; subIDX < subReaders.size(); subIDX++) {
            final LeafReader subReader = subReaders.get(subIDX).reader();
            final TermsEnum termsEnum = termsEnums[subIDX];
            // does the enumeration of "id" terms from this reader contain the id we're looking for?
            if (termsEnum.seekExact(id)) {
                final PostingsEnum docs = docsEnums[subIDX] = termsEnum.postings(docsEnums[subIDX], 0);
                // the reader contains the term; get its postings and make sure they are non-null
                if (docs != null) {
                    final int docID = docs.nextDoc();
                    Bits liveDocs = subReader.getLiveDocs();
                    // but the doc may have been deleted, so check the live docs too
                    if (docID != DocIdSetIterator.NO_MORE_DOCS && (liveDocs == null || liveDocs.get(docID))) {
                        results[idx] = base + docID;
                        break;
                    }
                }
            }
            base += subReader.maxDoc();
        }
    }
    return results;
}
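A hypothetical caller in the same test class (the helper name and ids are illustrative) would expect every primary key to resolve to a live global docID, i.e. an entry different from -1:

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import static org.junit.Assert.assertTrue;

// Assumed usage: look up a batch of primary keys and assert all were found.
protected void assertAllIdsResolve(IndexSearcher searcher, String... ids) throws IOException {
    int[] docIds = lookupDocIdByPK(searcher, ids);
    for (int i = 0; i < docIds.length; i++) {
        assertTrue("id \"" + ids[i] + "\" should resolve to a live document", docIds[i] != -1);
    }
}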