This article collects typical usage examples of the Java method org.apache.lucene.index.IndexReader.maxDoc. If you have been wondering what exactly IndexReader.maxDoc does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples of the enclosing class, org.apache.lucene.index.IndexReader.
The section below shows 15 code examples of the IndexReader.maxDoc method, drawn from open-source projects and listed roughly by popularity.
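Before the project examples, here is a minimal, self-contained sketch of what maxDoc() actually reports. It is not taken from any of the projects below; the index path "demo index" is a placeholder, and the calls target a Lucene 6.x-style API (the same generation as most of the examples). The key point is that maxDoc() is one greater than the largest document id and still counts deleted documents, which is why the examples use it to size per-document bitsets or as a fallback corpus size, while numDocs() counts only live documents.

import java.nio.file.Paths;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Bits;

public class MaxDocDemo {
    public static void main(String[] args) throws Exception {
        // "demo index" is a placeholder; point it at a real index directory.
        try (FSDirectory dir = FSDirectory.open(Paths.get("demo index"));
             DirectoryReader reader = DirectoryReader.open(dir)) {
            // maxDoc() includes deleted documents, numDocs() does not.
            System.out.println("maxDoc=" + reader.maxDoc() + ", numDocs=" + reader.numDocs());
            Bits liveDocs = MultiFields.getLiveDocs(reader); // null when the index has no deletions
            for (int docId = 0; docId < reader.maxDoc(); docId++) {
                if (liveDocs != null && !liveDocs.get(docId)) {
                    continue; // skip documents that have been deleted
                }
                Document doc = reader.document(docId);
                // ... process doc ...
            }
        }
    }
}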
Example 1: rewrite
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public Query rewrite(IndexReader reader) throws IOException {
    Query rewritten = super.rewrite(reader);
    if (rewritten != this) {
        return rewritten;
    }
    IndexReaderContext context = reader.getContext();
    TermContext[] ctx = new TermContext[terms.length];
    int[] docFreqs = new int[ctx.length];
    for (int i = 0; i < terms.length; i++) {
        ctx[i] = TermContext.build(context, terms[i]);
        docFreqs[i] = ctx[i].docFreq();
    }
    final int maxDoc = reader.maxDoc();
    blend(ctx, maxDoc, reader);
    return topLevelQuery(terms, ctx, docFreqs, maxDoc);
}
Example 2: DirectCandidateGenerator
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
public DirectCandidateGenerator(DirectSpellChecker spellchecker, String field, SuggestMode suggestMode, IndexReader reader,
        double nonErrorLikelihood, int numCandidates, Analyzer preFilter, Analyzer postFilter, Terms terms) throws IOException {
    if (terms == null) {
        throw new IllegalArgumentException("generator field [" + field + "] doesn't exist");
    }
    this.spellchecker = spellchecker;
    this.field = field;
    this.numCandidates = numCandidates;
    this.suggestMode = suggestMode;
    this.reader = reader;
    final long dictSize = terms.getSumTotalTermFreq();
    this.useTotalTermFrequency = dictSize != -1;
    this.dictSize = dictSize == -1 ? reader.maxDoc() : dictSize;
    this.preFilter = preFilter;
    this.postFilter = postFilter;
    this.nonErrorLikelihood = nonErrorLikelihood;
    float thresholdFrequency = spellchecker.getThresholdFrequency();
    this.frequencyPlateau = thresholdFrequency >= 1.0f ? (int) thresholdFrequency : (int) (dictSize * thresholdFrequency);
    termsEnum = terms.iterator();
}
Example 3: stats
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
FieldStats.Double stats(IndexReader reader, String fieldName,
        boolean isSearchable, boolean isAggregatable) throws IOException {
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
    if (fi == null) {
        return null;
    }
    long size = PointValues.size(reader, fieldName);
    if (size == 0) {
        return new FieldStats.Double(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
    }
    int docCount = PointValues.getDocCount(reader, fieldName);
    byte[] min = PointValues.getMinPackedValue(reader, fieldName);
    byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
    return new FieldStats.Double(reader.maxDoc(), docCount, -1L, size,
            isSearchable, isAggregatable,
            HalfFloatPoint.decodeDimension(min, 0), HalfFloatPoint.decodeDimension(max, 0));
}
Example 4: main
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
public static void main(String[] args) {
    try {
        Directory directory = FSDirectory.getDirectory("demo index", false);
        IndexReader reader = IndexReader.open(directory);
        // Term term = new Term("path", "pizza");
        // int deleted = reader.delete(term);
        // System.out.println("deleted " + deleted +
        //     " documents containing " + term);
        for (int i = 0; i < reader.maxDoc(); i++)
            reader.delete(i);
        reader.close();
        directory.close();
    } catch (Exception e) {
        System.out.println(" caught a " + e.getClass() +
                "\n with message: " + e.getMessage());
    }
}
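Example 4 targets a very old Lucene release, where an index was opened with IndexReader.open(...) and documents were deleted one by one through IndexReader.delete(int) up to maxDoc(). Purely as a point of comparison, and not code from that project, a sketch of the same "delete every document" operation against a modern Lucene API could look like this (again treating "demo index" as a placeholder path):

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.FSDirectory;

public class DeleteAllDocs {
    public static void main(String[] args) throws Exception {
        try (FSDirectory directory = FSDirectory.open(Paths.get("demo index"));
             IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()))) {
            writer.deleteAll(); // marks every document in the index as deleted
            writer.commit();    // makes the deletion visible to newly opened readers
        }
    }
}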
Example 5: stats
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public FieldStats.Date stats(IndexReader reader) throws IOException {
    String field = name();
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
    if (fi == null) {
        return null;
    }
    long size = PointValues.size(reader, field);
    if (size == 0) {
        return new FieldStats.Date(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
    }
    int docCount = PointValues.getDocCount(reader, field);
    byte[] min = PointValues.getMinPackedValue(reader, field);
    byte[] max = PointValues.getMaxPackedValue(reader, field);
    return new FieldStats.Date(reader.maxDoc(), docCount, -1L, size,
            isSearchable(), isAggregatable(),
            dateTimeFormatter(), LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0));
}
Example 6: stats
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public FieldStats.GeoPoint stats(IndexReader reader) throws IOException {
    String field = name();
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
    if (fi == null) {
        return null;
    }
    final long size = PointValues.size(reader, field);
    if (size == 0) {
        return new FieldStats.GeoPoint(reader.maxDoc(), -1L, -1L, -1L, isSearchable(), isAggregatable());
    }
    final int docCount = PointValues.getDocCount(reader, field);
    byte[] min = PointValues.getMinPackedValue(reader, field);
    byte[] max = PointValues.getMaxPackedValue(reader, field);
    GeoPoint minPt = new GeoPoint(GeoEncodingUtils.decodeLatitude(min, 0), GeoEncodingUtils.decodeLongitude(min, Integer.BYTES));
    GeoPoint maxPt = new GeoPoint(GeoEncodingUtils.decodeLatitude(max, 0), GeoEncodingUtils.decodeLongitude(max, Integer.BYTES));
    return new FieldStats.GeoPoint(reader.maxDoc(), docCount, -1L, size, isSearchable(), isAggregatable(),
            minPt, maxPt);
}
Example 7: stats
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public FieldStats.Ip stats(IndexReader reader) throws IOException {
    String field = name();
    FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
    if (fi == null) {
        return null;
    }
    long size = PointValues.size(reader, field);
    if (size == 0) {
        return new FieldStats.Ip(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
    }
    int docCount = PointValues.getDocCount(reader, field);
    byte[] min = PointValues.getMinPackedValue(reader, field);
    byte[] max = PointValues.getMaxPackedValue(reader, field);
    return new FieldStats.Ip(reader.maxDoc(), docCount, -1L, size,
            isSearchable(), isAggregatable(),
            InetAddressPoint.decode(min), InetAddressPoint.decode(max));
}
Example 8: getDocIdSet
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException
{
    int max = reader.maxDoc();
    OpenBitSet good = new OpenBitSet(max);
    good.set(0, max);
    for( List<Field> values : terms )
    {
        for( Field nv : values )
        {
            Term term = new Term(nv.getField(), nv.getValue());
            TermDocs docs = reader.termDocs(term);
            while( docs.next() )
            {
                good.clear(docs.doc());
            }
            docs.close();
        }
    }
    return good;
}
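Example 8 uses the pre-4.0 Filter API (getDocIdSet(IndexReader), TermDocs, OpenBitSet): it sizes an OpenBitSet by reader.maxDoc(), sets every bit, and then clears the bits of documents matching any of the supplied terms, producing an "exclude these terms" filter. On current Lucene versions the same exclusion is usually expressed as a BooleanQuery with MUST_NOT clauses instead of a hand-built bitset. A hedged sketch, with a made-up helper class name and no claim of matching the project's own abstractions:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

// Matches every document except those containing any of the given terms.
public final class ExcludeTermsQueries {
    public static Query excludeAll(Iterable<Term> termsToExclude) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
        for (Term term : termsToExclude) {
            builder.add(new TermQuery(term), BooleanClause.Occur.MUST_NOT);
        }
        return builder.build();
    }
}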
Example 9: rewrite
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public Query rewrite(IndexReader reader) throws IOException {
    IndexReaderContext context = reader.getContext();
    TermContext[] ctx = new TermContext[terms.length];
    int[] docFreqs = new int[ctx.length];
    for (int i = 0; i < terms.length; i++) {
        ctx[i] = TermContext.build(context, terms[i]);
        docFreqs[i] = ctx[i].docFreq();
    }
    final int maxDoc = reader.maxDoc();
    blend(ctx, maxDoc, reader);
    Query query = topLevelQuery(terms, ctx, docFreqs, maxDoc);
    query.setBoost(getBoost());
    return query;
}
Example 10: WordScorer
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
public WordScorer(IndexReader reader, Terms terms, String field, double realWordLikelyHood, BytesRef separator) throws IOException {
    this.field = field;
    if (terms == null) {
        throw new IllegalArgumentException("Field: [" + field + "] does not exist");
    }
    this.terms = terms;
    final long vocSize = terms.getSumTotalTermFreq();
    this.vocabluarySize = vocSize == -1 ? reader.maxDoc() : vocSize;
    this.useTotalTermFreq = vocSize != -1;
    this.numTerms = terms.size();
    this.termsEnum = new FreqTermsEnum(reader, field, !useTotalTermFreq, useTotalTermFreq, null, BigArrays.NON_RECYCLING_INSTANCE); // non recycling for now
    this.reader = reader;
    this.realWordLikelyhood = realWordLikelyHood;
    this.separator = separator;
}
Example 11: stats
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public FieldStats stats(IndexReader reader) throws IOException {
    String fieldName = name();
    long size = PointValues.size(reader, fieldName);
    if (size == 0) {
        return null;
    }
    int docCount = PointValues.getDocCount(reader, fieldName);
    byte[] min = PointValues.getMinPackedValue(reader, fieldName);
    byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
    return new FieldStats.Long(reader.maxDoc(), docCount, -1L, size, true, false,
            LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0));
}
Example 12: getDocIdSet
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException
{
    int max = reader.maxDoc();
    OpenBitSet good = new OpenBitSet(max);
    Institution institution = CurrentInstitution.get();
    Term term = new Term(FreeTextQuery.FIELD_INSTITUTION, Long.toString(institution.getUniqueId()));
    TermDocs docs = reader.termDocs(term);
    while( docs.next() )
    {
        good.set(docs.doc());
    }
    docs.close();
    return good;
}
Example 13: query
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public <T> void query(
        @NonNull Collection<? super T> result,
        @NonNull Convertor<? super Document, T> convertor,
        @NullAllowed FieldSelector selector,
        @NullAllowed AtomicBoolean cancel,
        @NonNull Query... queries) throws IOException, InterruptedException {
    Parameters.notNull("queries", queries); //NOI18N
    Parameters.notNull("convertor", convertor); //NOI18N
    Parameters.notNull("result", result); //NOI18N
    if (selector == null) {
        selector = AllFieldsSelector.INSTANCE;
    }
    lock.readLock().lock();
    try {
        final IndexReader in = getReader();
        if (in == null) {
            return;
        }
        final BitSet bs = new BitSet(in.maxDoc());
        final Collector c = new BitSetCollector(bs);
        final Searcher searcher = new IndexSearcher(in);
        try {
            for (Query q : queries) {
                if (cancel != null && cancel.get()) {
                    throw new InterruptedException();
                }
                searcher.search(q, c);
            }
        } finally {
            searcher.close();
        }
        for (int docNum = bs.nextSetBit(0); docNum >= 0; docNum = bs.nextSetBit(docNum + 1)) {
            if (cancel != null && cancel.get()) {
                throw new InterruptedException();
            }
            final Document doc = in.document(docNum, selector);
            final T value = convertor.convert(doc);
            if (value != null) {
                result.add(value);
            }
        }
    } finally {
        lock.readLock().unlock();
    }
}
Example 14: queryDocTerms
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public <S, T> void queryDocTerms(
        @NonNull Map<? super T, Set<S>> result,
        @NonNull Convertor<? super Document, T> convertor,
        @NonNull Convertor<? super Term, S> termConvertor,
        @NullAllowed FieldSelector selector,
        @NullAllowed AtomicBoolean cancel,
        @NonNull Query... queries) throws IOException, InterruptedException {
    Parameters.notNull("result", result); //NOI18N
    Parameters.notNull("convertor", convertor); //NOI18N
    Parameters.notNull("termConvertor", termConvertor); //NOI18N
    Parameters.notNull("queries", queries); //NOI18N
    if (selector == null) {
        selector = AllFieldsSelector.INSTANCE;
    }
    lock.readLock().lock();
    try {
        final IndexReader in = getReader();
        if (in == null) {
            return;
        }
        final BitSet bs = new BitSet(in.maxDoc());
        final Collector c = new BitSetCollector(bs);
        final Searcher searcher = new IndexSearcher(in);
        final TermCollector termCollector = new TermCollector(c);
        try {
            for (Query q : queries) {
                if (cancel != null && cancel.get()) {
                    throw new InterruptedException();
                }
                if (q instanceof TermCollector.TermCollecting) {
                    ((TermCollector.TermCollecting) q).attach(termCollector);
                } else {
                    throw new IllegalArgumentException(
                            String.format("Query: %s does not implement TermCollecting", //NOI18N
                                    q.getClass().getName()));
                }
                searcher.search(q, termCollector);
            }
        } finally {
            searcher.close();
        }
        for (int docNum = bs.nextSetBit(0); docNum >= 0; docNum = bs.nextSetBit(docNum + 1)) {
            if (cancel != null && cancel.get()) {
                throw new InterruptedException();
            }
            final Document doc = in.document(docNum, selector);
            final T value = convertor.convert(doc);
            if (value != null) {
                final Set<Term> terms = termCollector.get(docNum);
                if (terms != null) {
                    result.put(value, convertTerms(termConvertor, terms));
                }
            }
        }
    } finally {
        lock.readLock().unlock();
    }
}
Example 15: getDocIdSet
import org.apache.lucene.index.IndexReader; // import the package/class this method depends on
@Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException
{
    OpenBitSet bits = new OpenBitSet(reader.maxDoc());
    Term startTerm = new Term(field, start);
    Term endTerm = new Term(field, end);
    TermEnum enumerator = reader.terms(startTerm);
    if( enumerator.term() == null )
    {
        return bits;
    }
    TermDocs termDocs = reader.termDocs();
    try
    {
        Term current = enumerator.term();
        while( current.compareTo(endTerm) <= 0 )
        {
            termDocs.seek(enumerator.term());
            while( termDocs.next() )
            {
                bits.set(termDocs.doc());
            }
            if( !enumerator.next() )
            {
                break;
            }
            current = enumerator.term();
        }
    }
    finally
    {
        enumerator.close();
        termDocs.close();
    }
    return bits;
}
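Example 15 builds an inclusive string range filter by hand: it allocates an OpenBitSet of reader.maxDoc() bits, walks a TermEnum starting at the lower term, and ORs in the postings of every term up to and including the upper term. On current Lucene versions the same inclusive range is normally expressed with TermRangeQuery; a minimal sketch, with placeholder field and bound names:

import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermRangeQuery;

public final class StringRangeQueries {
    // Inclusive range [lower, upper] over an untokenized string field.
    public static Query inclusiveRange(String field, String lower, String upper) {
        return TermRangeQuery.newStringRange(field, lower, upper, true, true);
    }
}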