This article collects typical usage examples of the Java method org.apache.lucene.index.AtomicReader.terms. If you have been wondering how exactly AtomicReader.terms is used, how to call it, or what real-world uses of AtomicReader.terms look like, the curated method examples here may help. You can also explore further usage of its declaring class, org.apache.lucene.index.AtomicReader.
Below are 5 code examples of AtomicReader.terms, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
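Before the examples, here is a minimal sketch of the basic call pattern against the Lucene 4.x API. It is not taken from the examples below; the class name, the helper method, and the "body" field name are illustrative assumptions.

import java.io.IOException;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

public class TermsSketch {
  // Dumps every term of the given field; returns silently if the field
  // has no inverted terms (AtomicReader.terms returns null in that case).
  static void dumpTerms(AtomicReader reader, String field) throws IOException {
    Terms terms = reader.terms(field);
    if (terms == null) {
      return;
    }
    TermsEnum te = terms.iterator(null); // null: no previous enum to reuse
    BytesRef term;
    while ((term = te.next()) != null) {
      System.out.println(term.utf8ToString() + " docFreq=" + te.docFreq());
    }
  }
}

The same null-check-then-iterate shape recurs in Examples 2, 4, and 5 below.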
Example 1: testReuseDocsEnumNoReuse
import org.apache.lucene.index.AtomicReader; // import the package/class the method depends on
public void testReuseDocsEnumNoReuse() throws IOException {
  Directory dir = newDirectory();
  Codec cp = TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat());
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
      newIndexWriterConfig(new MockAnalyzer(random())).setCodec(cp));
  int numdocs = atLeast(20);
  createRandomIndex(numdocs, writer, random());
  writer.commit();

  DirectoryReader open = DirectoryReader.open(dir);
  for (AtomicReaderContext ctx : open.leaves()) {
    AtomicReader indexReader = ctx.reader();
    Terms terms = indexReader.terms("body");
    TermsEnum iterator = terms.iterator(null);
    IdentityHashMap<DocsEnum, Boolean> enums = new IdentityHashMap<>();
    MatchNoBits bits = new Bits.MatchNoBits(indexReader.maxDoc());
    while ((iterator.next()) != null) {
      DocsEnum docs = iterator.docs(
          random().nextBoolean() ? bits : new Bits.MatchNoBits(indexReader.maxDoc()),
          null,
          random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
      enums.put(docs, true);
    }
    assertEquals(terms.size(), enums.size());
  }
  IOUtils.close(writer, open, dir);
}
Example 2: BaseTermsEnumTraverser
import org.apache.lucene.index.AtomicReader; // import the package/class the method depends on
public BaseTermsEnumTraverser(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  this.context = context;
  AtomicReader reader = context.reader();
  this.acceptDocs = acceptDocs;
  this.maxDoc = reader.maxDoc();
  Terms terms = reader.terms(fieldName);
  if (terms != null)
    this.termsEnum = terms.iterator(null);
}
Example 3: addTaxonomy
import org.apache.lucene.index.AtomicReader; // import the package/class the method depends on
/**
 * Takes the categories from the given taxonomy directory, and adds the
 * missing ones to this taxonomy. Additionally, it fills the given
 * {@link OrdinalMap} with a mapping from the original ordinal to the new
 * ordinal.
 */
public void addTaxonomy(Directory taxoDir, OrdinalMap map) throws IOException {
  ensureOpen();
  DirectoryReader r = DirectoryReader.open(taxoDir);
  try {
    final int size = r.numDocs();
    final OrdinalMap ordinalMap = map;
    ordinalMap.setSize(size);
    int base = 0;
    TermsEnum te = null;
    DocsEnum docs = null;
    for (final AtomicReaderContext ctx : r.leaves()) {
      final AtomicReader ar = ctx.reader();
      final Terms terms = ar.terms(Consts.FULL);
      te = terms.iterator(te);
      while (te.next() != null) {
        FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(te.term().utf8ToString()));
        final int ordinal = addCategory(cp);
        docs = te.docs(null, docs, DocsEnum.FLAG_NONE);
        ordinalMap.addMapping(docs.nextDoc() + base, ordinal);
      }
      base += ar.maxDoc(); // no deletions, so we're ok
    }
    ordinalMap.addDone();
  } finally {
    r.close();
  }
}
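Judging from calls like ensureOpen and addCategory, this example comes from Lucene's DirectoryTaxonomyWriter. A hedged usage sketch follows, assuming the Lucene 4.x facet module, its MemoryOrdinalMap implementation of OrdinalMap, and made-up directory paths:

import java.io.File;
import java.io.IOException;

import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter.MemoryOrdinalMap;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter.OrdinalMap;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class MergeTaxonomies {
  public static void main(String[] args) throws IOException {
    // "dest-taxo" and "src-taxo" are assumed paths, for illustration only
    Directory destDir = FSDirectory.open(new File("dest-taxo"));
    Directory srcDir = FSDirectory.open(new File("src-taxo"));
    DirectoryTaxonomyWriter writer = new DirectoryTaxonomyWriter(destDir);
    OrdinalMap map = new MemoryOrdinalMap(); // keeps the mapping in RAM
    writer.addTaxonomy(srcDir, map); // copies the missing categories over
    int[] oldToNew = map.getMap(); // index = old ordinal, value = new ordinal
    System.out.println("remapped " + oldToNew.length + " ordinals");
    writer.close();
  }
}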
Example 4: randomDocsEnum
import org.apache.lucene.index.AtomicReader; // import the package/class the method depends on
public DocsEnum randomDocsEnum(String field, BytesRef term, List<AtomicReaderContext> readers, Bits bits)
    throws IOException {
  if (random().nextInt(10) == 0) {
    return null;
  }
  AtomicReader indexReader = readers.get(random().nextInt(readers.size())).reader();
  Terms terms = indexReader.terms(field);
  if (terms == null) {
    return null;
  }
  TermsEnum iterator = terms.iterator(null);
  if (iterator.seekExact(term)) {
    return iterator.docs(bits, null, random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
  }
  return null;
}
Example 5: createValue
import org.apache.lucene.index.AtomicReader; // import the package/class the method depends on
@Override
protected Accountable createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField /* ignored */)
    throws IOException {
  final int maxDoc = reader.maxDoc();
  Terms terms = reader.terms(key.field);
  final float acceptableOverheadRatio = ((Float) key.custom).floatValue();
  final PagedBytes bytes = new PagedBytes(15);
  int startTermsBPV;
  final int termCountHardLimit;
  if (maxDoc == Integer.MAX_VALUE) {
    termCountHardLimit = Integer.MAX_VALUE;
  } else {
    termCountHardLimit = maxDoc + 1;
  }
  // TODO: use Uninvert?
  if (terms != null) {
    // Try for coarse estimate for number of bits; this
    // should be an underestimate most of the time, which
    // is fine -- GrowableWriter will reallocate as needed
    long numUniqueTerms = terms.size();
    if (numUniqueTerms != -1L) {
      if (numUniqueTerms > termCountHardLimit) {
        // app is misusing the API (there is more than
        // one term per doc); in this case we make best
        // effort to load what we can (see LUCENE-2142)
        numUniqueTerms = termCountHardLimit;
      }
      startTermsBPV = PackedInts.bitsRequired(numUniqueTerms);
    } else {
      startTermsBPV = 1;
    }
  } else {
    startTermsBPV = 1;
  }
  PackedLongValues.Builder termOrdToBytesOffset = PackedLongValues.monotonicBuilder(PackedInts.COMPACT);
  final GrowableWriter docToTermOrd = new GrowableWriter(startTermsBPV, maxDoc, acceptableOverheadRatio);
  int termOrd = 0;
  // TODO: use Uninvert?
  if (terms != null) {
    final TermsEnum termsEnum = terms.iterator(null);
    DocsEnum docs = null;
    while (true) {
      final BytesRef term = termsEnum.next();
      if (term == null) {
        break;
      }
      if (termOrd >= termCountHardLimit) {
        break;
      }
      termOrdToBytesOffset.add(bytes.copyUsingLengthPrefix(term));
      docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
      while (true) {
        final int docID = docs.nextDoc();
        if (docID == DocIdSetIterator.NO_MORE_DOCS) {
          break;
        }
        // Store 1+ ord into packed bits
        docToTermOrd.set(docID, 1 + termOrd);
      }
      termOrd++;
    }
  }
  // maybe an int-only impl?
  return new SortedDocValuesImpl(bytes.freeze(true), termOrdToBytesOffset.build(), docToTermOrd.getMutable(), termOrd);
}