This article collects typical usage examples of the Java method org.apache.lucene.index.AtomicReaderContext.reader. If you are wondering what AtomicReaderContext.reader does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also read further about the enclosing class, org.apache.lucene.index.AtomicReaderContext.
The sections below present 15 code examples of AtomicReaderContext.reader, ordered by popularity by default.
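All fifteen snippets share the same basic shape: obtain the list of leaf (per-segment) contexts from a composite IndexReader, then call AtomicReaderContext.reader() on each to get that segment's AtomicReader. Here is a minimal, self-contained sketch of that pattern against the Lucene 4.x API used throughout this article; the class name LeafReaderSketch and the helper totalMaxDoc are ours, for illustration only.

import java.io.IOException;
import java.util.List;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;

public class LeafReaderSketch {
  // Illustrative helper: sums maxDoc() across all segments of an index.
  static int totalMaxDoc(Directory dir) throws IOException {
    DirectoryReader top = DirectoryReader.open(dir);
    try {
      int total = 0;
      List<AtomicReaderContext> leaves = top.leaves();
      for (AtomicReaderContext context : leaves) {
        AtomicReader reader = context.reader(); // the per-segment reader
        total += reader.maxDoc();
      }
      return total;
    } finally {
      top.close();
    }
  }
}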
Example 1: getPosEnum
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
protected DocsAndPositionsEnum getPosEnum(IndexReader r, int docid, Term t)
    throws IOException {
  List<AtomicReaderContext> leaves = r.getContext().leaves();
  for (AtomicReaderContext context : leaves) {
    AtomicReader reader = context.reader();
    DocsAndPositionsEnum termPositions = reader.termPositionsEnum(t);
    if (termPositions == null) {
      continue; // this segment has no postings for t
    }
    int doc;
    while ((doc = termPositions.nextDoc()) != DocsEnum.NO_MORE_DOCS
        && doc != docid) {
      // advance until we reach the requested doc or exhaust the enum
    }
    if (doc != DocsEnum.NO_MORE_DOCS) {
      return termPositions;
    }
  }
  fail("Expected positions enum for doc " + docid);
  return null; // unreachable: fail() always throws
}
Example 2: testReuseDocsEnumNoReuse
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
public void testReuseDocsEnumNoReuse() throws IOException {
  Directory dir = newDirectory();
  Codec cp = TestUtil.alwaysPostingsFormat(new Lucene40RWPostingsFormat());
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
      newIndexWriterConfig(new MockAnalyzer(random())).setCodec(cp));
  int numdocs = atLeast(20);
  createRandomIndex(numdocs, writer, random());
  writer.commit();

  DirectoryReader open = DirectoryReader.open(dir);
  for (AtomicReaderContext ctx : open.leaves()) {
    AtomicReader indexReader = ctx.reader();
    Terms terms = indexReader.terms("body");
    TermsEnum iterator = terms.iterator(null);
    IdentityHashMap<DocsEnum, Boolean> enums = new IdentityHashMap<>();
    MatchNoBits bits = new Bits.MatchNoBits(indexReader.maxDoc());
    while (iterator.next() != null) {
      DocsEnum docs = iterator.docs(
          random().nextBoolean() ? bits : new Bits.MatchNoBits(indexReader.maxDoc()),
          null,
          random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
      enums.put(docs, true);
    }
    assertEquals(terms.size(), enums.size());
  }
  IOUtils.close(writer, open, dir);
}
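A note on the design: the IdentityHashMap is the point of the test. It distinguishes enums by object identity, so the final assertEquals(terms.size(), enums.size()) only holds if the Lucene40 postings format handed back a brand-new DocsEnum for every term instead of reusing the one passed in, which is exactly the no-reuse behavior the test name promises.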
Example 3: SpatialScorer
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
public SpatialScorer(AtomicReaderContext readerContext, Bits acceptDocs, SpatialWeight w, float qWeight) throws IOException {
  super(w);
  this.weight = w;
  this.qWeight = qWeight;
  this.reader = readerContext.reader();
  this.maxDoc = reader.maxDoc();
  this.acceptDocs = acceptDocs;
  latVals = latSource.getValues(weight.latContext, readerContext);
  lonVals = lonSource.getValues(weight.lonContext, readerContext);

  this.lonMin = SpatialDistanceQuery.this.lonMin;
  this.lonMax = SpatialDistanceQuery.this.lonMax;
  this.lon2Min = SpatialDistanceQuery.this.lon2Min;
  this.lon2Max = SpatialDistanceQuery.this.lon2Max;
  this.latMin = SpatialDistanceQuery.this.latMin;
  this.latMax = SpatialDistanceQuery.this.latMax;
  this.lon2 = SpatialDistanceQuery.this.lon2;
  this.calcDist = SpatialDistanceQuery.this.calcDist;

  this.latCenterRad = SpatialDistanceQuery.this.latCenter * DistanceUtils.DEGREES_TO_RADIANS;
  this.lonCenterRad = SpatialDistanceQuery.this.lonCenter * DistanceUtils.DEGREES_TO_RADIANS;
  this.latCenterRad_cos = this.calcDist ? Math.cos(latCenterRad) : 0;
  this.dist = SpatialDistanceQuery.this.dist;
  this.planetRadius = SpatialDistanceQuery.this.planetRadius;
}
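Worth noting about this constructor: it snapshots every relevant field of the enclosing SpatialDistanceQuery into the scorer. A scorer is created once per segment and then driven in a tight per-document loop, so copying the values locally, and precomputing latCenterRad_cos only when calcDist is set, keeps outer-class field accesses and Math.cos calls out of the hot path.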
Example 4: assertFreqPosAndPayload
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
private void assertFreqPosAndPayload(Term t, int[] expFreq, int[][] expPos,
    int[][][] expPay, int numAtomicReaders) throws IOException {
  List<AtomicReaderContext> leaves = r.getContext().leaves();
  for (AtomicReaderContext context : leaves) {
    AtomicReader reader = context.reader();
    DocsAndPositionsEnum termPositions = reader.termPositionsEnum(t);
    int docIndex = 0;
    while (termPositions.nextDoc() != DocsEnum.NO_MORE_DOCS) {
      assertEquals("Incorrect doc " + docIndex + " freq",
          expFreq[docIndex], termPositions.freq());
      assertEquals("Incorrect doc " + docIndex + " pos length",
          expPos[docIndex].length, termPositions.freq());
      int posIndex = 0;
      while (posIndex < termPositions.freq()) {
        int position = termPositions.nextPosition();
        assertEquals("Incorrect pos " + posIndex + " in doc "
            + docIndex, expPos[docIndex][posIndex], position);
        BytesRef payload = termPositions.getPayload();
        int[] expPayload = expPay[docIndex][posIndex];
        String[] payloadDesc = new String[] { "left", "right",
            "depth", "parent" };
        for (int j = 0; j < 4; j++) {
          assertEquals(
              "Incorrect " + payloadDesc[j] + " payload",
              expPayload[j],
              payload.bytes[payload.offset + j]);
        }
        posIndex++;
      }
      docIndex++;
    }
    numAtomicReaders++;
  }
  assertEquals("Expected one atomic reader", 1, numAtomicReaders);
}
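The payload checks assume each position carries a fixed four-byte payload interpreted as {left, right, depth, parent}, which is why the inner loop reads exactly four bytes starting at payload.offset.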
Example 5: initialResult
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
private FixedBitSet initialResult(AtomicReaderContext context, int logic, int[] index)
    throws IOException {
  AtomicReader reader = context.reader();
  FixedBitSet result = new FixedBitSet(reader.maxDoc());
  if (logic == AND) {
    result.or(getDISI(chain[index[0]], context));
    ++index[0];
  } else if (logic == ANDNOT) {
    result.or(getDISI(chain[index[0]], context));
    result.flip(0, reader.maxDoc()); // NOTE: may set bits for deleted docs.
    ++index[0];
  }
  return result;
}
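As the NOTE in the snippet warns, the flip used for ANDNOT can switch on bits belonging to deleted documents. That appears tolerable only because this is an initial result: whatever the caller subsequently intersects into the bit set, or the acceptDocs applied elsewhere in the chain, is expected to mask those bits back out before the set is used as a filter.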
Example 6: AllScorer
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
public AllScorer(AtomicReaderContext context, Bits acceptDocs, FunctionWeight w, float qWeight) throws IOException {
  super(w);
  this.weight = w;
  this.qWeight = qWeight;
  this.reader = context.reader();
  this.maxDoc = reader.maxDoc();
  this.acceptDocs = acceptDocs;
  vals = func.getValues(weight.context, context);
}
Example 7: BaseTermsEnumTraverser
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
public BaseTermsEnumTraverser(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  this.context = context;
  AtomicReader reader = context.reader();
  this.acceptDocs = acceptDocs;
  this.maxDoc = reader.maxDoc();
  Terms terms = reader.terms(fieldName);
  if (terms != null) {
    this.termsEnum = terms.iterator(null);
  }
}
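Note the null check: AtomicReader.terms(fieldName) returns null when a segment has no terms for the field, so termsEnum may legitimately remain unset and subclasses of this traverser have to tolerate that.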
Example 8: addTaxonomy
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
/**
* Takes the categories from the given taxonomy directory, and adds the
* missing ones to this taxonomy. Additionally, it fills the given
* {@link OrdinalMap} with a mapping from the original ordinal to the new
* ordinal.
*/
public void addTaxonomy(Directory taxoDir, OrdinalMap map) throws IOException {
  ensureOpen();
  DirectoryReader r = DirectoryReader.open(taxoDir);
  try {
    final int size = r.numDocs();
    final OrdinalMap ordinalMap = map;
    ordinalMap.setSize(size);
    int base = 0;
    TermsEnum te = null;
    DocsEnum docs = null;
    for (final AtomicReaderContext ctx : r.leaves()) {
      final AtomicReader ar = ctx.reader();
      final Terms terms = ar.terms(Consts.FULL);
      te = terms.iterator(te);
      while (te.next() != null) {
        FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(te.term().utf8ToString()));
        final int ordinal = addCategory(cp);
        docs = te.docs(null, docs, DocsEnum.FLAG_NONE);
        ordinalMap.addMapping(docs.nextDoc() + base, ordinal);
      }
      base += ar.maxDoc(); // no deletions, so we're ok
    }
    ordinalMap.addDone();
  } finally {
    r.close();
  }
}
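For context, here is a minimal sketch of how addTaxonomy might be called when merging one taxonomy index into another. It assumes the Lucene 4.x facet module, where MemoryOrdinalMap is one of the OrdinalMap implementations nested in DirectoryTaxonomyWriter; treat it as an illustration under those assumptions, not a canonical recipe.

import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter.MemoryOrdinalMap;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter.OrdinalMap;
import org.apache.lucene.store.Directory;

// destTaxoDir and srcTaxoDir are assumed to already hold taxonomy indexes.
void mergeTaxonomies(Directory destTaxoDir, Directory srcTaxoDir) throws Exception {
  DirectoryTaxonomyWriter destWriter = new DirectoryTaxonomyWriter(destTaxoDir);
  OrdinalMap map = new MemoryOrdinalMap();
  destWriter.addTaxonomy(srcTaxoDir, map); // fills map: source ordinal -> dest ordinal
  int[] srcToDest = map.getMap();          // e.g. to remap facet ordinals of migrated docs
  destWriter.close();
}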
Example 9: assertOrdinalsExist
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
private void assertOrdinalsExist(String field, IndexReader ir) throws IOException {
  for (AtomicReaderContext context : ir.leaves()) {
    AtomicReader r = context.reader();
    if (r.getBinaryDocValues(field) != null) {
      return; // not all segments must have this DocValues
    }
  }
  fail("no ordinals found for " + field);
}
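The early return makes the contract clear: the assertion succeeds as soon as any one segment exposes BinaryDocValues for the field (as the inline comment says, not every segment has to), and fail only fires when no segment does.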
Example 10: verify
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
private void verify(Directory dir) throws Exception {
  DirectoryReader ir = DirectoryReader.open(dir);
  for (AtomicReaderContext leaf : ir.leaves()) {
    AtomicReader leafReader = leaf.reader();
    assertTerms(leafReader.terms("field1docs"), leafReader.terms("field2freqs"), true);
    assertTerms(leafReader.terms("field3positions"), leafReader.terms("field4offsets"), true);
    assertTerms(leafReader.terms("field4offsets"), leafReader.terms("field5payloadsFixed"), true);
    assertTerms(leafReader.terms("field5payloadsFixed"), leafReader.terms("field6payloadsVariable"), true);
    assertTerms(leafReader.terms("field6payloadsVariable"), leafReader.terms("field7payloadsFixedOffsets"), true);
    assertTerms(leafReader.terms("field7payloadsFixedOffsets"), leafReader.terms("field8payloadsVariableOffsets"), true);
  }
  ir.close();
}
Example 11: scorer
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
@Override
public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  return new MatchAllScorer(context.reader(), acceptDocs, this, queryWeight);
}
Example 12: LuceneIndex
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
public LuceneIndex(File path, boolean readOnly, Analyzer analyzer, Index... indices) throws Exception {
  // fieldsToSkip.add(Variable.CONTENT);
  fieldsToSkip.add(Variable.BYTES);
  this.readOnly = readOnly;
  if (analyzer != null) {
    this.analyzer = analyzer;
  } else {
    this.analyzer = new StandardAnalyzer(LUCENEVERSION);
  }
  if (indices.length == 1) {
    getAlgorithmMap().put("Index0", (Algorithm) indices[0]);
    setLabel(((Algorithm) indices[0]).getLabel());
  } else if (indices.length > 1) {
    MultiIndex multiIndex = new MultiIndex(indices);
    getAlgorithmMap().put("Index0", multiIndex);
    setLabel(multiIndex.getLabel());
  }
  if (path == null) {
    // obtain a unique temp directory: create a temp file, then replace it with a directory
    path = File.createTempFile("lucene", "");
    path.delete();
    path.mkdir();
    path.deleteOnExit();
    readOnly = false; // a freshly created index must be writable
  }
  fields.add(Sample.ID);
  this.path = path;
  directory = FSDirectory.open(path);
  if (DirectoryReader.indexExists(directory)) {
    if (!readOnly) {
      if (IndexWriter.isLocked(directory)) {
        IndexWriter.unlock(directory);
      }
      indexWriter = new IndexWriter(directory, new IndexWriterConfig(LUCENEVERSION, analyzer));
    }
    for (AtomicReaderContext rc : getIndexSearcher().getIndexReader().leaves()) {
      AtomicReader ar = rc.reader();
      FieldInfos fis = ar.getFieldInfos();
      for (FieldInfo fi : fis) {
        fields.add(fi.name);
      }
    }
  } else if (!readOnly) {
    indexWriter = new IndexWriter(directory,
        new IndexWriterConfig(LUCENEVERSION, analyzer).setOpenMode(OpenMode.CREATE));
  }
  setSamples(new LuceneSampleMap(this));
}
Example 13: split
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
FixedBitSet[] split(AtomicReaderContext readerContext) throws IOException {
  AtomicReader reader = readerContext.reader();
  FixedBitSet[] docSets = new FixedBitSet[numPieces];
  for (int i = 0; i < docSets.length; i++) {
    docSets[i] = new FixedBitSet(reader.maxDoc());
  }
  Bits liveDocs = reader.getLiveDocs();

  Fields fields = reader.fields();
  Terms terms = fields == null ? null : fields.terms(field.getName());
  TermsEnum termsEnum = terms == null ? null : terms.iterator(null);
  if (termsEnum == null) return docSets;

  BytesRef term = null;
  DocsEnum docsEnum = null;
  CharsRef idRef = new CharsRef();
  for (;;) {
    term = termsEnum.next();
    if (term == null) break;

    // figure out the hash for the term
    // FUTURE: if conversion to strings costs too much, we could
    // specialize and use the hash function that can work over bytes.
    field.getType().indexedToReadable(term, idRef);
    String idString = idRef.toString();

    if (splitKey != null) {
      // todo have composite routers support these kind of things instead
      String part1 = getRouteKey(idString);
      if (part1 == null)
        continue;
      if (!splitKey.equals(part1)) {
        continue;
      }
    }

    int hash = 0;
    if (hashRouter != null) {
      hash = hashRouter.sliceHash(idString, null, null, null);
    }

    docsEnum = termsEnum.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
    for (;;) {
      int doc = docsEnum.nextDoc();
      if (doc == DocIdSetIterator.NO_MORE_DOCS) break;
      if (ranges == null) {
        docSets[currPartition].set(doc);
        currPartition = (currPartition + 1) % numPieces;
      } else {
        for (int i = 0; i < rangesArr.length; i++) { // inner-loop: use array here for extra speed.
          if (rangesArr[i].includes(hash)) {
            docSets[i].set(doc);
          }
        }
      }
    }
  }
  return docSets;
}
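Two routing strategies share this loop: when ranges is null, matching documents are dealt out round-robin across the numPieces bit sets via currPartition; otherwise the document's route key is hashed once per term and the doc is set in every partition whose hash range includes it. The plain array scan over rangesArr is deliberate, as the inline comment notes, because it sits on the inner loop.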
Example 14: LiveDocsReader
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
public LiveDocsReader(AtomicReaderContext context, FixedBitSet liveDocs) throws IOException {
  super(context.reader());
  this.liveDocs = liveDocs;
  this.numDocs = liveDocs.cardinality();
}
Example 15: getDocSet
import org.apache.lucene.index.AtomicReaderContext; // import the package/class the method depends on
/**
* Returns the set of document ids matching all queries.
* This method is cache-aware and attempts to retrieve the answer from the cache if possible.
* If the answer was not cached, it may have been inserted into the cache as a result of this call.
* This method can handle negative queries.
* <p>
* The DocSet returned should <b>not</b> be modified.
*/
public DocSet getDocSet(List<Query> queries) throws IOException {
  if (queries != null) {
    for (Query q : queries) {
      if (q instanceof ScoreFilter) {
        return getDocSetScore(queries);
      }
    }
  }

  ProcessedFilter pf = getProcessedFilter(null, queries);
  if (pf.answer != null) return pf.answer;

  DocSetCollector setCollector = new DocSetCollector(maxDoc() >> 6, maxDoc());
  Collector collector = setCollector;
  if (pf.postFilter != null) {
    pf.postFilter.setLastDelegate(collector);
    collector = pf.postFilter;
  }

  for (final AtomicReaderContext leaf : leafContexts) {
    final AtomicReader reader = leaf.reader();
    final Bits liveDocs = reader.getLiveDocs(); // TODO: the filter may already only have liveDocs...
    DocIdSet idSet = null;
    if (pf.filter != null) {
      idSet = pf.filter.getDocIdSet(leaf, liveDocs);
      if (idSet == null) continue;
    }
    DocIdSetIterator idIter = null;
    if (idSet != null) {
      idIter = idSet.iterator();
      if (idIter == null) continue;
    }

    collector.setNextReader(leaf);
    int max = reader.maxDoc();

    if (idIter == null) {
      for (int docid = 0; docid < max; docid++) {
        if (liveDocs != null && !liveDocs.get(docid)) continue;
        collector.collect(docid);
      }
    } else {
      for (int docid = -1; (docid = idIter.advance(docid + 1)) < max; ) {
        collector.collect(docid);
      }
    }
  }

  if (collector instanceof DelegatingCollector) {
    ((DelegatingCollector) collector).finish();
  }

  return setCollector.getDocSet();
}
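To round out the example, here is a hedged sketch of how a caller might use getDocSet, assuming a Solr SolrIndexSearcher instance; the helper name countMatches and the field/term are made up for illustration.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.SolrIndexSearcher;

// Illustrative helper: counts docs matching all filter queries,
// going through the filter cache when possible.
static int countMatches(SolrIndexSearcher searcher) throws IOException {
  List<Query> filters = new ArrayList<>();
  filters.add(new TermQuery(new Term("category", "books"))); // made-up field/term
  DocSet matches = searcher.getDocSet(filters);
  return matches.size(); // read-only: the returned DocSet must not be modified
}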