本文整理汇总了Java中org.apache.lucene.search.DocIdSet.iterator方法的典型用法代码示例。如果您正苦于以下问题:Java DocIdSet.iterator方法的具体用法?Java DocIdSet.iterator怎么用?Java DocIdSet.iterator使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.search.DocIdSet
的用法示例。
在下文中一共展示了DocIdSet.iterator方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: scorer
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Creates a per-segment Scorer over child documents.
 * Returns null (Lucene's "no matches" signal) when the segment has no live
 * matching children or no global ordinals are available for the parent type.
 */
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    // Apply live docs up front so that deleted children cannot produce matching parents.
    final DocIdSet liveChildren = BitsFilteredDocIdSet.wrap(
            childrenFilter.getDocIdSet(context, null), context.reader().getLiveDocs());
    if (Lucene.isEmpty(liveChildren)) {
        return null;
    }
    final DocIdSetIterator children = liveChildren.iterator();
    if (children == null) {
        return null;
    }
    final SortedDocValues ordinals = globalIfd.load(context).getOrdinalsValues(parentType);
    if (ordinals == null) {
        return null;
    }
    return new ChildScorer(this, parentIdxs, scores, children, ordinals);
}
示例2: scorer
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Creates a per-segment Scorer that walks live child documents and maps them
 * to parent ordinals. Returns null when nothing can match in this segment.
 */
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet children = childrenFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(children)) {
        return null;
    }
    final SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (globalValues == null) {
        return null;
    }
    // Apply live docs so that deleted children cannot produce matching parents.
    children = BitsFilteredDocIdSet.wrap(children, context.reader().getLiveDocs());
    final DocIdSetIterator it = children.iterator();
    if (it == null) {
        return null;
    }
    final ChildrenDocIdIterator childrenDocIdIterator =
            new ChildrenDocIdIterator(it, parentOrds, globalValues);
    return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
}
示例3: docIdSetToCache
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Normalizes a DocIdSet for caching: null or empty sets become EMPTY, an
 * existing FixedBitSet is reused as-is, and anything else is copied into a
 * fresh FixedBitSet (this cache requires FixedBitSet representations).
 */
@Override
protected DocIdSet docIdSetToCache(DocIdSet docIdSet, AtomicReader reader)
    throws IOException {
    if (docIdSet == null) {
        return EMPTY;
    }
    if (docIdSet instanceof FixedBitSet) {
        // Already in the representation the cache requires; no copy needed.
        return docIdSet;
    }
    final DocIdSetIterator it = docIdSet.iterator();
    if (it == null) {
        // A null iterator means the set matches no documents.
        return EMPTY;
    }
    final FixedBitSet bits = new FixedBitSet(reader.maxDoc());
    bits.or(it);
    return bits;
}
示例4: scorer
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Creates a per-segment Scorer for the graph-traversal query. The traversal
 * result set is computed lazily on first call and its top-level filter is
 * cached for subsequent segments.
 */
@Override
public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    if (filter == null) {
        boolean debug = rb != null && rb.isDebug();
        // Only pay for timestamps when debugging (consistent with the join-query scorer).
        long start = debug ? System.currentTimeMillis() : 0;
        resultSet = getDocSet();
        if (debug) {
            long delta = System.currentTimeMillis() - start;
            System.out.println("Graph Traverse took : " + delta + " ms.");
        }
        filter = resultSet.getTopFilter();
    }
    // TODO: understand this comment.
    // Although this set only includes live docs, other filters can be pushed down to queries.
    DocIdSet readerSet = filter.getDocIdSet(context, acceptDocs);
    // Create a scorer on the result set; if results from the right query are empty, use an empty iterator.
    return new GraphScorer(this, readerSet == null ? DocIdSetIterator.empty() : readerSet.iterator(), getBoost());
}
示例5: docIdSetToCache
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Converts an arbitrary DocIdSet into the form this cache stores: null and
 * iterator-less sets map to EMPTY_DOCIDSET, FixedBitSets pass through, and
 * everything else is materialized into a new FixedBitSet.
 */
@Override
protected DocIdSet docIdSetToCache(DocIdSet docIdSet, AtomicReader reader)
    throws IOException {
    if (docIdSet == null) {
        return EMPTY_DOCIDSET;
    }
    if (docIdSet instanceof FixedBitSet) {
        // Unlike CachingWrapperFilter we require FixedBitSets; this one already is.
        return docIdSet;
    }
    final DocIdSetIterator it = docIdSet.iterator();
    if (it == null) {
        // Null iterator == empty set per the DocIdSet contract.
        return EMPTY_DOCIDSET;
    }
    final FixedBitSet copy = new FixedBitSet(reader.maxDoc());
    copy.or(it);
    return copy;
}
示例6: createWeight
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Builds a constant-score Weight whose per-segment scorer iterates the
 * DocIdSet produced by {@code build(reader)}.
 */
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    return new ConstantScoreWeight(this) {
        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            final DocIdSet disi = build(context.reader());
            final DocIdSetIterator leafIt = disi.iterator();
            // DocIdSet.iterator() may return null to mean "no documents".
            // Returning a null Scorer is the correct "no matches" signal;
            // passing null into ConstantScoreScorer would NPE at search time.
            if (leafIt == null) {
                return null;
            }
            return new ConstantScoreScorer(this, score(), leafIt);
        }
    };
}
示例7: applyQueryDeletes
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Applies query-based deletes to a segment: for each (query, limit) pair,
 * deletes every matching live doc whose id is below the limit.
 * Writable live docs are initialized lazily, only once a delete is needed.
 *
 * @return the number of documents actually deleted
 */
private static long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, ReadersAndUpdates rld, final SegmentReader reader) throws IOException {
    long delCount = 0;
    final AtomicReaderContext readerContext = reader.getContext();
    boolean liveDocsInitialized = false;
    for (QueryAndLimit ent : queriesIter) {
        final DocIdSet docs = new QueryWrapperFilter(ent.query).getDocIdSet(readerContext, reader.getLiveDocs());
        if (docs == null) {
            continue;
        }
        final DocIdSetIterator it = docs.iterator();
        if (it == null) {
            continue;
        }
        final int limit = ent.limit;
        // NO_MORE_DOCS is Integer.MAX_VALUE, so the doc < limit bound also terminates the walk.
        for (int doc = it.nextDoc(); doc < limit; doc = it.nextDoc()) {
            if (!liveDocsInitialized) {
                rld.initWritableLiveDocs();
                liveDocsInitialized = true;
            }
            if (rld.delete(doc)) {
                delCount++;
            }
        }
    }
    return delCount;
}
示例8: scorer
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Creates a per-segment Scorer for parent documents. If a short-circuit
 * filter is available it is used directly; otherwise live parent docs are
 * matched against the collected parent ordinals.
 * Returns null when nothing can match in this segment.
 */
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    if (remaining == 0) {
        // Every parent has already been matched; nothing left to find.
        return null;
    }
    if (shortCircuitFilter != null) {
        final DocIdSet ids = shortCircuitFilter.getDocIdSet(context, null);
        if (Lucene.isEmpty(ids)) {
            return null;
        }
        final DocIdSetIterator it = ids.iterator();
        return it == null ? null : ConstantScorer.create(it, this, queryWeight);
    }
    DocIdSet parents = this.parentFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(parents)) {
        return null;
    }
    // We can't be sure live docs were already applied, so apply them here;
    // the "remaining" count-down (short circuit) logic then works as expected.
    parents = BitsFilteredDocIdSet.wrap(parents, context.reader().getLiveDocs());
    final DocIdSetIterator liveParents = parents.iterator();
    if (liveParents == null) {
        return null;
    }
    final LongBitSet parentOrds = collector.parentOrds;
    final SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (globalValues == null) {
        return null;
    }
    final DocIdSetIterator parentIdIterator = new ParentOrdIterator(liveParents, parentOrds, globalValues, this);
    return ConstantScorer.create(parentIdIterator, this, queryWeight);
}
示例9: getDISI
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Resolves a filter to a DocIdSetIterator for the given segment, mapping
 * both "null set" and "null iterator" to an empty iterator so callers never
 * have to null-check.
 */
private DocIdSetIterator getDISI(Filter filter, AtomicReaderContext context)
    throws IOException {
    // acceptDocs is deliberately null: live-doc filtering happens later via an additional filter.
    final DocIdSet docIdSet = filter.getDocIdSet(context, null);
    if (docIdSet == null) {
        return DocIdSetIterator.empty();
    }
    final DocIdSetIterator iter = docIdSet.iterator();
    return iter != null ? iter : DocIdSetIterator.empty();
}
示例10: tstFilterCard
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Asserts that the given filter matches exactly {@code expected} documents.
 * A null DocIdSet counts as zero matches.
 */
private void tstFilterCard(String mes, int expected, Filter filt)
    throws Exception {
    int actual = 0;
    final DocIdSet ids = filt.getDocIdSet(reader.getContext(), reader.getLiveDocs());
    if (ids != null) {
        final DocIdSetIterator disi = ids.iterator();
        for (int doc = disi.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = disi.nextDoc()) {
            actual++;
        }
    }
    assertEquals(mes, expected, actual);
}
示例11: testMissingTermAndField
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Verifies TermFilter behavior for an existing term, a missing term on an
 * existing field, and a missing field: the first yields exactly doc 0, the
 * latter two yield a null DocIdSet.
 */
public void testMissingTermAndField() throws Exception {
    final String fieldName = "field1";
    final Directory dir = newDirectory();
    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    final Document doc = new Document();
    doc.add(newStringField(fieldName, "value1", Field.Store.NO));
    writer.addDocument(doc);
    final IndexReader reader = SlowCompositeReaderWrapper.wrap(writer.getReader());
    assertTrue(reader.getContext() instanceof AtomicReaderContext);
    final AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
    writer.close();

    // Existing term on an existing field: exactly one hit, doc 0.
    DocIdSet idSet = termFilter(fieldName, "value1").getDocIdSet(context, context.reader().getLiveDocs());
    assertNotNull("must not be null", idSet);
    final DocIdSetIterator iter = idSet.iterator();
    assertEquals(iter.nextDoc(), 0);
    assertEquals(iter.nextDoc(), DocIdSetIterator.NO_MORE_DOCS);

    // Missing term, then missing field: the filter signals "no docs" with null.
    idSet = termFilter(fieldName, "value2").getDocIdSet(context, context.reader().getLiveDocs());
    assertNull("must be null", idSet);
    idSet = termFilter("field2", "value1").getDocIdSet(context, context.reader().getLiveDocs());
    assertNull("must be null", idSet);

    reader.close();
    dir.close();
}
示例12: scorer
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
// Builds a per-segment scorer for the join query. The cross-index join is
// executed lazily on the first call; its result set's top-level filter is
// cached in `filter` and reused for every subsequent segment.
@Override
public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
// First invocation: run the join to materialize the matching doc set.
if (filter == null) {
boolean debug = rb != null && rb.isDebug();
// Timestamps are only taken when debug output is requested.
long start = debug ? System.currentTimeMillis() : 0;
resultSet = getDocSet();
long end = debug ? System.currentTimeMillis() : 0;
if (debug) {
// Attach join statistics to the response's debug section.
SimpleOrderedMap<Object> dbg = new SimpleOrderedMap<>();
dbg.add("time", (end-start));
dbg.add("fromSetSize", fromSetSize); // the input
dbg.add("toSetSize", resultSet.size()); // the output
dbg.add("fromTermCount", fromTermCount);
dbg.add("fromTermTotalDf", fromTermTotalDf);
dbg.add("fromTermDirectCount", fromTermDirectCount);
dbg.add("fromTermHits", fromTermHits);
dbg.add("fromTermHitsTotalDf", fromTermHitsTotalDf);
dbg.add("toTermHits", toTermHits);
dbg.add("toTermHitsTotalDf", toTermHitsTotalDf);
dbg.add("toTermDirectCount", toTermDirectCount);
dbg.add("smallSetsDeferred", smallSetsDeferred);
dbg.add("toSetDocsAdded", resultListDocs);
// TODO: perhaps synchronize addDebug in the future...
rb.addDebug(dbg, "join", JoinQuery.this.toString());
}
filter = resultSet.getTopFilter();
}
// Although this set only includes live docs, other filters can be pushed down to queries.
DocIdSet readerSet = filter.getDocIdSet(context, acceptDocs);
// A null per-segment set means no matches; substitute an empty iterator for the scorer.
return new JoinScorer(this, readerSet == null ? DocIdSetIterator.empty() : readerSet.iterator(), getBoost());
}
示例13: doTestIteratorEqual
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Asserts that two DocIdSets enumerate exactly the same documents, first via
 * a pure next() walk and then via ten randomized walks that interleave
 * advance(target) with next().
 */
public void doTestIteratorEqual(DocIdSet a, DocIdSet b) throws IOException {
    DocIdSetIterator ia = a.iterator();
    DocIdSetIterator ib = b.iterator();
    // Pass 1: sequential next() must match doc-for-doc.
    while (true) {
        final int da = ia.nextDoc();
        final int db = ib.nextDoc();
        assertEquals(da, db);
        assertEquals(ia.docID(), ib.docID());
        if (da == DocIdSetIterator.NO_MORE_DOCS) {
            break;
        }
    }
    // Pass 2: randomized mixes of skipTo()-style advance() and next().
    for (int round = 0; round < 10; round++) {
        ia = a.iterator();
        ib = b.iterator();
        int doc = -1;
        while (true) {
            final int da;
            final int db;
            if (rand.nextBoolean()) {
                da = ia.nextDoc();
                db = ib.nextDoc();
            } else {
                // keep in mind future edge cases like probing (increase if necessary)
                final int target = doc + rand.nextInt(10) + 1;
                da = ia.advance(target);
                db = ib.advance(target);
            }
            assertEquals(da, db);
            assertEquals(ia.docID(), ib.docID());
            if (da == DocIdSetIterator.NO_MORE_DOCS) {
                break;
            }
            doc = da;
        }
    }
}
示例14: processFacetCategory
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Determines the number of hits for each facet across the main query.
 * Each facet name is run as an exact-phrase sub-query; its doc set is
 * intersected with the main-query bit set to produce the facet's hit count.
 *
 * @param facets facet results to update with hit counts
 * @param reader index reader used to resolve facet doc sets
 * @param mainQueryBitSet documents matched by the main query
 * @param searcher searcher associated with the reader
 * @throws ParseException if a facet name cannot be parsed as a query
 * @throws IOException on index access failure
 */
private void processFacetCategory(Collection<FacetResult> facets,
    IndexReader reader,
    OpenBitSetDISI mainQueryBitSet,
    IndexSearcher searcher)
    throws ParseException, IOException
{
    for (FacetResult f : facets)
    {
        long count = 0;
        String searchString = f.getFacetName();
        if (!searchString.trim().equals(""))
        {
            QueryParser subQueryParser = new QueryParser(Version.LUCENE_35, f.getField(), keywordAnalyzer);
            // Quote the facet name so it is matched as an exact phrase.
            searchString = "\"" + searchString + "\"";
            Query subQuery = subQueryParser.parse(searchString);
            QueryWrapperFilter subQueryWrapper = new QueryWrapperFilter(subQuery);
            log.debug("Fixed query in process facet catagory 2 = " + subQuery + " subQueryWrapper = " + subQueryWrapper);
            DocIdSet subQueryBits = subQueryWrapper.getDocIdSet(reader);
            // getDocIdSet may return null, and a set's iterator() may be null,
            // both meaning "no documents". Guard so an unmatched facet counts 0
            // instead of throwing an NPE in the OpenBitSetDISI constructor.
            DocIdSetIterator subQueryIterator = subQueryBits == null ? null : subQueryBits.iterator();
            if (subQueryIterator != null)
            {
                OpenBitSetDISI subQuerybitSet = new OpenBitSetDISI(subQueryIterator, reader.maxDoc());
                count = getFacetHitCount(mainQueryBitSet, subQuerybitSet);
            }
            log.debug("count = " + count);
        }
        else
        {
            log.error("bad search string " + searchString);
        }
        f.setHits(count);
    }
}
示例15: processFacetCategory
import org.apache.lucene.search.DocIdSet; //导入方法依赖的package包/类
/**
 * Determines the number of hits for each facet across the main query.
 * Each facet name is turned into an exact-phrase sub-query whose doc set is
 * intersected with the main-query bit set.
 *
 * @param facets facet results to update with hit counts
 * @param reader index reader used to resolve facet doc sets
 * @param mainQueryBits documents matched by the main query
 * @throws ParseException if a facet query cannot be parsed
 * @throws IOException on index access failure
 */
private void processFacetCategory(Collection<FacetResult> facets,
    IndexReader reader,
    DocIdSet mainQueryBits)
    throws ParseException, IOException
{
    // The main-query bit set is loop-invariant: build it once instead of
    // re-materializing it for every facet. A null set or null iterator means
    // "no documents", which we represent with an empty bit set.
    DocIdSetIterator mainIterator = mainQueryBits == null ? null : mainQueryBits.iterator();
    OpenBitSetDISI mainQuerybitSet = mainIterator != null
        ? new OpenBitSetDISI(mainIterator, maxNumberOfMainQueryHits)
        : new OpenBitSetDISI(maxNumberOfMainQueryHits);
    for (FacetResult f : facets)
    {
        QueryParser subQueryParser = new QueryParser(Version.LUCENE_35, f.getField(), analyzer);
        subQueryParser.setDefaultOperator(QueryParser.AND_OPERATOR);
        String fixedQuery = SearchHelper.prepareFacetSearchString(f.getFacetName(), false);
        // Quote the prepared facet string so it is matched as an exact phrase.
        fixedQuery = "\"" + fixedQuery + "\"";
        Query subQuery = subQueryParser.parse(fixedQuery);
        if (log.isDebugEnabled())
        {
            log.debug("Fiexed query in process Facet Category = " + fixedQuery);
        }
        QueryWrapperFilter subQueryWrapper = new QueryWrapperFilter(subQuery);
        DocIdSet subQueryBits = subQueryWrapper.getDocIdSet(reader);
        // Guard against null set / null iterator ("no documents") so an
        // unmatched facet counts 0 instead of throwing an NPE.
        DocIdSetIterator subIterator = subQueryBits == null ? null : subQueryBits.iterator();
        long count = 0;
        if (subIterator != null)
        {
            OpenBitSetDISI subQuerybitSet = new OpenBitSetDISI(subIterator, maxNumberOfMainQueryHits);
            count = getFacetHitCount(mainQuerybitSet, subQuerybitSet);
        }
        f.setHits(count);
    }
}