This article collects typical usage examples of the Java method org.apache.lucene.search.Weight.scorer. If you are wondering what Weight.scorer does, how to call it, or where to find concrete examples, the curated method samples below should help. You can also read further about the enclosing class, org.apache.lucene.search.Weight.
The following lists 15 code examples of Weight.scorer, ordered by popularity.
Example 1: exists
import org.apache.lucene.search.Weight; // import the package/class the method depends on
/**
 * Check whether there are one or more documents matching the provided query.
 */
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
    final Weight weight = searcher.createNormalizedWeight(query, false);
    // the scorer API should be more efficient at stopping after the first
    // match than the bulk scorer API
    for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
        final Scorer scorer = weight.scorer(context);
        if (scorer == null) {
            continue;
        }
        final Bits liveDocs = context.reader().getLiveDocs();
        final DocIdSetIterator iterator = scorer.iterator();
        for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                return true;
            }
        }
    }
    return false;
}
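A minimal usage sketch, not taken from the original project: it assumes a Lucene 5.x/6.x classpath (matching the single-argument Weight.scorer call above) and places the call next to the exists method. The directory, field name, and query are illustrative only.
Directory dir = new RAMDirectory();
try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
    Document doc = new Document();
    doc.add(new TextField("body", "hello world", Field.Store.NO));
    writer.addDocument(doc);
}
try (IndexReader reader = DirectoryReader.open(dir)) {
    IndexSearcher searcher = new IndexSearcher(reader);
    boolean found = exists(searcher, new TermQuery(new Term("body", "hello")));
    // found == true: at least one live document matches the query
}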
Example 2: addMatchedQueries
import org.apache.lucene.search.Weight; // import the package/class the method depends on
private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Query> namedQueries, List<String> matchedQueries) throws IOException {
    for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
        String name = entry.getKey();
        Query filter = entry.getValue();
        final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false);
        final Scorer scorer = weight.scorer(hitContext.readerContext());
        if (scorer == null) {
            continue;
        }
        final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        if (twoPhase == null) {
            if (scorer.iterator().advance(hitContext.docId()) == hitContext.docId()) {
                matchedQueries.add(name);
            }
        } else {
            if (twoPhase.approximation().advance(hitContext.docId()) == hitContext.docId() && twoPhase.matches()) {
                matchedQueries.add(name);
            }
        }
    }
}
Example 3: findNestedObjectMapper
import org.apache.lucene.search.Weight; // import the package/class the method depends on
/**
 * Returns the best nested {@link ObjectMapper} instance that is in the scope of the specified nested docId.
 */
public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, LeafReaderContext context) throws IOException {
    ObjectMapper nestedObjectMapper = null;
    for (ObjectMapper objectMapper : objectMappers().values()) {
        if (!objectMapper.nested().isNested()) {
            continue;
        }
        Query filter = objectMapper.nestedTypeFilter();
        if (filter == null) {
            continue;
        }
        // We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and
        // therefore is guaranteed to be a live doc.
        final Weight nestedWeight = filter.createWeight(sc.searcher(), false);
        Scorer scorer = nestedWeight.scorer(context);
        if (scorer == null) {
            continue;
        }
        if (scorer.iterator().advance(nestedDocId) == nestedDocId) {
            if (nestedObjectMapper == null) {
                nestedObjectMapper = objectMapper;
            } else {
                if (nestedObjectMapper.fullPath().length() < objectMapper.fullPath().length()) {
                    nestedObjectMapper = objectMapper;
                }
            }
        }
    }
    return nestedObjectMapper;
}
Example 4: iterator
import org.apache.lucene.search.Weight; // import the package/class the method depends on
@Override
public DocIdSetIterator iterator() throws IOException {
    List<DocIdSetIterator> iterators = new ArrayList<>(weights.size() + 1);
    if (docIdSet != null) {
        DocIdSetIterator iter = docIdSet.iterator();
        if (iter == null) return null;
        iterators.add(iter);
    }
    for (Weight w : weights) {
        Scorer scorer = w.scorer(context, context.reader().getLiveDocs());
        if (scorer == null) return null;
        iterators.add(scorer);
    }
    if (iterators.size() == 0) return null;
    if (iterators.size() == 1) return iterators.get(0);
    if (iterators.size() == 2) return new DualFilterIterator(iterators.get(0), iterators.get(1));
    return new FilterIterator(iterators.toArray(new DocIdSetIterator[iterators.size()]));
}
Example 5: iterator
import org.apache.lucene.search.Weight; // import the package/class the method depends on
@Override
public DocIdSetIterator iterator() throws IOException {
    List<DocIdSetIterator> iterators = new ArrayList<DocIdSetIterator>(weights.size() + 1);
    if (docIdSet != null) {
        DocIdSetIterator iter = docIdSet.iterator();
        if (iter == null) return null;
        iterators.add(iter);
    }
    for (Weight w : weights) {
        Scorer scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
        if (scorer == null) return null;
        iterators.add(scorer);
    }
    if (iterators.size() == 0) return null;
    if (iterators.size() == 1) return iterators.get(0);
    if (iterators.size() == 2) return new DualFilterIterator(iterators.get(0), iterators.get(1));
    return new FilterIterator(iterators.toArray(new DocIdSetIterator[iterators.size()]));
}
Example 6: iterator
import org.apache.lucene.search.Weight; // import the package/class the method depends on
@Override
public DocIdSetIterator iterator() throws IOException {
    List<DocIdSetIterator> iterators = new ArrayList<DocIdSetIterator>(weights.size() + 1);
    if (docIdSet != null) {
        DocIdSetIterator iter = docIdSet.iterator();
        if (iter == null)
            return null;
        iterators.add(iter);
    }
    for (Weight w : weights) {
        Scorer scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
        if (scorer == null)
            return null;
        iterators.add(scorer);
    }
    if (iterators.size() == 0)
        return null;
    if (iterators.size() == 1)
        return iterators.get(0);
    if (iterators.size() == 2)
        return new DualFilterIterator(iterators.get(0), iterators.get(1));
    return new FilterIterator(iterators.toArray(new DocIdSetIterator[iterators.size()]));
}
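Examples 4 through 6 are the same filter iterator written against different Weight.scorer overloads (the two-argument and four-argument forms from the Lucene 4.x era). Whichever overload applies, the returned iterator is consumed the same way; the sketch below is hedged and not part of the original classes, and "filterDocIdSet" is an illustrative variable holding an instance of the enclosing DocIdSet implementation.
DocIdSetIterator it = filterDocIdSet.iterator();
if (it != null) {
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        // doc matches both the wrapped DocIdSet and every filter Weight
    }
}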
Example 7: getLeafCollector
import org.apache.lucene.search.Weight; // import the package/class the method depends on
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);
    final BitSet parentDocs = parentFilter.getBitSet(ctx);
    final DocIdSetIterator childDocs = childDocsScorer != null ? childDocsScorer.iterator() : null;
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // if parentDoc is 0 then this means that this parent doesn't have child docs
            // (b/c these always appear before the parent doc), so we can skip:
            if (parentDoc == 0 || parentDocs == null || childDocs == null) {
                return;
            }
            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }
            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}
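The prevSetBit/advance dance relies on Lucene's block-join layout for nested documents: all child docs of a block are indexed immediately before their parent. A small worked illustration (the doc ids are invented for the sketch; parentDocs and childDocs refer to the variables in the collector above):
// Assumed segment layout (illustrative only):
//   doc 0: child, doc 1: child, doc 2: parent, doc 3: child, doc 4: parent
int parentDoc = 4;                                         // parent currently being collected
int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);  // -> 2, the previous parent in the segment
int childDocId = childDocs.advance(prevParentDoc + 1);     // -> 3, the first child belonging to parent 4
// every child doc id in (prevParentDoc, parentDoc) is collected into the bucket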
Example 8: innerDocs
import org.apache.lucene.search.Weight; // import the package/class the method depends on
/**
 * Get a {@link DocIdSetIterator} that matches the inner documents.
 */
public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException {
    final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx);
    Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false);
    Scorer s = weight.scorer(ctx);
    return s == null ? null : s.iterator();
}
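A hedged usage sketch, not part of the original class: walking the inner documents segment by segment. The "nestedDocs" and "searcher" variables are illustrative and stand for an instance of the enclosing helper and an IndexSearcher over the same index.
for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
    DocIdSetIterator inner = nestedDocs.innerDocs(leaf);
    if (inner == null) {
        continue; // no inner documents in this segment
    }
    for (int doc = inner.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = inner.nextDoc()) {
        int globalDoc = leaf.docBase + doc; // doc is segment-local; add docBase for an index-wide id
    }
}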
Example 9: testSpanNearScorerSkipTo1
import org.apache.lucene.search.Weight; // import the package/class the method depends on
/**
 * not a direct test of NearSpans, but a demonstration of how/when
 * this causes problems
 */
public void testSpanNearScorerSkipTo1() throws Exception {
    SpanNearQuery q = makeQuery();
    Weight w = searcher.createNormalizedWeight(q);
    IndexReaderContext topReaderContext = searcher.getTopReaderContext();
    AtomicReaderContext leave = topReaderContext.leaves().get(0);
    Scorer s = w.scorer(leave, leave.reader().getLiveDocs());
    assertEquals(1, s.advance(1));
}
Example 10: testSpanNearScorerSkipTo1
import org.apache.lucene.search.Weight; // import the package/class the method depends on
/**
 * not a direct test of NearSpans, but a demonstration of how/when
 * this causes problems
 */
public void testSpanNearScorerSkipTo1() throws Exception {
    SpanNearQuery q = makeQuery();
    Weight w = searcher.createNormalizedWeight(q);
    IndexReaderContext topReaderContext = searcher.getTopReaderContext();
    AtomicReaderContext leave = topReaderContext.leaves().get(0);
    Scorer s = w.scorer(leave, true, false, leave.reader().getLiveDocs());
    assertEquals(1, s.advance(1));
}
Example 11: getInternalNestedIdentity
import org.apache.lucene.search.Weight; // import the package/class the method depends on
private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, LeafReaderContext subReaderContext, DocumentMapper documentMapper, ObjectMapper nestedObjectMapper) throws IOException {
    int currentParent = nestedSubDocId;
    ObjectMapper nestedParentObjectMapper;
    ObjectMapper current = nestedObjectMapper;
    String originalName = nestedObjectMapper.name();
    SearchHit.NestedIdentity nestedIdentity = null;
    do {
        Query parentFilter;
        nestedParentObjectMapper = documentMapper.findParentObjectMapper(current);
        if (nestedParentObjectMapper != null) {
            if (nestedParentObjectMapper.nested().isNested() == false) {
                current = nestedParentObjectMapper;
                continue;
            }
            parentFilter = nestedParentObjectMapper.nestedTypeFilter();
        } else {
            parentFilter = Queries.newNonNestedFilter();
        }
        Query childFilter = nestedObjectMapper.nestedTypeFilter();
        if (childFilter == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        final Weight childWeight = context.searcher().createNormalizedWeight(childFilter, false);
        Scorer childScorer = childWeight.scorer(subReaderContext);
        if (childScorer == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        DocIdSetIterator childIter = childScorer.iterator();
        BitSet parentBits = context.bitsetFilterCache().getBitSetProducer(parentFilter).getBitSet(subReaderContext);
        int offset = 0;
        int nextParent = parentBits.nextSetBit(currentParent);
        for (int docId = childIter.advance(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS; docId = childIter.nextDoc()) {
            offset++;
        }
        currentParent = nextParent;
        current = nestedObjectMapper = nestedParentObjectMapper;
        int currentPrefix = current == null ? 0 : current.name().length() + 1;
        nestedIdentity = new SearchHit.NestedIdentity(originalName.substring(currentPrefix), offset, nestedIdentity);
        if (current != null) {
            originalName = current.name();
        }
    } while (current != null);
    return nestedIdentity;
}
Example 12: hitsExecute
import org.apache.lucene.search.Weight; // import the package/class the method depends on
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) {
    if (hits.length == 0 ||
            // in case the request has only suggest, parsed query is null
            context.parsedQuery() == null) {
        return;
    }
    hits = hits.clone(); // don't modify the incoming hits
    Arrays.sort(hits, (a, b) -> Integer.compare(a.docId(), b.docId()));
    @SuppressWarnings("unchecked")
    List<String>[] matchedQueries = new List[hits.length];
    for (int i = 0; i < matchedQueries.length; ++i) {
        matchedQueries[i] = new ArrayList<>();
    }
    Map<String, Query> namedQueries = new HashMap<>(context.parsedQuery().namedFilters());
    if (context.parsedPostFilter() != null) {
        namedQueries.putAll(context.parsedPostFilter().namedFilters());
    }
    try {
        for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
            String name = entry.getKey();
            Query query = entry.getValue();
            int readerIndex = -1;
            int docBase = -1;
            Weight weight = context.searcher().createNormalizedWeight(query, false);
            Bits matchingDocs = null;
            final IndexReader indexReader = context.searcher().getIndexReader();
            for (int i = 0; i < hits.length; ++i) {
                SearchHit hit = hits[i];
                int hitReaderIndex = ReaderUtil.subIndex(hit.docId(), indexReader.leaves());
                if (readerIndex != hitReaderIndex) {
                    readerIndex = hitReaderIndex;
                    LeafReaderContext ctx = indexReader.leaves().get(readerIndex);
                    docBase = ctx.docBase;
                    // scorers can be costly to create, so reuse them across docs of the same segment
                    Scorer scorer = weight.scorer(ctx);
                    matchingDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), scorer);
                }
                if (matchingDocs.get(hit.docId() - docBase)) {
                    matchedQueries[i].add(name);
                }
            }
        }
        for (int i = 0; i < hits.length; ++i) {
            hits[i].matchedQueries(matchedQueries[i].toArray(new String[matchedQueries[i].size()]));
        }
    } catch (IOException e) {
        throw ExceptionsHelper.convertToElastic(e);
    } finally {
        context.clearReleasables(Lifetime.COLLECTION);
    }
}
Example 13: createWeight
import org.apache.lucene.search.Weight; // import the package/class the method depends on
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    final Weight childWeight = childFilter.createWeight(searcher, false);
    return new ConstantScoreWeight(this) {
        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            // Nested docs only reside in a single segment, so no need to evaluate all segments
            if (!context.reader().getCoreCacheKey().equals(leafReader.getCoreCacheKey())) {
                return null;
            }
            // If docId == 0 then this parent doc doesn't have child docs, because child docs are stored
            // before the parent doc; since the parent doc is 0 we can safely assume that there are no child docs.
            if (docId == 0) {
                return null;
            }
            final BitSet parents = parentFilter.getBitSet(context);
            final int firstChildDocId = parents.prevSetBit(docId - 1) + 1;
            // This parent doc doesn't have child docs, so we can exit early here:
            if (firstChildDocId == docId) {
                return null;
            }
            final Scorer childrenScorer = childWeight.scorer(context);
            if (childrenScorer == null) {
                return null;
            }
            DocIdSetIterator childrenIterator = childrenScorer.iterator();
            final DocIdSetIterator it = new DocIdSetIterator() {
                int doc = -1;

                @Override
                public int docID() {
                    return doc;
                }

                @Override
                public int nextDoc() throws IOException {
                    return advance(doc + 1);
                }

                @Override
                public int advance(int target) throws IOException {
                    target = Math.max(firstChildDocId, target);
                    if (target >= docId) {
                        // We're outside the child nested scope, so it is done
                        return doc = NO_MORE_DOCS;
                    } else {
                        int advanced = childrenIterator.advance(target);
                        if (advanced >= docId) {
                            // We're outside the child nested scope, so it is done
                            return doc = NO_MORE_DOCS;
                        } else {
                            return doc = advanced;
                        }
                    }
                }

                @Override
                public long cost() {
                    return Math.min(childrenIterator.cost(), docId - firstChildDocId);
                }
            };
            return new ConstantScoreScorer(this, score(), it);
        }
    };
}
Example 14: getLeafCollector
import org.apache.lucene.search.Weight; // import the package/class the method depends on
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    // Reset parentFilter, so we resolve the parentDocs for each new segment being searched
    this.parentFilter = null;
    final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    final IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    final Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);
    if (childDocsScorer == null) {
        childDocs = null;
    } else {
        childDocs = childDocsScorer.iterator();
    }
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // here we translate the parent doc to the list of its nested docs,
            // and then call super.collect for every one of them so they'll be collected.
            // if parentDoc is 0 then this means that this parent doesn't have child docs
            // (b/c these always appear before the parent doc), so we can skip:
            if (parentDoc == 0 || childDocs == null) {
                return;
            }
            if (parentFilter == null) {
                // The aggs are instantiated in reverse: first the innermost nested aggs and lastly the top level aggs.
                // So at the time a nested 'nested' agg is parsed, its closest parent nested agg hasn't been constructed yet.
                // The trick is to set the parent filter at the last moment, just before it is needed, so we can use the
                // closest parent's child filter as the parent filter.
                // Additional NOTE: Before, this logic was performed in the setNextReader(...) method, but the assumption
                // that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during
                // aggs execution
                Query parentFilterNotCached = findClosestNestedPath(parent());
                if (parentFilterNotCached == null) {
                    parentFilterNotCached = Queries.newNonNestedFilter();
                }
                parentFilter = context.searchContext().bitsetFilterCache().getBitSetProducer(parentFilterNotCached);
                parentDocs = parentFilter.getBitSet(ctx);
                if (parentDocs == null) {
                    // There are no parentDocs in the segment, so return and set childDocs to null, so we exit early for future invocations.
                    childDocs = null;
                    return;
                }
            }
            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }
            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}
Example 15: getInternalNestedIdentity
import org.apache.lucene.search.Weight; // import the package/class the method depends on
private InternalSearchHit.InternalNestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, LeafReaderContext subReaderContext, DocumentMapper documentMapper, ObjectMapper nestedObjectMapper) throws IOException {
    int currentParent = nestedSubDocId;
    ObjectMapper nestedParentObjectMapper;
    ObjectMapper current = nestedObjectMapper;
    String originalName = nestedObjectMapper.name();
    InternalSearchHit.InternalNestedIdentity nestedIdentity = null;
    do {
        Query parentFilter;
        nestedParentObjectMapper = documentMapper.findParentObjectMapper(current);
        if (nestedParentObjectMapper != null) {
            if (nestedParentObjectMapper.nested().isNested() == false) {
                current = nestedParentObjectMapper;
                continue;
            }
            parentFilter = nestedParentObjectMapper.nestedTypeFilter();
        } else {
            parentFilter = Queries.newNonNestedFilter();
        }
        Query childFilter = nestedObjectMapper.nestedTypeFilter();
        if (childFilter == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        final Weight childWeight = context.searcher().createNormalizedWeight(childFilter, false);
        Scorer childScorer = childWeight.scorer(subReaderContext);
        if (childScorer == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        DocIdSetIterator childIter = childScorer.iterator();
        BitSet parentBits = context.bitsetFilterCache().getBitSetProducer(parentFilter).getBitSet(subReaderContext);
        int offset = 0;
        int nextParent = parentBits.nextSetBit(currentParent);
        for (int docId = childIter.advance(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS; docId = childIter.nextDoc()) {
            offset++;
        }
        currentParent = nextParent;
        current = nestedObjectMapper = nestedParentObjectMapper;
        int currentPrefix = current == null ? 0 : current.name().length() + 1;
        nestedIdentity = new InternalSearchHit.InternalNestedIdentity(originalName.substring(currentPrefix), offset, nestedIdentity);
        if (current != null) {
            originalName = current.name();
        }
    } while (current != null);
    return nestedIdentity;
}