This article collects typical usage examples of the Java method org.apache.lucene.search.IndexSearcher.createNormalizedWeight. If you are wondering how IndexSearcher.createNormalizedWeight is used in practice, the curated code examples below may help. You can also read more about the enclosing class, org.apache.lucene.search.IndexSearcher.
The following shows 14 code examples of IndexSearcher.createNormalizedWeight, sorted by popularity by default.
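Before the examples, here is a minimal, self-contained sketch of the call pattern they all share (this applies to Lucene 5.x/6.x, where createNormalizedWeight still exists; the helper name countMatches is illustrative, not taken from the examples): a Query is normalized into a Weight once per searcher, and a per-segment Scorer is then pulled from that Weight.

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

// Hypothetical helper: counts matches by driving the Weight/Scorer API directly.
static int countMatches(IndexSearcher searcher, Query query) throws IOException {
    // needsScores = false: we only need matching doc ids, which lets the
    // Weight choose cheaper execution paths.
    Weight weight = searcher.createNormalizedWeight(query, false);
    int count = 0;
    for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
        Scorer scorer = weight.scorer(leaf); // per-segment; null means no matches here
        if (scorer == null) {
            continue;
        }
        DocIdSetIterator it = scorer.iterator();
        while (it.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            count++; // note: deleted docs are not filtered out in this sketch
        }
    }
    return count;
}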
Example 1: FiltersAggregatorFactory
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
public FiltersAggregatorFactory(String name, List<KeyedFilter> filters, boolean keyed, boolean otherBucket,
        String otherBucketKey, SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactories,
        Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactories, metaData);
    this.keyed = keyed;
    this.otherBucket = otherBucket;
    this.otherBucketKey = otherBucketKey;
    IndexSearcher contextSearcher = context.searcher();
    weights = new Weight[filters.size()];
    keys = new String[filters.size()];
    for (int i = 0; i < filters.size(); ++i) {
        KeyedFilter keyedFilter = filters.get(i);
        this.keys[i] = keyedFilter.key();
        Query filter = keyedFilter.filter().toFilter(context.getQueryShardContext());
        this.weights[i] = contextSearcher.createNormalizedWeight(filter, false);
    }
}
Example 2: exists
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
/**
 * Check whether one or more documents match the provided query.
 */
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
    final Weight weight = searcher.createNormalizedWeight(query, false);
    // the scorer API should be more efficient at stopping after the first
    // match than the bulk scorer API
    for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
        final Scorer scorer = weight.scorer(context);
        if (scorer == null) {
            continue;
        }
        final Bits liveDocs = context.reader().getLiveDocs();
        final DocIdSetIterator iterator = scorer.iterator();
        for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                return true;
            }
        }
    }
    return false;
}
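A hypothetical call site for this helper (the searcher, field name, and value are assumed for illustration):

// Returns as soon as the first live match is found.
if (exists(searcher, new TermQuery(new Term("status", "published")))) {
    // at least one live document matched the query
}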
Example 3: AdjacencyMatrixAggregatorFactory
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
public AdjacencyMatrixAggregatorFactory(String name, List<KeyedFilter> filters, String separator,
        SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactories,
        Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactories, metaData);
    IndexSearcher contextSearcher = context.searcher();
    this.separator = separator;
    weights = new Weight[filters.size()];
    keys = new String[filters.size()];
    for (int i = 0; i < filters.size(); ++i) {
        KeyedFilter keyedFilter = filters.get(i);
        this.keys[i] = keyedFilter.key();
        Query filter = keyedFilter.filter().toFilter(context.getQueryShardContext());
        this.weights[i] = contextSearcher.createNormalizedWeight(filter, false);
    }
}
Example 4: getLeafCollector
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);
    final BitSet parentDocs = parentFilter.getBitSet(ctx);
    final DocIdSetIterator childDocs = childDocsScorer != null ? childDocsScorer.iterator() : null;
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // if parentDoc is 0, this parent has no child docs (child docs always
            // appear before their parent doc), so we can skip:
            if (parentDoc == 0 || parentDocs == null || childDocs == null) {
                return;
            }
            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }
            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}
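The prevSetBit/advance logic above relies on Lucene's block-join layout: child documents are indexed immediately before their parent, so a parent at doc id p owns exactly the ids between the previous parent and p. A minimal sketch of indexing such a block (the document variables are assumed):

// Children first, parent last: IndexWriter.addDocuments assigns consecutive ids,
// e.g. child -> p-2, child -> p-1, parent -> p.
List<Document> block = new ArrayList<>();
block.add(childDoc1);
block.add(childDoc2);
block.add(parentDoc);
writer.addDocuments(block);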
Example 5: FilterAggregatorFactory
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
public FilterAggregatorFactory(String name, QueryBuilder filterBuilder, SearchContext context,
        AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactoriesBuilder, metaData);
    IndexSearcher contextSearcher = context.searcher();
    Query filter = filterBuilder.toQuery(context.getQueryShardContext());
    weight = contextSearcher.createNormalizedWeight(filter, false);
}
Example 6: innerDocs
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
/**
 * Get a {@link DocIdSetIterator} that matches the inner documents.
 */
public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException {
    final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx);
    Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false);
    Scorer s = weight.scorer(ctx);
    return s == null ? null : s.iterator();
}
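A sketch of how the returned iterator is typically consumed per segment (the leaf context variable and the per-document handling are assumed):

DocIdSetIterator inner = innerDocs(leafCtx);
if (inner != null) {
    for (int doc = inner.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = inner.nextDoc()) {
        // doc is the segment-local id of a matching inner (nested) document
    }
}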
Example 7: createWeight
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    if (needsScores == false && minScore == null) {
        return subQuery.createWeight(searcher, needsScores);
    }
    boolean subQueryNeedsScores = combineFunction != CombineFunction.REPLACE;
    Weight[] filterWeights = new Weight[filterFunctions.length];
    for (int i = 0; i < filterFunctions.length; ++i) {
        subQueryNeedsScores |= filterFunctions[i].function.needsScores();
        filterWeights[i] = searcher.createNormalizedWeight(filterFunctions[i].filter, false);
    }
    Weight subQueryWeight = subQuery.createWeight(searcher, subQueryNeedsScores);
    return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights, subQueryNeedsScores);
}
Example 8: createInternal
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
@Override
public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    IndexSearcher contextSearcher = context.searchContext().searcher();
    if (searcher != contextSearcher) {
        searcher = contextSearcher;
        weights = new Weight[filters.size()];
        for (int i = 0; i < filters.size(); ++i) {
            KeyedFilter keyedFilter = filters.get(i);
            this.weights[i] = contextSearcher.createNormalizedWeight(keyedFilter.filter, false);
        }
    }
    return new FiltersAggregator(name, factories, keys, weights, keyed, otherBucketKey, context, parent, pipelineAggregators, metaData);
}
Example 9: createInternal
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
@Override
public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    IndexSearcher contextSearcher = context.searchContext().searcher();
    if (searcher != contextSearcher) {
        searcher = contextSearcher;
        weight = contextSearcher.createNormalizedWeight(filter, false);
    }
    return new FilterAggregator(name, weight, factories, context, parent, pipelineAggregators, metaData);
}
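Note that Examples 8 and 9 cache the built Weight across calls and rebuild it only when the searcher changes: a Weight produced by createNormalizedWeight is tied to the IndexSearcher (and its reader) it was created from, so it cannot be reused once the searcher is swapped.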
Example 10: testNestedChildrenFilter
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
public void testNestedChildrenFilter() throws Exception {
    int numParentDocs = scaledRandomIntBetween(0, 32);
    int maxChildDocsPerParent = scaledRandomIntBetween(8, 16);
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numParentDocs; i++) {
        int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent);
        List<Document> docs = new ArrayList<>(numChildDocs + 1);
        for (int j = 0; j < numChildDocs; j++) {
            Document childDoc = new Document();
            childDoc.add(new StringField("type", "child", Field.Store.NO));
            docs.add(childDoc);
        }
        Document parentDoc = new Document();
        parentDoc.add(new StringField("type", "parent", Field.Store.NO));
        parentDoc.add(new LegacyIntField("num_child_docs", numChildDocs, Field.Store.YES));
        docs.add(parentDoc);
        writer.addDocuments(docs);
    }
    IndexReader reader = writer.getReader();
    writer.close();
    IndexSearcher searcher = new IndexSearcher(reader);
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent")));
    Query childFilter = new TermQuery(new Term("type", "child"));
    int checkedParents = 0;
    final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")), false);
    for (LeafReaderContext leaf : reader.leaves()) {
        DocIdSetIterator parents = parentsWeight.scorer(leaf).iterator();
        for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS; parentDoc = parents.nextDoc()) {
            int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue().intValue();
            hitContext.reset(null, leaf, parentDoc, searcher);
            NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter, hitContext);
            TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
            searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector);
            assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs));
            checkedParents++;
        }
    }
    assertThat(checkedParents, equalTo(numParentDocs));
    reader.close();
    dir.close();
}
Example 11: getFunctionScoreExplanation
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
public Explanation getFunctionScoreExplanation(IndexSearcher searcher, ScoreFunction scoreFunction) throws IOException {
    FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(new TermQuery(TERM), scoreFunction, 0.0f, CombineFunction.AVG, 100);
    Weight weight = searcher.createNormalizedWeight(functionScoreQuery, true);
    Explanation explanation = weight.explain(searcher.getIndexReader().leaves().get(0), 0);
    return explanation.getDetails()[1];
}
Example 12: getExplanation
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
protected Explanation getExplanation(IndexSearcher searcher, FiltersFunctionScoreQuery filtersFunctionScoreQuery) throws IOException {
    Weight weight = searcher.createNormalizedWeight(filtersFunctionScoreQuery, true);
    return weight.explain(searcher.getIndexReader().leaves().get(0), 0);
}
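Unlike the filter-oriented examples above, these two pass needsScores = true to createNormalizedWeight, since Weight.explain(...) has to produce the full scoring breakdown for the document.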
Example 13: getLeafCollector
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    // Reset parentFilter, so we resolve the parentDocs for each new segment being searched
    this.parentFilter = null;
    final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    final IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    final Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);
    if (childDocsScorer == null) {
        childDocs = null;
    } else {
        childDocs = childDocsScorer.iterator();
    }
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // Here we translate the parent doc to the list of its nested docs and
            // call super.collect for every one of them, so they all get collected.
            // If parentDoc is 0, this parent has no child docs (child docs always
            // appear before their parent doc), so we can skip:
            if (parentDoc == 0 || childDocs == null) {
                return;
            }
            if (parentFilter == null) {
                // Aggs are instantiated in reverse order: innermost nested aggs first,
                // top-level aggs last. So when a nested 'nested' agg is parsed, its
                // closest parent nested agg hasn't been constructed yet. The trick is
                // to resolve it at the last moment, just before it is needed, and use
                // its child filter as our parent filter.
                // Additional note: this logic used to live in setNextReader(...), but
                // the assumption that agg instances are constructed in reverse doesn't
                // hold when buckets are constructed lazily during agg execution.
                Query parentFilterNotCached = findClosestNestedPath(parent());
                if (parentFilterNotCached == null) {
                    parentFilterNotCached = Queries.newNonNestedFilter();
                }
                parentFilter = context.searchContext().bitsetFilterCache().getBitSetProducer(parentFilterNotCached);
                parentDocs = parentFilter.getBitSet(ctx);
                if (parentDocs == null) {
                    // No parent docs in this segment; set childDocs to null so future
                    // invocations exit early, and return.
                    childDocs = null;
                    return;
                }
            }
            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }
            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}
Example 14: createWeight
import org.apache.lucene.search.IndexSearcher; // import the package/class required by the method
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    final Weight boundingBoxWeight;
    if (boundingBoxFilter != null) {
        boundingBoxWeight = searcher.createNormalizedWeight(boundingBoxFilter, false);
    } else {
        boundingBoxWeight = null;
    }
    return new ConstantScoreWeight(this) {
        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            final DocIdSetIterator approximation;
            if (boundingBoxWeight != null) {
                Scorer s = boundingBoxWeight.scorer(context);
                if (s == null) {
                    // if the approximation does not match anything, we're done
                    return null;
                }
                approximation = s.iterator();
            } else {
                approximation = DocIdSetIterator.all(context.reader().maxDoc());
            }
            final MultiGeoPointValues values = indexFieldData.load(context).getGeoPointValues();
            final TwoPhaseIterator twoPhaseIterator = new TwoPhaseIterator(approximation) {
                @Override
                public boolean matches() throws IOException {
                    final int doc = approximation.docID();
                    values.setDocument(doc);
                    final int length = values.count();
                    for (int i = 0; i < length; i++) {
                        GeoPoint point = values.valueAt(i);
                        if (distanceBoundingCheck.isWithin(point.lat(), point.lon())) {
                            double d = fixedSourceDistance.calculate(point.lat(), point.lon());
                            if (d >= inclusiveLowerPoint && d <= inclusiveUpperPoint) {
                                return true;
                            }
                        }
                    }
                    return false;
                }

                @Override
                public float matchCost() {
                    if (distanceBoundingCheck == GeoDistance.ALWAYS_INSTANCE) {
                        return 0.0f;
                    } else {
                        // TODO: is this right (up to 4 comparisons from GeoDistance.SimpleDistanceBoundingCheck)?
                        return 4.0f;
                    }
                }
            };
            return new ConstantScoreScorer(this, score(), twoPhaseIterator);
        }
    };
}
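This last example shows Lucene's standard two-phase iteration pattern: the bounding-box Weight supplies a cheap approximation that drives the iterator, TwoPhaseIterator.matches() runs the exact (more expensive) distance check only on those candidates, and matchCost() tells Lucene how expensive that check is relative to other clauses.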