This article collects typical usage examples of the Java class org.apache.lucene.search.Scorer. If you have been wondering what the Scorer class is for and how to use it, the curated class code examples below may help.
The Scorer class belongs to the org.apache.lucene.search package. A total of 15 code examples of the Scorer class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: findNestedObjectMapper
import org.apache.lucene.search.Scorer; // import the required package/class
/**
 * Returns the best nested {@link ObjectMapper} instance that is in the scope of the specified nested docId.
 */
public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, LeafReaderContext context) throws IOException {
    ObjectMapper nestedObjectMapper = null;
    for (ObjectMapper objectMapper : objectMappers().values()) {
        if (!objectMapper.nested().isNested()) {
            continue;
        }
        Query filter = objectMapper.nestedTypeFilter();
        if (filter == null) {
            continue;
        }
        // We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and
        // therefore is guaranteed to be a live doc.
        final Weight nestedWeight = filter.createWeight(sc.searcher(), false);
        Scorer scorer = nestedWeight.scorer(context);
        if (scorer == null) {
            continue;
        }
        if (scorer.iterator().advance(nestedDocId) == nestedDocId) {
            if (nestedObjectMapper == null) {
                nestedObjectMapper = objectMapper;
            } else {
                if (nestedObjectMapper.fullPath().length() < objectMapper.fullPath().length()) {
                    nestedObjectMapper = objectMapper;
                }
            }
        }
    }
    return nestedObjectMapper;
}
Example 2: getLeafCollector
import org.apache.lucene.search.Scorer; // import the required package/class
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException {
    perSegCollector = new PerSegmentCollects(ctx);
    entries.add(perSegCollector);
    // Deferring collector
    return new LeafBucketCollector() {
        @Override
        public void setScorer(Scorer scorer) throws IOException {
            perSegCollector.setScorer(scorer);
        }
        @Override
        public void collect(int doc, long bucket) throws IOException {
            perSegCollector.collect(doc, bucket);
        }
    };
}
Example 3: scorer
import org.apache.lucene.search.Scorer; // import the required package/class
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(childrenDocIdSet)) {
        return null;
    }
    SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (globalValues != null) {
        // we forcefully apply live docs here so that deleted children don't give matching parents
        childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs());
        DocIdSetIterator innerIterator = childrenDocIdSet.iterator();
        if (innerIterator != null) {
            ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator(
                    innerIterator, parentOrds, globalValues
            );
            return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
        }
    }
    return null;
}
Example 4: exists
import org.apache.lucene.search.Scorer; // import the required package/class
/**
 * Check whether there are one or more documents matching the provided query.
 */
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
    final Weight weight = searcher.createNormalizedWeight(query, false);
    // the scorer API should be more efficient at stopping after the first
    // match than the bulk scorer API
    for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
        final Scorer scorer = weight.scorer(context);
        if (scorer == null) {
            continue;
        }
        final Bits liveDocs = context.reader().getLiveDocs();
        final DocIdSetIterator iterator = scorer.iterator();
        for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                return true;
            }
        }
    }
    return false;
}
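For orientation, a minimal call-site sketch for this helper could look as follows. This is hypothetical: the `directory` variable, the field name and the term value are illustrative, and the snippet assumes it runs in the same class as exists(), inside a method that declares throws IOException.
// Hypothetical call site: `directory` is an already-built Lucene Directory.
try (DirectoryReader reader = DirectoryReader.open(directory)) {
    IndexSearcher searcher = new IndexSearcher(reader);
    // true only if at least one live (non-deleted) document matches the query
    boolean found = exists(searcher, new TermQuery(new Term("user", "kimchy")));
}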
Example 5: illegalScorer
import org.apache.lucene.search.Scorer; // import the required package/class
/**
 * Return a Scorer that throws an IllegalStateException
 * on all operations with the given message.
 */
public static Scorer illegalScorer(final String message) {
    return new Scorer(null) {
        @Override
        public float score() throws IOException {
            throw new IllegalStateException(message);
        }
        @Override
        public int freq() throws IOException {
            throw new IllegalStateException(message);
        }
        @Override
        public int docID() {
            throw new IllegalStateException(message);
        }
        @Override
        public DocIdSetIterator iterator() {
            throw new IllegalStateException(message);
        }
    };
}
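As a purely illustrative usage sketch (the `leafCollector` variable is assumed and is not part of the original example), such a scorer can be installed where scores must never be read, so that any accidental access fails fast:
// Hypothetical call site: any later call to score(), freq() or docID() will throw IllegalStateException.
leafCollector.setScorer(illegalScorer("scores are not available in this context"));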
Example 6: addMatchedQueries
import org.apache.lucene.search.Scorer; // import the required package/class
private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Query> namedQueries, List<String> matchedQueries) throws IOException {
    for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
        String name = entry.getKey();
        Query filter = entry.getValue();
        final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false);
        final Scorer scorer = weight.scorer(hitContext.readerContext());
        if (scorer == null) {
            continue;
        }
        final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        if (twoPhase == null) {
            if (scorer.iterator().advance(hitContext.docId()) == hitContext.docId()) {
                matchedQueries.add(name);
            }
        } else {
            if (twoPhase.approximation().advance(hitContext.docId()) == hitContext.docId() && twoPhase.matches()) {
                matchedQueries.add(name);
            }
        }
    }
}
Example 7: scorer
import org.apache.lucene.search.Scorer; // import the required package/class
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    final Scorer parentScorer = parentWeight.scorer(context);
    // no matches
    if (parentScorer == null) {
        return null;
    }
    BitSet parents = parentsFilter.getBitSet(context);
    if (parents == null) {
        // No matches
        return null;
    }
    int firstParentDoc = parentScorer.iterator().nextDoc();
    if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
        // No matches
        return null;
    }
    return new IncludeNestedDocsScorer(this, parentScorer, parents, firstParentDoc);
}
Example 8: IncludeNestedDocsScorer
import org.apache.lucene.search.Scorer; // import the required package/class
IncludeNestedDocsScorer(Weight weight, Scorer parentScorer, BitSet parentBits, int currentParentPointer) {
    super(weight);
    this.parentScorer = parentScorer;
    this.parentBits = parentBits;
    this.currentParentPointer = currentParentPointer;
    if (currentParentPointer == 0) {
        currentChildPointer = 0;
    } else {
        this.currentChildPointer = this.parentBits.prevSetBit(currentParentPointer - 1);
        if (currentChildPointer == -1) {
            // no previous set parent, we delete from doc 0
            currentChildPointer = 0;
        } else {
            currentChildPointer++; // we only care about children
        }
    }
    currentDoc = currentChildPointer;
}
Example 9: scorer
import org.apache.lucene.search.Scorer; // import the required package/class
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null);
    // we forcefully apply live docs here so that deleted children don't give matching parents
    childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs());
    if (Lucene.isEmpty(childrenDocSet)) {
        return null;
    }
    final DocIdSetIterator childIterator = childrenDocSet.iterator();
    if (childIterator == null) {
        return null;
    }
    SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (bytesValues == null) {
        return null;
    }
    return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues);
}
Example 10: BaseScorer
import org.apache.lucene.search.Scorer; // import the required package/class
BaseScorer(Weight weight, Scorer approximation, CheckedFunction<Integer, Query, IOException> percolatorQueries,
           IndexSearcher percolatorIndexSearcher) {
    super(weight);
    this.approximation = approximation;
    this.percolatorQueries = percolatorQueries;
    this.percolatorIndexSearcher = percolatorIndexSearcher;
}
Example 11: getScore
import org.apache.lucene.search.Scorer; // import the required package/class
private static double getScore(Scorer scorer) {
    try {
        return scorer.score();
    } catch (IOException e) {
        throw new ElasticsearchException("couldn't lookup score", e);
    }
}
Example 12: scorer
import org.apache.lucene.search.Scorer; // import the required package/class
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    if (remaining == 0) {
        return null;
    }
    if (shortCircuitFilter != null) {
        DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, null);
        if (!Lucene.isEmpty(docIdSet)) {
            DocIdSetIterator iterator = docIdSet.iterator();
            if (iterator != null) {
                return ConstantScorer.create(iterator, this, queryWeight);
            }
        }
        return null;
    }
    DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, null);
    if (!Lucene.isEmpty(parentDocIdSet)) {
        // We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining"
        // count down (short circuit) logic will then work as expected.
        parentDocIdSet = BitsFilteredDocIdSet.wrap(parentDocIdSet, context.reader().getLiveDocs());
        DocIdSetIterator innerIterator = parentDocIdSet.iterator();
        if (innerIterator != null) {
            LongBitSet parentOrds = collector.parentOrds;
            SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
            if (globalValues != null) {
                DocIdSetIterator parentIdIterator = new ParentOrdIterator(innerIterator, parentOrds, globalValues, this);
                return ConstantScorer.create(parentIdIterator, this, queryWeight);
            }
        }
    }
    return null;
}
Example 13: getLeafSearchScript
import org.apache.lucene.search.Scorer; // import the required package/class
@Override
public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {
    LeafSearchLookup leafLookup = lookup.getLeafSearchLookup(context);
    Map<String, Object> ctx = new HashMap<>();
    ctx.putAll(leafLookup.asMap());
    if (vars != null) {
        ctx.putAll(vars);
    }
    AbstractSearchScript leafSearchScript = new AbstractSearchScript() {
        @Override
        public Object run() {
            return script.apply(ctx);
        }
        @Override
        public void setNextVar(String name, Object value) {
            ctx.put(name, value);
        }
        @Override
        public void setScorer(Scorer scorer) {
            super.setScorer(scorer);
            ctx.put("_score", new ScoreAccessor(scorer));
        }
    };
    leafSearchScript.setLookup(leafLookup);
    return leafSearchScript;
}
Example 14: functionScorer
import org.apache.lucene.search.Scorer; // import the required package/class
private FiltersFunctionFactorScorer functionScorer(LeafReaderContext context) throws IOException {
    Scorer subQueryScorer = subQueryWeight.scorer(context);
    if (subQueryScorer == null) {
        return null;
    }
    final LeafScoreFunction[] functions = new LeafScoreFunction[filterFunctions.length];
    final Bits[] docSets = new Bits[filterFunctions.length];
    for (int i = 0; i < filterFunctions.length; i++) {
        FilterFunction filterFunction = filterFunctions[i];
        functions[i] = filterFunction.function.getLeafScoreFunction(context);
        Scorer filterScorer = filterWeights[i].scorer(context);
        docSets[i] = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer);
    }
    return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, needsScores);
}
Example 15: getAndLoadIfNotPresent
import org.apache.lucene.search.Scorer; // import the required package/class
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && index.getName().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
                + "] with cache of index [" + index.getName() + "]");
    }
    Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
        @Override
        public Cache<Query, Value> call() throws Exception {
            context.reader().addCoreClosedListener(BitsetFilterCache.this);
            return CacheBuilder.newBuilder().build();
        }
    });
    return filterToFbs.get(query, new Callable<Value>() {
        @Override
        public Value call() throws Exception {
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createNormalizedWeight(query, false);
            final Scorer s = weight.scorer(context);
            final BitSet bitSet;
            if (s == null) {
                bitSet = null;
            } else {
                bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
            }
            Value value = new Value(bitSet, shardId);
            listener.onCache(shardId, value.bitset);
            return value;
        }
    }).bitset;
}