This page collects typical usage examples of the Java method org.apache.lucene.search.IndexSearcher.setQueryCache. If you are wondering what IndexSearcher.setQueryCache does, how to call it, or where to find real-world uses, the curated examples below should help. You can also browse the other methods of the containing class, org.apache.lucene.search.IndexSearcher.
Below are 11 code examples of IndexSearcher.setQueryCache, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
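Before the examples, a quick orientation: passing null to setQueryCache disables query caching on a searcher, while passing a QueryCache implementation (usually together with a caching policy) enables it. Here is a minimal sketch of both patterns, assuming an already-open IndexReader; the LRU cache size and policy are illustrative, not values taken from the examples below.
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LRUQueryCache;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;

static IndexSearcher newCachedSearcher(IndexReader reader) {
    IndexSearcher searcher = new IndexSearcher(reader);
    // Cache up to 1000 queries / 64MB, and only cache queries that actually get reused.
    searcher.setQueryCache(new LRUQueryCache(1000, 64 * 1024 * 1024));
    searcher.setQueryCachingPolicy(new UsageTrackingQueryCachingPolicy());
    return searcher;
}

static IndexSearcher newUncachedSearcher(IndexReader reader) {
    IndexSearcher searcher = new IndexSearcher(reader);
    // null disables query caching entirely -- the pattern most examples below use.
    searcher.setQueryCache(null);
    return searcher;
}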
Example 1: loadQueries
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
private int loadQueries(IndexShard shard) {
shard.refresh("percolator_load_queries");
// NOTE: we acquire the searcher via the engine directly here since this is executed right
// before the shard is marked as POST_RECOVERY
try (Engine.Searcher searcher = shard.engine().acquireSearcher("percolator_load_queries")) {
Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
IndexSearcher indexSearcher = new IndexSearcher(searcher.reader());
indexSearcher.setQueryCache(null);
indexSearcher.search(query, queryCollector);
Map<BytesRef, Query> queries = queryCollector.queries();
for (Map.Entry<BytesRef, Query> entry : queries.entrySet()) {
Query previousQuery = percolateQueries.put(entry.getKey(), entry.getValue());
shardPercolateService.addedQuery(entry.getKey(), previousQuery, entry.getValue());
}
return queries.size();
} catch (Exception e) {
throw new PercolatorException(shardId.index(), "failed to load queries from percolator index", e);
}
}
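The setQueryCache(null) call above is deliberate: this search runs once per shard recovery, so caching its filters would only pollute a shared cache. A minimal standalone sketch of the same one-shot pattern, using a plain Lucene SimpleCollector in place of the Elasticsearch-specific QueriesLoaderCollector (the helper method itself is illustrative):
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimpleCollector;

static int countOnce(IndexReader reader, Query query) throws IOException {
    IndexSearcher indexSearcher = new IndexSearcher(reader);
    indexSearcher.setQueryCache(null); // one-shot search, nothing to reuse
    final int[] hits = new int[1];
    indexSearcher.search(query, new SimpleCollector() {
        @Override
        public void collect(int doc) {
            hits[0]++;
        }

        @Override
        public boolean needsScores() {
            return false;
        }
    });
    return hits[0];
}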
Example 2: getLeafCollector
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
Weight weight = searcher.createNormalizedWeight(childFilter, false);
Scorer childDocsScorer = weight.scorer(ctx);
final BitSet parentDocs = parentFilter.getBitSet(ctx);
final DocIdSetIterator childDocs = childDocsScorer != null ? childDocsScorer.iterator() : null;
return new LeafBucketCollectorBase(sub, null) {
@Override
public void collect(int parentDoc, long bucket) throws IOException {
// if parentDoc is 0 then this parent doesn't have child docs (child docs always
// appear before their parent doc), so we can skip:
if (parentDoc == 0 || parentDocs == null || childDocs == null) {
return;
}
final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
int childDocId = childDocs.docID();
if (childDocId <= prevParentDoc) {
childDocId = childDocs.advance(prevParentDoc + 1);
}
for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
collectBucket(sub, childDocId, bucket);
}
}
};
}
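The loop at the end relies on Lucene's block-join layout: a parent's child documents are indexed in the same block, immediately before the parent, so prevSetBit on the parent bitset bounds the block from below. A self-contained sketch of that arithmetic, with hypothetical doc IDs:
import org.apache.lucene.util.FixedBitSet;

// Block layout: docs 0..2 are children of parent 3, docs 4..5 are children of parent 6.
FixedBitSet parentDocs = new FixedBitSet(7);
parentDocs.set(3);
parentDocs.set(6);

int parentDoc = 6;
int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); // -> 3
// The children of parentDoc are exactly the docs in (prevParentDoc, parentDoc):
for (int childDoc = prevParentDoc + 1; childDoc < parentDoc; childDoc++) {
    System.out.println("doc " + childDoc + " is a child of doc " + parentDoc);
}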
Example 3: rewrite
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
@Override
public Query rewrite(IndexReader reader) throws IOException {
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
if (reader instanceof DirectoryReader) {
String joinField = ParentFieldMapper.joinField(parentType);
IndexSearcher indexSearcher = new IndexSearcher(reader);
indexSearcher.setQueryCache(null);
indexSearcher.setSimilarity(similarity);
IndexParentChildFieldData indexParentChildFieldData = parentChildIndexFieldData.loadGlobal((DirectoryReader) reader);
MultiDocValues.OrdinalMap ordinalMap = ParentChildIndexFieldData.getOrdinalMap(indexParentChildFieldData, parentType);
return JoinUtil.createJoinQuery(joinField, innerQuery, toQuery, indexSearcher, scoreMode,
ordinalMap, minChildren, maxChildren);
} else {
if (reader.leaves().isEmpty() && reader.numDocs() == 0) {
// the asserting reader passes down a MultiReader during rewrite, which makes this
// blow up: for this query to work we need a DirectoryReader, otherwise we
// can't load global ordinals. For that reason we simply check whether the reader
// has no leaves and rewrite to match nothing
return new MatchNoDocsQuery();
}
throw new IllegalStateException("can't load global ordinals for reader of type: " +
reader.getClass() + " must be a DirectoryReader");
}
}
Example 4: newSearcher
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
@Override
public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
IndexSearcher searcher = super.newSearcher(reader, previousReader);
searcher.setQueryCache(engineConfig.getQueryCache());
searcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
searcher.setSimilarity(engineConfig.getSimilarity());
return searcher;
}
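This override is typically installed through Lucene's SearcherFactory hook, so every searcher a SearcherManager hands out is configured the same way. A minimal sketch outside of Elasticsearch, with an illustrative shared cache and policy standing in for engineConfig:
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LRUQueryCache;
import org.apache.lucene.search.SearcherFactory;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;

static SearcherManager newManager(IndexWriter writer) throws IOException {
    // Create the cache and policy once so every refreshed searcher shares them.
    final LRUQueryCache cache = new LRUQueryCache(1000, 64 * 1024 * 1024);
    final UsageTrackingQueryCachingPolicy policy = new UsageTrackingQueryCachingPolicy();
    return new SearcherManager(writer, true, new SearcherFactory() {
        @Override
        public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
            IndexSearcher searcher = new IndexSearcher(reader);
            searcher.setQueryCache(cache);
            searcher.setQueryCachingPolicy(policy);
            return searcher;
        }
    });
}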
Example 5: testDuelSpecificQueries
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
public void testDuelSpecificQueries() throws Exception {
List<ParseContext.Document> documents = new ArrayList<>();
CommonTermsQuery commonTermsQuery = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 128);
commonTermsQuery.add(new Term("field", "quick"));
commonTermsQuery.add(new Term("field", "brown"));
commonTermsQuery.add(new Term("field", "fox"));
addQuery(commonTermsQuery, documents);
BlendedTermQuery blendedTermQuery = BlendedTermQuery.booleanBlendedQuery(new Term[]{new Term("field", "quick"),
new Term("field", "brown"), new Term("field", "fox")}, false);
addQuery(blendedTermQuery, documents);
SpanNearQuery spanNearQuery = new SpanNearQuery.Builder("field", true)
.addClause(new SpanTermQuery(new Term("field", "quick")))
.addClause(new SpanTermQuery(new Term("field", "brown")))
.addClause(new SpanTermQuery(new Term("field", "fox")))
.build();
addQuery(spanNearQuery, documents);
SpanNearQuery spanNearQuery2 = new SpanNearQuery.Builder("field", true)
.addClause(new SpanTermQuery(new Term("field", "the")))
.addClause(new SpanTermQuery(new Term("field", "lazy")))
.addClause(new SpanTermQuery(new Term("field", "doc")))
.build();
SpanOrQuery spanOrQuery = new SpanOrQuery(
spanNearQuery,
spanNearQuery2
);
addQuery(spanOrQuery, documents);
SpanNotQuery spanNotQuery = new SpanNotQuery(spanNearQuery, spanNearQuery);
addQuery(spanNotQuery, documents);
long lowerLong = randomIntBetween(0, 256);
long upperLong = lowerLong + randomIntBetween(0, 32);
addQuery(LongPoint.newRangeQuery("long_field", lowerLong, upperLong), documents);
indexWriter.addDocuments(documents);
indexWriter.close();
directoryReader = DirectoryReader.open(directory);
IndexSearcher shardSearcher = newSearcher(directoryReader);
// Disable query cache, because ControlQuery cannot be cached...
shardSearcher.setQueryCache(null);
Document document = new Document();
document.add(new TextField("field", "the quick brown fox jumps over the lazy dog", Field.Store.NO));
long randomLong = randomIntBetween((int) lowerLong, (int) upperLong);
document.add(new LongPoint("long_field", randomLong));
MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
duelRun(queryStore, memoryIndex, shardSearcher);
}
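The test matches the indexed queries against a single in-memory document via MemoryIndex.fromDocument. For orientation, a minimal sketch of MemoryIndex used on its own (the field name and text are illustrative):
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.TermQuery;

MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "the quick brown fox", new WhitespaceAnalyzer());
// search(...) returns a score greater than 0 only if the query matches the one document.
float score = memoryIndex.search(new TermQuery(new Term("field", "fox")));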
Example 6: LeafIndexLookup
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
public LeafIndexLookup(LeafReaderContext ctx) {
reader = ctx.reader();
parentReader = ReaderUtil.getTopLevelContext(ctx).reader();
indexSearcher = new IndexSearcher(parentReader);
indexSearcher.setQueryCache(null);
}
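The constructor builds a throwaway top-level searcher from a single leaf context so index-wide lookups can be issued from within one segment; caching is disabled because the searcher is discarded right away. A sketch of the same construction in isolation (the helper method is hypothetical):
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;

static int topLevelCount(LeafReaderContext ctx, Term term) throws IOException {
    IndexReader parentReader = ReaderUtil.getTopLevelContext(ctx).reader();
    IndexSearcher indexSearcher = new IndexSearcher(parentReader);
    indexSearcher.setQueryCache(null); // throwaway searcher, nothing worth caching
    return indexSearcher.count(new TermQuery(term));
}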
Example 7: wrap
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
/**
* If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher
* gets wrapped and a new {@link Engine.Searcher} instance is returned; otherwise the provided {@link Engine.Searcher} is returned.
*
* This is invoked each time an {@link Engine.Searcher} is requested to perform an operation (for example, a search).
*/
public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOException {
final ElasticsearchDirectoryReader elasticsearchDirectoryReader = ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
if (elasticsearchDirectoryReader == null) {
throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
}
NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(engineSearcher.getDirectoryReader());
DirectoryReader reader = wrap(nonClosingReaderWrapper);
if (reader != nonClosingReaderWrapper) {
if (reader.getCoreCacheKey() != elasticsearchDirectoryReader.getCoreCacheKey()) {
throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey, wrappers must override this method and delegate" +
" to the original readers core cache key. Wrapped readers can't be used as cache keys since their are used only per request which would lead to subtle bugs");
}
if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
// prevent somebody from wrapping with a non-filter reader
throw new IllegalStateException("wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't");
}
}
final IndexSearcher origIndexSearcher = engineSearcher.searcher();
final IndexSearcher innerIndexSearcher = new IndexSearcher(reader);
innerIndexSearcher.setQueryCache(origIndexSearcher.getQueryCache());
innerIndexSearcher.setQueryCachingPolicy(origIndexSearcher.getQueryCachingPolicy());
innerIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity(true));
// TODO: Right now IndexSearcher isn't wrapper friendly; when it becomes wrapper friendly we should revise this extension point.
// For example, if IndexSearcher#rewrite() is overridden, then IndexSearcher#createNormalizedWeight needs to be overridden as well.
// This needs to be fixed before we can allow the IndexSearcher from the Engine to be wrapped multiple times.
final IndexSearcher indexSearcher = wrap(innerIndexSearcher);
if (reader == nonClosingReaderWrapper && indexSearcher == innerIndexSearcher) {
return engineSearcher;
} else {
return new Engine.Searcher(engineSearcher.source(), indexSearcher) {
@Override
public void close() throws ElasticsearchException {
try {
reader().close();
// we close the reader to make sure wrappers can release resources if needed;
// our NonClosingReaderWrapper makes sure that the underlying reader is not actually closed
} catch (IOException e) {
throw new ElasticsearchException("failed to close reader", e);
} finally {
engineSearcher.close();
}
}
};
}
}
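The core-cache-key check above is what trips up most custom wrappers: Lucene's FilterDirectoryReader does not delegate getCoreCacheKey by default. A hedged sketch of a wrapper that satisfies both checks, assuming the Lucene 5/6 API these examples are built against (the class name is hypothetical, and the no-op SubReaderWrapper stands in for real filtering logic):
import java.io.IOException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.LeafReader;

final class DelegatingCacheKeyReader extends FilterDirectoryReader {

    DelegatingCacheKeyReader(DirectoryReader in) throws IOException {
        super(in, new SubReaderWrapper() {
            @Override
            public LeafReader wrap(LeafReader reader) {
                return reader; // a real wrapper would filter the leaf here
            }
        });
    }

    @Override
    protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
        return new DelegatingCacheKeyReader(in);
    }

    @Override
    public Object getCoreCacheKey() {
        // Delegate to the wrapped reader, exactly as the IllegalStateException above demands.
        return in.getCoreCacheKey();
    }
}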
Example 8: testBasics
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
public void testBasics() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
w.addDocument(new Document());
DirectoryReader r = DirectoryReader.open(w);
w.close();
ShardId shard = new ShardId("index", "_na_", 0);
r = ElasticsearchDirectoryReader.wrap(r, shard);
IndexSearcher s = new IndexSearcher(r);
s.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
Settings settings = Settings.builder()
.put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
.put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
.build();
IndicesQueryCache cache = new IndicesQueryCache(settings);
s.setQueryCache(cache);
QueryCacheStats stats = cache.getStats(shard);
assertEquals(0L, stats.getCacheSize());
assertEquals(0L, stats.getCacheCount());
assertEquals(0L, stats.getHitCount());
assertEquals(0L, stats.getMissCount());
assertEquals(1, s.count(new DummyQuery(0)));
stats = cache.getStats(shard);
assertEquals(1L, stats.getCacheSize());
assertEquals(1L, stats.getCacheCount());
assertEquals(0L, stats.getHitCount());
assertEquals(1L, stats.getMissCount());
for (int i = 1; i < 20; ++i) {
assertEquals(1, s.count(new DummyQuery(i)));
}
stats = cache.getStats(shard);
assertEquals(10L, stats.getCacheSize());
assertEquals(20L, stats.getCacheCount());
assertEquals(0L, stats.getHitCount());
assertEquals(20L, stats.getMissCount());
s.count(new DummyQuery(10));
stats = cache.getStats(shard);
assertEquals(10L, stats.getCacheSize());
assertEquals(20L, stats.getCacheCount());
assertEquals(1L, stats.getHitCount());
assertEquals(20L, stats.getMissCount());
IOUtils.close(r, dir);
// got emptied, but no changes to other metrics
stats = cache.getStats(shard);
assertEquals(0L, stats.getCacheSize());
assertEquals(20L, stats.getCacheCount());
assertEquals(1L, stats.getHitCount());
assertEquals(20L, stats.getMissCount());
cache.onClose(shard);
// forgot everything
stats = cache.getStats(shard);
assertEquals(0L, stats.getCacheSize());
assertEquals(0L, stats.getCacheCount());
assertEquals(0L, stats.getHitCount());
assertEquals(0L, stats.getMissCount());
cache.close(); // this triggers some assertions
}
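For reference, the four counters asserted above relate as follows: cacheCount only ever grows (entries cached so far), cacheSize is what is currently held, and hitCount/missCount track lookups. A tiny illustrative helper for the hit rate, using the getters the test exercises (the import path is an assumption about where QueryCacheStats lives):
import org.elasticsearch.index.cache.query.QueryCacheStats;

static double hitRate(QueryCacheStats stats) {
    long lookups = stats.getHitCount() + stats.getMissCount();
    // At the end of the run above: 1 hit out of 21 lookups, roughly 0.048.
    return lookups == 0 ? 0.0 : (double) stats.getHitCount() / lookups;
}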
Example 9: testStatsOnEviction
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
public void testStatsOnEviction() throws IOException {
Directory dir1 = newDirectory();
IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig());
w1.addDocument(new Document());
DirectoryReader r1 = DirectoryReader.open(w1);
w1.close();
ShardId shard1 = new ShardId("index", "_na_", 0);
r1 = ElasticsearchDirectoryReader.wrap(r1, shard1);
IndexSearcher s1 = new IndexSearcher(r1);
s1.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
Directory dir2 = newDirectory();
IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig());
w2.addDocument(new Document());
DirectoryReader r2 = DirectoryReader.open(w2);
w2.close();
ShardId shard2 = new ShardId("index", "_na_", 1);
r2 = ElasticsearchDirectoryReader.wrap(r2, shard2);
IndexSearcher s2 = new IndexSearcher(r2);
s2.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
Settings settings = Settings.builder()
.put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
.put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
.build();
IndicesQueryCache cache = new IndicesQueryCache(settings);
s1.setQueryCache(cache);
s2.setQueryCache(cache);
assertEquals(1, s1.count(new DummyQuery(0)));
for (int i = 1; i <= 20; ++i) {
assertEquals(1, s2.count(new DummyQuery(i)));
}
QueryCacheStats stats1 = cache.getStats(shard1);
assertEquals(0L, stats1.getCacheSize());
assertEquals(1L, stats1.getCacheCount());
// this used to fail because we were evicting an empty cache on
// the segment from r1
IOUtils.close(r1, dir1);
cache.onClose(shard1);
IOUtils.close(r2, dir2);
cache.onClose(shard2);
cache.close(); // this triggers some assertions
}
Example 10: getLeafCollector
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
// Reset parentFilter, so we resolve the parentDocs for each new segment being searched
this.parentFilter = null;
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(childFilter, false);
Scorer childDocsScorer = weight.scorer(ctx);
if (childDocsScorer == null) {
childDocs = null;
} else {
childDocs = childDocsScorer.iterator();
}
return new LeafBucketCollectorBase(sub, null) {
@Override
public void collect(int parentDoc, long bucket) throws IOException {
// here we translate the parent doc to the list of its nested child docs, and then call super.collect for every one of them so they'll be collected
// if parentDoc is 0 then this parent doesn't have child docs (child docs always appear before their parent doc), so we can skip:
if (parentDoc == 0 || childDocs == null) {
return;
}
if (parentFilter == null) {
// The aggs are instantiated in reverse: first the innermost nested aggs, and lastly the top-level aggs.
// So at the time a nested 'nested' agg is parsed, its closest parent nested agg hasn't been constructed yet.
// The trick is to resolve the parent filter at the last moment, just before it is needed, so we can use
// the closest parent's child filter as our parent filter.
// Additional NOTE: this logic used to be performed in the setNextReader(...) method, but the assumption
// that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during
// aggs execution
Query parentFilterNotCached = findClosestNestedPath(parent());
if (parentFilterNotCached == null) {
parentFilterNotCached = Queries.newNonNestedFilter();
}
parentFilter = context.searchContext().bitsetFilterCache().getBitSetProducer(parentFilterNotCached);
parentDocs = parentFilter.getBitSet(ctx);
if (parentDocs == null) {
// There are no parent docs in this segment; set childDocs to null so future invocations exit early, and return.
childDocs = null;
return;
}
}
final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
int childDocId = childDocs.docID();
if (childDocId <= prevParentDoc) {
childDocId = childDocs.advance(prevParentDoc + 1);
}
for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
collectBucket(sub, childDocId, bucket);
}
}
};
}
Example 11: wrap
import org.apache.lucene.search.IndexSearcher; // import the package/class the method depends on
/**
* If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher
* gets wrapped and a new {@link Engine.Searcher} instance is returned; otherwise the provided {@link Engine.Searcher} is returned.
*
* This is invoked each time an {@link Engine.Searcher} is requested to perform an operation (for example, a search).
*/
public final Engine.Searcher wrap(EngineConfig engineConfig, final Engine.Searcher engineSearcher) throws IOException {
final ElasticsearchDirectoryReader elasticsearchDirectoryReader = ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
if (elasticsearchDirectoryReader == null) {
throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
}
if (wrapper == null) {
return engineSearcher;
}
NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(engineSearcher.getDirectoryReader());
DirectoryReader reader = wrapper.wrap(nonClosingReaderWrapper);
if (reader != nonClosingReaderWrapper) {
if (reader.getCoreCacheKey() != elasticsearchDirectoryReader.getCoreCacheKey()) {
throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey, wrappers must override this method and delegate" +
" to the original readers core cache key. Wrapped readers can't be used as cache keys since their are used only per request which would lead to subtle bugs");
}
if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
// prevent somebody from wrapping with a non-filter reader
throw new IllegalStateException("wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't");
}
}
final IndexSearcher innerIndexSearcher = new IndexSearcher(reader);
innerIndexSearcher.setQueryCache(engineConfig.getQueryCache());
innerIndexSearcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
innerIndexSearcher.setSimilarity(engineConfig.getSimilarity());
// TODO: Right now IndexSearcher isn't wrapper friendly; when it becomes wrapper friendly we should revise this extension point.
// For example, if IndexSearcher#rewrite() is overridden, then IndexSearcher#createNormalizedWeight needs to be overridden as well.
// This needs to be fixed before we can allow the IndexSearcher from the Engine to be wrapped multiple times.
final IndexSearcher indexSearcher = wrapper.wrap(engineConfig, innerIndexSearcher);
if (reader == nonClosingReaderWrapper && indexSearcher == innerIndexSearcher) {
return engineSearcher;
} else {
return new Engine.Searcher(engineSearcher.source(), indexSearcher) {
@Override
public void close() throws ElasticsearchException {
try {
reader().close();
// we close the reader to make sure wrappers can release resources if needed;
// our NonClosingReaderWrapper makes sure that the underlying reader is not actually closed
} catch (IOException e) {
throw new ElasticsearchException("failed to close reader", e);
} finally {
engineSearcher.close();
}
}
};
}
}