This article collects typical usage examples of the Java class org.apache.lucene.index.MultiReader. If you are wondering what MultiReader is for, how to use it, or what it looks like in real code, the curated examples below may help.
The MultiReader class belongs to the org.apache.lucene.index package. Fifteen code examples of the class are shown below, sorted by popularity by default.
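Before the examples, here is a minimal sketch of the basic pattern most of them rely on: wrapping several IndexReader instances in a single MultiReader so that one IndexSearcher sees them as a single logical index. This sketch is not taken from any of the projects below; it assumes a Lucene 5.x+ style API, and the index paths and class name are placeholders.
import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class MultiReaderSketch {
    public static void main(String[] args) throws Exception {
        // Open two existing, independent indexes (the paths are placeholders).
        Directory dir1 = FSDirectory.open(Paths.get("/tmp/index1"));
        Directory dir2 = FSDirectory.open(Paths.get("/tmp/index2"));
        IndexReader reader1 = DirectoryReader.open(dir1);
        IndexReader reader2 = DirectoryReader.open(dir2);

        // A MultiReader presents its sub-readers as one logical index;
        // the no-argument constructor, used by many tests below, is simply an empty reader.
        MultiReader multi = new MultiReader(reader1, reader2);
        IndexSearcher searcher = new IndexSearcher(multi);
        System.out.println("total docs: " + searcher.getIndexReader().numDocs());

        // Closing the MultiReader also closes the wrapped sub-readers (the default for this constructor).
        multi.close();
        dir1.close();
        dir2.close();
    }
}
Note that the examples below come from different projects and Lucene versions: the ones using AtomicReader and SlowCompositeReaderWrapper date from the Lucene 4.x API (AtomicReader was renamed LeafReader in 5.0), while the ones using LeafReaderContext target Lucene 5+.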
Example 1: testPostFilterDisablesCountOptimization
import org.apache.lucene.index.MultiReader; // import the required package/class
public void testPostFilterDisablesCountOptimization() throws Exception {
    TestSearchContext context = new TestSearchContext(null);
    context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
    context.setSize(0);
    context.setTask(new SearchTask(123L, "", "", "", null));
    final AtomicBoolean collected = new AtomicBoolean();
    IndexSearcher contextSearcher = new IndexSearcher(new MultiReader()) {
        protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
            collected.set(true);
            super.search(leaves, weight, collector);
        }
    };
    // size 0 with a plain match_all query lets the query phase shortcut to a count, so search() is never invoked
    QueryPhase.execute(context, contextSearcher);
    assertEquals(0, context.queryResult().topDocs().totalHits);
    assertFalse(collected.get());
    // a post filter disables the count optimization, so documents are actually collected
    context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery()));
    QueryPhase.execute(context, contextSearcher);
    assertEquals(0, context.queryResult().topDocs().totalHits);
    assertTrue(collected.get());
}
Example 2: testMinScoreDisablesCountOptimization
import org.apache.lucene.index.MultiReader; // import the required package/class
public void testMinScoreDisablesCountOptimization() throws Exception {
    TestSearchContext context = new TestSearchContext(null);
    context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
    context.setSize(0);
    context.setTask(new SearchTask(123L, "", "", "", null));
    final AtomicBoolean collected = new AtomicBoolean();
    IndexSearcher contextSearcher = new IndexSearcher(new MultiReader()) {
        protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
            collected.set(true);
            super.search(leaves, weight, collector);
        }
    };
    QueryPhase.execute(context, contextSearcher);
    assertEquals(0, context.queryResult().topDocs().totalHits);
    assertFalse(collected.get());
    // setting a minimum score disables the count optimization, so documents are actually collected
    context.minimumScore(1);
    QueryPhase.execute(context, contextSearcher);
    assertEquals(0, context.queryResult().topDocs().totalHits);
    assertTrue(collected.get());
}
Example 3: testRewriteEmptyReader
import org.apache.lucene.index.MultiReader; // import the required package/class
public void testRewriteEmptyReader() throws Exception {
    IndexService indexService = createIndex("test");
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties")
                .startObject("foo")
                    .field("type", "date")
                .endObject()
            .endObject()
        .endObject().endObject().string();
    indexService.mapperService().merge("type",
        new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
    IndexReader reader = new MultiReader();
    QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null, indexService.mapperService(),
        null, null, xContentRegistry(), null, reader, null);
    RangeQueryBuilder range = new RangeQueryBuilder("foo");
    // no values -> DISJOINT
    assertEquals(Relation.DISJOINT, range.getRelation(context));
}
Example 4: testRangeQuery
import org.apache.lucene.index.MultiReader; // import the required package/class
public void testRangeQuery() throws IOException {
    Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build();
    QueryShardContext context = new QueryShardContext(0,
        new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings),
        null, null, null, null, null, xContentRegistry(), null, null, () -> nowInMillis);
    MappedFieldType ft = createDefaultFieldType();
    ft.setName("field");
    String date1 = "2015-10-12T14:10:55";
    String date2 = "2016-04-28T11:33:52";
    long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date1).getMillis();
    // +999 ms so the inclusive upper bound covers the whole second
    long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date2).getMillis() + 999;
    ft.setIndexOptions(IndexOptions.DOCS);
    Query expected = new IndexOrDocValuesQuery(
        LongPoint.newRangeQuery("field", instant1, instant2),
        SortedNumericDocValuesField.newRangeQuery("field", instant1, instant2));
    assertEquals(expected,
        ft.rangeQuery(date1, date2, true, true, context).rewrite(new MultiReader()));
    ft.setIndexOptions(IndexOptions.NONE);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
        () -> ft.rangeQuery(date1, date2, true, true, context));
    assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
}
Example 5: process
import org.apache.lucene.index.MultiReader; // import the required package/class
/**
 * Process an intermediate form by carrying out, on the Lucene instance of
 * the shard, the deletes and the inserts (a ram index) in the form.
 * @param form the intermediate form containing deletes and a ram index
 * @param facetsConfig the facet configuration used to map ordinals, or null when faceting is not used
 * @throws IOException
 */
public void process(IntermediateForm form, FacetsConfig facetsConfig) throws IOException {
    if (facetsConfig != null) {
        DirectoryTaxonomyWriter.OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
        // merge the taxonomies
        taxoWriter.addTaxonomy(form.getTaxoDirectory(), map);
        int ordinalMap[] = map.getMap();
        DirectoryReader reader = DirectoryReader.open(form.getDirectory());
        try {
            List<AtomicReaderContext> leaves = reader.leaves();
            int numReaders = leaves.size();
            AtomicReader wrappedLeaves[] = new AtomicReader[numReaders];
            for (int i = 0; i < numReaders; i++) {
                wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, facetsConfig);
            }
            writer.addIndexes(new MultiReader(wrappedLeaves));
        } finally {
            reader.close();
        }
    } else {
        writer.addIndexes(new Directory[] { form.getDirectory() });
    }
    numForms++;
}
Example 6: getMergeReaders
import org.apache.lucene.index.MultiReader; // import the required package/class
@Override
public List<AtomicReader> getMergeReaders() throws IOException {
    if (unsortedReaders == null) {
        unsortedReaders = super.getMergeReaders();
        final AtomicReader atomicView;
        if (unsortedReaders.size() == 1) {
            atomicView = unsortedReaders.get(0);
        } else {
            final IndexReader multiReader = new MultiReader(unsortedReaders.toArray(new AtomicReader[unsortedReaders.size()]));
            atomicView = SlowCompositeReaderWrapper.wrap(multiReader);
        }
        docMap = sorter.sort(atomicView);
        sortedView = SortingAtomicReader.wrap(atomicView, docMap);
    }
    // a null doc map means that the readers are already sorted
    return docMap == null ? unsortedReaders : Collections.singletonList(sortedView);
}
Example 7: merge
import org.apache.lucene.index.MultiReader; // import the required package/class
/**
 * Merges the given taxonomy and index directories and commits the changes to
 * the given writers.
 */
public static void merge(Directory srcIndexDir, Directory srcTaxoDir, OrdinalMap map, IndexWriter destIndexWriter,
        DirectoryTaxonomyWriter destTaxoWriter, FacetsConfig srcConfig) throws IOException {
    // merge the taxonomies
    destTaxoWriter.addTaxonomy(srcTaxoDir, map);
    int ordinalMap[] = map.getMap();
    DirectoryReader reader = DirectoryReader.open(srcIndexDir);
    try {
        List<AtomicReaderContext> leaves = reader.leaves();
        int numReaders = leaves.size();
        AtomicReader wrappedLeaves[] = new AtomicReader[numReaders];
        for (int i = 0; i < numReaders; i++) {
            wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, srcConfig);
        }
        destIndexWriter.addIndexes(new MultiReader(wrappedLeaves));
        // commit changes to taxonomy and index respectively.
        destTaxoWriter.commit();
        destIndexWriter.commit();
    } finally {
        reader.close();
    }
}
Example 8: testEmptyIndex
import org.apache.lucene.index.MultiReader; // import the required package/class
public void testEmptyIndex() throws Exception {
    IndexSearcher empty = newSearcher(new MultiReader());
    Query query = new TermQuery(new Term("contents", "foo"));
    Sort sort = new Sort();
    sort.setSort(new SortedSetSortField("sortedset", false));
    TopDocs td = empty.search(query, null, 10, sort, true, true);
    assertEquals(0, td.totalHits);
    // for an empty index, any selector should work
    for (SortedSetSortField.Selector v : SortedSetSortField.Selector.values()) {
        sort.setSort(new SortedSetSortField("sortedset", false, v));
        td = empty.search(query, null, 10, sort, true, true);
        assertEquals(0, td.totalHits);
    }
}
Example 9: testEmptyIndex
import org.apache.lucene.index.MultiReader; // import the required package/class
public void testEmptyIndex() throws Exception {
    IndexSearcher empty = newSearcher(new MultiReader());
    Query query = new TermQuery(new Term("contents", "foo"));
    Sort sort = new Sort();
    sort.setSort(new SortedNumericSortField("sortednumeric", SortField.Type.LONG));
    TopDocs td = empty.search(query, null, 10, sort, true, true);
    assertEquals(0, td.totalHits);
    // for an empty index, any selector should work
    for (SortedNumericSelector.Type v : SortedNumericSelector.Type.values()) {
        sort.setSort(new SortedNumericSortField("sortednumeric", SortField.Type.LONG, false, v));
        td = empty.search(query, null, 10, sort, true, true);
        assertEquals(0, td.totalHits);
    }
}
Example 10: getIndexSearcher
import org.apache.lucene.index.MultiReader; // import the required package/class
public IndexSearcher getIndexSearcher(final ClusterSegment... clusterSegments) {
    final Set<KCluster> clusters;
    if (clusterSegments == null || clusterSegments.length == 0) {
        clusters = new HashSet<KCluster>(indexes.keySet());
    } else {
        clusters = new HashSet<KCluster>(clusterSegments.length);
        for (final ClusterSegment clusterSegment : clusterSegments) {
            clusters.add(new KClusterImpl(clusterSegment.getClusterId()));
        }
    }
    final Collection<IndexReader> readers = new ArrayList<IndexReader>(clusters.size());
    for (final KCluster cluster : clusters) {
        final LuceneIndex index = indexes.get(cluster);
        readers.add(index.nrtReader());
    }
    // combine the NRT readers of all selected clusters into one logical index
    try {
        return new SearcherFactory().newSearcher(new MultiReader(readers.toArray(new IndexReader[readers.size()])),
            null);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Example 11: shouldAddMultipleTermsFromRedisOnSrandmemberCommandWithExplicitCount
import org.apache.lucene.index.MultiReader; // import the required package/class
@Test
public void shouldAddMultipleTermsFromRedisOnSrandmemberCommandWithExplicitCount() throws SyntaxError, IOException {
    when(localParamsMock.get("command")).thenReturn("srandmember");
    when(localParamsMock.get("key")).thenReturn("simpleKey");
    when(localParamsMock.get("count")).thenReturn("2");
    when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
    when(jedisMock.srandmember(anyString(), anyInt())).thenReturn(Arrays.asList("123", "321"));
    when(requestMock.getSchema()).thenReturn(schema);
    when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
    redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
    final Query query = redisQParser.parse();
    verify(jedisMock).srandmember("simpleKey", 2);
    IndexSearcher searcher = new IndexSearcher(new MultiReader());
    final Set<Term> terms = extractTerms(searcher, query);
    Assert.assertEquals(2, terms.size());
}
Example 12: shouldAddTermsFromRedisOnSinterCommand
import org.apache.lucene.index.MultiReader; // import the required package/class
@Test
public void shouldAddTermsFromRedisOnSinterCommand() throws SyntaxError, IOException {
    when(localParamsMock.get("command")).thenReturn("sinter");
    when(localParamsMock.get("key")).thenReturn("key1");
    when(localParamsMock.get("key1")).thenReturn("key2");
    when(localParamsMock.getParameterNamesIterator()).thenReturn(Arrays.asList("command", "key", "key1").iterator());
    when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
    when(jedisMock.sinter(anyString(), anyString())).thenReturn(new HashSet<>(Arrays.asList("123", "321")));
    when(requestMock.getSchema()).thenReturn(schema);
    when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
    redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
    final Query query = redisQParser.parse();
    verify(jedisMock).sinter("key1", "key2");
    IndexSearcher searcher = new IndexSearcher(new MultiReader());
    final Set<Term> terms = extractTerms(searcher, query);
    Assert.assertEquals(2, terms.size());
}
Example 13: shouldReturnEmptyQueryOnEmptyListOfSinter
import org.apache.lucene.index.MultiReader; // import the required package/class
@Test
public void shouldReturnEmptyQueryOnEmptyListOfSinter() throws SyntaxError, IOException {
    when(localParamsMock.get("command")).thenReturn("sinter");
    when(localParamsMock.get("key")).thenReturn("key1");
    when(localParamsMock.get("key1")).thenReturn("key2");
    when(localParamsMock.getParameterNamesIterator()).thenReturn(Arrays.asList("command", "key", "key1").iterator());
    when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
    when(jedisMock.sinter(anyString(), anyString())).thenReturn(new HashSet<String>());
    when(requestMock.getSchema()).thenReturn(schema);
    when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
    redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
    final Query query = redisQParser.parse();
    verify(jedisMock).sinter("key1", "key2");
    IndexSearcher searcher = new IndexSearcher(new MultiReader());
    final Set<Term> terms = extractTerms(searcher, query);
    Assert.assertEquals(0, terms.size());
}
Example 14: shouldAddTermsFromRedisOnSdiffCommand
import org.apache.lucene.index.MultiReader; // import the required package/class
@Test
public void shouldAddTermsFromRedisOnSdiffCommand() throws SyntaxError, IOException {
    when(localParamsMock.get("command")).thenReturn("sdiff");
    when(localParamsMock.get("key")).thenReturn("key1");
    when(localParamsMock.get("key1")).thenReturn("key2");
    when(localParamsMock.getParameterNamesIterator()).thenReturn(Arrays.asList("command", "key", "key1").iterator());
    when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
    when(jedisMock.sdiff(anyString(), anyString())).thenReturn(new HashSet<>(Arrays.asList("123", "321")));
    when(requestMock.getSchema()).thenReturn(schema);
    when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
    redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
    final Query query = redisQParser.parse();
    verify(jedisMock).sdiff("key1", "key2");
    IndexSearcher searcher = new IndexSearcher(new MultiReader());
    final Set<Term> terms = extractTerms(searcher, query);
    Assert.assertEquals(2, terms.size());
}
Example 15: shouldReturnEmptyQueryOnEmptyListOfSdiff
import org.apache.lucene.index.MultiReader; // import the required package/class
@Test
public void shouldReturnEmptyQueryOnEmptyListOfSdiff() throws SyntaxError, IOException {
    when(localParamsMock.get("command")).thenReturn("sdiff");
    when(localParamsMock.get("key")).thenReturn("key1");
    when(localParamsMock.get("key1")).thenReturn("key2");
    when(localParamsMock.getParameterNamesIterator()).thenReturn(Arrays.asList("command", "key", "key1").iterator());
    when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
    when(jedisMock.sdiff(anyString(), anyString())).thenReturn(new HashSet<String>());
    when(requestMock.getSchema()).thenReturn(schema);
    when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
    redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
    final Query query = redisQParser.parse();
    verify(jedisMock).sdiff("key1", "key2");
    IndexSearcher searcher = new IndexSearcher(new MultiReader());
    final Set<Term> terms = extractTerms(searcher, query);
    Assert.assertEquals(0, terms.size());
}