本文整理汇总了Java中org.apache.lucene.search.MultiCollector类的典型用法代码示例。如果您正苦于以下问题:Java MultiCollector类的具体用法?Java MultiCollector怎么用?Java MultiCollector使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
MultiCollector类属于org.apache.lucene.search包,在下文中一共展示了MultiCollector类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: execute
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
@SuppressWarnings("unchecked")
/**
 * Executes all registered commands in a single pass over the index.
 *
 * Collectors produced by every command are combined with MultiCollector so
 * the index is traversed only once; depending on the configured flags the
 * matching documents are additionally gathered into {@code docSet}.
 *
 * @throws IOException if a fatal error occurs while searching the index
 */
public void execute() throws IOException {
  final int nrOfCommands = commands.size();
  List<Collector> collectors = new ArrayList<>(nrOfCommands);
  for (Command command : commands) {
    // NOTE: a single command may contribute more than one collector.
    collectors.addAll(command.create());
  }
  ProcessedFilter filter = searcher.getProcessedFilter
      (queryCommand.getFilter(), queryCommand.getFilterList());
  Query query = QueryUtils.makeQueryable(queryCommand.getQuery());
  if (truncateGroups) {
    docSet = computeGroupedDocSet(query, filter, collectors);
  } else if (needDocset) {
    docSet = computeDocSet(query, filter, collectors);
  } else if (!collectors.isEmpty()) {
    // Size the array from the collector list, not the command count: the two
    // can differ because commands may create multiple collectors each.
    searchWithTimeLimiter(query, filter,
        MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()])));
  } else {
    searchWithTimeLimiter(query, filter, null);
  }
}
示例2: performSearch
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * Runs a match-all search over the given reader, accumulating facet counts
 * for a fixed set of facet requests, and returns the populated collector.
 */
private FacetsCollector performSearch(FacetIndexingParams iParams, TaxonomyReader tr, IndexReader ir,
    IndexSearcher searcher) throws IOException {
  // Assemble the facet requests to be accumulated during the search.
  List<FacetRequest> requests = new ArrayList<FacetRequest>();
  requests.add(new CountFacetRequest(new CategoryPath("Band"), 10));

  CountFacetRequest bandAtDepth = new CountFacetRequest(new CategoryPath("Band"), 10);
  bandAtDepth.setDepth(2);
  // makes it easier to check the results in the test.
  bandAtDepth.setResultMode(ResultMode.GLOBAL_FLAT);
  requests.add(bandAtDepth);

  requests.add(new CountFacetRequest(new CategoryPath("Author"), 10));
  requests.add(new CountFacetRequest(new CategoryPath("Band", "Rock & Pop"), 10));

  // Faceted search parameters indicate which facets we are interested in.
  FacetSearchParams searchParams = new FacetSearchParams(iParams, requests);

  // Collect the top matching documents and the facet counts in one pass.
  FacetsCollector facets = FacetsCollector.create(searchParams, ir, tr);
  TopScoreDocCollector hits = TopScoreDocCollector.create(10, true);
  searcher.search(new MatchAllDocsQuery(), MultiCollector.wrap(hits, facets));
  return facets;
}
示例3: verifyDrillDown
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * For every dimension in {@code expectedCounts}, drills down into that
 * dimension and asserts both that the query matches at least one document
 * and that the root facet count equals the expected value.
 */
private void verifyDrillDown(Map<String,Integer> expectedCounts, FacetIndexingParams fip, DirectoryReader indexReader,
    TaxonomyReader taxoReader, IndexSearcher searcher) throws IOException {
  // verify drill-down; iterate entries to avoid a second map lookup per key
  for (Map.Entry<String,Integer> expected : expectedCounts.entrySet()) {
    String dim = expected.getKey();
    CategoryPath drillDownCP = new CategoryPath(dim);
    FacetSearchParams fsp = new FacetSearchParams(fip, new CountFacetRequest(drillDownCP, 10));
    DrillDownQuery drillDown = new DrillDownQuery(fip, new MatchAllDocsQuery());
    drillDown.add(drillDownCP);
    TotalHitCountCollector total = new TotalHitCountCollector();
    FacetsCollector fc = FacetsCollector.create(fsp, indexReader, taxoReader);
    // Gather the hit count and the facet counts in a single search pass.
    searcher.search(drillDown, MultiCollector.wrap(fc, total));
    assertTrue("no results for drill-down query " + drillDown, total.getTotalHits() > 0);
    List<FacetResult> facetResults = fc.getFacetResults();
    assertEquals(1, facetResults.size());
    FacetResultNode rootNode = facetResults.get(0).getFacetResultNode();
    assertEquals("wrong count for " + dim, expected.getValue().intValue(), (int) rootNode.value);
  }
}
示例4: execute
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
@SuppressWarnings("unchecked")
/**
 * Executes all registered commands in a single pass over the index.
 *
 * Collectors produced by every command are combined with MultiCollector so
 * the index is traversed only once; depending on the configured flags the
 * matching documents are additionally gathered into {@code docSet}.
 *
 * @throws IOException if a fatal error occurs while searching the index
 */
public void execute() throws IOException {
  final int nrOfCommands = commands.size();
  List<Collector> collectors = new ArrayList<>(nrOfCommands);
  for (Command command : commands) {
    // NOTE: a single command may contribute more than one collector.
    collectors.addAll(command.create());
  }
  ProcessedFilter filter = searcher.getProcessedFilter
      (queryCommand.getFilter(), queryCommand.getFilterList());
  Query query = QueryUtils.makeQueryable(queryCommand.getQuery());
  if (truncateGroups) {
    docSet = computeGroupedDocSet(query, filter, collectors);
  } else if (needDocset) {
    docSet = computeDocSet(query, filter, collectors);
  } else if (!collectors.isEmpty()) {
    // Size the array from the collector list, not the command count: the two
    // can differ because commands may create multiple collectors each.
    searchWithTimeLimiter(query, filter,
        MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()])));
  } else {
    searchWithTimeLimiter(query, filter, null);
  }
}
示例5: computeGroupedDocSet
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * Runs the query and returns the set of group-head documents (the most
 * relevant document of each group), collected alongside any other collectors.
 */
private DocSet computeGroupedDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
  Command firstCommand = commands.get(0);
  AbstractAllGroupHeadsCollector groupHeads =
      TermAllGroupHeadsCollector.create(firstCommand.getKey(), firstCommand.getSortWithinGroup());
  if (collectors.isEmpty()) {
    searchWithTimeLimiter(query, filter, groupHeads);
  } else {
    // Append the group-heads collector and drive everything in one pass.
    collectors.add(groupHeads);
    Collector[] all = collectors.toArray(new Collector[collectors.size()]);
    searchWithTimeLimiter(query, filter, MultiCollector.wrap(all));
  }
  return new BitDocSet(groupHeads.retrieveGroupHeads(searcher.maxDoc()));
}
示例6: computeDocSet
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * Runs the query, accumulating the matching documents into a DocSet while
 * also feeding any additional collectors, and returns the resulting set.
 */
private DocSet computeDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
  final int maxDoc = searcher.maxDoc();
  final DocSetCollector docSetCollector;
  if (collectors.isEmpty()) {
    // No extra collectors: gather only the doc set.
    docSetCollector = new DocSetCollector(maxDoc >> 6, maxDoc);
  } else {
    // Delegate to the other collectors while also accumulating the doc set.
    Collector delegate = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
    docSetCollector = new DocSetDelegateCollector(maxDoc >> 6, maxDoc, delegate);
  }
  searchWithTimeLimiter(query, filter, docSetCollector);
  return docSetCollector.getDocSet();
}
示例7: testSumScoreAggregator
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
@Test
public void testSumScoreAggregator() throws Exception {
  // Build a small index where every document carries the category "a".
  Directory indexDir = newDirectory();
  Directory taxoDir = newDirectory();
  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
  IndexWriter writer = new IndexWriter(indexDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  FacetFields facetFields = new FacetFields(taxoWriter);
  final int numDocs = atLeast(30);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(new StringField("f", "v", Store.NO));
    facetFields.addFields(doc, Collections.singletonList(new CategoryPath("a")));
    writer.addDocument(doc);
  }
  taxoWriter.close();
  writer.close();

  DirectoryReader reader = DirectoryReader.open(indexDir);
  DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
  FacetSearchParams searchParams = new FacetSearchParams(new SumScoreFacetRequest(new CategoryPath("a"), 10));
  // Accumulator that sums document scores instead of counting documents.
  FacetsAccumulator accumulator = new FacetsAccumulator(searchParams, reader, taxoReader) {
    @Override
    public FacetsAggregator getAggregator() {
      return new SumScoreFacetsAggregator();
    }
  };
  FacetsCollector facets = FacetsCollector.create(accumulator);
  TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, false);
  new IndexSearcher(reader).search(new MatchAllDocsQuery(), MultiCollector.wrap(facets, topDocs));

  // All docs match with the same score, so the sum is maxScore * numDocs.
  List<FacetResult> results = facets.getFacetResults();
  double actual = results.get(0).getFacetResultNode().value;
  double expected = topDocs.topDocs().getMaxScore() * reader.numDocs();
  assertEquals(expected, actual, 1E-10);
  IOUtils.close(taxoReader, taxoDir, reader, indexDir);
}
示例8: search
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * User runs a query and counts facets.
 *
 * @return the top facet results for the "Author" and "Publish Year" dimensions
 * @throws IOException if the index cannot be read
 */
private List<FacetResult> search() throws IOException {
  // try-with-resources guarantees the reader is closed even if the search or
  // facet counting throws (previously it leaked on any exception).
  try (DirectoryReader indexReader = DirectoryReader.open(directory)) {
    IndexSearcher searcher = new IndexSearcher(indexReader);
    SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(indexReader);

    // Aggregates the facet counts
    FacetsCollector fc = new FacetsCollector();

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query:
    //FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);
    TotalHitCountCollector collector = new TotalHitCountCollector();
    searcher.search(new MatchAllDocsQuery(), MultiCollector.wrap(collector, fc));

    // Retrieve results
    Facets facets = new SortedSetDocValuesFacetCounts(state, fc);
    List<FacetResult> results = new ArrayList<>();
    results.add(facets.getTopChildren(10, "Author"));
    results.add(facets.getTopChildren(10, "Publish Year"));
    return results;
  }
}
示例9: search
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * Provided a search has been created, this method will execute
 * that search and aggregate its results into the Search object's
 * collectors. These collectors may be accessed using the
 * accessors of the Search object.
 * @throws IOException A fatal exception occurred while interacting
 * with the index.
 */
public void search() throws IOException {
  if (search == null) {
    return; // nothing to execute until a search has been created
  }
  final Collector hitCollector = search.getCollector();
  final FacetsCollector facetsCollector = search.getFacetsCollector(indexReader, taxonomyReader);
  // Feed both collectors from a single query execution.
  indexSearcher.search(search.getQuery(), MultiCollector.wrap(hitCollector, facetsCollector));
}
示例10: testAggregateSearchResultItem
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * Test method for {@link com.fuerve.villageelder.actions.results.SearchResult#aggregate(com.fuerve.villageelder.actions.results.SearchResultItem)}.
 */
@Test
public final void testAggregateSearchResultItem() throws Exception {
  // Build a throwaway index and taxonomy to search against.
  Directory indexDir = new RAMDirectory();
  Directory taxoDir = new RAMDirectory();
  buildDummyIndex(indexDir, taxoDir);

  IndexReader reader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(reader);
  TaxonomyReader taxo = new DirectoryTaxonomyReader(taxoDir);
  QueryParser parser = new SearchQueryParser(
      Lucene.LUCENE_VERSION,
      Lucene.DEFAULT_QUERY_FIELD,
      Lucene.getPerFieldAnalyzer());

  // Collect hits and facets in a single pass.
  TopFieldCollector hitCollector = getDummyCollector();
  FacetsCollector facetCollector = getDummyFacetsCollector((DirectoryReader) reader, taxo);
  searcher.search(parser.parse("Revision:5*"), MultiCollector.wrap(hitCollector, facetCollector));
  facetCollector.getFacetResults();

  // Aggregate one result item and verify the totals carried over.
  SearchResult target = new SearchResult();
  target.aggregate(new SearchResultItem(hitCollector.topDocs(), facetCollector.getFacetResults()));
  assertEquals(2, target.getTopDocs().totalHits);
  assertEquals(1, target.getFacetResults().size());
}
示例11: testSearchResultItem
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * Test method for {@link com.fuerve.villageelder.actions.results.SearchResultItem#SearchResultItem(org.apache.lucene.search.TopDocs, java.util.List)}.
 */
@Test
public final void testSearchResultItem() throws Exception {
  // Build a throwaway index and taxonomy to search against.
  Directory indexDir = new RAMDirectory();
  Directory taxoDir = new RAMDirectory();
  buildDummyIndex(indexDir, taxoDir);

  IndexReader reader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(reader);
  TaxonomyReader taxo = new DirectoryTaxonomyReader(taxoDir);
  QueryParser parser = new SearchQueryParser(
      Lucene.LUCENE_VERSION,
      Lucene.DEFAULT_QUERY_FIELD,
      Lucene.getPerFieldAnalyzer());

  // Collect hits and facets in a single pass.
  TopFieldCollector hitCollector = getDummyCollector();
  FacetsCollector facetCollector = getDummyFacetsCollector((DirectoryReader) reader, taxo);
  searcher.search(parser.parse("Revision:5*"), MultiCollector.wrap(hitCollector, facetCollector));
  facetCollector.getFacetResults();

  // Construct the result item directly from the collectors' output.
  SearchResultItem target = new SearchResultItem(hitCollector.topDocs(), facetCollector.getFacetResults());
  assertEquals(2, target.getTopDocs().totalHits);
  assertEquals(1, target.getFacetResults().size());
}
示例12: testSumScoreAggregator
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
@Test
public void testSumScoreAggregator() throws Exception {
  // Index a batch of documents; only a random subset matches f:v, but every
  // document carries the category "a".
  Directory indexDir = newDirectory();
  Directory taxoDir = newDirectory();
  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
  IndexWriter writer = new IndexWriter(indexDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  FacetFields facetFields = new FacetFields(taxoWriter);
  final int numDocs = atLeast(30);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    if (random().nextBoolean()) { // don't match all documents
      doc.add(new StringField("f", "v", Store.NO));
    }
    facetFields.addFields(doc, Collections.singletonList(new CategoryPath("a")));
    writer.addDocument(doc);
  }
  taxoWriter.close();
  writer.close();

  DirectoryReader reader = DirectoryReader.open(indexDir);
  DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
  FacetSearchParams searchParams = new FacetSearchParams(new SumScoreFacetRequest(new CategoryPath("a"), 10));
  FacetsCollector facets = FacetsCollector.create(searchParams, reader, taxoReader);
  TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, false);

  // Constant-score query with a boost so all hits share the same score.
  ConstantScoreQuery query = new ConstantScoreQuery(new MatchAllDocsQuery());
  query.setBoost(2.0f);
  newSearcher(reader).search(query, MultiCollector.wrap(facets, topDocs));

  // The aggregated value should equal maxScore times the number of hits.
  List<FacetResult> results = facets.getFacetResults();
  float aggregated = (float) results.get(0).getFacetResultNode().value;
  TopDocs td = topDocs.topDocs();
  assertEquals((int) (td.getMaxScore() * td.totalHits), (int) aggregated);
  IOUtils.close(taxoReader, taxoDir, reader, indexDir);
}
示例13: wrap
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * Folds the given collector into the chain of wrapped collectors; the new
 * collector is invoked before the previously wrapped ones.
 */
public void wrap(Collector collector) {
  final Collector chained = MultiCollector.wrap(collector, wrappedCollector);
  wrappedCollector = chained;
}
示例14: testCountAndSumScore
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
@Test
public void testCountAndSumScore() throws Exception {
  // Dimension "b" is routed to its own category list ("$b") so the two facet
  // requests can be served by different aggregators.
  Directory indexDir = newDirectory();
  Directory taxoDir = newDirectory();
  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
  IndexWriter writer = new IndexWriter(indexDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  FacetIndexingParams fip = new PerDimensionIndexingParams(
      Collections.singletonMap(new CategoryPath("b"), new CategoryListParams("$b")));
  FacetFields facetFields = new FacetFields(taxoWriter, fip);
  final int numDocs = atLeast(30);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(new StringField("f", "v", Store.NO));
    List<CategoryPath> categories = new ArrayList<CategoryPath>();
    categories.add(new CategoryPath("a"));
    categories.add(new CategoryPath("b"));
    facetFields.addFields(doc, categories);
    writer.addDocument(doc);
  }
  taxoWriter.close();
  writer.close();

  DirectoryReader reader = DirectoryReader.open(indexDir);
  DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
  FacetSearchParams searchParams = new FacetSearchParams(fip,
      new CountFacetRequest(new CategoryPath("a"), 10),
      new SumScoreFacetRequest(new CategoryPath("b"), 10));

  // "a" is counted while "b" sums scores — one aggregator per category list.
  Map<CategoryListParams,FacetsAggregator> perList = new HashMap<CategoryListParams,FacetsAggregator>();
  perList.put(fip.getCategoryListParams(new CategoryPath("a")), new FastCountingFacetsAggregator());
  perList.put(fip.getCategoryListParams(new CategoryPath("b")), new SumScoreFacetsAggregator());
  final FacetsAggregator aggregator = new PerCategoryListAggregator(perList, fip);
  FacetsAccumulator accumulator = new FacetsAccumulator(searchParams, reader, taxoReader) {
    @Override
    public FacetsAggregator getAggregator() {
      return aggregator;
    }
  };

  FacetsCollector facets = FacetsCollector.create(accumulator);
  TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, false);
  new IndexSearcher(reader).search(new MatchAllDocsQuery(), MultiCollector.wrap(facets, topDocs));

  List<FacetResult> facetResults = facets.getFacetResults();
  FacetResult countResult = facetResults.get(0);
  assertEquals("unexpected count for " + countResult, reader.maxDoc(), (int) countResult.getFacetResultNode().value);
  FacetResult sumResult = facetResults.get(1);
  double expected = topDocs.topDocs().getMaxScore() * reader.numDocs();
  assertEquals("unexpected value for " + sumResult, expected, sumResult.getFacetResultNode().value, 1E-10);
  IOUtils.close(taxoReader, taxoDir, reader, indexDir);
}
示例15: testSearchCollector
import org.apache.lucene.search.MultiCollector; //导入依赖的package包/类
/**
 * Test method for {@link com.fuerve.villageelder.search.Searcher#search(org.apache.lucene.search.Collector)}.
 */
@SuppressWarnings("unused")
@Test
public final void testSearchCollector() throws Exception {
  // Reflectively grab Searcher's private fields so the test can inspect
  // internal state after initialization.
  Field indexDirectoryField = Searcher.class.getDeclaredField("indexDirectory");
  Field taxonomyDirectoryField = Searcher.class.getDeclaredField("taxonomyDirectory");
  Field indexDirectoryNameField = Searcher.class.getDeclaredField("indexDirectoryName");
  Field taxonomyDirectoryNameField = Searcher.class.getDeclaredField("taxonomyDirectoryName");
  Field stringDirectoriesField = Searcher.class.getDeclaredField("stringDirectories");
  Field initializedField = Searcher.class.getDeclaredField("initialized");
  Field searchField = Searcher.class.getDeclaredField("search");
  Field indexReaderField = Searcher.class.getDeclaredField("indexReader");
  Field indexSearcherField = Searcher.class.getDeclaredField("indexSearcher");
  Field taxonomyReaderField = Searcher.class.getDeclaredField("taxonomyReader");
  Field[] privateFields = {
      indexDirectoryField, taxonomyDirectoryField, indexDirectoryNameField,
      taxonomyDirectoryNameField, stringDirectoriesField, initializedField,
      searchField, indexReaderField, indexSearcherField, taxonomyReaderField
  };
  for (Field f : privateFields) {
    f.setAccessible(true);
  }

  // Setup: build a dummy index and initialize a Searcher over it.
  Directory indexDirectoryExpected = new RAMDirectory();
  Directory taxonomyDirectoryExpected = new RAMDirectory();
  buildDummyIndex(indexDirectoryExpected, taxonomyDirectoryExpected);
  Searcher target = new Searcher(indexDirectoryExpected, taxonomyDirectoryExpected);
  target.initializeSearch();

  // Read out the internal state (several values are intentionally unused;
  // reading them also exercises reflective field access).
  Directory indexDirectoryActual = (Directory) indexDirectoryField.get(target);
  Directory taxonomyDirectoryActual = (Directory) taxonomyDirectoryField.get(target);
  String indexDirectoryNameActual = (String) indexDirectoryNameField.get(target);
  String taxonomyDirectoryNameActual = (String) taxonomyDirectoryNameField.get(target);
  boolean stringDirectoriesActual = stringDirectoriesField.getBoolean(target);
  boolean initializedActual = initializedField.getBoolean(target);
  Search searchFieldActual = (Search) searchField.get(target);
  IndexReader indexReaderActual = (IndexReader) indexReaderField.get(target);
  IndexSearcher indexSearcherActual = (IndexSearcher) indexSearcherField.get(target);
  TaxonomyReader taxonomyReaderActual = (TaxonomyReader) taxonomyReaderField.get(target);

  // Create the Collector to be passed in and execute a search to populate it.
  final TopFieldCollector collector = getDummyCollector();
  final FacetsCollector facetsCollector =
      getDummyFacetsCollector((DirectoryReader) indexReaderActual, taxonomyReaderActual);
  final Collector testCollector = MultiCollector.wrap(collector, facetsCollector);
  target.createSearch("Revision:5*");
  target.search(testCollector);

  // Test
  assertEquals(true, initializedActual);
  assertEquals(2, collector.topDocs().totalHits);
}