Java IndexSearcher.search Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.search.IndexSearcher.search. If you are looking for concrete examples of how to call IndexSearcher.search, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.lucene.search.IndexSearcher.

The following 15 code examples of IndexSearcher.search are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
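
Before working through the examples, here is a minimal, self-contained sketch of the pattern most of them share: open a DirectoryReader over an index Directory, wrap it in an IndexSearcher, and call search(Query, int) to retrieve the top-scoring hits as a TopDocs. The index path "/tmp/index" and the "title" field are placeholders, and the sketch assumes a Lucene 6/7-era API and an already-populated index; it is not taken from any of the projects listed below.

import java.nio.file.Paths;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

public class MinimalSearchSketch {
    public static void main(String[] args) throws Exception {
        // "/tmp/index" is a placeholder path to an existing Lucene index.
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/tmp/index")))) {
            IndexSearcher searcher = new IndexSearcher(reader);

            // search(Query, int) returns the n best-scoring documents as a TopDocs.
            TopDocs topDocs = searcher.search(new TermQuery(new Term("title", "lucene")), 10);

            for (ScoreDoc hit : topDocs.scoreDocs) {
                Document doc = searcher.doc(hit.doc); // load the stored fields of this hit
                System.out.println(hit.score + "\t" + doc.get("title"));
            }
        }
    }
}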

Example 1: assertAvgScoreMode

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) throws IOException {
    MultiValueMode sortMode = MultiValueMode.AVG;
    Query childFilter = Queries.not(parentFilter);
    XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
    Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
    Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
    TopDocs topDocs = searcher.search(query, 5, sort);
    assertThat(topDocs.totalHits, equalTo(7));
    assertThat(topDocs.scoreDocs.length, equalTo(5));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(3));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(7));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
    assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
    assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
    assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(4));
}
 
Author: justor, Project: elasticsearch_my, Lines: 21, Source: AbstractNumberNestedSortingTestCase.java

Example 2: search

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
/**
 * Search sample. 
 * 
 * @param directory the index directory.
 * @throws IOException in case of I/O failure.
 * @throws ParseException in case of Query parse exception.
 */	
public static void search(Directory directory) throws IOException, ParseException {
	IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
	
	Query query = new QueryParser("title", new StandardAnalyzer()).parse("title:Solr");
	TopDocs matches = searcher.search(query, 10);
	
	System.out.println("Search returned " + matches.totalHits + " matches.");
	Arrays.stream(matches.scoreDocs)
		.map(scoreDoc -> luceneDoc(scoreDoc, searcher))
		.forEach(doc -> {
			System.out.println("-------------------------------------");				
			System.out.println("ID:\t" + doc.get("id"));
			System.out.println("TITLE:\t" + doc.get("title"));
			System.out.println("AUTHOR:\t" + doc.get("author"));
			System.out.println("SCORE:\t" + doc.get("score"));
			
		});
}
 
Author: agazzarini, Project: as-full-text-search-server, Lines: 26, Source: LuceneBasicFlowExample.java

Example 3: synTokenQuery

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
private void synTokenQuery(String search, final int numbOfResults, final double minLuceneScore,
		Map<String, Float> result, IndexSearcher searcher) throws ParseException, IOException {

	QueryParser parser = new QueryParser(Version.LUCENE_46, "surfaceFormTokens",
			new StandardAnalyzer(Version.LUCENE_46));

	search = QueryParser.escape(search);

	Query q = parser.parse(search);
	/*
	 * Works only in String field!!
	 */
	// Query q = new FuzzyQuery(new Term("surfaceFormTokens",
	// QueryParser.escape(search)), 2);

	TopDocs top = searcher.search(q, numbOfResults);

	for (ScoreDoc doc : top.scoreDocs) {
		if (doc.score >= minLuceneScore) {
			final String key = searcher.doc(doc.doc).get("conceptID");
			if (result.getOrDefault(key, 0f) < doc.score) {
				result.put(key, doc.score);
			}
		}
	}
}
 
Author: ag-sc, Project: JLink, Lines: 27, Source: LuceneRetrieval.java

Example 4: testCase

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
private void testCase(Query query, String field, int precision, CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
                      Consumer<InternalGeoHashGrid> verify) throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    buildIndex.accept(indexWriter);
    indexWriter.close();

    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);

    GeoGridAggregationBuilder aggregationBuilder = new GeoGridAggregationBuilder("_name").field(field);
    aggregationBuilder.precision(precision);
    MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType();
    fieldType.setHasDocValues(true);
    fieldType.setName(FIELD_NAME);
    try (Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
        aggregator.preCollection();
        indexSearcher.search(query, aggregator);
        aggregator.postCollection();
        verify.accept((InternalGeoHashGrid) aggregator.buildAggregation(0L));
    }
    indexReader.close();
    directory.close();
}
 
Author: justor, Project: elasticsearch_my, Lines: 25, Source: GeoHashGridAggregatorTests.java

Example 5: search

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
@Override
public Collection<IndexedItem> search(IndexSearcher searcher) throws IOException
{
	BooleanQuery overall = new BooleanQuery();
	BooleanQuery collections = new BooleanQuery();
	for( Institution inst : institutions )
	{
		collections.add(
			new TermQuery(new Term(FreeTextQuery.FIELD_INSTITUTION, Long.toString(inst.getUniqueId()))),
			Occur.SHOULD);
	}
	overall.add(collections, Occur.MUST);
	overall.add(NumericRangeQuery.newLongRange(FreeTextQuery.FIELD_ID_RANGEABLE, firstId, lastId, true, true),
		Occur.MUST);
	searcher.search(overall, compareDates);
	return compareDates.getModifiedDocs();
}
 
Author: equella, Project: Equella, Lines: 18, Source: ItemSyncer.java

Example 6: testAllTermQuery

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
public void testAllTermQuery() throws IOException {
    Directory dir = newDirectory();
    String value = "The quick brown fox.";
    Analyzer analyzer = new StandardAnalyzer();
    IndexReader ir = indexOneDoc(dir, "all", value, analyzer);
    AllTermQuery query = new AllTermQuery(new Term("all", "fox"));
    IndexSearcher searcher = newSearcher(ir);
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    assertThat(topDocs.totalHits, equalTo(1));
    int docId = topDocs.scoreDocs[0].doc;
    CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
    CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer,
        passageFormatter, null, value, false);
    Snippet[] snippets = highlighter.highlightField("all", query, docId, 5);
    assertThat(snippets.length, equalTo(1));
    assertThat(snippets[0].getText(), equalTo("The quick brown <b>fox</b>."));
    ir.close();
    dir.close();
}
 
Author: justor, Project: elasticsearch_my, Lines: 20, Source: CustomUnifiedHighlighterTests.java

Example 7: testNoTokens

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
public void testNoTokens() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.KEYWORD_ANALYZER));

    FieldType allFt = getAllFieldType();
    Document doc = new Document();
    doc.add(new Field("_id", "1", StoredField.TYPE));
    doc.add(new AllField("_all", "", 2.0f, allFt));
    indexWriter.addDocument(doc);

    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);

    TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10);
    assertThat(docs.totalHits, equalTo(1));
    assertThat(docs.scoreDocs[0].doc, equalTo(0));
}
 
Author: justor, Project: elasticsearch_my, Lines: 18, Source: SimpleAllTests.java

Example 8: writeEmptyTermVector

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOException {

        Directory dir = newDirectory();
        IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
        conf.setOpenMode(OpenMode.CREATE);
        IndexWriter writer = new IndexWriter(dir, conf);
        FieldType type = new FieldType(TextField.TYPE_STORED);
        type.setStoreTermVectorOffsets(true);
        type.setStoreTermVectorPayloads(false);
        type.setStoreTermVectorPositions(true);
        type.setStoreTermVectors(true);
        type.freeze();
        Document d = new Document();
        d.add(new Field("id", "abc", StringField.TYPE_STORED));

        writer.updateDocument(new Term("id", "abc"), d);
        writer.commit();
        writer.close();
        DirectoryReader dr = DirectoryReader.open(dir);
        IndexSearcher s = new IndexSearcher(dr);
        TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
        ScoreDoc[] scoreDocs = search.scoreDocs;
        int doc = scoreDocs[0].doc;
        Fields fields = dr.getTermVectors(doc);
        EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
        outResponse.setFields(fields, null, flags, fields);
        outResponse.setExists(true);
        dr.close();
        dir.close();

    }
 
Author: justor, Project: elasticsearch_my, Lines: 32, Source: TermVectorsUnitTests.java

Example 9: facetsOnly

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
/** User runs a query and counts facets only without collecting the matching documents.*/
private List<FacetResult> facetsOnly() throws IOException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(indexReader);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

  FacetsCollector fc = new FacetsCollector();

  // MatchAllDocsQuery is for "browsing" (counts facets
  // for all non-deleted docs in the index); normally
  // you'd use a "normal" query:
  searcher.search(new MatchAllDocsQuery(), fc);

  // Retrieve results
  List<FacetResult> results = new ArrayList<FacetResult>();

  // Count both "Publish Date" and "Author" dimensions
  Facets facets = new FastTaxonomyFacetCounts(taxoReader, config, fc);
 
  results.add(facets.getTopChildren(10, "Author"));
  results.add(facets.getTopChildren(10, "Publish Date"));
  
  indexReader.close();
  taxoReader.close();
  
  return results;
}
 
Author: skeychen, Project: dswork, Lines: 28, Source: SimpleFacetsExample.java

Example 10: testReplay

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
public void testReplay() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    int numDocs = randomIntBetween(1, 128);
    int maxNumValues = randomInt(16);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("field", String.valueOf(randomInt(maxNumValues)), Field.Store.NO));
        indexWriter.addDocument(document);
    }

    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = new IndexSearcher(indexReader);

    TermQuery termQuery = new TermQuery(new Term("field", String.valueOf(randomInt(maxNumValues))));
    TopDocs topDocs = indexSearcher.search(termQuery, numDocs);

    BestDocsDeferringCollector collector =
            new BestDocsDeferringCollector(numDocs, new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()));
    Set<Integer> deferredCollectedDocIds = new HashSet<>();
    collector.setDeferredCollector(Collections.singleton(testCollector(deferredCollectedDocIds)));
    collector.preCollection();
    indexSearcher.search(termQuery, collector);
    collector.postCollection();
    collector.replay(0);

    assertEquals(topDocs.scoreDocs.length, deferredCollectedDocIds.size());
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        assertTrue("expected docid [" + scoreDoc.doc + "] is missing", deferredCollectedDocIds.contains(scoreDoc.doc));
    }
    collector.close();
    indexReader.close();
    directory.close();
}
 
Author: justor, Project: elasticsearch_my, Lines: 36, Source: BestDocsDeferringCollectorTests.java

Example 11: testEmptyNumericSegment

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
public void testEmptyNumericSegment() throws Exception {
    final Directory dir = newDirectory();
    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new NumericDocValuesField("group", 0));
    w.addDocument(doc);
    doc.clear();
    doc.add(new NumericDocValuesField("group", 1));
    w.addDocument(doc);
    w.commit();
    doc.clear();
    doc.add(new NumericDocValuesField("group", 10));
    w.addDocument(doc);
    w.commit();
    doc.clear();
    doc.add(new NumericDocValuesField("category", 0));
    w.addDocument(doc);
    w.commit();
    final IndexReader reader = w.getReader();
    final IndexSearcher searcher = newSearcher(reader);
    SortField sortField = new SortField("group", SortField.Type.LONG);
    sortField.setMissingValue(Long.MAX_VALUE);
    Sort sort = new Sort(sortField);
    final CollapsingTopDocsCollector collapsingCollector =
            CollapsingTopDocsCollector.createNumeric("group", sort, 10, false);
    searcher.search(new MatchAllDocsQuery(), collapsingCollector);
    CollapseTopFieldDocs collapseTopFieldDocs = collapsingCollector.getTopDocs();
    assertEquals(4, collapseTopFieldDocs.scoreDocs.length);
    assertEquals(4, collapseTopFieldDocs.collapseValues.length);
    assertEquals(0L, collapseTopFieldDocs.collapseValues[0]);
    assertEquals(1L, collapseTopFieldDocs.collapseValues[1]);
    assertEquals(10L, collapseTopFieldDocs.collapseValues[2]);
    assertNull(collapseTopFieldDocs.collapseValues[3]);
    w.close();
    reader.close();
    dir.close();
}
 
Author: justor, Project: elasticsearch_my, Lines: 38, Source: CollapsingTopDocsCollectorTests.java

Example 12: testEmptySortedSegment

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
public void testEmptySortedSegment() throws Exception {
    final Directory dir = newDirectory();
    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedDocValuesField("group", new BytesRef("0")));
    w.addDocument(doc);
    doc.clear();
    doc.add(new SortedDocValuesField("group", new BytesRef("1")));
    w.addDocument(doc);
    w.commit();
    doc.clear();
    doc.add(new SortedDocValuesField("group", new BytesRef("10")));
    w.addDocument(doc);
    w.commit();
    doc.clear();
    doc.add(new NumericDocValuesField("category", 0));
    w.addDocument(doc);
    w.commit();
    final IndexReader reader = w.getReader();
    final IndexSearcher searcher = newSearcher(reader);
    Sort sort = new Sort(new SortField("group", SortField.Type.STRING_VAL));
    final CollapsingTopDocsCollector collapsingCollector =
        CollapsingTopDocsCollector.createKeyword("group", sort, 10, false);
    searcher.search(new MatchAllDocsQuery(), collapsingCollector);
    CollapseTopFieldDocs collapseTopFieldDocs = collapsingCollector.getTopDocs();
    assertEquals(4, collapseTopFieldDocs.scoreDocs.length);
    assertEquals(4, collapseTopFieldDocs.collapseValues.length);
    assertNull(collapseTopFieldDocs.collapseValues[0]);
    assertEquals(new BytesRef("0"), collapseTopFieldDocs.collapseValues[1]);
    assertEquals(new BytesRef("1"), collapseTopFieldDocs.collapseValues[2]);
    assertEquals(new BytesRef("10"), collapseTopFieldDocs.collapseValues[3]);
    w.close();
    reader.close();
    dir.close();
}
 
Author: justor, Project: elasticsearch_my, Lines: 36, Source: CollapsingTopDocsCollectorTests.java

Example 13: testCase

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
private void testCase(Query query,
                      CheckedConsumer<RandomIndexWriter, IOException> indexer,
                      Consumer<Sum> verify) throws IOException {

    try (Directory directory = newDirectory()) {
        try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
            indexer.accept(indexWriter);
        }

        try (IndexReader indexReader = DirectoryReader.open(directory)) {
            IndexSearcher indexSearcher = newSearcher(indexReader, true, true);

            MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
            fieldType.setName(FIELD_NAME);
            fieldType.setHasDocValues(true);

            SumAggregationBuilder aggregationBuilder = new SumAggregationBuilder("_name");
            aggregationBuilder.field(FIELD_NAME);

            try (SumAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
                aggregator.preCollection();
                indexSearcher.search(query, aggregator);
                aggregator.postCollection();

                verify.accept((Sum) aggregator.buildAggregation(0L));
            }
        }
    }
}
 
Author: justor, Project: elasticsearch_my, Lines: 30, Source: SumAggregatorTests.java

Example 14: writeStandardTermVector

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {

        Directory dir = newDirectory();
        IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());

        conf.setOpenMode(OpenMode.CREATE);
        IndexWriter writer = new IndexWriter(dir, conf);
        FieldType type = new FieldType(TextField.TYPE_STORED);
        type.setStoreTermVectorOffsets(true);
        type.setStoreTermVectorPayloads(false);
        type.setStoreTermVectorPositions(true);
        type.setStoreTermVectors(true);
        type.freeze();
        Document d = new Document();
        d.add(new Field("id", "abc", StringField.TYPE_STORED));
        d.add(new Field("title", "the1 quick brown fox jumps over  the1 lazy dog", type));
        d.add(new Field("desc", "the1 quick brown fox jumps over  the1 lazy dog", type));

        writer.updateDocument(new Term("id", "abc"), d);
        writer.commit();
        writer.close();
        DirectoryReader dr = DirectoryReader.open(dir);
        IndexSearcher s = new IndexSearcher(dr);
        TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
        ScoreDoc[] scoreDocs = search.scoreDocs;
        int doc = scoreDocs[0].doc;
        Fields termVectors = dr.getTermVectors(doc);
        EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
        outResponse.setFields(termVectors, null, flags, termVectors);
        dr.close();
        dir.close();

    }
 
Author: justor, Project: elasticsearch_my, Lines: 34, Source: TermVectorsUnitTests.java

Example 15: searchForTicket

import org.apache.lucene.search.IndexSearcher; // import the package/class this method depends on
private static List<TicketGroup> searchForTicket(Ticket ticket, int maxCount) throws IndexException {
	if (ticket == null) {
		throw new IllegalArgumentException(ERROR_NULL_ARGUMENT);
	}

	Directory directory = IndexProvider.getInstance().getDirectory();
	
	try (IndexReader reader = DirectoryReader.open(directory);) {
		Query query = new QueryParser(SUBJECT, analyzer).parse(QueryParser.escape(ticket.getSubject()));
		IndexSearcher searcher = new IndexSearcher(reader);
		TopDocs docs = searcher.search(query, maxCount);
		ScoreDoc[] foundDocuments = docs.scoreDocs;

		List<TicketGroup> result = new ArrayList<>();
		for(ScoreDoc document: foundDocuments){
			String[] ids = searcher.doc(document.doc).getValues(ID);
			if (!(ids.length == 1 && ids[0].equals(ticket.getId()))){
				result.add(new TicketGroup(Arrays.asList(ids)));
			}
		}
		
		return result;
	} catch (IOException | ParseException e) {
		LOGGER.error(ERROR_SEARCHING_FAILED, e);
		throw new IndexException(ERROR_SEARCHING_FAILED, e);
	}
}
 
Author: SAP, Project: cloud-c4c-ticket-duplicate-finder-ext, Lines: 28, Source: IndexService.java


Note: The org.apache.lucene.search.IndexSearcher.search method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.