当前位置: 首页>>代码示例>>Java>>正文


Java IndexReader.close方法代码示例

本文整理汇总了Java中org.apache.lucene.index.IndexReader.close方法的典型用法代码示例。如果您正苦于以下问题:Java IndexReader.close方法的具体用法?Java IndexReader.close怎么用?Java IndexReader.close使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.lucene.index.IndexReader的用法示例。


在下文中一共展示了IndexReader.close方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testMultiPhrasePrefixQuery

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Highlighting a MultiPhrasePrefixQuery ("quick brown fo*") over a single doc
 * must produce exactly one snippet with every matched term wrapped in bold tags.
 */
public void testMultiPhrasePrefixQuery() throws Exception {
    Analyzer analyzer = new StandardAnalyzer();
    Directory dir = newDirectory();
    String value = "The quick brown fox.";
    IndexReader ir = indexOneDoc(dir, "text", value, analyzer);
    try {
        MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery();
        query.add(new Term("text", "quick"));
        query.add(new Term("text", "brown"));
        query.add(new Term("text", "fo")); // prefix term, should still match "fox"
        IndexSearcher searcher = newSearcher(ir);
        TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
        assertThat(topDocs.totalHits, equalTo(1));
        int docId = topDocs.scoreDocs[0].doc;
        CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
        CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer,
            passageFormatter, null, value, false);
        Snippet[] snippets = highlighter.highlightField("text", query, docId, 5);
        assertThat(snippets.length, equalTo(1));
        assertThat(snippets[0].getText(), equalTo("The <b>quick</b> <b>brown</b> <b>fox</b>."));
    } finally {
        // FIX: close the reader/directory even when an assertion above fails,
        // otherwise a failing test additionally trips resource-leak checks.
        ir.close();
        dir.close();
    }
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:23,代码来源:CustomUnifiedHighlighterTests.java

示例2: closeDeltaIndexReader

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Closes and unregisters the delta index reader associated with the given
 * transaction id, if one is currently registered.
 *
 * @param id the transaction identifier; must not be null
 * @throws IOException if closing the underlying reader fails
 */
public void closeDeltaIndexReader(String id) throws IOException
{
    if (id == null)
    {
        throw new IndexerException("\"null\" is not a valid identifier for a transaction");
    }

    // The delta is owned by a single thread while still active, so no
    // locking is required around the map removal.
    final IndexReader removed = indexReaders.remove(id);
    if (removed == null)
    {
        return;
    }
    removed.close();
}
 
开发者ID:Alfresco,项目名称:alfresco-repository,代码行数:22,代码来源:IndexInfo.java

示例3: testCase

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Shared harness for max-aggregation tests: populates an index via
 * {@code buildIndex}, runs {@code query} through a MaxAggregator on the
 * "number" field, and hands the resulting InternalMax to {@code verify}.
 *
 * @param query      the query driving document collection
 * @param buildIndex callback that writes test documents to the index
 * @param verify     assertion callback receiving the aggregation result
 * @throws IOException on index I/O failure
 */
private void testCase(Query query, CheckedConsumer<RandomIndexWriter, IOException> buildIndex, Consumer<InternalMax> verify)
        throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    buildIndex.accept(indexWriter);
    indexWriter.close();

    IndexReader indexReader = DirectoryReader.open(directory);
    try {
        IndexSearcher indexSearcher = newSearcher(indexReader, true, true);

        MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("_name").field("number");
        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
        fieldType.setName("number");
        try (MaxAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
            aggregator.preCollection();
            indexSearcher.search(query, aggregator);
            aggregator.postCollection();
            verify.accept((InternalMax) aggregator.buildAggregation(0L));
        }
    } finally {
        // FIX: release the reader and directory even when verify() throws an
        // assertion error, so a failed case does not also leak resources.
        indexReader.close();
        directory.close();
    }
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:23,代码来源:MaxAggregatorTests.java

示例4: testGetParentIdNoParentField

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * A document carrying doc values for some unrelated field (and no _parent
 * field) must make getParentId resolve to null.
 */
public void testGetParentIdNoParentField() throws Exception {
    ParentFieldMapper fieldMapper = createParentFieldMapper();
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
    Document doc = new Document();
    doc.add(new SortedDocValuesField("different_field", new BytesRef("1")));
    writer.addDocument(doc);
    writer.close();

    IndexReader reader = DirectoryReader.open(dir);
    // Only one doc was indexed, so look it up in the first (only) leaf.
    assertNull(ParentFieldSubFetchPhase.getParentId(fieldMapper, reader.leaves().get(0).reader(), 0));

    reader.close();
    dir.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:17,代码来源:ParentFieldSubFetchPhaseTests.java

示例5: testMinAggregator_noDocs

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * An empty index must make the MIN aggregation return its identity value,
 * positive infinity.
 */
public void testMinAggregator_noDocs() throws Exception {
    Directory dir = newDirectory();
    // Open and immediately close the writer: the index stays empty.
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.close();

    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = newSearcher(reader, true, true);

    MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
    fieldType.setName("number");
    MinAggregationBuilder builder = new MinAggregationBuilder("_name").field("number");
    try (MinAggregator aggregator = createAggregator(builder, searcher, fieldType)) {
        aggregator.preCollection();
        searcher.search(new MatchAllDocsQuery(), aggregator);
        aggregator.postCollection();
        InternalMin min = (InternalMin) aggregator.buildAggregation(0L);
        assertEquals(Double.POSITIVE_INFINITY, min.getValue(), 0);
    }
    reader.close();
    dir.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:22,代码来源:MinAggregatorTests.java

示例6: testEmpty

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * With an empty index, every filter bucket produced by the filters
 * aggregation must report a document count of zero.
 */
public void testEmpty() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.close();
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = newSearcher(reader, true, true);

    // Build a random number of random term filters.
    int numFilters = randomIntBetween(1, 10);
    QueryBuilder[] filters = new QueryBuilder[numFilters];
    for (int i = 0; i < numFilters; i++) {
        filters[i] = QueryBuilders.termQuery("field", randomAsciiOfLength(5));
    }

    FiltersAggregationBuilder builder = new FiltersAggregationBuilder("test", filters);
    builder.otherBucketKey("other");
    InternalFilters response = search(searcher, new MatchAllDocsQuery(), builder, fieldType);
    assertEquals(response.getBuckets().size(), numFilters);
    for (InternalFilters.InternalBucket bucket : response.getBuckets()) {
        assertEquals(bucket.getDocCount(), 0);
    }
    reader.close();
    dir.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:22,代码来源:FiltersAggregatorTests.java

示例7: searchIndex

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Searches the game or soft index (selected by {@code typeId}) for the given
 * keywords across the configured query fields.
 *
 * @param typeId   category id; selects the game index when it equals the
 *                 configured game type id, otherwise the soft index
 * @param keywords the raw user query string
 * @return the matching documents, in relevance order
 * @throws Exception on query parse or index I/O failure
 */
public List<Document> searchIndex(Integer typeId, String keywords) throws Exception {
    // 1.init searcher
    Analyzer analyzer = new PaodingAnalyzer();
    // FIX: compare boxed Integers with equals() — '==' compares references and
    // silently returns false for equal values outside the [-128, 127] cache.
    boolean isGame = typeId != null && typeId.equals(appConfig.getGameTypeId());
    IndexReader reader = IndexReader.open(isGame ? appConfig.getGameIndexDir()
            : appConfig.getSoftIndexDir());
    try {
        BooleanClause.Occur[] flags = new BooleanClause.Occur[] { BooleanClause.Occur.SHOULD,
                BooleanClause.Occur.SHOULD };
        Query query = MultiFieldQueryParser.parse(keywords, appConfig.getQueryFields(), flags, analyzer);
        query = query.rewrite(reader);

        // 2.search against the pre-built searcher for the selected index
        List<Document> docs = new ArrayList<Document>();
        Hits hits = (isGame ? gameSearcher.search(query, Sort.RELEVANCE) : softSearcher
                .search(query, Sort.RELEVANCE));
        for (int i = 0; i < hits.length(); i++) {
            docs.add(hits.doc(i));
        }

        // 3.return
        return docs;
    } finally {
        // FIX: close the reader even when parsing or searching throws.
        reader.close();
    }
}
 
开发者ID:zhaoxi1988,项目名称:sjk,代码行数:31,代码来源:SearchServiceImpl.java

示例8: clearValidityCache

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Test helper: uses reflection to reach into the LuceneIndex's private
 * {@code dirCache} object, close its cached IndexReader (if any) and null the
 * field out, forcing the index to re-open the reader on next use.
 *
 * @param index the index whose cached reader should be invalidated
 * @throws NoSuchFieldException   if the private field names change upstream
 * @throws IllegalAccessException if reflective access is denied
 * @throws IOException            if closing the cached reader fails
 */
private void clearValidityCache(final LuceneIndex index) throws NoSuchFieldException, IllegalArgumentException, IllegalAccessException, IOException {
    final Class<LuceneIndex> li = LuceneIndex.class;
    final java.lang.reflect.Field dirCache = li.getDeclaredField("dirCache");   //NOI18N
    dirCache.setAccessible(true);
    Object  o = dirCache.get(index);
    // The cache object itself holds the reader in a private "reader" field.
    final java.lang.reflect.Field reader = o.getClass().getDeclaredField("reader");
    reader.setAccessible(true);
    IndexReader r = (IndexReader) reader.get(o);
    if (r != null) {
        r.close();
    }
    // Null the field so the closed reader can never be handed out again.
    reader.set(o,null);
}
 
开发者ID:apache,项目名称:incubator-netbeans,代码行数:14,代码来源:LuceneIndexTest.java

示例9: getNumberOfFields

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Counts the distinct field names present in the main index.
 *
 * @return the number of fields across all documents
 * @throws IOException if the reader cannot be opened or closed
 */
public int getNumberOfFields() throws IOException
{
    final IndexReader mainReader = getMainIndexReferenceCountingReadOnlyIndexReader();
    try
    {
        return mainReader.getFieldNames(IndexReader.FieldOption.ALL).size();
    }
    finally
    {
        // Always release the reference-counted reader.
        mainReader.close();
    }
}
 
开发者ID:Alfresco,项目名称:alfresco-repository,代码行数:14,代码来源:IndexInfo.java

示例10: search

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Runs query {@code q} against the index at {@code indexDir} on the "desc"
 * field and prints each hit's "city" and "desc" values plus a highlighted
 * best fragment of the description.
 *
 * @param indexDir filesystem path of the Lucene index directory
 * @param q        the raw query string
 * @throws Exception on parse or I/O failure
 */
public static void search(String indexDir, String q) throws Exception {
  // FIX: try-with-resources guarantees the directory and reader are closed
  // even if parsing, searching or highlighting throws (the original leaked
  // 'dir' always and 'reader' on any exception).
  try (Directory dir = FSDirectory.open(Paths.get(indexDir));
       IndexReader reader = DirectoryReader.open(dir)) {
    IndexSearcher is = new IndexSearcher(reader);
    // Analyzer analyzer=new StandardAnalyzer(); // standard analyzer
    SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
    QueryParser parser = new QueryParser("desc", analyzer);
    Query query = parser.parse(q);

    long start = System.currentTimeMillis();
    TopDocs hits = is.search(query, 10);
    long end = System.currentTimeMillis();
    System.out.println("匹配 " + q + " ,总共花费" + (end - start) + "毫秒" + "查询到" + hits.totalHits + "个记录");

    // Highlight the best-matching span of "desc" in red bold HTML.
    QueryScorer scorer = new QueryScorer(query);
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
    SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
    Highlighter highlighter = new Highlighter(simpleHTMLFormatter, scorer);
    highlighter.setTextFragmenter(fragmenter);
    for (ScoreDoc scoreDoc : hits.scoreDocs) {
      Document doc = is.doc(scoreDoc.doc);
      System.out.println(doc.get("city"));
      System.out.println(doc.get("desc"));
      String desc = doc.get("desc");
      if (desc != null) {
        TokenStream tokenStream = analyzer.tokenStream("desc", new StringReader(desc));
        System.out.println(highlighter.getBestFragment(tokenStream, desc));
      }
    }
  }
}
 
开发者ID:MiniPa,项目名称:cjs_ssms,代码行数:32,代码来源:SearcherTest.java

示例11: getNumberOfDocuments

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Returns the number of live documents in the main index.
 *
 * @return the document count
 * @throws IOException if the reader cannot be opened or closed
 */
public int getNumberOfDocuments() throws IOException
{
    final IndexReader countingReader = getMainIndexReferenceCountingReadOnlyIndexReader();
    try
    {
        return countingReader.numDocs();
    }
    finally
    {
        // Release the reference-counted reader in all cases.
        countingReader.close();
    }
}
 
开发者ID:Alfresco,项目名称:alfresco-repository,代码行数:13,代码来源:IndexInfo.java

示例12: testReplay

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Indexes a batch of random single-valued docs, runs a term query through a
 * BestDocsDeferringCollector, and verifies that replaying the deferred bucket
 * re-collects exactly the documents a plain search returns for the same term.
 */
public void testReplay() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    int numDocs = randomIntBetween(1, 128);
    int maxNumValues = randomInt(16);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("field", String.valueOf(randomInt(maxNumValues)), Field.Store.NO));
        indexWriter.addDocument(document);
    }

    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = new IndexSearcher(indexReader);

    // Reference result: the doc ids a plain search finds for the same term.
    TermQuery termQuery = new TermQuery(new Term("field", String.valueOf(randomInt(maxNumValues))));
    TopDocs topDocs = indexSearcher.search(termQuery, numDocs);

    BestDocsDeferringCollector collector =
            new BestDocsDeferringCollector(numDocs, new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()));
    // The deferred collector records every doc id it is asked to collect.
    Set<Integer> deferredCollectedDocIds = new HashSet<>();
    collector.setDeferredCollector(Collections.singleton(testCollector(deferredCollectedDocIds)));
    collector.preCollection();
    indexSearcher.search(termQuery, collector);
    collector.postCollection();
    // Replaying bucket 0 must push every deferred doc to the wrapped collector.
    collector.replay(0);

    assertEquals(topDocs.scoreDocs.length, deferredCollectedDocIds.size());
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        assertTrue("expected docid [" + scoreDoc.doc + "] is missing", deferredCollectedDocIds.contains(scoreDoc.doc));
    }
    collector.close();
    indexReader.close();
    directory.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:36,代码来源:BestDocsDeferringCollectorTests.java

示例13: testReplay

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Indexes random single-valued docs, runs a term query through a
 * BestBucketsDeferringCollector, and verifies that replaying bucket 0
 * re-collects exactly the documents a plain search returns for the same term.
 */
public void testReplay() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    int numDocs = randomInt(128);
    int maxNumValues = randomInt(16);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("field", String.valueOf(randomInt(maxNumValues)), Field.Store.NO));
        indexWriter.addDocument(document);
    }

    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = new IndexSearcher(indexReader);

    // Reference result: the doc ids a plain search finds for the same term.
    TermQuery termQuery = new TermQuery(new Term("field", String.valueOf(randomInt(maxNumValues))));
    TopDocs topDocs = indexSearcher.search(termQuery, numDocs);

    SearchContext searchContext = createSearchContext(indexSearcher, createIndexSettings());
    BestBucketsDeferringCollector collector = new BestBucketsDeferringCollector(searchContext);
    Set<Integer> deferredCollectedDocIds = new HashSet<>();
    // NOTE(review): bla(...) presumably builds a collector that records doc ids
    // into the set, mirroring testCollector(...) in the sibling test — confirm.
    collector.setDeferredCollector(Collections.singleton(bla(deferredCollectedDocIds)));
    collector.preCollection();
    indexSearcher.search(termQuery, collector);
    collector.postCollection();
    // Replaying bucket 0 must push every deferred doc to the wrapped collector.
    collector.replay(0);

    assertEquals(topDocs.scoreDocs.length, deferredCollectedDocIds.size());
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        assertTrue("expected docid [" + scoreDoc.doc + "] is missing", deferredCollectedDocIds.contains(scoreDoc.doc));
    }
    indexReader.close();
    directory.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:35,代码来源:BestBucketsDeferringCollectorTests.java

示例14: testNestedChildrenFilter

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Indexes random parent/child document blocks (children first, parent last,
 * via addDocuments) and checks that NestedChildrenQuery, seeded from each
 * parent hit, matches exactly the number of children that parent recorded in
 * its "num_child_docs" field.
 */
public void testNestedChildrenFilter() throws Exception {
    int numParentDocs = scaledRandomIntBetween(0, 32);
    int maxChildDocsPerParent = scaledRandomIntBetween(8, 16);

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numParentDocs; i++) {
        int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent);
        List<Document> docs = new ArrayList<>(numChildDocs + 1);
        for (int j = 0; j < numChildDocs; j++) {
            Document childDoc = new Document();
            childDoc.add(new StringField("type", "child", Field.Store.NO));
            docs.add(childDoc);
        }

        // Parent goes last in the block and stores its own child count so the
        // assertion below can recover the expected number per parent.
        Document parenDoc = new Document();
        parenDoc.add(new StringField("type", "parent", Field.Store.NO));
        parenDoc.add(new LegacyIntField("num_child_docs", numChildDocs, Field.Store.YES));
        docs.add(parenDoc);
        writer.addDocuments(docs);
    }

    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(reader);
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent")));
    Query childFilter = new TermQuery(new Term("type", "child"));
    int checkedParents = 0;
    final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")), false);
    // Walk every parent doc in every segment and count its matching children.
    for (LeafReaderContext leaf : reader.leaves()) {
        DocIdSetIterator parents = parentsWeight.scorer(leaf).iterator();
        for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS ; parentDoc = parents.nextDoc()) {
            int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue().intValue();
            hitContext.reset(null, leaf, parentDoc, searcher);
            NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter, hitContext);
            TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
            searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector);
            assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs));
            checkedParents++;
        }
    }
    // Ensure the loop actually visited every parent that was indexed.
    assertThat(checkedParents, equalTo(numParentDocs));
    reader.close();
    dir.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:48,代码来源:NestedChildrenFilterTests.java

示例15: testDoubleIndexingSameDoc

import org.apache.lucene.index.IndexReader; //导入方法依赖的package包/类
/**
 * Indexes the same dynamically-mapped document twice and verifies that term
 * queries on every dynamically created field (string, int, double, date and
 * an int array) match both copies.
 */
public void testDoubleIndexingSameDoc() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), Lucene.STANDARD_ANALYZER));

    // Start from an empty mapping; all fields below are created dynamically.
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").endObject()
            .endObject().endObject().string();
    IndexService index = createIndex("test");
    client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping, XContentType.JSON).get();
    DocumentMapper mapper = index.mapperService().documentMapper("type");
    QueryShardContext context = index.newQueryShardContext(0, null, () -> 0L);

    ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
            .startObject()
            .field("field1", "value1")
            .field("field2", 1)
            .field("field3", 1.1)
            .field("field4", "2010-01-01")
            .startArray("field5").value(1).value(2).value(3).endArray()
            .endObject()
            .bytes());
    // Parsing must have produced dynamic mapping updates for the new fields;
    // apply them and re-fetch the mapper before querying.
    assertNotNull(doc.dynamicMappingsUpdate());
    client().admin().indices().preparePutMapping("test").setType("type")
        .setSource(doc.dynamicMappingsUpdate().toString(), XContentType.JSON).get();
    mapper = index.mapperService().documentMapper("type");

    // Index the identical Lucene document twice — every query below expects 2 hits.
    writer.addDocument(doc.rootDoc());
    writer.addDocument(doc.rootDoc());

    IndexReader reader = DirectoryReader.open(writer);
    IndexSearcher searcher = new IndexSearcher(reader);

    TopDocs topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field1").fieldType().termQuery("value1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));

    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field2").fieldType().termQuery("1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));

    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field3").fieldType().termQuery("1.1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));

    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field4").fieldType().termQuery("2010-01-01", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));

    // The array field must be searchable by each of its individual values.
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));

    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("2", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));

    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("3", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    writer.close();
    reader.close();
    dir.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:57,代码来源:DoubleIndexingDocTests.java


注:本文中的org.apache.lucene.index.IndexReader.close方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。