

Java IndexReaderContext.leaves Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.index.IndexReaderContext.leaves. If you are wondering what IndexReaderContext.leaves does, how to call it, or what real usages look like, the curated code examples below may help. You can also explore further usage examples of the containing class, org.apache.lucene.index.IndexReaderContext.


The sections below present 8 code examples of the IndexReaderContext.leaves method, sorted by popularity by default.
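Before the examples, here is a minimal sketch of what leaves() returns: the list of per-segment (leaf) contexts under a top-level reader context. The sketch assumes Lucene 4.x, matching the examples below, where the element type is AtomicReaderContext; the method name listLeaves is my own, not taken from any example.

import java.util.List;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.search.IndexSearcher;

public static void listLeaves(IndexSearcher searcher) {
  IndexReaderContext top = searcher.getTopReaderContext();
  // Each leaf corresponds to one index segment; docBase is the leaf's
  // offset within the top-level (global) docID space.
  List<AtomicReaderContext> leaves = top.leaves();
  for (AtomicReaderContext leaf : leaves) {
    System.out.println("leaf ord=" + leaf.ord
        + " docBase=" + leaf.docBase
        + " maxDoc=" + leaf.reader().maxDoc());
  }
}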

Example 1: ShardState

import org.apache.lucene.index.IndexReaderContext; //import the package/class the method depends on
public ShardState(IndexSearcher s) {
  final IndexReaderContext ctx = s.getTopReaderContext();
  final List<AtomicReaderContext> leaves = ctx.leaves();
  subSearchers = new ShardSearcher[leaves.size()];
  for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
    subSearchers[searcherIDX] = new ShardSearcher(leaves.get(searcherIDX), ctx);
  }

  docStarts = new int[subSearchers.length];
  for(int subIDX=0;subIDX<docStarts.length;subIDX++) {
    docStarts[subIDX] = leaves.get(subIDX).docBase;
    //System.out.println("docStarts[" + subIDX + "]=" + docStarts[subIDX]);
  }
}
 
Developer: europeana, Project: search, Lines: 15, Source: TestGrouping.java

Example 2: wrap

import org.apache.lucene.index.IndexReaderContext; //import the package/class the method depends on
public static Spans wrap(IndexReaderContext topLevelReaderContext, SpanQuery query) throws IOException {
  Map<Term,TermContext> termContexts = new HashMap<>();
  TreeSet<Term> terms = new TreeSet<>();
  query.extractTerms(terms);
  for (Term term : terms) {
    termContexts.put(term, TermContext.build(topLevelReaderContext, term));
  }
  final List<AtomicReaderContext> leaves = topLevelReaderContext.leaves();
  if(leaves.size() == 1) {
    final AtomicReaderContext ctx = leaves.get(0);
    return query.getSpans(ctx, ctx.reader().getLiveDocs(), termContexts);
  }
  return new MultiSpansWrapper(leaves, query, termContexts);
}
 
Developer: europeana, Project: search, Lines: 15, Source: MultiSpansWrapper.java

Example 3: getStringVal

import org.apache.lucene.index.IndexReaderContext; //import the package/class the method depends on
private String getStringVal(SolrQueryRequest sqr, String field, int doc) throws IOException {
  SchemaField sf = sqr.getSchema().getField(field);
  ValueSource vs = sf.getType().getValueSource(sf, null);
  Map context = ValueSource.newContext(sqr.getSearcher());
  vs.createWeight(context, sqr.getSearcher());
  IndexReaderContext topReaderContext = sqr.getSearcher().getTopReaderContext();
  List<AtomicReaderContext> leaves = topReaderContext.leaves();
  int idx = ReaderUtil.subIndex(doc, leaves);
  AtomicReaderContext leaf = leaves.get(idx);
  FunctionValues vals = vs.getValues(context, leaf);
  return vals.strVal(doc-leaf.docBase);
}
 
Developer: europeana, Project: search, Lines: 13, Source: TestIndexSearcher.java
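Examples 3, 6, and 8 all rely on the same idiom: a top-level docID is mapped to its leaf with ReaderUtil.subIndex, and the leaf-local docID is recovered by subtracting that leaf's docBase. A minimal sketch of just that conversion, again assuming Lucene 4.x (the helper name toLocalDocId is hypothetical):

import java.util.List;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.ReaderUtil;

public static int toLocalDocId(int globalDoc, List<AtomicReaderContext> leaves) {
  // Find the leaf whose docID range contains globalDoc...
  int idx = ReaderUtil.subIndex(globalDoc, leaves);
  AtomicReaderContext leaf = leaves.get(idx);
  // ...then shift into that leaf's local docID space.
  return globalDoc - leaf.docBase;
}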

Example 4: wrap

import org.apache.lucene.index.IndexReaderContext; //import the package/class the method depends on
public static Spans wrap(IndexReaderContext topLevelReaderContext, SpanQuery query) throws IOException {
  Map<Term,TermContext> termContexts = new HashMap<Term,TermContext>();
  TreeSet<Term> terms = new TreeSet<Term>();
  query.extractTerms(terms);
  for (Term term : terms) {
    termContexts.put(term, TermContext.build(topLevelReaderContext, term, true));
  }
  final List<AtomicReaderContext> leaves = topLevelReaderContext.leaves();
  if(leaves.size() == 1) {
    final AtomicReaderContext ctx = leaves.get(0);
    return query.getSpans(ctx, ctx.reader().getLiveDocs(), termContexts);
  }
  return new MultiSpansWrapper(leaves, query, termContexts);
}
 
Developer: pkarmstr, Project: NYBC, Lines: 15, Source: MultiSpansWrapper.java

Example 5: wrap

import org.apache.lucene.index.IndexReaderContext; //import the package/class the method depends on
public static Spans wrap(IndexReaderContext topLevelReaderContext, SpanQuery query) throws IOException {
  Map<Term,TermContext> termContexts = new HashMap<Term,TermContext>();
  TreeSet<Term> terms = new TreeSet<Term>();
  query.extractTerms(terms);
  for (Term term : terms) {
    termContexts.put(term, TermContext.build(topLevelReaderContext, term));
  }
  final List<AtomicReaderContext> leaves = topLevelReaderContext.leaves();
  if(leaves.size() == 1) {
    final AtomicReaderContext ctx = leaves.get(0);
    return query.getSpans(ctx, ctx.reader().getLiveDocs(), termContexts);
  }
  return new MultiSpansWrapper(leaves, query, termContexts);
}
 
Developer: jimaguere, Project: Maskana-Gestor-de-Conocimiento, Lines: 15, Source: MultiSpansWrapper.java

Example 6: testSpanScorerZeroSloppyFreq

import org.apache.lucene.index.IndexReaderContext; //import the package/class the method depends on
public void testSpanScorerZeroSloppyFreq() throws Exception {
  boolean ordered = true;
  int slop = 1;
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  List<AtomicReaderContext> leaves = topReaderContext.leaves();
  int subIndex = ReaderUtil.subIndex(11, leaves);
  for (int i = 0, c = leaves.size(); i < c; i++) {
    final AtomicReaderContext ctx = leaves.get(i);
   
    final Similarity sim = new DefaultSimilarity() {
      @Override
      public float sloppyFreq(int distance) {
        return 0.0f;
      }
    };

    final Similarity oldSim = searcher.getSimilarity();
    Scorer spanScorer;
    try {
      searcher.setSimilarity(sim);
      SpanNearQuery snq = new SpanNearQuery(
                              new SpanQuery[] {
                                makeSpanTermQuery("t1"),
                                makeSpanTermQuery("t2") },
                              slop,
                              ordered);

      spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, ctx.reader().getLiveDocs());
    } finally {
      searcher.setSimilarity(oldSim);
    }
    if (i == subIndex) {
      assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
      assertEquals("first doc number", spanScorer.docID() + ctx.docBase, 11);
      float score = spanScorer.score();
      assertTrue("first doc score should be zero, " + score, score == 0.0f);
    }  else {
      assertTrue("no second doc", spanScorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
    }
  }
}
 
Developer: europeana, Project: search, Lines: 42, Source: TestSpans.java

Example 7: highlightFields

import org.apache.lucene.index.IndexReaderContext; //import the package/class the method depends on
/**
 * Highlights the top-N passages from multiple fields.
 * <p>
 * Conceptually, this behaves as a more efficient form of:
 * <pre class="prettyprint">
 * Map m = new HashMap();
 * for (String field : fields) {
 *   m.put(field, highlight(field, query, searcher, topDocs, maxPassages));
 * }
 * return m;
 * </pre>
 * 
 * @param fields field names to highlight. 
 *        Must have a stored string value and also be indexed with offsets.
 * @param query query to highlight.
 * @param searcher searcher that was previously used to execute the query.
 * @param topDocs TopDocs containing the summary result documents to highlight.
 * @param maxPassages The maximum number of top-N ranked passages per-field used to 
 *        form the highlighted snippets.
 * @return Map keyed on field name, containing the array of formatted snippets 
 *         corresponding to the documents in <code>topDocs</code>. 
 *         If no highlights were found for a document, its value is <code>null</code>.
 * @throws IOException if an I/O error occurred during processing
 * @throws IllegalArgumentException if <code>field</code> was indexed without 
 *         {@link IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
 */
public Map<String,String[]> highlightFields(String fields[], Query query, IndexSearcher searcher, TopDocs topDocs, int maxPassages) throws IOException {
  final IndexReader reader = searcher.getIndexReader();
  final ScoreDoc scoreDocs[] = topDocs.scoreDocs;
  query = rewrite(query);
  SortedSet<Term> queryTerms = new TreeSet<Term>();
  query.extractTerms(queryTerms);

  int docids[] = new int[scoreDocs.length];
  for (int i = 0; i < docids.length; i++) {
    docids[i] = scoreDocs[i].doc;
  }
  IndexReaderContext readerContext = reader.getContext();
  List<AtomicReaderContext> leaves = readerContext.leaves();

  BreakIterator bi = (BreakIterator)breakIterator.clone();

  // sort for sequential io
  Arrays.sort(docids);
  Arrays.sort(fields);
  
  // pull stored data:
  LimitedStoredFieldVisitor visitor = new LimitedStoredFieldVisitor(fields, maxLength);
  String contents[][] = new String[fields.length][docids.length];
  for (int i = 0; i < docids.length; i++) {
    searcher.doc(docids[i], visitor);
    for (int j = 0; j < fields.length; j++) {
      contents[j][i] = visitor.getValue(j).toString();
    }
    visitor.reset();
  }
  
  Map<String,String[]> highlights = new HashMap<String,String[]>();
  for (int i = 0; i < fields.length; i++) {
    String field = fields[i];
    Term floor = new Term(field, "");
    Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
    SortedSet<Term> fieldTerms = queryTerms.subSet(floor, ceiling);
    // TODO: should we have some reasonable defaults for term pruning? (e.g. stopwords)
    Term terms[] = fieldTerms.toArray(new Term[fieldTerms.size()]);
    Map<Integer,String> fieldHighlights = highlightField(field, contents[i], bi, terms, docids, leaves, maxPassages);
      
    String[] result = new String[scoreDocs.length];
    for (int j = 0; j < scoreDocs.length; j++) {
      result[j] = fieldHighlights.get(scoreDocs[j].doc);
    }
    highlights.put(field, result);
  }
  return highlights;
}
 
Developer: pkarmstr, Project: NYBC, Lines: 76, Source: PostingsHighlighter.java
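For context, a hedged usage sketch of the highlightFields method above, assuming the Lucene 4.x PostingsHighlighter it comes from. The field names "title" and "body" are hypothetical, and per the Javadoc both fields must be stored string fields indexed with offsets:

import java.io.IOException;
import java.util.Map;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.postingshighlight.PostingsHighlighter;

public static void printBodySnippets(IndexSearcher searcher, Query query) throws IOException {
  PostingsHighlighter highlighter = new PostingsHighlighter();
  TopDocs topDocs = searcher.search(query, 10);
  // Highlight up to 2 passages per field for the top 10 hits.
  Map<String,String[]> snippets =
      highlighter.highlightFields(new String[] {"title", "body"}, query, searcher, topDocs, 2);
  for (String snippet : snippets.get("body")) {
    // A null entry means no highlights were found for that document.
    System.out.println(snippet == null ? "(no highlight for this hit)" : snippet);
  }
}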

Example 8: testSpanScorerZeroSloppyFreq

import org.apache.lucene.index.IndexReaderContext; //import the package/class the method depends on
public void testSpanScorerZeroSloppyFreq() throws Exception {
  boolean ordered = true;
  int slop = 1;
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  List<AtomicReaderContext> leaves = topReaderContext.leaves();
  int subIndex = ReaderUtil.subIndex(11, leaves);
  for (int i = 0, c = leaves.size(); i < c; i++) {
    final AtomicReaderContext ctx = leaves.get(i);
   
    final Similarity sim = new DefaultSimilarity() {
      @Override
      public float sloppyFreq(int distance) {
        return 0.0f;
      }
    };

    final Similarity oldSim = searcher.getSimilarity();
    Scorer spanScorer;
    try {
      searcher.setSimilarity(sim);
      SpanNearQuery snq = new SpanNearQuery(
                              new SpanQuery[] {
                                makeSpanTermQuery("t1"),
                                makeSpanTermQuery("t2") },
                              slop,
                              ordered);

      spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, true, false, ctx.reader().getLiveDocs());
    } finally {
      searcher.setSimilarity(oldSim);
    }
    if (i == subIndex) {
      assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
      assertEquals("first doc number", spanScorer.docID() + ctx.docBase, 11);
      float score = spanScorer.score();
      assertTrue("first doc score should be zero, " + score, score == 0.0f);
    }  else {
      assertTrue("no second doc", spanScorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
    }
  }
}
 
Developer: pkarmstr, Project: NYBC, Lines: 42, Source: TestSpans.java


Note: The org.apache.lucene.index.IndexReaderContext.leaves method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's License; do not reproduce without permission.