

Java ReaderUtil.subIndex Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.index.ReaderUtil.subIndex. If you are wondering what ReaderUtil.subIndex does, or how and when to use it, the curated examples below should help. You can also explore the other usage examples of org.apache.lucene.index.ReaderUtil.


Fifteen code examples of the ReaderUtil.subIndex method are shown below, sorted by popularity by default.
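Before the examples, a word on what the method does: ReaderUtil.subIndex maps a top-level (composite-reader) document id to the index of the segment (leaf) that contains it, by binary-searching the leaves' docBase offsets; subtracting that leaf's docBase then yields the segment-local doc id that per-segment APIs expect. The following is a minimal sketch of this pattern, which is the common thread through all 15 examples; the class and method names are hypothetical, and it assumes a Lucene version (5.x or later) whose leaves are LeafReaderContext (several older examples below use AtomicReaderContext instead).

import java.util.List;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;

// Hypothetical helper illustrating the idiom shared by the examples below.
class SubIndexSketch {
  /** Resolves a top-level docID to its segment and returns the segment-local doc id. */
  static int toLocalDoc(IndexReader reader, int topLevelDocId) {
    List<LeafReaderContext> leaves = reader.leaves();
    // Binary search over the leaves' docBase values for the containing segment.
    int leafIndex = ReaderUtil.subIndex(topLevelDocId, leaves);
    LeafReaderContext leaf = leaves.get(leafIndex);
    // Per-segment APIs (doc values, terms, scorers) expect this local id.
    return topLevelDocId - leaf.docBase;
  }
}

An int[] overload, ReaderUtil.subIndex(docId, docStarts), performs the same search over an array of segment start offsets; Examples 3, 8, and 10 below use that form.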

Example 1: apply

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
@Nullable
@Override
public Row apply(@Nullable ScoreDoc input) {
    if (input == null) {
        return null;
    }
    FieldDoc fieldDoc = (FieldDoc) input;
    scorer.score(fieldDoc.score);
    for (OrderByCollectorExpression orderByCollectorExpression : orderByCollectorExpressions) {
        orderByCollectorExpression.setNextFieldDoc(fieldDoc);
    }
    List<LeafReaderContext> leaves = indexReader.leaves();
    int readerIndex = ReaderUtil.subIndex(fieldDoc.doc, leaves);
    LeafReaderContext subReaderContext = leaves.get(readerIndex);
    int subDoc = fieldDoc.doc - subReaderContext.docBase;
    for (LuceneCollectorExpression<?> expression : expressions) {
        expression.setNextReader(subReaderContext);
        expression.setNextDocId(subDoc);
    }
    return inputRow;
}
 
Developer: baidu | Project: Elasticsearch | Lines: 22 | Source: ScoreDocRowFunction.java

Example 2: explain

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID) throws IOException {
  Explanation result = super.explain(searcher, firstPassExplanation, docID);

  List<AtomicReaderContext> leaves = searcher.getIndexReader().leaves();
  int subReader = ReaderUtil.subIndex(docID, leaves);
  AtomicReaderContext readerContext = leaves.get(subReader);
  int docIDInSegment = docID - readerContext.docBase;
  Map<String,Object> context = new HashMap<>();

  FakeScorer fakeScorer = new FakeScorer();
  fakeScorer.score = firstPassExplanation.getValue();
  fakeScorer.doc = docIDInSegment;

  context.put("scorer", fakeScorer);

  for(String variable : expression.variables) {
    result.addDetail(new Explanation((float) bindings.getValueSource(variable).getValues(context, readerContext).doubleVal(docIDInSegment),
                                     "variable \"" + variable + "\""));
  }

  return result;
}
 
Developer: europeana | Project: search | Lines: 24 | Source: ExpressionRescorer.java

Example 3: getWeight

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
/** 
 * Returns the weight for the current <code>docId</code> as computed 
 * by the <code>weightsValueSource</code>
 */
@Override
protected long getWeight(Document doc, int docId) {    
  if (currentWeightValues == null) {
    return 0;
  }
  int subIndex = ReaderUtil.subIndex(docId, starts);
  if (subIndex != currentLeafIndex) {
    currentLeafIndex = subIndex;
    try {
      currentWeightValues = weightsValueSource.getValues(new HashMap<String, Object>(), leaves.get(currentLeafIndex));
    } catch (IOException e) {
      throw new RuntimeException(e); // propagate the cause instead of swallowing it
    }
  }
  return currentWeightValues.longVal(docId - starts[subIndex]);
}
 
Developer: europeana | Project: search | Lines: 21 | Source: DocumentValueSourceDictionary.java

Example 4: transform

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
@Override
public void transform(SolrDocument doc, int docid) {
  // This is only good for random-access functions

  try {

    // TODO: calculate this stuff just once across diff functions
    int idx = ReaderUtil.subIndex(docid, readerContexts);
    AtomicReaderContext rcontext = readerContexts.get(idx);
    FunctionValues values = docValuesArr[idx];
    if (values == null) {
      docValuesArr[idx] = values = valueSource.getValues(fcontext, rcontext);
    }

    int localId = docid - rcontext.docBase;
    Object val = values.objectVal(localId);
    if (val != null) {
      doc.setField( name, val );
    }
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "exception at docid " + docid + " for valuesource " + valueSource, e);
  }
}
 
Developer: europeana | Project: search | Lines: 24 | Source: ValueSourceAugmenter.java

Example 5: objectVal

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
Object objectVal(int topDocId) throws IOException {
  // lookup segment level stuff:
  int segIdx = ReaderUtil.subIndex(topDocId, readerContexts);
  LeafReaderContext rcontext = readerContexts.get(segIdx);
  int segDocId = topDocId - rcontext.docBase;
  // unfortunately Lucene 7.0 requires forward only traversal (with no reset method).
  //   So we need to track our last docId (per segment) and re-fetch the FunctionValues. :-(
  FunctionValues functionValues = functionValuesPerSeg[segIdx];
  if (functionValues == null || segDocId < functionValuesDocIdPerSeg[segIdx]) {
    functionValues = functionValuesPerSeg[segIdx] = valueSource.getValues(fContext, rcontext);
  }
  functionValuesDocIdPerSeg[segIdx] = segDocId;

  // get value:
  return functionValues.objectVal(segDocId);
}
 
Developer: OpenSextant | Project: SolrTextTagger | Lines: 17 | Source: TaggerRequestHandler.java

Example 6: collect

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
public void collect(IntContainer docIds, StreamBucket.Builder builder) throws IOException {
    for (IntCursor cursor : docIds) {
        final int docId = cursor.value;
        int readerIndex = ReaderUtil.subIndex(docId, readerContexts);
        LeafReaderContext subReaderContext = readerContexts.get(readerIndex);
        setNextReader(subReaderContext);
        setNextDocId(docId - subReaderContext.docBase);
        builder.add(row);
    }
}
 
Developer: baidu | Project: Elasticsearch | Lines: 11 | Source: FetchCollector.java

Example 7: getStringVal

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
private String getStringVal(SolrQueryRequest sqr, String field, int doc) throws IOException {
  SchemaField sf = sqr.getSchema().getField(field);
  ValueSource vs = sf.getType().getValueSource(sf, null);
  Map context = ValueSource.newContext(sqr.getSearcher());
  vs.createWeight(context, sqr.getSearcher());
  IndexReaderContext topReaderContext = sqr.getSearcher().getTopReaderContext();
  List<AtomicReaderContext> leaves = topReaderContext.leaves();
  int idx = ReaderUtil.subIndex(doc, leaves);
  AtomicReaderContext leaf = leaves.get(idx);
  FunctionValues vals = vs.getValues(context, leaf);
  return vals.strVal(doc-leaf.docBase);
}
 
Developer: europeana | Project: search | Lines: 13 | Source: TestIndexSearcher.java

Example 8: fetchNextOrNull

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
@Override
protected ScoreDoc fetchNextOrNull()
{
    if ( !iterator.hasNext() )
    {
        return null;
    }
    currentDoc = iterator.next();
    int subIndex = ReaderUtil.subIndex( currentDoc.doc, docStarts );
    LeafReaderContext context = contexts[subIndex];
    onNextDoc( currentDoc.doc - context.docBase, context );
    return currentDoc;
}
 
Developer: neo4j-contrib | Project: neo4j-lucene5-index | Lines: 14 | Source: DocValuesCollector.java

Example 9: highlightField

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
private Map<Integer,String> highlightField(String field, String contents[], BreakIterator bi, Term terms[], int[] docids, List<AtomicReaderContext> leaves, int maxPassages) throws IOException {  
  Map<Integer,String> highlights = new HashMap<Integer,String>();
  
  // reuse in the real sense... for docs in same segment we just advance our old enum
  DocsAndPositionsEnum postings[] = null;
  TermsEnum termsEnum = null;
  int lastLeaf = -1;
  
  for (int i = 0; i < docids.length; i++) {
    String content = contents[i];
    if (content.length() == 0) {
      continue; // nothing to do
    }
    bi.setText(content);
    int doc = docids[i];
    int leaf = ReaderUtil.subIndex(doc, leaves);
    AtomicReaderContext subContext = leaves.get(leaf);
    AtomicReader r = subContext.reader();
    Terms t = r.terms(field);
    if (t == null) {
      continue; // nothing to do
    }
    if (leaf != lastLeaf) {
      termsEnum = t.iterator(null);
      postings = new DocsAndPositionsEnum[terms.length];
    }
    Passage passages[] = highlightDoc(field, terms, content.length(), bi, doc - subContext.docBase, termsEnum, postings, maxPassages);
    if (passages.length > 0) {
      // otherwise a null snippet
      highlights.put(doc, formatter.format(passages, content));
    }
    lastLeaf = leaf;
  }
  
  return highlights;
}
 
Developer: pkarmstr | Project: NYBC | Lines: 37 | Source: PostingsHighlighter.java

Example 10: getWeight

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
@Override
protected long getWeight(int docId) {
  int subIndex = ReaderUtil.subIndex(docId, starts);
  if (subIndex != currentLeafIndex) {
    currentLeafIndex = subIndex;
    try {
      currentWeightValues = weightsValueSource.getValues(new HashMap<String, Object>(), leaves.get(currentLeafIndex));
    } catch (IOException e) {
      throw new RuntimeException(e); // propagate the cause instead of swallowing it
    }
  }
  return currentWeightValues.longVal(docId - starts[subIndex]);
}
 
Developer: jimaguere | Project: Maskana-Gestor-de-Conocimiento | Lines: 14 | Source: DocumentExpressionDictionary.java

Example 11: hitsExecute

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) {
    if (hits.length == 0 ||
        // in case the request has only suggest, parsed query is null
        context.parsedQuery() == null) {
        return;
    }
    hits = hits.clone(); // don't modify the incoming hits
    Arrays.sort(hits, (a, b) -> Integer.compare(a.docId(), b.docId()));
    @SuppressWarnings("unchecked")
    List<String>[] matchedQueries = new List[hits.length];
    for (int i = 0; i < matchedQueries.length; ++i) {
        matchedQueries[i] = new ArrayList<>();
    }

    Map<String, Query> namedQueries = new HashMap<>(context.parsedQuery().namedFilters());
    if (context.parsedPostFilter() != null) {
        namedQueries.putAll(context.parsedPostFilter().namedFilters());
    }

    try {
        for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
            String name = entry.getKey();
            Query query = entry.getValue();
            int readerIndex = -1;
            int docBase = -1;
            Weight weight = context.searcher().createNormalizedWeight(query, false);
            Bits matchingDocs = null;
            final IndexReader indexReader = context.searcher().getIndexReader();
            for (int i = 0; i < hits.length; ++i) {
                SearchHit hit = hits[i];
                int hitReaderIndex = ReaderUtil.subIndex(hit.docId(), indexReader.leaves());
                if (readerIndex != hitReaderIndex) {
                    readerIndex = hitReaderIndex;
                    LeafReaderContext ctx = indexReader.leaves().get(readerIndex);
                    docBase = ctx.docBase;
                    // scorers can be costly to create, so reuse them across docs of the same segment
                    Scorer scorer = weight.scorer(ctx);
                    matchingDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), scorer);
                }
                if (matchingDocs.get(hit.docId() - docBase)) {
                    matchedQueries[i].add(name);
                }
            }
        }
        for (int i = 0; i < hits.length; ++i) {
            hits[i].matchedQueries(matchedQueries[i].toArray(new String[matchedQueries[i].size()]));
        }
    } catch (IOException e) {
        throw ExceptionsHelper.convertToElastic(e);
    } finally {
        context.clearReleasables(Lifetime.COLLECTION);
    }
}
 
Developer: justor | Project: elasticsearch_my | Lines: 55 | Source: MatchedQueriesFetchSubPhase.java

Example 12: testSpanScorerZeroSloppyFreq

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
public void testSpanScorerZeroSloppyFreq() throws Exception {
  boolean ordered = true;
  int slop = 1;
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  List<AtomicReaderContext> leaves = topReaderContext.leaves();
  int subIndex = ReaderUtil.subIndex(11, leaves);
  for (int i = 0, c = leaves.size(); i < c; i++) {
    final AtomicReaderContext ctx = leaves.get(i);
   
    final Similarity sim = new DefaultSimilarity() {
      @Override
      public float sloppyFreq(int distance) {
        return 0.0f;
      }
    };

    final Similarity oldSim = searcher.getSimilarity();
    Scorer spanScorer;
    try {
      searcher.setSimilarity(sim);
      SpanNearQuery snq = new SpanNearQuery(
                              new SpanQuery[] {
                                makeSpanTermQuery("t1"),
                                makeSpanTermQuery("t2") },
                              slop,
                              ordered);

      spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, ctx.reader().getLiveDocs());
    } finally {
      searcher.setSimilarity(oldSim);
    }
    if (i == subIndex) {
      assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
      assertEquals("first doc number", spanScorer.docID() + ctx.docBase, 11);
      float score = spanScorer.score();
      assertTrue("first doc score should be zero, " + score, score == 0.0f);
    }  else {
      assertTrue("no second doc", spanScorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
    }
  }
}
 
Developer: europeana | Project: search | Lines: 42 | Source: TestSpans.java

Example 13: testSpanScorerZeroSloppyFreq

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
public void testSpanScorerZeroSloppyFreq() throws Exception {
  boolean ordered = true;
  int slop = 1;
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  List<AtomicReaderContext> leaves = topReaderContext.leaves();
  int subIndex = ReaderUtil.subIndex(11, leaves);
  for (int i = 0, c = leaves.size(); i < c; i++) {
    final AtomicReaderContext ctx = leaves.get(i);
   
    final Similarity sim = new DefaultSimilarity() {
      @Override
      public float sloppyFreq(int distance) {
        return 0.0f;
      }
    };

    final Similarity oldSim = searcher.getSimilarity();
    Scorer spanScorer;
    try {
      searcher.setSimilarity(sim);
      SpanNearQuery snq = new SpanNearQuery(
                              new SpanQuery[] {
                                makeSpanTermQuery("t1"),
                                makeSpanTermQuery("t2") },
                              slop,
                              ordered);

      spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, true, false, ctx.reader().getLiveDocs());
    } finally {
      searcher.setSimilarity(oldSim);
    }
    if (i == subIndex) {
      assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
      assertEquals("first doc number", spanScorer.docID() + ctx.docBase, 11);
      float score = spanScorer.score();
      assertTrue("first doc score should be zero, " + score, score == 0.0f);
    }  else {
      assertTrue("no second doc", spanScorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
    }
  }
}
 
Developer: pkarmstr | Project: NYBC | Lines: 42 | Source: TestSpans.java

Example 14: highlightField

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
private Map<Integer,Object> highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List<AtomicReaderContext> leaves, int maxPassages, Query query) throws IOException {  
  Map<Integer,Object> highlights = new HashMap<Integer,Object>();
  
  // reuse in the real sense... for docs in same segment we just advance our old enum
  DocsAndPositionsEnum postings[] = null;
  TermsEnum termsEnum = null;
  int lastLeaf = -1;

  PassageFormatter fieldFormatter = getFormatter(field);
  if (fieldFormatter == null) {
    throw new NullPointerException("PassageFormatter cannot be null");
  }
  
  // check if we should do any multitermprocessing
  Analyzer analyzer = getIndexAnalyzer(field);
  CharacterRunAutomaton automata[] = new CharacterRunAutomaton[0];
  if (analyzer != null) {
    automata = MultiTermHighlighting.extractAutomata(query, field);
  }
  
  final BytesRef allTerms[];
  if (automata.length > 0) {
    allTerms = new BytesRef[terms.length + 1];
    System.arraycopy(terms, 0, allTerms, 0, terms.length);
  } else {
    allTerms = terms;
  }

  for (int i = 0; i < docids.length; i++) {
    String content = contents[i];
    if (content.length() == 0) {
      continue; // nothing to do
    }
    bi.setText(content);
    int doc = docids[i];
    int leaf = ReaderUtil.subIndex(doc, leaves);
    AtomicReaderContext subContext = leaves.get(leaf);
    AtomicReader r = subContext.reader();
    Terms t = r.terms(field);
    if (t == null) {
      continue; // nothing to do
    }
    if (leaf != lastLeaf) {
      termsEnum = t.iterator(null);
      postings = new DocsAndPositionsEnum[allTerms.length];
    }
    if (automata.length > 0) {
      DocsAndPositionsEnum dp = MultiTermHighlighting.getDocsEnum(analyzer.tokenStream(field, content), automata);
      dp.advance(doc - subContext.docBase);
      postings[terms.length] = dp;
    }
    Passage passages[] = highlightDoc(field, allTerms, content.length(), bi, doc - subContext.docBase, termsEnum, postings, maxPassages);
    if (passages.length == 0) {
      passages = getEmptyHighlight(field, bi, maxPassages);
    }
    if (passages.length > 0) {
      // otherwise a null snippet (eg if field is missing
      // entirely from the doc)
      highlights.put(doc, fieldFormatter.format(passages, content));
    }
    lastLeaf = leaf;
  }
  
  return highlights;
}
 
Developer: yintaoxue | Project: read-open-source-code | Lines: 66 | Source: PostingsHighlighter.java

Example 15: highlightField

import org.apache.lucene.index.ReaderUtil; // import the class that provides subIndex
private Map<Integer,Object> highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List<AtomicReaderContext> leaves, int maxPassages) throws IOException {  
  Map<Integer,Object> highlights = new HashMap<Integer,Object>();
  
  // reuse in the real sense... for docs in same segment we just advance our old enum
  DocsAndPositionsEnum postings[] = null;
  TermsEnum termsEnum = null;
  int lastLeaf = -1;

  PassageFormatter fieldFormatter = getFormatter(field);
  if (fieldFormatter == null) {
    throw new NullPointerException("PassageFormatter cannot be null");
  }

  for (int i = 0; i < docids.length; i++) {
    String content = contents[i];
    if (content.length() == 0) {
      continue; // nothing to do
    }
    bi.setText(content);
    int doc = docids[i];
    int leaf = ReaderUtil.subIndex(doc, leaves);
    AtomicReaderContext subContext = leaves.get(leaf);
    AtomicReader r = subContext.reader();
    Terms t = r.terms(field);
    if (t == null) {
      continue; // nothing to do
    }
    if (leaf != lastLeaf) {
      termsEnum = t.iterator(null);
      postings = new DocsAndPositionsEnum[terms.length];
    }
    Passage passages[] = highlightDoc(field, terms, content.length(), bi, doc - subContext.docBase, termsEnum, postings, maxPassages);
    if (passages.length == 0) {
      passages = getEmptyHighlight(field, bi, maxPassages);
    }
    if (passages.length > 0) {
      // otherwise a null snippet (eg if field is missing
      // entirely from the doc)
      highlights.put(doc, fieldFormatter.format(passages, content));
    }
    lastLeaf = leaf;
  }
  
  return highlights;
}
 
Developer: jimaguere | Project: Maskana-Gestor-de-Conocimiento | Lines: 46 | Source: PostingsHighlighter.java


Note: the org.apache.lucene.index.ReaderUtil.subIndex examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.