

Java IndexReaderContext Class Code Examples

This article compiles typical usage examples of the Java class org.apache.lucene.index.IndexReaderContext. If you are wondering what the IndexReaderContext class is for, how it is used, or what real-world code that uses it looks like, the curated examples below should help.


The IndexReaderContext class belongs to the org.apache.lucene.index package. Fifteen code examples of the class are shown below, sorted by popularity by default.
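As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below; the index path and class name are illustrative assumptions, and the Lucene 5+ API is assumed) showing how an IndexReaderContext is obtained from an IndexReader and how its leaves() method exposes the per-segment LeafReaderContexts:

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.FSDirectory;

public class IndexReaderContextTour {
    public static void main(String[] args) throws Exception {
        // open a reader over an existing index (the path is a placeholder)
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
            // the top-level context describes the whole reader tree
            IndexReaderContext top = reader.getContext();
            // leaves() flattens that tree into one LeafReaderContext per segment
            for (LeafReaderContext leaf : top.leaves()) {
                System.out.println("segment ord=" + leaf.ord
                        + " docBase=" + leaf.docBase
                        + " maxDoc=" + leaf.reader().maxDoc());
            }
        }
    }
}

Most of the examples that follow use exactly this top-level context, either from IndexReader.getContext() or from IndexSearcher.getTopReaderContext(), as the starting point for building per-term state.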

Example 1: rewrite

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
@Override
public Query rewrite(IndexReader reader) throws IOException {
    Query rewritten = super.rewrite(reader);
    if (rewritten != this) {
        return rewritten;
    }
    IndexReaderContext context = reader.getContext();
    TermContext[] ctx = new TermContext[terms.length];
    int[] docFreqs = new int[ctx.length];
    for (int i = 0; i < terms.length; i++) {
        ctx[i] = TermContext.build(context, terms[i]);
        docFreqs[i] = ctx[i].docFreq();
    }

    final int maxDoc = reader.maxDoc();
    blend(ctx, maxDoc, reader);
    return topLevelQuery(terms, ctx, docFreqs, maxDoc);
}
 
Developer: justor, Project: elasticsearch_my, Lines of code: 19, Source: BlendedTermQuery.java

Example 2: adjustTTF

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
private TermContext adjustTTF(IndexReaderContext readerContext, TermContext termContext, long sumTTF) {
    assert termContext.wasBuiltFor(readerContext);
    if (sumTTF == -1 && termContext.totalTermFreq() == -1) {
        return termContext;
    }
    TermContext newTermContext = new TermContext(readerContext);
    List<LeafReaderContext> leaves = readerContext.leaves();
    final int len;
    if (leaves == null) {
        len = 1;
    } else {
        len = leaves.size();
    }
    int df = termContext.docFreq();
    long ttf = sumTTF;
    for (int i = 0; i < len; i++) {
        TermState termState = termContext.get(i);
        if (termState == null) {
            continue;
        }
        newTermContext.register(termState, i, df, ttf);
        df = 0;
        ttf = 0;
    }
    return newTermContext;
}
 
Developer: justor, Project: elasticsearch_my, Lines of code: 27, Source: BlendedTermQuery.java

Example 3: SeqSpanWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
protected SeqSpanWeight(SeqSpanQuery query, IndexSearcher searcher) throws IOException {
  super(query);
  this.selfQuery = query;
  this.similarity = searcher.getSimilarity(needsScores);
  this.positions = selfQuery.getPositions();
  this.terms = selfQuery.getTerms();
  this.field = terms[0].field();
  if (positions.length < 2) {
    throw new IllegalStateException("PhraseWeight does not support less than 2 terms, call rewrite first");
  } else if (positions[0] != 0) {
    throw new IllegalStateException("PhraseWeight requires that the first position is 0, call rewrite first");
  }
  final IndexReaderContext context = searcher.getTopReaderContext();
  states = new TermContext[terms.length];
  TermStatistics termStats[] = new TermStatistics[terms.length];
  for (int i = 0; i < terms.length; i++) {
    final Term term = terms[i];
    states[i] = TermContext.build(context, term);
    termStats[i] = searcher.termStatistics(term, states[i]);
  }
  stats = similarity.computeWeight(searcher.collectionStatistics(terms[0].field()), termStats);
}
 
Developer: sing1ee, Project: lucene-custom-query, Lines of code: 23, Source: SeqSpanWeight.java

Example 4: createWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermContext termState;
  if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
    // make TermQuery single-pass if we don't have a PRTS or if the context differs!
    termState = TermContext.build(context, term);
  } else {
   // PRTS was pre-build for this IS
   termState = this.perReaderTermState;
  }

  // we must not ignore the given docFreq - if set use the given value (lie)
  if (docFreq != -1)
    termState.setDocFreq(docFreq);
  
  return new TermWeight(searcher, termState);
}
 
Developer: lamsfoundation, Project: lams, Lines of code: 19, Source: TermQuery.java

Example 5: SpanWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
public SpanWeight(SpanQuery query, IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  this.query = query;
  
  termContexts = new HashMap<>();
  TreeSet<Term> terms = new TreeSet<>();
  query.extractTerms(terms);
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermStatistics termStats[] = new TermStatistics[terms.size()];
  int i = 0;
  for (Term term : terms) {
    TermContext state = TermContext.build(context, term);
    termStats[i] = searcher.termStatistics(term, state);
    termContexts.put(term, state);
    i++;
  }
  final String field = query.getField();
  if (field != null) {
    stats = similarity.computeWeight(query.getBoost(), 
                                     searcher.collectionStatistics(query.getField()), 
                                     termStats);
  }
}
 
Developer: lamsfoundation, Project: lams, Lines of code: 25, Source: SpanWeight.java

Example 6: MultiPhraseWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
public MultiPhraseWeight(IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  final IndexReaderContext context = searcher.getTopReaderContext();
  
  // compute idf
  ArrayList<TermStatistics> allTermStats = new ArrayList<>();
  for(final Term[] terms: termArrays) {
    for (Term term: terms) {
      TermContext termContext = termContexts.get(term);
      if (termContext == null) {
        termContext = TermContext.build(context, term);
        termContexts.put(term, termContext);
      }
      allTermStats.add(searcher.termStatistics(term, termContext));
    }
  }
  stats = similarity.computeWeight(getBoost(),
      searcher.collectionStatistics(field), 
      allTermStats.toArray(new TermStatistics[allTermStats.size()]));
}
 
Developer: lamsfoundation, Project: lams, Lines of code: 22, Source: MultiPhraseQuery.java

Example 7: getAllDescendantReaderKeys

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
/**
 * Checks if the seed is an IndexReader, and if so will walk
 * the hierarchy of subReaders building up a list of the objects 
 * returned by {@code seed.getCoreCacheKey()}
 */
private List<Object> getAllDescendantReaderKeys(Object seed) {
  List<Object> all = new ArrayList<>(17); // will grow as we iter
  all.add(seed);
  for (int i = 0; i < all.size(); i++) {
    final Object obj = all.get(i);
    // TODO: We don't check closed readers here (as getTopReaderContext
    // throws AlreadyClosedException), what should we do? Reflection?
    if (obj instanceof IndexReader) {
      try {
        final List<IndexReaderContext> childs =
          ((IndexReader) obj).getContext().children();
        if (childs != null) { // it is composite reader
          for (final IndexReaderContext ctx : childs) {
            all.add(ctx.reader().getCoreCacheKey());
          }
        }
      } catch (AlreadyClosedException ace) {
        // ignore this reader
      }
    }
  }
  // need to skip the first, because it was the seed
  return all.subList(1, all.size());
}
 
Developer: lamsfoundation, Project: lams, Lines of code: 30, Source: FieldCacheSanityChecker.java

Example 8: rewrite

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
@Override
public Query rewrite(IndexReader reader) throws IOException {
    IndexReaderContext context = reader.getContext();
    TermContext[] ctx = new TermContext[terms.length];
    int[] docFreqs = new int[ctx.length];
    for (int i = 0; i < terms.length; i++) {
        ctx[i] = TermContext.build(context, terms[i]);
        docFreqs[i] = ctx[i].docFreq();
    }

    final int maxDoc = reader.maxDoc();
    blend(ctx, maxDoc, reader);
    Query query = topLevelQuery(terms, ctx, docFreqs, maxDoc);
    query.setBoost(getBoost());
    return query;
}
 
Developer: baidu, Project: Elasticsearch, Lines of code: 17, Source: BlendedTermQuery.java

Example 9: createWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {

    IndexReaderContext context = searcher.getTopReaderContext();

    TermContext mainTermState = null;
    TermContext[] similarStates = new TermContext[similarTerms.length];

    if (needsScores) {

        //
        // get the term contexts, for the main term + for each similar term
        //
        mainTermState = TermContext.build(context, mainTerm);

        for (int i = 0; i < similarTerms.length; i++) {
            similarStates[i] = TermContext.build(context, similarTerms[i].term);
        }
    }

    // else:  do not compute the term states, this will help save seeks in the terms
    //        dict on segments that have a cache entry for this query

    return new AugmentedTermWeight(searcher, needsScores, mainTermState, similarStates);
}
 
Developer: sebastian-hofstaetter, Project: ir-generalized-translation-models, Lines of code: 26, Source: AugmentedTermQuery.java

Example 10: createWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
	final IndexReaderContext context = searcher.getTopReaderContext();
	final TermContext termState;
	if ((perReaderTermS == null)
			|| (perReaderTermS.topReaderContext != context)) {
		// make TermQuery single-pass if we don't have a PRTS or if the
		// context differs!
		termState = TermContext.build(context, term);
	} else {
		// PRTS was pre-build for this IS
		termState = perReaderTermS;
	}

	// we must not ignore the given docFreq - if set use the given value
	// (lie)
	if (docFreq != -1) {
		termState.setDocFreq(docFreq);
	}

	return new TermWeight(searcher, termState);
}
 
Developer: quhfus, Project: DoSeR-Disambiguation, Lines of code: 23, Source: TermQuery.java

Example 11: createWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
	final IndexReaderContext context = searcher.getTopReaderContext();
	final TermContext termState;
	if ((perReaderTermS == null)
			|| (perReaderTermS.topReaderContext != context)) {
		// make TermQuery single-pass if we don't have a PRTS or if the
		// context differs!
		termState = TermContext.build(context, term); // cache term
														// lookups!
	} else {
		// PRTS was pre-build for this IS
		termState = perReaderTermS;
	}

	// we must not ignore the given docFreq - if set use the given value
	// (lie)
	if (docFreq != -1) {
		termState.setDocFreq(docFreq);
	}
	return new TermWeight(searcher, termState);
}
 
Developer: quhfus, Project: DoSeR-Disambiguation, Lines of code: 23, Source: LearnToRankTermQuery.java

Example 12: createWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
	final IndexReaderContext context = searcher.getTopReaderContext();
	final TermContext termState;
	if (perReaderTermState == null
			|| perReaderTermState.topReaderContext != context) {
		// make TermQuery single-pass if we don't have a PRTS or if the
		// context differs!
		termState = TermContext.build(context, term, true); // cache term
															// lookups!
	} else {
		// PRTS was pre-build for this IS
		termState = this.perReaderTermState;
	}

	// we must not ignore the given docFreq - if set use the given value
	// (lie)
	if (docFreq != -1)
		termState.setDocFreq(docFreq);
	return new TermWeight(searcher, termState);
}
 
Developer: quhfus, Project: DoSeR, Lines of code: 22, Source: LearnToRankTermQuery.java

Example 13: createWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermContext termState;
  if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
    // make TermQuery single-pass if we don't have a PRTS or if the context differs!
    termState = TermContext.build(context, term, true); // cache term lookups!
  } else {
   // PRTS was pre-build for this IS
   termState = this.perReaderTermState;
  }

  // we must not ignore the given docFreq - if set use the given value (lie)
  if (docFreq != -1)
    termState.setDocFreq(docFreq);
  
  return new TermWeight(searcher, termState);
}
 
Developer: pkarmstr, Project: NYBC, Lines of code: 19, Source: TermQuery.java

Example 14: SpanWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
public SpanWeight(SpanQuery query, IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  this.query = query;
  
  termContexts = new HashMap<Term,TermContext>();
  TreeSet<Term> terms = new TreeSet<Term>();
  query.extractTerms(terms);
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermStatistics termStats[] = new TermStatistics[terms.size()];
  int i = 0;
  for (Term term : terms) {
    TermContext state = TermContext.build(context, term, true);
    termStats[i] = searcher.termStatistics(term, state);
    termContexts.put(term, state);
    i++;
  }
  final String field = query.getField();
  if (field != null) {
    stats = similarity.computeWeight(query.getBoost(), 
                                     searcher.collectionStatistics(query.getField()), 
                                     termStats);
  }
}
 
Developer: pkarmstr, Project: NYBC, Lines of code: 25, Source: SpanWeight.java

Example 15: MultiPhraseWeight

import org.apache.lucene.index.IndexReaderContext; // import the required package/class
public MultiPhraseWeight(IndexSearcher searcher)
  throws IOException {
  this.similarity = searcher.getSimilarity();
  final IndexReaderContext context = searcher.getTopReaderContext();
  
  // compute idf
  ArrayList<TermStatistics> allTermStats = new ArrayList<TermStatistics>();
  for(final Term[] terms: termArrays) {
    for (Term term: terms) {
      TermContext termContext = termContexts.get(term);
      if (termContext == null) {
        termContext = TermContext.build(context, term, true);
        termContexts.put(term, termContext);
      }
      allTermStats.add(searcher.termStatistics(term, termContext));
    }
  }
  stats = similarity.computeWeight(getBoost(),
      searcher.collectionStatistics(field), 
      allTermStats.toArray(new TermStatistics[allTermStats.size()]));
}
 
Developer: pkarmstr, Project: NYBC, Lines of code: 22, Source: MultiPhraseQuery.java
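Most of the Weight/createWeight examples above follow the same pattern: fetch the searcher's top-level IndexReaderContext, build a TermContext per term once, and then ask the searcher for that term's collection statistics. The condensed sketch below is not taken from any of the projects above; it assumes the Lucene 5.x–7.x API used in these examples (TermContext was renamed TermStates in Lucene 8), and the helper class and method names are illustrative only.

import java.io.IOException;

import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermStatistics;

public final class TermStatsHelper {
    private TermStatsHelper() {}

    // Collection-wide statistics for one term, resolved against the searcher's reader tree.
    public static TermStatistics statsFor(IndexSearcher searcher, Term term) throws IOException {
        // context of the reader this searcher was opened on
        IndexReaderContext context = searcher.getTopReaderContext();
        // TermContext.build seeks the term once per segment and records its per-segment TermState
        TermContext termContext = TermContext.build(context, term);
        // docFreq/totalTermFreq are aggregated across all segments
        return searcher.termStatistics(term, termContext);
    }
}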


Note: the org.apache.lucene.index.IndexReaderContext class examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; when distributing or using them, please follow the corresponding project's license. Do not reproduce without permission.