

Java SlowCompositeReaderWrapper Class Code Examples

This article collects typical usage examples of the Java class org.apache.lucene.index.SlowCompositeReaderWrapper. If you have been wondering what the SlowCompositeReaderWrapper class is for, or how to use it in practice, the curated class examples below should help.


The SlowCompositeReaderWrapper class belongs to the org.apache.lucene.index package. Fifteen code examples of the class are shown below, sorted by popularity.
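Before the individual examples, here is a minimal sketch of the pattern they all share: exposing a composite (multi-segment) reader as a single atomic view. It assumes the Lucene 4.x API that most of the projects below target; the SlowWrapperSketch class, the openAtomicView helper, and its argument are illustrative, not taken from any of the quoted projects.

import java.io.File;
import java.io.IOException;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SlowWrapperSketch {
    // Opens an existing index and exposes the whole composite reader
    // as one AtomicReader, the pattern most of the examples below use.
    public static AtomicReader openAtomicView(File indexDir) throws IOException {
        Directory dir = FSDirectory.open(indexDir);            // Lucene 4.x: takes a File
        DirectoryReader composite = DirectoryReader.open(dir); // one leaf per segment
        // wrap() returns the reader unchanged if it is already atomic; otherwise it
        // merges all leaves on the fly, which is convenient but slow on large indexes.
        return SlowCompositeReaderWrapper.wrap(composite);
    }
}

Note that several tests below close only the wrapper: closing it also closes the wrapped composite reader.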

Example 1: testSortValues
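This Elasticsearch test indexes ten two-letter documents with a matching SortedDocValuesField, wraps the composite reader into an atomic one, and verifies that sorting on the "str" field returns the documents in lexicographic order.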

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
public void testSortValues() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    for (int i = 0; i < 10; i++) {
        Document document = new Document();
        String text = new String(new char[]{(char) (97 + i), (char) (97 + i)});
        document.add(new TextField("str", text, Field.Store.YES));
        document.add(new SortedDocValuesField("str", new BytesRef(text)));
        indexWriter.addDocument(document);
    }
    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(indexWriter));
    IndexSearcher searcher = new IndexSearcher(reader);
    TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("str", SortField.Type.STRING)));
    for (int i = 0; i < 10; i++) {
        FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i];
        assertThat((BytesRef) fieldDoc.fields[0], equalTo(new BytesRef(new String(new char[]{(char) (97 + i), (char) (97 + i)}))));
    }
}
 
Developer: justor, Project: elasticsearch_my, Lines: 19, Source: SimpleLuceneTests.java

Example 2: testNRTSearchOnClosedWriter
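This test obtains a near-real-time reader from the writer, refreshes it after adding 100 documents, closes the writer, and then iterates the "_id" terms through a wrapped reader.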

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
public void testNRTSearchOnClosedWriter() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    DirectoryReader reader = DirectoryReader.open(indexWriter);

    for (int i = 0; i < 100; i++) {
        Document document = new Document();
        TextField field = new TextField("_id", Integer.toString(i), Field.Store.YES);
        field.setBoost(i);
        document.add(field);
        indexWriter.addDocument(document);
    }
    reader = refreshReader(reader);

    indexWriter.close();

    TermsEnum termDocs = SlowCompositeReaderWrapper.wrap(reader).terms("_id").iterator();
    termDocs.next();
}
 
Developer: justor, Project: elasticsearch_my, Lines: 20, Source: SimpleLuceneTests.java

Example 3: getValues
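A ValueSource implementation: it wraps the top-level reader so FieldCache can build one terms index for the whole index, then returns reversed ordinals, mapping segment-local documents through the segment's docBase.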

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
  final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
  final AtomicReader r = SlowCompositeReaderWrapper.wrap(topReader);
  final int off = readerContext.docBase;

  final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
  final int end = sindex.getValueCount();

  return new IntDocValues(this) {
    @Override
    public int intVal(int doc) {
      return (end - sindex.getOrd(doc + off) - 1);
    }
  };
}
 
Developer: europeana, Project: search, Lines: 17, Source: ReverseOrdFieldSource.java

Example 4: getMergeReaders
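Part of a sorting merge policy: the readers being merged are exposed as one atomic view (wrapped in a MultiReader first when there is more than one) so the sorter can compute a document map across all of them before the merge.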

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
@Override
public List<AtomicReader> getMergeReaders() throws IOException {
  if (unsortedReaders == null) {
    unsortedReaders = super.getMergeReaders();
    final AtomicReader atomicView;
    if (unsortedReaders.size() == 1) {
      atomicView = unsortedReaders.get(0);
    } else {
      final IndexReader multiReader = new MultiReader(unsortedReaders.toArray(new AtomicReader[unsortedReaders.size()]));
      atomicView = SlowCompositeReaderWrapper.wrap(multiReader);
    }
    docMap = sorter.sort(atomicView);
    sortedView = SortingAtomicReader.wrap(atomicView, docMap);
  }
  // a null doc map means that the readers are already sorted
  return docMap == null ? unsortedReaders : Collections.singletonList(sortedView);
}
 
Developer: europeana, Project: search, Lines: 18, Source: SortingMergePolicy.java

Example 5: checkOnlineClassification
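A classification test helper: it trains a classifier on a wrapped near-real-time reader, asserts the assigned class and a positive score, and checks that the result is stable after the sample index is updated.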

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
protected void checkOnlineClassification(Classifier<T> classifier, String inputDoc, T expectedResult, Analyzer analyzer, String textFieldName, String classFieldName, Query query) throws Exception {
  AtomicReader atomicReader = null;
  try {
    populateSampleIndex(analyzer);
    atomicReader = SlowCompositeReaderWrapper.wrap(indexWriter.getReader());
    classifier.train(atomicReader, textFieldName, classFieldName, analyzer, query);
    ClassificationResult<T> classificationResult = classifier.assignClass(inputDoc);
    assertNotNull(classificationResult.getAssignedClass());
    assertEquals("got an assigned class of " + classificationResult.getAssignedClass(), expectedResult, classificationResult.getAssignedClass());
    assertTrue("got a not positive score " + classificationResult.getScore(), classificationResult.getScore() > 0);
    updateSampleIndex(analyzer);
    ClassificationResult<T> secondClassificationResult = classifier.assignClass(inputDoc);
    assertEquals(classificationResult.getAssignedClass(), secondClassificationResult.getAssignedClass());
    assertEquals(Double.valueOf(classificationResult.getScore()), Double.valueOf(secondClassificationResult.getScore()));

  } finally {
    if (atomicReader != null)
      atomicReader.close();
  }
}
 
Developer: europeana, Project: search, Lines: 21, Source: ClassificationTestBase.java

Example 6: testGetFilterHandleNumericParseError
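Verifies that, in non-strict mode, a NumericRangeFilter built from XML with an unparsable upper bound ('NaN') yields a null DocIdSet when evaluated against a wrapped empty index.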

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
public void testGetFilterHandleNumericParseError() throws Exception {
  NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
  filterBuilder.setStrictMode(false);

  String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
  Document doc = getDocumentFromString(xml);
  Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
  Directory ramDir = newDirectory();
  IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(null));
  writer.commit();
  try {
    AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(ramDir));
    try {
      assertNull(filter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
    }
    finally {
      reader.close();
    }
  }
  finally {
    writer.commit();
    writer.close();
    ramDir.close();
  }
}
 
Developer: europeana, Project: search, Lines: 26, Source: TestNumericRangeFilterBuilder.java

Example 7: testCachingWorks
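Checks that CachingWrapperFilter calls the nested filter only on the first request; wrapping the reader guarantees a single leaf context to cache against.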

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
public void testCachingWorks() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
  AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
  MockFilter filter = new MockFilter();
  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // first time, nested filter is called
  DocIdSet strongRef = cacher.getDocIdSet(context, context.reader().getLiveDocs());
  assertTrue("first time", filter.wasCalled());

  // make sure no exception if cache is holding the wrong docIdSet
  cacher.getDocIdSet(context, context.reader().getLiveDocs());

  // second time, nested filter should not be called
  filter.clear();
  cacher.getDocIdSet(context, context.reader().getLiveDocs());
  assertFalse("second time", filter.wasCalled());

  reader.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 26, Source: TestCachingWrapperFilter.java

Example 8: testIsCacheAble
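Probes which DocIdSets CachingWrapperFilter considers cacheable, using a wrapped single-document index.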

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
public void testIsCacheAble() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.addDocument(new Document());
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));

  // not cacheable:
  assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
  // returns default empty docidset, always cacheable:
  assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
  // not cacheable:
  assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), false);
  // a fixedbitset filter is always cacheable
  assertDocIdSetCacheable(reader, new Filter() {
    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
      return new FixedBitSet(context.reader().maxDoc());
    }
  }, true);

  reader.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 26, Source: TestCachingWrapperFilter.java

Example 9: testBasics
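A sanity check for a per-field similarity provider: norms for the "foo" and "bar" fields, read through the wrapped reader, must differ for every document, and searches on the two fields must score differently.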

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
public void testBasics() throws Exception {
  // sanity check of norms writer
  // TODO: generalize
  AtomicReader slow = SlowCompositeReaderWrapper.wrap(reader);
  NumericDocValues fooNorms = slow.getNormValues("foo");
  NumericDocValues barNorms = slow.getNormValues("bar");
  for (int i = 0; i < slow.maxDoc(); i++) {
    assertFalse(fooNorms.get(i) == barNorms.get(i));
  }
  
  // sanity check of searching
  TopDocs foodocs = searcher.search(new TermQuery(new Term("foo", "brown")), 10);
  assertTrue(foodocs.totalHits > 0);
  TopDocs bardocs = searcher.search(new TermQuery(new Term("bar", "brown")), 10);
  assertTrue(bardocs.totalHits > 0);
  assertTrue(foodocs.scoreDocs[0].score < bardocs.scoreDocs[0].score);
}
 
Developer: europeana, Project: search, Lines: 18, Source: TestSimilarityProvider.java

Example 10: setUp
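A test fixture: it indexes a set of values, wraps the writer's reader into a single atomic reader, and builds a searcher with a fixed similarity on top of it.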

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, 
      newIndexWriterConfig(new MockAnalyzer(random()))
      .setMergePolicy(newLogMergePolicy())
      .setSimilarity(new DefaultSimilarity()));
  for (int i = 0; i < values.length; i++) {
    Document doc = new Document();
    doc.add(newTextField(FIELD, values[i], Field.Store.YES));
    writer.addDocument(doc);
  }
  indexReader = SlowCompositeReaderWrapper.wrap(writer.getReader());
  writer.close();
  indexSearcher = newSearcher(indexReader);
  indexSearcher.setSimilarity(new DefaultSimilarity());
}
 
Developer: europeana, Project: search, Lines: 21, Source: TestTermScorer.java

Example 11: LuceneUtils
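The LuceneUtils constructor opens the index from the configured path and keeps both the composite reader and a wrapped leaf view of it, then loads optional stopword and startword lists.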

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
/**
 * @param flagConfig Contains all information necessary for configuring LuceneUtils.
 *        {@link FlagConfig#luceneindexpath()} must be non-empty. 
 */
public LuceneUtils(FlagConfig flagConfig) throws IOException {
  if (flagConfig.luceneindexpath().isEmpty()) {
    throw new IllegalArgumentException(
        "-luceneindexpath is a required argument for initializing LuceneUtils instance.");
  }

  this.compositeReader = DirectoryReader.open(
      FSDirectory.open(FileSystems.getDefault().getPath(flagConfig.luceneindexpath())));
  this.leafReader = SlowCompositeReaderWrapper.wrap(compositeReader);
  MultiFields.getFields(compositeReader);
  this.flagConfig = flagConfig;
  if (!flagConfig.stoplistfile().isEmpty())
    loadStopWords(flagConfig.stoplistfile());

  if (!flagConfig.startlistfile().isEmpty())
    loadStartWords(flagConfig.startlistfile());

  VerbatimLogger.info("Initialized LuceneUtils from Lucene index in directory: " + flagConfig.luceneindexpath() + "\n");
  VerbatimLogger.info("Fields in index are: " + String.join(", ", this.getFieldNames()) + "\n");
}
 
Developer: semanticvectors, Project: semanticvectors, Lines: 25, Source: LuceneUtils.java

Example 12: getTermsFromIndex
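Dumps the index vocabulary: a TermsEnum obtained from the wrapped composite reader is iterated, and each term is recorded together with its document frequency.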

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
private Map<String, Integer> getTermsFromIndex() {
    Map<String, Integer> indexedTerms = Maps.newLinkedHashMap();
    try {
        DirectoryReader indexReader = indexer.getIndexReader();
        Terms terms = SlowCompositeReaderWrapper.wrap(indexReader).terms(Indexer.INDEX_CONTENT);
        if (terms == null) {
            return indexedTerms;
        }

        TermsEnum termEnum = terms.iterator(null);
        BytesRef byteRef = null;

        while ((byteRef = termEnum.next()) != null) {
            String term = byteRef.utf8ToString();
            int count = indexReader.docFreq(new Term(Indexer.INDEX_CONTENT, byteRef));
            indexedTerms.put(term, Integer.valueOf(count));
        }
        indexReader.close();
    } catch (Exception e) {
        logger.error("Failed to dump index", e);
    }
    return indexedTerms;
}
 
Developer: kopl, Project: SPLevo, Lines: 24, Source: SemanticVPMAnalyzer.java

Example 13: getValues
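An older variant of Example 3 that constructs SlowCompositeReaderWrapper directly after an instanceof check, instead of calling the static wrap() method.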

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
  final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
  final AtomicReader r = topReader instanceof CompositeReader 
      ? new SlowCompositeReaderWrapper((CompositeReader)topReader) 
      : (AtomicReader) topReader;
  final int off = readerContext.docBase;

  final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
  final int end = sindex.getValueCount();

  return new IntDocValues(this) {
    @Override
    public int intVal(int doc) {
      return (end - sindex.getOrd(doc + off) - 1);
    }
  };
}
 
Developer: pkarmstr, Project: NYBC, Lines: 19, Source: ReverseOrdFieldSource.java

Example 14: testGetFilterHandleNumericParseError
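An older variant of Example 6, written against an earlier Lucene API: it passes a Version to newIndexWriterConfig and invokes the SlowCompositeReaderWrapper constructor directly.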

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
public void testGetFilterHandleNumericParseError() throws Exception {
  NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
  filterBuilder.setStrictMode(false);

  String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
  Document doc = getDocumentFromString(xml);
  Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
  Directory ramDir = newDirectory();
  IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
  writer.commit();
  try {
    AtomicReader reader = new SlowCompositeReaderWrapper(DirectoryReader.open(ramDir));
    try {
      assertNull(filter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
    }
    finally {
      reader.close();
    }
  }
  finally {
    writer.commit();
    writer.close();
    ramDir.close();
  }
}
 
Developer: pkarmstr, Project: NYBC, Lines: 26, Source: TestNumericRangeFilterBuilder.java

Example 15: testIsCacheAble
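An older variant of Example 8 in which the FieldCacheRangeFilter DocIdSet is still expected to be cacheable.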

import org.apache.lucene.index.SlowCompositeReaderWrapper; // import the required package/class
public void testIsCacheAble() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.addDocument(new Document());
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));

  // not cacheable:
  assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
  // returns default empty docidset, always cacheable:
  assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
  // is cacheable:
  assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true);
  // a fixedbitset filter is always cacheable
  assertDocIdSetCacheable(reader, new Filter() {
    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
      return new FixedBitSet(context.reader().maxDoc());
    }
  }, true);

  reader.close();
  dir.close();
}
 
Developer: pkarmstr, Project: NYBC, Lines: 26, Source: TestCachingWrapperFilter.java


Note: The org.apache.lucene.index.SlowCompositeReaderWrapper examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects, and copyright of the source code remains with the original authors; consult the corresponding project's License before using or redistributing it. Please do not reproduce without permission.