本文整理汇总了Java中org.apache.lucene.index.SlowCompositeReaderWrapper.wrap方法的典型用法代码示例。如果您正苦于以下问题:Java SlowCompositeReaderWrapper.wrap方法的具体用法?Java SlowCompositeReaderWrapper.wrap怎么用?Java SlowCompositeReaderWrapper.wrap使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 org.apache.lucene.index.SlowCompositeReaderWrapper 的用法示例。
在下文中一共展示了SlowCompositeReaderWrapper.wrap方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testSortValues
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * Verifies that sorting on a SortedDocValues field ("str") returns the indexed
 * BytesRef values in ascending order via {@code FieldDoc.fields[0]}.
 *
 * <p>Fix: the original leaked the IndexWriter, IndexReader and Directory —
 * none were closed. Cleanup is now guaranteed with try/finally.
 */
public void testSortValues() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    try {
        // Index ten docs "aa", "bb", ..., "jj" — already in ascending string order.
        for (int i = 0; i < 10; i++) {
            Document document = new Document();
            String text = new String(new char[]{(char) (97 + i), (char) (97 + i)});
            document.add(new TextField("str", text, Field.Store.YES));
            document.add(new SortedDocValuesField("str", new BytesRef(text)));
            indexWriter.addDocument(document);
        }
        IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(indexWriter));
        try {
            IndexSearcher searcher = new IndexSearcher(reader);
            TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("str", SortField.Type.STRING)));
            // The sort values surfaced per hit must match the indexed BytesRefs in order.
            for (int i = 0; i < 10; i++) {
                FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i];
                assertThat((BytesRef) fieldDoc.fields[0], equalTo(new BytesRef(new String(new char[]{(char) (97 + i), (char) (97 + i)}))));
            }
        } finally {
            reader.close(); // previously leaked
        }
    } finally {
        indexWriter.close(); // previously leaked
        dir.close();         // previously leaked
    }
}
示例2: getValues
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    // Flatten the whole index into one atomic view so term ordinals are global,
    // not per-segment.
    final AtomicReader atomicReader =
        SlowCompositeReaderWrapper.wrap(ReaderUtil.getTopLevelContext(readerContext).reader());
    final int docBase = readerContext.docBase;
    final SortedDocValues termsIndex = FieldCache.DEFAULT.getTermsIndex(atomicReader, field);
    final int valueCount = termsIndex.getValueCount();
    return new IntDocValues(this) {
        @Override
        public int intVal(int doc) {
            // Reverse the ordinal: the highest term maps to 0, the lowest to
            // valueCount - 1. docBase rebases the segment-local doc id.
            return valueCount - termsIndex.getOrd(doc + docBase) - 1;
        }
    };
}
示例3: getMergeReaders
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
@Override
public List<AtomicReader> getMergeReaders() throws IOException {
// Lazily build (once per merge) a single atomic view over all segment
// readers participating in this merge, then compute the sort order for it.
if (unsortedReaders == null) {
unsortedReaders = super.getMergeReaders();
final AtomicReader atomicView;
if (unsortedReaders.size() == 1) {
// Single segment: no composite wrapper needed.
atomicView = unsortedReaders.get(0);
} else {
// Multiple segments: merge them into one flat doc-id space first.
final IndexReader multiReader = new MultiReader(unsortedReaders.toArray(new AtomicReader[unsortedReaders.size()]));
atomicView = SlowCompositeReaderWrapper.wrap(multiReader);
}
// docMap maps original doc ids to sorted doc ids; per the comment below,
// sorter.sort(...) returns null when the readers are already sorted.
docMap = sorter.sort(atomicView);
sortedView = SortingAtomicReader.wrap(atomicView, docMap);
}
// a null doc map means that the readers are already sorted
return docMap == null ? unsortedReaders : Collections.singletonList(sortedView);
}
示例4: checkOnlineClassification
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * Trains the classifier on the sample index, asserts the class and score
 * assigned to {@code inputDoc}, then re-checks that the result is unchanged
 * after the index receives an update.
 */
protected void checkOnlineClassification(Classifier<T> classifier, String inputDoc, T expectedResult, Analyzer analyzer, String textFieldName, String classFieldName, Query query) throws Exception {
    populateSampleIndex(analyzer);
    // try-with-resources replaces the null-checked finally of the original;
    // the reader is closed on every exit path exactly as before.
    try (AtomicReader atomicReader = SlowCompositeReaderWrapper.wrap(indexWriter.getReader())) {
        classifier.train(atomicReader, textFieldName, classFieldName, analyzer, query);
        ClassificationResult<T> firstResult = classifier.assignClass(inputDoc);
        assertNotNull(firstResult.getAssignedClass());
        assertEquals("got an assigned class of " + firstResult.getAssignedClass(), expectedResult, firstResult.getAssignedClass());
        assertTrue("got a not positive score " + firstResult.getScore(), firstResult.getScore() > 0);
        // The classification must be stable across an index update.
        updateSampleIndex(analyzer);
        ClassificationResult<T> secondResult = classifier.assignClass(inputDoc);
        assertEquals(firstResult.getAssignedClass(), secondResult.getAssignedClass());
        assertEquals(Double.valueOf(firstResult.getScore()), Double.valueOf(secondResult.getScore()));
    }
}
示例5: testGetFilterHandleNumericParseError
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * In non-strict mode, a NumericRangeFilter with an unparsable bound ('NaN')
 * must degrade to a match-nothing filter instead of throwing.
 */
public void testGetFilterHandleNumericParseError() throws Exception {
    NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
    filterBuilder.setStrictMode(false);
    String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
    Filter filter = filterBuilder.getFilter(getDocumentFromString(xml).getDocumentElement());
    Directory ramDir = newDirectory();
    IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(null));
    writer.commit();
    try {
        try (AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(ramDir))) {
            // A match-nothing filter signals "no docs" with a null DocIdSet.
            assertNull(filter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
        }
    } finally {
        writer.commit();
        writer.close();
        ramDir.close();
    }
}
示例6: testCachingWorks
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * Verifies that CachingWrapperFilter consults the wrapped filter exactly once
 * and afterwards serves the cached DocIdSet.
 */
public void testCachingWorks() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.close();
    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
    AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
    MockFilter filter = new MockFilter();
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
    // Initial call must reach the wrapped filter. Keep a strong reference so
    // the cached entry cannot be collected mid-test.
    DocIdSet strongRef = cacher.getDocIdSet(context, context.reader().getLiveDocs());
    assertTrue("first time", filter.wasCalled());
    // A repeated call with a possibly stale cache entry must not throw.
    cacher.getDocIdSet(context, context.reader().getLiveDocs());
    // Once cached, the wrapped filter must stay untouched.
    filter.clear();
    cacher.getDocIdSet(context, context.reader().getLiveDocs());
    assertFalse("second time", filter.wasCalled());
    reader.close();
    dir.close();
}
示例7: testIsCacheAble
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * Checks which filter types report a cacheable DocIdSet on a one-document index.
 */
public void testIsCacheAble() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.addDocument(new Document());
    writer.close();
    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
    // A query-backed filter: not cacheable.
    assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
    // An inverted (empty) numeric range returns the shared empty DocIdSet: always cacheable.
    assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
    // FieldCache-backed range filter: expected non-cacheable here (the original's
    // "is cacheable" comment contradicted its own `false` assertion).
    assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), false);
    // A filter producing a FixedBitSet is always cacheable.
    Filter fixedBitSetFilter = new Filter() {
        @Override
        public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
            return new FixedBitSet(context.reader().maxDoc());
        }
    };
    assertDocIdSetCacheable(reader, fixedBitSetFilter, true);
    reader.close();
    dir.close();
}
示例8: testBasics
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * Sanity check of the norms writer: per-field norms must differ, and the norm
 * difference must be visible in scoring.
 */
public void testBasics() throws Exception {
    // TODO: generalize
    AtomicReader atomicView = SlowCompositeReaderWrapper.wrap(reader);
    NumericDocValues fooNorms = atomicView.getNormValues("foo");
    NumericDocValues barNorms = atomicView.getNormValues("bar");
    // Norms of the two fields must never coincide on any document.
    for (int docId = 0; docId < atomicView.maxDoc(); docId++) {
        assertFalse(fooNorms.get(docId) == barNorms.get(docId));
    }
    // Both fields are searchable, and "foo" scores below "bar" for the same term.
    TopDocs foodocs = searcher.search(new TermQuery(new Term("foo", "brown")), 10);
    assertTrue(foodocs.totalHits > 0);
    TopDocs bardocs = searcher.search(new TermQuery(new Term("bar", "brown")), 10);
    assertTrue(bardocs.totalHits > 0);
    assertTrue(foodocs.scoreDocs[0].score < bardocs.scoreDocs[0].score);
}
示例9: LuceneUtils
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * Creates a LuceneUtils bound to the index at {@link FlagConfig#luceneindexpath()}.
 *
 * @param flagConfig Contains all information necessary for configuring LuceneUtils.
 * {@link FlagConfig#luceneindexpath()} must be non-empty.
 * @throws IOException if the Lucene index cannot be opened
 * @throws IllegalArgumentException if {@code flagConfig.luceneindexpath()} is empty
 */
public LuceneUtils(FlagConfig flagConfig) throws IOException {
if (flagConfig.luceneindexpath().isEmpty()) {
throw new IllegalArgumentException(
"-luceneindexpath is a required argument for initializing LuceneUtils instance.");
}
// Open the on-disk index and keep both the composite reader and a
// flattened single-leaf view of it.
this.compositeReader = DirectoryReader.open(
FSDirectory.open(FileSystems.getDefault().getPath(flagConfig.luceneindexpath())));
this.leafReader = SlowCompositeReaderWrapper.wrap(compositeReader);
// NOTE(review): return value is discarded — presumably invoked for a side
// effect or left over from a refactor; confirm whether this call is needed.
MultiFields.getFields(compositeReader);
this.flagConfig = flagConfig;
// Stop/start word lists are optional and loaded only when configured.
if (!flagConfig.stoplistfile().isEmpty())
loadStopWords(flagConfig.stoplistfile());
if (!flagConfig.startlistfile().isEmpty())
loadStartWords(flagConfig.startlistfile());
VerbatimLogger.info("Initialized LuceneUtils from Lucene index in directory: " + flagConfig.luceneindexpath() + "\n");
VerbatimLogger.info("Fields in index are: " + String.join(", ", this.getFieldNames()) + "\n");
}
示例10: testIsCacheAble
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * Checks the cacheability contract of several filter types against a
 * single-document index.
 */
public void testIsCacheAble() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.addDocument(new Document());
    writer.close();
    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
    // Query-backed filter: not cacheable.
    assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
    // Inverted (empty) numeric range yields the shared empty DocIdSet: always cacheable.
    assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
    // FieldCache-backed range filter: cacheable in this variant.
    assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true);
    // Any filter producing a FixedBitSet is always cacheable.
    Filter bitSetFilter = new Filter() {
        @Override
        public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
            return new FixedBitSet(context.reader().maxDoc());
        }
    };
    assertDocIdSetCacheable(reader, bitSetFilter, true);
    reader.close();
    dir.close();
}
示例11: setUp
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
@Override
public void setUp() throws Exception {
    super.setUp();
    directory = newDirectory();
    // Index every entry of `values` into FIELD using a deterministic log merge
    // policy and the default similarity.
    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(newLogMergePolicy())
            .setSimilarity(new DefaultSimilarity()));
    for (String value : values) {
        Document doc = new Document();
        doc.add(newTextField(FIELD, value, Field.Store.YES));
        writer.addDocument(doc);
    }
    // Collapse the index to a single atomic view before closing the writer.
    indexReader = SlowCompositeReaderWrapper.wrap(writer.getReader());
    writer.close();
    indexSearcher = newSearcher(indexReader);
    indexSearcher.setSimilarity(new DefaultSimilarity());
}
示例12: testGetFilterHandleNumericParseError
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * Non-strict mode: an unparsable numeric bound ('NaN') must produce a filter
 * that silently matches nothing rather than failing at parse time.
 */
public void testGetFilterHandleNumericParseError() throws Exception {
    NumericRangeFilterBuilder builder = new NumericRangeFilterBuilder();
    builder.setStrictMode(false);
    String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
    Filter filter = builder.getFilter(getDocumentFromString(xml).getDocumentElement());
    Directory ramDir = newDirectory();
    IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
    writer.commit();
    try {
        try (AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(ramDir))) {
            // "Matches nothing" is signalled by a null DocIdSet.
            assertNull(filter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
        }
    } finally {
        writer.commit();
        writer.close();
        ramDir.close();
    }
}
示例13: getValuesForTable
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * Returns a collection of strings used as values to populate the table.
 * Used both by the GUI to display the values and during Segmenter
 * initialization to physically populate the table.
 *
 * @param tableName name of the table
 * @return list of values (trimmed and lower-cased); empty if the table is
 *         unknown or the index cannot be read
 */
public Collection<? extends String> getValuesForTable(String tableName) {
// Map the external table name back to the Lucene field that stores it.
BiMap<String, String> invFields = fieldsTable.inverse();
String field = invFields.get(tableName);
List<String> ret = new ArrayList<>();
if (field != null) {
// Lazily open the index on first use.
if (reader == null) {
openIndex();
}
try {
// Flatten the composite reader so every document is addressable
// through a single doc-id space.
final LeafReader ar = SlowCompositeReaderWrapper.wrap(reader);
final int maxdoc = reader.maxDoc();
for (int i = 0; i < maxdoc; i++) {
Document doc = ar.document(i);
String val = doc.get(field);
if (val != null) {
if (val.trim().length() > 0) {
// Normalize each value: trim and lower-case before collecting.
ret.add(val.trim().toLowerCase());
}
}
}
} catch (Exception e) {
// Best-effort: log the failure and return whatever was collected so far.
LogGui.printException(e);
}
}
return ret;
}
示例14: setUp
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
@Override
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), directory, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
// Add a series of docs with filterable fields: access rights, prices, dates
// and "in-stock" flags (args: rights, price, date, inStock).
addDoc(writer, "admin guest", "010", "20040101", "Y");
addDoc(writer, "guest", "020", "20040101", "Y");
addDoc(writer, "guest", "020", "20050101", "Y");
addDoc(writer, "admin", "020", "20050101", "Maybe");
addDoc(writer, "admin guest", "030", "20050101", "N");
// Expose the whole index as one atomic reader; the writer is done after this.
reader = SlowCompositeReaderWrapper.wrap(writer.getReader());
writer.close();
}
示例15: testMissingTermAndField
import org.apache.lucene.index.SlowCompositeReaderWrapper; //导入方法依赖的package包/类
/**
 * A term filter must yield a null DocIdSet for missing terms or fields, and
 * match exactly the indexed document for an existing term.
 */
public void testMissingTermAndField() throws Exception {
    String fieldName = "field1";
    Directory rd = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), rd);
    Document doc = new Document();
    doc.add(newStringField(fieldName, "value1", Field.Store.NO));
    w.addDocument(doc);
    IndexReader reader = SlowCompositeReaderWrapper.wrap(w.getReader());
    assertTrue(reader.getContext() instanceof AtomicReaderContext);
    AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
    w.close();
    Bits liveDocs = context.reader().getLiveDocs();
    // The existing term matches exactly doc 0 and nothing else.
    DocIdSet matching = termFilter(fieldName, "value1").getDocIdSet(context, liveDocs);
    assertNotNull("must not be null", matching);
    DocIdSetIterator it = matching.iterator();
    assertEquals(it.nextDoc(), 0);
    assertEquals(it.nextDoc(), DocIdSetIterator.NO_MORE_DOCS);
    // A missing term on an existing field yields no set at all.
    assertNull("must be null", termFilter(fieldName, "value2").getDocIdSet(context, liveDocs));
    // So does an entirely unknown field.
    assertNull("must be null", termFilter("field2", "value1").getDocIdSet(context, liveDocs));
    reader.close();
    rd.close();
}