This article collects typical usage examples of the Java method org.apache.lucene.index.DirectoryReader.open. If you have been wondering how to use DirectoryReader.open, what it is for, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of its enclosing class, org.apache.lucene.index.DirectoryReader.
The sections below present 15 code examples of the DirectoryReader.open method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
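Before diving into the examples, here is a minimal, self-contained sketch of the canonical pattern: open a Directory, wrap it in a DirectoryReader, and search. The index path is a placeholder you would replace with a real one.

import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class OpenReaderSketch {
    public static void main(String[] args) throws Exception {
        // "/path/to/index" is a placeholder for an existing Lucene index directory
        try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
             DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            System.out.println("Live documents in index: " + reader.numDocs());
        }
    }
}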
Example 1: testCase
import org.apache.lucene.index.DirectoryReader; // import the required package/class
private void testCase(Query query, String field, int precision, CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
        Consumer<InternalGeoHashGrid> verify) throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    buildIndex.accept(indexWriter);
    indexWriter.close();

    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);

    GeoGridAggregationBuilder aggregationBuilder = new GeoGridAggregationBuilder("_name").field(field);
    aggregationBuilder.precision(precision);
    MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType();
    fieldType.setHasDocValues(true);
    fieldType.setName(FIELD_NAME);
    try (Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
        aggregator.preCollection();
        indexSearcher.search(query, aggregator);
        aggregator.postCollection();
        verify.accept((InternalGeoHashGrid) aggregator.buildAggregation(0L));
    }
    indexReader.close();
    directory.close();
}
Example 2: search
import org.apache.lucene.index.DirectoryReader; // import the required package/class
/** User runs a query and counts facets. */
private List<FacetResult> search() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(indexReader);

    // Aggregates the facet counts
    FacetsCollector fc = new FacetsCollector();

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query:
    FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

    // Retrieve results
    Facets facets = new SortedSetDocValuesFacetCounts(state, fc);
    List<FacetResult> results = new ArrayList<FacetResult>();
    results.add(facets.getTopChildren(10, "Author"));
    results.add(facets.getTopChildren(10, "Publish Year"));
    indexReader.close();
    return results;
}
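A DirectoryReader opened this way is a point-in-time snapshot: documents committed after the open call stay invisible to it. If the facet counts must reflect later commits, the usual refresh idiom is DirectoryReader.openIfChanged, sketched below against a hypothetical long-lived reader field:

DirectoryReader newReader = DirectoryReader.openIfChanged(indexReader);
if (newReader != null) {       // null means the index has not changed
    indexReader.close();       // release the stale snapshot
    indexReader = newReader;   // run subsequent searches against the fresh reader
}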
Example 3: findByAuthorSurname
import org.apache.lucene.index.DirectoryReader; // import the required package/class
/**
 * Search all books of a given author.
 *
 * @throws Exception never, otherwise the test fails.
 */
@Test
public void findByAuthorSurname() throws Exception {
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
    Query query = new QueryParser("author", new StandardAnalyzer()).parse("Gazzarini");

    TopDocs matches = searcher.search(query, 10);
    assertEquals(1, matches.totalHits);

    final String id = Arrays.stream(matches.scoreDocs)
            .map(scoreDoc -> luceneDoc(scoreDoc.doc, searcher))
            .map(doc -> doc.get("id"))
            .findFirst()
            .get();
    assertEquals("1", id);
}
Example 4: testScriptedMetricWithoutCombine
import org.apache.lucene.index.DirectoryReader; // import the required package/class
/**
 * Without a combine script, the "_aggs" map should contain a list whose size
 * equals the number of documents matched.
 */
@SuppressWarnings("unchecked")
public void testScriptedMetricWithoutCombine() throws IOException {
    try (Directory directory = newDirectory()) {
        int numDocs = randomInt(100);
        try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
            for (int i = 0; i < numDocs; i++) {
                indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i)));
            }
        }
        try (IndexReader indexReader = DirectoryReader.open(directory)) {
            ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
            aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT);
            ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true),
                    new MatchAllDocsQuery(), aggregationBuilder);
            assertEquals(AGG_NAME, scriptedMetric.getName());
            assertNotNull(scriptedMetric.aggregation());
            Map<String, Object> agg = (Map<String, Object>) scriptedMetric.aggregation();
            assertEquals(numDocs, ((List<Integer>) agg.get("collector")).size());
        }
    }
}
Example 5: doTestDocValueRangeQueries
import org.apache.lucene.index.DirectoryReader; // import the required package/class
public void doTestDocValueRangeQueries(NumberType type, Supplier<Number> valueSupplier) throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
    final int numDocs = TestUtil.nextInt(random(), 100, 500);
    for (int i = 0; i < numDocs; ++i) {
        w.addDocument(type.createFields("foo", valueSupplier.get(), true, true, false));
    }
    DirectoryReader reader = DirectoryReader.open(w);
    IndexSearcher searcher = newSearcher(reader);
    w.close();

    final int iters = 10;
    for (int iter = 0; iter < iters; ++iter) {
        Query query = type.rangeQuery("foo",
                random().nextBoolean() ? null : valueSupplier.get(),
                random().nextBoolean() ? null : valueSupplier.get(),
                randomBoolean(), randomBoolean(), true);
        assertThat(query, Matchers.instanceOf(IndexOrDocValuesQuery.class));
        IndexOrDocValuesQuery indexOrDvQuery = (IndexOrDocValuesQuery) query;
        assertEquals(
                searcher.count(indexOrDvQuery.getIndexQuery()),
                searcher.count(indexOrDvQuery.getRandomAccessQuery()));
    }
    reader.close();
    dir.close();
}
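The assertion above relies on IndexOrDocValuesQuery exposing the same result set through two execution strategies: a points-based index query and a doc-values query, of which Lucene picks the cheaper one per segment. A hedged sketch of how such a pair is typically constructed for a long field (factory names vary across Lucene versions; newSlowRangeQuery is the Lucene 7 spelling, and the classes live in org.apache.lucene.document and org.apache.lucene.search):

// index-structure query: fast when the range is selective
Query indexQuery = LongPoint.newRangeQuery("foo", 10L, 20L);
// doc-values query: cheaper when combined with other selective clauses
Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery("foo", 10L, 20L);
// Lucene chooses whichever strategy is estimated to be faster per segment
Query rangeQuery = new IndexOrDocValuesQuery(indexQuery, dvQuery);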
Example 6: search
import org.apache.lucene.index.DirectoryReader; // import the required package/class
/**
 * Search sample.
 *
 * @param directory the index directory.
 * @throws IOException in case of I/O failure.
 * @throws ParseException in case of a query parse exception.
 */
public static void search(Directory directory) throws IOException, ParseException {
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
    Query query = new QueryParser("title", new StandardAnalyzer()).parse("title:Solr");
    TopDocs matches = searcher.search(query, 10);

    System.out.println("Search returned " + matches.totalHits + " matches.");
    Arrays.stream(matches.scoreDocs)
            .map(scoreDoc -> luceneDoc(scoreDoc, searcher))
            .forEach(doc -> {
                System.out.println("-------------------------------------");
                System.out.println("ID:\t" + doc.get("id"));
                System.out.println("TITLE:\t" + doc.get("title"));
                System.out.println("AUTHOR:\t" + doc.get("author"));
                System.out.println("SCORE:\t" + doc.get("score"));
            });
}
Example 7: drillDown
import org.apache.lucene.index.DirectoryReader; // import the required package/class
/** User drills down on 'Publish Year/2010'. */
private FacetResult drillDown() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(indexReader);

    // Now user drills down on Publish Year/2010:
    DrillDownQuery q = new DrillDownQuery(config);
    q.add("Publish Year", "2010");
    FacetsCollector fc = new FacetsCollector();
    FacetsCollector.search(searcher, q, 10, fc);

    // Retrieve results
    Facets facets = new SortedSetDocValuesFacetCounts(state, fc);
    FacetResult result = facets.getTopChildren(10, "Author");
    indexReader.close();
    return result;
}
Example 8: testRedisDirectoryWithJedisPool
import org.apache.lucene.index.DirectoryReader; // import the required package/class
public void testRedisDirectoryWithJedisPool() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    //indexWriterConfig.setInfoStream(System.out);
    //indexWriterConfig.setRAMBufferSizeMB(2048);
    //LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy();
    //logByteSizeMergePolicy.setMinMergeMB(1);
    //logByteSizeMergePolicy.setMaxMergeMB(64);
    //logByteSizeMergePolicy.setMaxCFSSegmentSizeMB(64);
    //indexWriterConfig.setRAMBufferSizeMB(1024).setMergePolicy(logByteSizeMergePolicy).setUseCompoundFile(false);
    //GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
    // max wait time for obtaining a connection from the pool
    //genericObjectPoolConfig.setMaxWaitMillis(3000);
    // 10s connection timeout
    JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379, Constants.TIME_OUT);
    RedisDirectory redisDirectory = new RedisDirectory(new JedisPoolStream(jedisPool));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedisPool consumes {}s!", (end - start) / 1000);

    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(
            new RedisDirectory(new JedisStream("localhost", 6379))));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedisPool search consumes {}ms!", (end - start));
}
Example 9: indexingDone
import org.apache.lucene.index.DirectoryReader; // import the required package/class
@Override
public void indexingDone() {
    try {
        spellChecker = new DirectSpellChecker();
        spellChecker.setMaxEdits(2);
        spellChecker.setAccuracy(0.1f);
        spellChecker.setMinPrefix(0);

        reader = DirectoryReader.open(writer);
        fuzzySuggester = new FuzzySuggester(directory, "", writer.getAnalyzer());
        Dictionary dict = new DocumentValueSourceDictionary(reader, WORD_FIELD, new LongValuesSource() {
            @Override
            public boolean needsScores() {
                return false;
            }

            @Override
            public LongValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
                return null;
            }
        });
        fuzzySuggester.build(dict);
        writer.close();
        searcher = new IndexSearcher(DirectoryReader.open(directory));
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
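Note that this example uses both overloads: DirectoryReader.open(writer) returns a near-real-time (NRT) reader that already sees documents still buffered in the IndexWriter, while DirectoryReader.open(directory) only sees committed segments. A minimal sketch of the difference (the directory and analyzer names are assumptions):

IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()));
writer.addDocument(new Document());                  // buffered, not yet committed
try (DirectoryReader nrtReader = DirectoryReader.open(writer)) {
    System.out.println(nrtReader.numDocs());         // 1 -- the NRT reader sees the buffered doc
}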
Example 10: search
import org.apache.lucene.index.DirectoryReader; // import the required package/class
/** User runs a query and counts facets. */
private List<FacetResult> search() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

    FacetsCollector fc = new FacetsCollector();

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query:
    FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

    // Retrieve results
    List<FacetResult> results = new ArrayList<FacetResult>();

    // Count both "Publish Date" and "Author" dimensions
    Facets author = new FastTaxonomyFacetCounts("author", taxoReader, config, fc);
    results.add(author.getTopChildren(10, "Author"));

    Facets pubDate = new FastTaxonomyFacetCounts("pubdate", taxoReader, config, fc);
    results.add(pubDate.getTopChildren(10, "Publish Date"));

    indexReader.close();
    taxoReader.close();
    return results;
}
Example 11: openIndex
import org.apache.lucene.index.DirectoryReader; // import the required package/class
private boolean openIndex(String foldername, String folderpath) {
    try {
        Analyzer analyzer = new StandardAnalyzer();
        //diskConfig = new IndexWriterConfig(analyzer);
        //diskConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        //diskConfig.setRAMBufferSizeMB(256.0);

        // init directory
        FSDirectory diskDirectory = FSDirectory.open(Paths.get(folderpath));
        // init reader
        DirectoryReader diskReader = DirectoryReader.open(diskDirectory);

        // everything is OK
        if (analyzer != null && diskDirectory != null && diskReader != null) {
            analyzerList.put(foldername, analyzer);
            directorList.put(foldername, diskDirectory);
            readerList.put(foldername, diskReader);
            log.info("Load Index Success:" + foldername + " path:" + folderpath);
            return true;
        } else {
            log.error("Load Index Fail:" + foldername);
            return false;
        }
    } catch (org.apache.lucene.index.IndexNotFoundException xe) {
        log.error("Load Index Not Found:" + foldername);
        //throw new Exception(e.getMessage());
        //e.printStackTrace();
    } catch (Exception xxe) {
        xxe.printStackTrace();
        log.error("load index Exception:" + xxe.getMessage());
        //throw new Exception(e.getMessage());
    }
    return false;
}
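Catching IndexNotFoundException works, but Lucene also offers DirectoryReader.indexExists for probing a directory up front. A hedged variant of the reader-opening step, reusing the same field names as the example above:

FSDirectory diskDirectory = FSDirectory.open(Paths.get(folderpath));
if (!DirectoryReader.indexExists(diskDirectory)) {   // no segments file in the folder
    log.error("Load Index Not Found:" + foldername);
    return false;
}
DirectoryReader diskReader = DirectoryReader.open(diskDirectory);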
Example 12: testCase
import org.apache.lucene.index.DirectoryReader; // import the required package/class
private void testCase(Query query,
        CheckedConsumer<RandomIndexWriter, IOException> indexer,
        Consumer<Sum> verify) throws IOException {
    try (Directory directory = newDirectory()) {
        try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
            indexer.accept(indexWriter);
        }
        try (IndexReader indexReader = DirectoryReader.open(directory)) {
            IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
            MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
            fieldType.setName(FIELD_NAME);
            fieldType.setHasDocValues(true);
            SumAggregationBuilder aggregationBuilder = new SumAggregationBuilder("_name");
            aggregationBuilder.field(FIELD_NAME);
            try (SumAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
                aggregator.preCollection();
                indexSearcher.search(query, aggregator);
                aggregator.postCollection();
                verify.accept((Sum) aggregator.buildAggregation(0L));
            }
        }
    }
}
Example 13: getSearcher
import org.apache.lucene.index.DirectoryReader; // import the required package/class
public static IndexSearcher getSearcher() throws IOException {
    Directory directory = FSDirectory.open(Paths.get(CommonConstants.INDEX_FILE_PATH));
    IndexReader reader = DirectoryReader.open(directory);
    return new IndexSearcher(reader);
}
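The helper hands ownership of the reader to the caller, so the caller must eventually close it to release file handles. A usage sketch (the MatchAllDocsQuery is just illustrative):

IndexSearcher searcher = getSearcher();
try {
    TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10);
    System.out.println("total hits: " + hits.totalHits);
} finally {
    searcher.getIndexReader().close();   // close the DirectoryReader opened inside getSearcher()
}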
Example 14: searchGenesInVcfFilesInProject
import org.apache.lucene.index.DirectoryReader; // import the required package/class
/**
 * Searches for gene IDs affected by variations in the specified VCF files of a given project.
 *
 * @param projectId an ID of a project to search genes in
 * @param gene a prefix of a gene ID to search
 * @param vcfFileIds a {@code List} of IDs of VCF files in the project to search for gene IDs
 * @return a {@code Set} of gene IDs that are affected by some variations in the specified VCF files
 * @throws IOException if an I/O error occurs while reading the index
 */
public Set<String> searchGenesInVcfFilesInProject(long projectId, String gene, List<Long> vcfFileIds)
        throws IOException {
    if (vcfFileIds == null || vcfFileIds.isEmpty()) {
        return Collections.emptySet();
    }

    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    PrefixQuery geneIdPrefixQuery = new PrefixQuery(new Term(FeatureIndexFields.GENE_ID.getFieldName(),
            gene.toLowerCase()));
    PrefixQuery geneNamePrefixQuery = new PrefixQuery(new Term(FeatureIndexFields.GENE_NAME.getFieldName(),
            gene.toLowerCase()));
    BooleanQuery.Builder geneIdOrNameQuery = new BooleanQuery.Builder();
    geneIdOrNameQuery.add(geneIdPrefixQuery, BooleanClause.Occur.SHOULD);
    geneIdOrNameQuery.add(geneNamePrefixQuery, BooleanClause.Occur.SHOULD);
    builder.add(geneIdOrNameQuery.build(), BooleanClause.Occur.MUST);

    List<Term> terms = vcfFileIds.stream()
            .map(vcfFileId -> new Term(FeatureIndexFields.FILE_ID.getFieldName(), vcfFileId.toString()))
            .collect(Collectors.toList());
    TermsQuery termsQuery = new TermsQuery(terms);
    builder.add(termsQuery, BooleanClause.Occur.MUST);
    BooleanQuery query = builder.build();

    Set<String> geneIds;
    try (
        Directory index = fileManager.getIndexForProject(projectId);
        IndexReader reader = DirectoryReader.open(index)
    ) {
        if (reader.numDocs() == 0) {
            return Collections.emptySet();
        }
        IndexSearcher searcher = new IndexSearcher(reader);
        final TopDocs docs = searcher.search(query, reader.numDocs());
        final ScoreDoc[] hits = docs.scoreDocs;
        geneIds = fetchGeneIds(hits, searcher);
    } catch (IOException e) {
        LOGGER.error(MessageHelper.getMessage(MessagesConstants.ERROR_FEATURE_INDEX_SEARCH_FAILED), e);
        return Collections.emptySet();
    }
    return geneIds;
}
Example 15: testConcurrentWritesAndCommits
import org.apache.lucene.index.DirectoryReader; // import the required package/class
public void testConcurrentWritesAndCommits() throws Exception {
    try (Store store = createStore();
         InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(),
                 new SnapshotDeletionPolicy(NoDeletionPolicy.INSTANCE),
                 IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {

        final int numIndexingThreads = scaledRandomIntBetween(3, 6);
        final int numDocsPerThread = randomIntBetween(500, 1000);
        final CyclicBarrier barrier = new CyclicBarrier(numIndexingThreads + 1);
        final List<Thread> indexingThreads = new ArrayList<>();

        // create N indexing threads to index documents simultaneously
        for (int threadNum = 0; threadNum < numIndexingThreads; threadNum++) {
            final int threadIdx = threadNum;
            Thread indexingThread = new Thread(() -> {
                try {
                    barrier.await(); // wait for all threads to start at the same time
                    // index random number of docs
                    for (int i = 0; i < numDocsPerThread; i++) {
                        final String id = "thread" + threadIdx + "#" + i;
                        ParsedDocument doc = testParsedDocument(id, "test", null, testDocument(), B_1, null);
                        engine.index(indexForDoc(doc));
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            indexingThreads.add(indexingThread);
        }

        // start the indexing threads
        for (Thread thread : indexingThreads) {
            thread.start();
        }
        barrier.await(); // wait for indexing threads to all be ready to start

        // create random commit points while the indexing threads are still running
        boolean doneIndexing;
        do {
            doneIndexing = indexingThreads.stream().filter(Thread::isAlive).count() == 0;
            engine.flush(); // flush and commit
        } while (doneIndexing == false);

        // now, verify all the commits have the correct docs according to the user commit data
        long prevLocalCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED;
        long prevMaxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
        for (IndexCommit commit : DirectoryReader.listCommits(store.directory())) {
            Map<String, String> userData = commit.getUserData();
            long localCheckpoint = userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) ?
                    Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) :
                    SequenceNumbersService.NO_OPS_PERFORMED;
            long maxSeqNo = userData.containsKey(SequenceNumbers.MAX_SEQ_NO) ?
                    Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)) :
                    SequenceNumbersService.UNASSIGNED_SEQ_NO;
            // local checkpoint and max seq no shouldn't go backwards
            assertThat(localCheckpoint, greaterThanOrEqualTo(prevLocalCheckpoint));
            assertThat(maxSeqNo, greaterThanOrEqualTo(prevMaxSeqNo));
            try (IndexReader reader = DirectoryReader.open(commit)) {
                FieldStats stats = SeqNoFieldMapper.SeqNoDefaults.FIELD_TYPE.stats(reader);
                final long highestSeqNo;
                if (stats != null) {
                    highestSeqNo = (long) stats.getMaxValue();
                } else {
                    highestSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
                }
                // make sure localCheckpoint <= highest seq no found <= maxSeqNo
                assertThat(highestSeqNo, greaterThanOrEqualTo(localCheckpoint));
                assertThat(highestSeqNo, lessThanOrEqualTo(maxSeqNo));
                // make sure all sequence numbers up to and including the local checkpoint are in the index
                FixedBitSet seqNosBitSet = getSeqNosSet(reader, highestSeqNo);
                for (int i = 0; i <= localCheckpoint; i++) {
                    assertTrue("local checkpoint [" + localCheckpoint + "], _seq_no [" + i + "] should be indexed",
                            seqNosBitSet.get(i));
                }
            }
            prevLocalCheckpoint = localCheckpoint;
            prevMaxSeqNo = maxSeqNo;
        }
    }
}
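The verification loop above exercises a third overload: DirectoryReader.open(IndexCommit) opens a point-in-time view of one specific commit rather than the latest one. Stripped of the test assertions, the core pattern looks like this (dir stands for any Directory whose deletion policy retains multiple commits):

for (IndexCommit commit : DirectoryReader.listCommits(dir)) {
    try (IndexReader reader = DirectoryReader.open(commit)) {
        // each reader reflects exactly the segments referenced by that commit
        System.out.println(commit.getSegmentsFileName() + " -> " + reader.numDocs() + " docs");
    }
}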