This article collects typical usage examples of the Java method org.apache.lucene.store.Directory.close. If you have been wondering what Directory.close does, how to call it, or where to find working examples of it, the curated method examples below may help. You can also explore further usage examples of its enclosing class, org.apache.lucene.store.Directory.
Below are 15 code examples of Directory.close, sorted by popularity by default.
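All of the examples below follow the same lifecycle: open a Directory, build writers and readers on top of it, close those first, and call Directory.close last. As a reference point, here is a minimal, self-contained sketch of that pattern using try-with-resources; ByteBuffersDirectory and the field names are illustrative choices, not taken from the examples (on Lucene versions before 8.0, RAMDirectory fills the same role):
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class DirectoryCloseSketch {
    public static void main(String[] args) throws Exception {
        // Directory implements Closeable; try-with-resources closes it after
        // the writer and reader that were opened on top of it.
        try (Directory dir = new ByteBuffersDirectory()) {
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
                Document doc = new Document();
                doc.add(new StringField("id", "1", Field.Store.YES));
                writer.addDocument(doc);
            }
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                System.out.println("docs in index: " + reader.numDocs());
            }
        } // dir.close() runs here
    }
}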
Example 1: testGetParentIdNoParentField
import org.apache.lucene.store.Directory; // import the package/class the method depends on
public void testGetParentIdNoParentField() throws Exception {
    ParentFieldMapper fieldMapper = createParentFieldMapper();
    Directory directory = newDirectory();
    IndexWriter indexWriter = new IndexWriter(directory, newIndexWriterConfig());
    Document document = new Document();
    document.add(new SortedDocValuesField("different_field", new BytesRef("1")));
    indexWriter.addDocument(document);
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    String id = ParentFieldSubFetchPhase.getParentId(fieldMapper, indexReader.leaves().get(0).reader(), 0);
    assertNull(id);
    indexReader.close();
    directory.close();
}
Example 2: testCase
import org.apache.lucene.store.Directory; // import the package/class the method depends on
private void testCase(Query query, String field, int precision, CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
        Consumer<InternalGeoHashGrid> verify) throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    buildIndex.accept(indexWriter);
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
    GeoGridAggregationBuilder aggregationBuilder = new GeoGridAggregationBuilder("_name").field(field);
    aggregationBuilder.precision(precision);
    MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType();
    fieldType.setHasDocValues(true);
    fieldType.setName(FIELD_NAME);
    try (Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
        aggregator.preCollection();
        indexSearcher.search(query, aggregator);
        aggregator.postCollection();
        verify.accept((InternalGeoHashGrid) aggregator.buildAggregation(0L));
    }
    indexReader.close();
    directory.close();
}
Example 3: testMultiPhrasePrefixQuery
import org.apache.lucene.store.Directory; // import the package/class the method depends on
public void testMultiPhrasePrefixQuery() throws Exception {
    Analyzer analyzer = new StandardAnalyzer();
    Directory dir = newDirectory();
    String value = "The quick brown fox.";
    IndexReader ir = indexOneDoc(dir, "text", value, analyzer);
    MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery();
    query.add(new Term("text", "quick"));
    query.add(new Term("text", "brown"));
    query.add(new Term("text", "fo"));
    IndexSearcher searcher = newSearcher(ir);
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    assertThat(topDocs.totalHits, equalTo(1));
    int docId = topDocs.scoreDocs[0].doc;
    CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
    CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer,
        passageFormatter, null, value, false);
    Snippet[] snippets = highlighter.highlightField("text", query, docId, 5);
    assertThat(snippets.length, equalTo(1));
    assertThat(snippets[0].getText(), equalTo("The <b>quick</b> <b>brown</b> <b>fox</b>."));
    ir.close();
    dir.close();
}
Example 4: doTestDocValueRangeQueries
import org.apache.lucene.store.Directory; // import the package/class the method depends on
public void doTestDocValueRangeQueries(NumberType type, Supplier<Number> valueSupplier) throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
    final int numDocs = TestUtil.nextInt(random(), 100, 500);
    for (int i = 0; i < numDocs; ++i) {
        w.addDocument(type.createFields("foo", valueSupplier.get(), true, true, false));
    }
    DirectoryReader reader = DirectoryReader.open(w);
    IndexSearcher searcher = newSearcher(reader);
    w.close();
    final int iters = 10;
    for (int iter = 0; iter < iters; ++iter) {
        Query query = type.rangeQuery("foo",
            random().nextBoolean() ? null : valueSupplier.get(),
            random().nextBoolean() ? null : valueSupplier.get(),
            randomBoolean(), randomBoolean(), true);
        assertThat(query, Matchers.instanceOf(IndexOrDocValuesQuery.class));
        IndexOrDocValuesQuery indexOrDvQuery = (IndexOrDocValuesQuery) query;
        assertEquals(
            searcher.count(indexOrDvQuery.getIndexQuery()),
            searcher.count(indexOrDvQuery.getRandomAccessQuery()));
    }
    reader.close();
    dir.close();
}
Example 5: testCommonTermsQuery
import org.apache.lucene.store.Directory; // import the package/class the method depends on
public void testCommonTermsQuery() throws IOException {
    Directory dir = newDirectory();
    String value = "The quick brown fox.";
    Analyzer analyzer = new StandardAnalyzer();
    IndexReader ir = indexOneDoc(dir, "text", value, analyzer);
    CommonTermsQuery query = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 128);
    query.add(new Term("text", "quick"));
    query.add(new Term("text", "brown"));
    query.add(new Term("text", "fox"));
    IndexSearcher searcher = newSearcher(ir);
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    assertThat(topDocs.totalHits, equalTo(1));
    int docId = topDocs.scoreDocs[0].doc;
    CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
    CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer,
        passageFormatter, null, value, false);
    Snippet[] snippets = highlighter.highlightField("text", query, docId, 5);
    assertThat(snippets.length, equalTo(1));
    assertThat(snippets[0].getText(), equalTo("The <b>quick</b> <b>brown</b> <b>fox</b>."));
    ir.close();
    dir.close();
}
Example 6: testCase
import org.apache.lucene.store.Directory; // import the package/class the method depends on
private void testCase(Query query, CheckedConsumer<RandomIndexWriter, IOException> buildIndex, Consumer<InternalMax> verify)
        throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    buildIndex.accept(indexWriter);
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
    MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("_name").field("number");
    MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
    fieldType.setName("number");
    try (MaxAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
        aggregator.preCollection();
        indexSearcher.search(query, aggregator);
        aggregator.postCollection();
        verify.accept((InternalMax) aggregator.buildAggregation(0L));
    }
    indexReader.close();
    directory.close();
}
Example 7: testCase
import org.apache.lucene.store.Directory; // import the package/class the method depends on
private Aggregation testCase(Query query, AggregationBuilder builder) throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), directory);
    iw.addDocument(document("1", "a", "b"));
    iw.addDocument(document("2", "c", "a"));
    iw.addDocument(document("3", "b", "d"));
    iw.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    // We do not use LuceneTestCase.newSearcher because we need a DirectoryReader for "testInsideTerms"
    IndexSearcher indexSearcher = new IndexSearcher(indexReader);
    Aggregation result = searchAndReduce(indexSearcher, query, builder, STRING_FIELD_TYPE);
    indexReader.close();
    directory.close();
    return result;
}
Example 8: testCase
import org.apache.lucene.store.Directory; // import the package/class the method depends on
private void testCase(Query query, CheckedConsumer<RandomIndexWriter, IOException> buildIndex, Consumer<InternalAvg> verify)
        throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    buildIndex.accept(indexWriter);
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
    AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name").field("number");
    MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
    fieldType.setName("number");
    try (AvgAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
        aggregator.preCollection();
        indexSearcher.search(query, aggregator);
        aggregator.postCollection();
        verify.accept((InternalAvg) aggregator.buildAggregation(0L));
    }
    indexReader.close();
    directory.close();
}
Example 9: testCache
import org.apache.lucene.store.Directory; // import the package/class the method depends on
/** Test that the version map cache works, is evicted on close, etc. */
public void testCache() throws Exception {
    int size = Versions.lookupStates.size();
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87));
    writer.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(writer);
    // should increase the cache size by 1
    assertEquals(87, Versions.loadVersion(reader, new Term(UidFieldMapper.NAME, "6")));
    assertEquals(size + 1, Versions.lookupStates.size());
    // should be a cache hit
    assertEquals(87, Versions.loadVersion(reader, new Term(UidFieldMapper.NAME, "6")));
    assertEquals(size + 1, Versions.lookupStates.size());
    reader.close();
    writer.close();
    // the core should be evicted from the map
    assertEquals(size, Versions.lookupStates.size());
    dir.close();
}
Example 10: testNoDocs
import org.apache.lucene.store.Directory; // import the package/class the method depends on
public void testNoDocs() throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    // intentionally not writing any docs
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    testCase(new MatchAllDocsQuery(), newSearcher(indexReader, false, true), parentToChild -> {
        assertEquals(0, parentToChild.getDocCount());
        assertEquals(Double.POSITIVE_INFINITY, ((InternalMin) parentToChild.getAggregations().get("in_child")).getValue(),
            Double.MIN_VALUE);
    });
    indexReader.close();
    directory.close();
}
Example 11: main
import org.apache.lucene.store.Directory; // import the package/class the method depends on
public static void main(String[] args) {
    try {
        // NOTE: this demo targets a very old Lucene API; FSDirectory.getDirectory,
        // IndexReader.open, and IndexReader.delete were all removed in later releases.
        Directory directory = FSDirectory.getDirectory("demo index", false);
        IndexReader reader = IndexReader.open(directory);
        // Term term = new Term("path", "pizza");
        // int deleted = reader.delete(term);
        // System.out.println("deleted " + deleted +
        //     " documents containing " + term);
        for (int i = 0; i < reader.maxDoc(); i++) {
            reader.delete(i);
        }
        reader.close();
        directory.close();
    } catch (Exception e) {
        System.out.println(" caught a " + e.getClass() +
            "\n with message: " + e.getMessage());
    }
}
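Example 11 dates from a very early Lucene release. For comparison, here is a minimal sketch of the same delete-everything task on the modern API (Lucene 4 and later), where deletions go through IndexWriter rather than IndexReader; the index path is hypothetical:
import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DeleteAllDocuments {
    public static void main(String[] args) throws Exception {
        // Open the existing index, mark every document deleted, and commit.
        try (Directory directory = FSDirectory.open(Paths.get("demo-index")); // hypothetical path
             IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()))) {
            writer.deleteAll();
            writer.commit();
        } // writer closes first, then directory.close()
    }
}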
Example 12: testCacheFilterReader
import org.apache.lucene.store.Directory; // import the package/class the method depends on
/** Test that the version map cache behaves properly with a filtered reader */
public void testCacheFilterReader() throws Exception {
    int size = Versions.lookupStates.size();
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87));
    writer.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(writer);
    assertEquals(87, Versions.loadVersion(reader, new Term(UidFieldMapper.NAME, "6")));
    assertEquals(size + 1, Versions.lookupStates.size());
    // now wrap the reader
    DirectoryReader wrapped = ElasticsearchDirectoryReader.wrap(reader, new ShardId("bogus", "_na_", 5));
    assertEquals(87, Versions.loadVersion(wrapped, new Term(UidFieldMapper.NAME, "6")));
    // same map size: the core cache key is shared
    assertEquals(size + 1, Versions.lookupStates.size());
    reader.close();
    writer.close();
    // the core should be evicted from the map
    assertEquals(size, Versions.lookupStates.size());
    dir.close();
}
Example 13: assertCompressionEquals
import org.apache.lucene.store.Directory; // import the package/class the method depends on
private void assertCompressionEquals(Mode expected, Codec actual) throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig(null);
    iwc.setCodec(actual);
    IndexWriter iw = new IndexWriter(dir, iwc);
    iw.addDocument(new Document());
    iw.commit();
    iw.close();
    DirectoryReader ir = DirectoryReader.open(dir);
    SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader();
    String v = sr.getSegmentInfo().info.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY);
    assertNotNull(v);
    assertEquals(expected, Mode.valueOf(v));
    ir.close();
    dir.close();
}
Example 14: vectorsReader
import org.apache.lucene.store.Directory; // import the package/class the method depends on
@Override
public TermVectorsReader vectorsReader(Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, IOContext context) throws IOException {
    final String fileName = IndexFileNames.segmentFileName(Lucene3xSegmentInfoFormat.getDocStoreSegment(segmentInfo), "", Lucene3xTermVectorsReader.VECTORS_FIELDS_EXTENSION);
    // Unfortunately, for 3.x indices, each segment's
    // FieldInfos can lie about hasVectors (claim it's true
    // when really it's false), so we have to carefully
    // check whether the files really exist before trying to open
    // them (4.x has fixed this):
    final boolean exists;
    if (Lucene3xSegmentInfoFormat.getDocStoreOffset(segmentInfo) != -1 && Lucene3xSegmentInfoFormat.getDocStoreIsCompoundFile(segmentInfo)) {
        String cfxFileName = IndexFileNames.segmentFileName(Lucene3xSegmentInfoFormat.getDocStoreSegment(segmentInfo), "", Lucene3xCodec.COMPOUND_FILE_STORE_EXTENSION);
        if (segmentInfo.dir.fileExists(cfxFileName)) {
            Directory cfsDir = new CompoundFileDirectory(segmentInfo.dir, cfxFileName, context, false);
            try {
                exists = cfsDir.fileExists(fileName);
            } finally {
                cfsDir.close();
            }
        } else {
            exists = false;
        }
    } else {
        exists = directory.fileExists(fileName);
    }
    if (!exists) {
        // 3.x's FieldInfos sometimes lies and claims a segment
        // has vectors when it doesn't:
        return null;
    } else {
        return new Lucene3xTermVectorsReader(directory, segmentInfo, fieldInfos, context);
    }
}
Example 15: writeStandardTermVector
import org.apache.lucene.store.Directory; // import the package/class the method depends on
private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    d.add(new Field("title", "the1 quick brown fox jumps over the1 lazy dog", type));
    d.add(new Field("desc", "the1 quick brown fox jumps over the1 lazy dog", type));
    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields termVectors = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(termVectors, null, flags, termVectors);
    dr.close();
    dir.close();
}