This article collects typical usage examples of the Java class org.apache.lucene.store.NRTCachingDirectory. If you are wondering what NRTCachingDirectory is for, how to use it, or what working code looks like, the curated examples below should help.
The NRTCachingDirectory class belongs to the org.apache.lucene.store package. Fifteen code examples of the class are shown below, sorted by popularity by default.
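Before the examples, here is a minimal, self-contained sketch of the basic pattern, assuming the Lucene 5.x+ API (FSDirectory.open(Path), IndexWriterConfig(Analyzer)); the path, analyzer and cache sizes are arbitrary placeholders. The idea: wrap a plain FSDirectory in an NRTCachingDirectory, give the cache a per-segment size threshold (maxMergeSizeMB) and a total RAM budget (maxCachedMB), and hand the wrapper to an IndexWriter.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.NRTCachingDirectory;

public class NRTCachingDirectoryQuickStart {
  public static void main(String[] args) throws Exception {
    // Small, freshly flushed segments are kept in RAM (up to maxCachedMB) and are only
    // written through to the delegate when they exceed maxMergeSizeMB or on commit.
    Directory fsDir = FSDirectory.open(Paths.get("/tmp/nrt-demo-index")); // placeholder path
    NRTCachingDirectory cachingDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);

    IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
    try (IndexWriter writer = new IndexWriter(cachingDir, iwc)) {
      Document doc = new Document();
      doc.add(new StringField("id", "1", Field.Store.YES));
      writer.addDocument(doc);
      writer.commit(); // commit pushes any cached files down to the FSDirectory
    }
    cachingDir.close();
  }
}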
Example 1: LuceneFiler
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
public LuceneFiler(@Nonnull Filer delegate, @Nonnull Config config) throws IOException {
super(delegate);
String path = config.getString("index.path");
maxAge = config.getTime("index.maxAge", "-1");
double maxMergeMb = config.getDouble("index.maxMergeMb", 4);
double maxCachedMb = config.getDouble("index.maxCacheMb", 64);
long targetMaxStale = config.getTime("index.targetMaxStale", "5s");
long targetMinStale = config.getTime("index.targetMinStale", "1s");
Directory dir = FSDirectory.open(new File(path).toPath());
NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, maxMergeMb, maxCachedMb);
IndexWriterConfig writerConfig = new IndexWriterConfig(null);
writerConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
writer = new TrackingIndexWriter(new IndexWriter(cachingDir, writerConfig));
manager = new SearcherManager(writer.getIndexWriter(), true, new SearcherFactory());
thread = new ControlledRealTimeReopenThread<>(writer, manager, targetMaxStale, targetMinStale);
thread.start();
}
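A hedged sketch of how a caller might use the fields set up in this constructor (writer, manager, thread) to make an update visible near-real-time; the document, field names and query below are hypothetical, and the snippet assumes the TrackingIndexWriter / ControlledRealTimeReopenThread API of Lucene 4.x/5.x used above.

// Index (or update) a document and keep the generation returned by TrackingIndexWriter.
Document doc = new Document();
doc.add(new StringField("id", "42", Field.Store.YES));
long generation = writer.updateDocument(new Term("id", "42"), doc);

// Block until the ControlledRealTimeReopenThread has opened a searcher that
// covers this generation (bounded by targetMaxStale); throws InterruptedException.
thread.waitForGeneration(generation);

// Search through the SearcherManager and always release the searcher afterwards.
IndexSearcher searcher = manager.acquire();
try {
  TopDocs hits = searcher.search(new TermQuery(new Term("id", "42")), 1);
  // ... use hits ...
} finally {
  manager.release(searcher);
}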
Example 2: getTaxoWriter
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
public DirectoryTaxonomyWriter getTaxoWriter(int segmentNumber) throws IOException {
Directory d;
if (indexConfig.getIndexSettings().getStoreIndexOnDisk()) {
d = MMapDirectory.open(getPathForFacetsIndex(segmentNumber));
}
else {
String indexSegmentDbName = getIndexSegmentDbName(segmentNumber);
String indexSegmentCollectionName = getIndexSegmentCollectionName(segmentNumber) + "_facets";
MongoDirectory mongoDirectory = new MongoDirectory(mongo, indexSegmentDbName, indexSegmentCollectionName, clusterConfig.isSharded(),
clusterConfig.getIndexBlockSize());
d = new DistributedDirectory(mongoDirectory);
}
NRTCachingDirectory nrtCachingDirectory = new NRTCachingDirectory(d, 2, 10);
return new DirectoryTaxonomyWriter(nrtCachingDirectory);
}
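The taxonomy writer returned above is normally paired with a main index writer through a FacetsConfig. A rough sketch follows; the FacetsConfig, the facet field name and the mainIndexWriter are assumptions for illustration, not part of the original example.

FacetsConfig facetsConfig = new FacetsConfig();
DirectoryTaxonomyWriter taxoWriter = getTaxoWriter(0); // segment 0, as an example

Document doc = new Document();
doc.add(new StringField("id", "1", Field.Store.YES));
doc.add(new FacetField("category", "books")); // hypothetical facet dimension
mainIndexWriter.addDocument(facetsConfig.build(taxoWriter, doc));

// Commit the taxonomy before (or together with) the main index so facet ordinals stay consistent.
taxoWriter.commit();
mainIndexWriter.commit();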
Example 3: createIndexDirectory
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
public Directory createIndexDirectory(String directory, LindenConfig.IndexType indexType) throws IOException {
switch (indexType) {
case RAM:
return new RAMDirectory();
default:
Preconditions.checkNotNull(directory, "index directory can not be null");
return new NRTCachingDirectory(FSDirectory.open(new File(directory)), maxMergeSizeMB, maxCachedMB);
}
}
Example 4: createTaxoIndexDirectory
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
public Directory createTaxoIndexDirectory(String directory, LindenConfig.IndexType indexType) throws IOException {
switch (indexType) {
case RAM:
return new RAMDirectory();
default:
Preconditions.checkNotNull(directory, "index directory can not be null");
return new NRTCachingDirectory(FSDirectory.open(new File(directory + ".taxonomy")),
maxMergeSizeMB, maxCachedMB);
}
}
Example 5: getDirectory
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
@Override
protected Directory getDirectory(Directory in) {
// Randomly swap in NRTCachingDir
if (random().nextBoolean()) {
if (VERBOSE) {
System.out.println("TEST: wrap NRTCachingDir");
}
return new NRTCachingDirectory(in, 5.0, 60.0);
} else {
return in;
}
}
Example 6: testIOContext
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
@Test
public void testIOContext() throws Exception {
// LUCENE-5591: make sure we pass an IOContext with an approximate
// segmentSize in FlushInfo
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
// we want a single large enough segment so that a doc-values update writes a large file
conf.setMergePolicy(NoMergePolicy.INSTANCE);
conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
IndexWriter writer = new IndexWriter(dir, conf);
for (int i = 0; i < 100; i++) {
writer.addDocument(doc(i));
}
writer.commit();
writer.close();
NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, 100, 1/(1024.*1024.));
conf = newIndexWriterConfig(new MockAnalyzer(random()));
// we want a single large enough segment so that a doc-values update writes a large file
conf.setMergePolicy(NoMergePolicy.INSTANCE);
conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
writer = new IndexWriter(cachingDir, conf);
writer.updateBinaryDocValue(new Term("id", "doc-0"), "val", toBytes(100L));
DirectoryReader reader = DirectoryReader.open(writer, true); // flush
assertEquals(0, cachingDir.listCachedFiles().length);
IOUtils.close(reader, writer, cachingDir);
}
Example 7: testIOContext
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
@Test
public void testIOContext() throws Exception {
// LUCENE-5591: make sure we pass an IOContext with an approximate
// segmentSize in FlushInfo
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
// we want a single large enough segment so that a doc-values update writes a large file
conf.setMergePolicy(NoMergePolicy.INSTANCE);
conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
IndexWriter writer = new IndexWriter(dir, conf);
for (int i = 0; i < 100; i++) {
writer.addDocument(doc(i));
}
writer.commit();
writer.close();
NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, 100, 1/(1024.*1024.));
conf = newIndexWriterConfig(new MockAnalyzer(random()));
// we want a single large enough segment so that a doc-values update writes a large file
conf.setMergePolicy(NoMergePolicy.INSTANCE);
conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
writer = new IndexWriter(cachingDir, conf);
writer.updateNumericDocValue(new Term("id", "doc-0"), "val", 100L);
DirectoryReader reader = DirectoryReader.open(writer, true); // flush
assertEquals(0, cachingDir.listCachedFiles().length);
IOUtils.close(reader, writer, cachingDir);
}
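Both testIOContext variants above call helpers that are not shown in the snippet (doc(i) and, in Example 6, toBytes(long)). A hedged reconstruction follows; the real Lucene test helpers use a different, more compact encoding, and Example 6 indexes a binary doc-values field while Example 7 indexes a numeric one, so the actual doc(i) differs between the two test classes.

// Hypothetical reconstruction of the helpers referenced by the tests above.
private static Document doc(int id) {
  Document doc = new Document();
  doc.add(new StringField("id", "doc-" + id, Field.Store.NO));
  doc.add(new NumericDocValuesField("val", id));            // Example 7 variant
  // doc.add(new BinaryDocValuesField("val", toBytes(id))); // Example 6 variant
  return doc;
}

private static BytesRef toBytes(long value) {
  // Simple fixed-width big-endian encoding; the real test encodes more compactly.
  byte[] bytes = new byte[8];
  for (int i = 7; i >= 0; i--) {
    bytes[i] = (byte) (value & 0xFF);
    value >>>= 8;
  }
  return new BytesRef(bytes);
}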
Example 8: reduce
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
private Directory reduce(Directory dir) {
Directory cdir = dir;
if (dir instanceof NRTCachingDirectory) {
cdir = ((NRTCachingDirectory)dir).getDelegate();
}
if (cdir instanceof RateLimitedDirectoryWrapper) {
cdir = ((RateLimitedDirectoryWrapper)cdir).getDelegate();
}
if (cdir instanceof TrackingDirectoryWrapper) {
cdir = ((TrackingDirectoryWrapper)cdir).getDelegate();
}
return cdir;
}
Example 9: getBaseDir
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
private Directory getBaseDir(Directory dir) {
Directory baseDir;
if (dir instanceof NRTCachingDirectory) {
baseDir = ((NRTCachingDirectory)dir).getDelegate();
} else if (dir instanceof RateLimitedDirectoryWrapper) {
baseDir = ((RateLimitedDirectoryWrapper)dir).getDelegate();
} else {
baseDir = dir;
}
return baseDir;
}
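As a small illustration of what such unwrap helpers are for, the sketch below (assuming the Lucene 5.x Path-based API and a placeholder path) peels an NRTCachingDirectory off to reach the underlying FSDirectory and its on-disk location.

// Hypothetical caller of a helper like getBaseDir(...) above.
Directory fsDir = FSDirectory.open(Paths.get("/var/data/index")); // placeholder path
Directory wrapped = new NRTCachingDirectory(fsDir, 5.0, 60.0);

Directory base = getBaseDir(wrapped); // uses NRTCachingDirectory.getDelegate()
if (base instanceof FSDirectory) {
  Path indexPath = ((FSDirectory) base).getDirectory(); // the actual on-disk location
  System.out.println("index stored at " + indexPath);
}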
Example 10: trainingSetDirectory
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
/**
************************* TSet Based
*/
@ConditionalOnProperty(prefix = "lucene.categoriser.", value = "useTSetBasedCategoriser")
public @Bean Directory trainingSetDirectory() throws IOException {
logger.info("lucene trainingSet index location: {}", trainingSetCollectionPath );
Directory fsDir = FSDirectory.open(Paths.get(trainingSetCollectionPath));
return new NRTCachingDirectory(fsDir, iaViewMaxMergeSizeMB, iaViewMaxCachedMB);
}
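A possible companion bean, not part of the original configuration class, showing how the Directory bean above might be consumed; the analyzer and open mode are assumptions.

public @Bean IndexWriter trainingSetIndexWriter(Directory trainingSetDirectory) throws IOException {
  IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
  config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
  return new IndexWriter(trainingSetDirectory, config);
}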
Example 11: iaViewDirectory
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
/**
************************* IA Views
*/
public @Bean Directory iaViewDirectory() throws IOException {
logger.info("lucene index location: {}", iaviewCollectionPath );
Directory fsDir = FSDirectory.open(Paths.get(iaviewCollectionPath));
return new NRTCachingDirectory(fsDir, iaViewMaxMergeSizeMB, iaViewMaxCachedMB);
}
Example 12: init
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
/**
* Builds a new {@link FSIndex}.
*
* @param name
* the index name
* @param mbeanName
* the JMX MBean object name
* @param path
* the directory path
* @param analyzer
* the index writer analyzer
* @param refresh
* the index reader refresh frequency in seconds
* @param ramBufferMB
* the index writer RAM buffer size in MB
* @param maxMergeMB
* the directory max merge size in MB
* @param maxCachedMB
* the directory max cache size in MB
* @param refreshTask
* action to be done during refresh
*/
public void init(String name, String mbeanName, Path path, Analyzer analyzer, double refresh, int ramBufferMB,
int maxMergeMB, int maxCachedMB, Runnable refreshTask) {
try {
this.path = path;
this.name = name;
// Open or create directory
FSDirectory fsDirectory = FSDirectory.open(path);
this.directory = new NRTCachingDirectory(fsDirectory, maxMergeMB, maxCachedMB);
// Setup index writer
IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
indexWriterConfig.setRAMBufferSizeMB(ramBufferMB);
indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
indexWriterConfig.setUseCompoundFile(true);
indexWriterConfig.setMergePolicy(new TieredMergePolicy());
this.indexWriter = new IndexWriter(this.directory, indexWriterConfig);
// Setup NRT search
SearcherFactory searcherFactory = new SearcherFactory() {
@Override
public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) {
if (refreshTask != null) {
refreshTask.run();
}
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setSimilarity(new NoIDFSimilarity());
return searcher;
}
};
TrackingIndexWriter trackingWriter = new TrackingIndexWriter(this.indexWriter);
this.searcherManager = new SearcherManager(this.indexWriter, true, searcherFactory);
this.searcherReopener = new ControlledRealTimeReopenThread<>(trackingWriter, this.searcherManager, refresh,
refresh);
this.searcherReopener.start();
// Register JMX MBean
// mbean = new ObjectName(mbeanName);
// ManagementFactory.getPlatformMBeanServer().registerMBean(service,
// this.mbean);
} catch (Exception e) {
throw new FhirIndexException(e, "Error while creating index %s", name);
}
}
Example 13: create
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
@Override
protected Directory create(String path, DirContext dirContext) throws IOException {
return new NRTCachingDirectory(FSDirectory.open(new File(path)), maxMergeSizeMB, maxCachedMB);
}
Example 14: injectLockFactory
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
private static Directory injectLockFactory(Directory dir, String lockPath,
String rawLockType) throws IOException {
if (null == rawLockType) {
// we default to "simple" for backwards compatibility
log.warn("No lockType configured for " + dir + " assuming 'simple'");
rawLockType = "simple";
}
final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
if ("simple".equals(lockType)) {
// multiple SimpleFSLockFactory instances should be OK
dir.setLockFactory(new SimpleFSLockFactory(lockPath));
} else if ("native".equals(lockType)) {
dir.setLockFactory(new NativeFSLockFactory(lockPath));
} else if ("single".equals(lockType)) {
if (!(dir.getLockFactory() instanceof SingleInstanceLockFactory)) {
dir.setLockFactory(new SingleInstanceLockFactory());
}
} else if ("hdfs".equals(lockType)) {
Directory del = dir;
if (dir instanceof NRTCachingDirectory) {
del = ((NRTCachingDirectory) del).getDelegate();
}
if (del instanceof BlockDirectory) {
del = ((BlockDirectory) del).getDirectory();
}
if (!(del instanceof HdfsDirectory)) {
throw new SolrException(ErrorCode.FORBIDDEN, "Directory: "
+ del.getClass().getName()
+ ", but hdfs lock factory can only be used with HdfsDirectory");
}
dir.setLockFactory(new HdfsLockFactory(((HdfsDirectory)del).getHdfsDirPath(), ((HdfsDirectory)del).getConfiguration()));
} else if ("none".equals(lockType)) {
// Recipe for disaster
log.error("CONFIGURATION WARNING: locks are disabled on " + dir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Unrecognized lockType: " + rawLockType);
}
return dir;
}
Example 15: init
import org.apache.lucene.store.NRTCachingDirectory; // import the required package/class
/**
* Initializes this using the specified {@link Sort} for trying to keep the {@link Document}s sorted.
*
* @param sort The {@link Sort} to be used.
*/
public void init(Sort sort) {
Log.debug("Initializing index");
try {
this.sort = sort;
// Get directory file
file = new File(path);
// Open or create directory
FSDirectory fsDirectory = FSDirectory.open(file);
directory = new NRTCachingDirectory(fsDirectory, maxMergeMB, maxCachedMB);
// Setup index writer
IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_48, analyzer);
config.setRAMBufferSizeMB(ramBufferMB);
config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
config.setUseCompoundFile(true);
config.setMergePolicy(new SortingMergePolicy(config.getMergePolicy(), sort));
indexWriter = new IndexWriter(directory, config);
// Setup NRT search
SearcherFactory searcherFactory = new SearcherFactory() {
public IndexSearcher newSearcher(IndexReader reader) throws IOException {
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setSimilarity(new NoIDFSimilarity());
return searcher;
}
};
TrackingIndexWriter trackingIndexWriter = new TrackingIndexWriter(indexWriter);
searcherManager = new SearcherManager(indexWriter, true, searcherFactory);
searcherReopener = new ControlledRealTimeReopenThread<>(trackingIndexWriter,
searcherManager,
refreshSeconds,
refreshSeconds);
searcherReopener.start(); // Start the refresher thread
} catch (IOException e) {
Log.error(e, "Error while initializing index");
throw new RuntimeException(e);
}
}