This article collects typical usage examples of the Java method org.apache.lucene.index.TieredMergePolicy.setUseCompoundFile. If you are wondering how TieredMergePolicy.setUseCompoundFile is used in practice, or are looking for concrete examples of it, the curated code samples here may help. You can also explore further usage examples of its declaring class, org.apache.lucene.index.TieredMergePolicy.
Five code examples of TieredMergePolicy.setUseCompoundFile are shown below, sorted by popularity by default.
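Before the mined examples, here is a minimal, self-contained sketch of the call itself. It assumes the Lucene 4.3 API that the examples below compile against; the class name, field, and in-memory directory are purely illustrative:

import java.io.IOException;

import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class SetUseCompoundFileSketch {
  public static void main(String[] args) throws IOException {
    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
    // TieredMergePolicy is the default merge policy, so this cast is safe
    // unless a different policy was set explicitly on the config.
    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
    // false: write each segment as separate files; true: pack segment files
    // into a compound (.cfs) file, trading some I/O for fewer open files.
    mergePolicy.setUseCompoundFile(false);
    IndexWriter writer = new IndexWriter(dir, conf);
    Document doc = new Document();
    doc.add(new StringField("id", "1", Field.Store.YES));
    writer.addDocument(doc);
    writer.close();
    dir.close();
  }
}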
Example 1: reduceOpenFiles
import org.apache.lucene.index.TieredMergePolicy; // import the class the method depends on
/** just tries to configure things to keep the open file
 *  count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setUseCompoundFile(true);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setUseCompoundFile(true);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    ((ConcurrentMergeScheduler) ms).setMaxThreadCount(2);
    ((ConcurrentMergeScheduler) ms).setMaxMergeCount(3);
  }
}
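A hypothetical caller applies this helper to an already-open writer, since it mutates the writer's live merge policy and scheduler (the directory path and analyzer here are illustrative; Lucene 4.3 API assumed):

Directory dir = FSDirectory.open(new File("/tmp/test-index"));
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
IndexWriter writer = new IndexWriter(dir, conf);
reduceOpenFiles(writer); // caps merge width at 5 and switches to compound files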
Example 2: createShard
import org.apache.lucene.index.TieredMergePolicy; // import the class the method depends on
private static void createShard(Configuration configuration, int i, Path path, int totalShardCount)
    throws IOException {
  HdfsDirectory hdfsDirectory = new HdfsDirectory(configuration, path);
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
  // Keep segment files separate (no .cfs) for the shard index written to HDFS.
  TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
  mergePolicy.setUseCompoundFile(false);
  IndexWriter indexWriter = new IndexWriter(hdfsDirectory, conf);
  Partitioner<IntWritable, IntWritable> partitioner = new HashPartitioner<IntWritable, IntWritable>();
  int partition = partitioner.getPartition(new IntWritable(i), null, totalShardCount);
  assertEquals(i, partition);
  Document doc = getDoc(i);
  indexWriter.addDocument(doc);
  indexWriter.close();
}
Example 3: setupWriter
import org.apache.lucene.index.TieredMergePolicy; // import the class the method depends on
private void setupWriter(Configuration configuration) throws IOException {
  TableDescriptor tableDescriptor = new TableDescriptor();
  tableDescriptor.setName("test-table");
  String uuid = UUID.randomUUID().toString();
  tableDescriptor.setTableUri(new Path(_base, "table-table").toUri().toString());
  tableDescriptor.setShardCount(2);
  TableContext tableContext = TableContext.create(tableDescriptor);
  ShardContext shardContext = ShardContext.create(tableContext, "shard-00000000");
  Path tablePath = new Path(_base, "table-table");
  _shardPath = new Path(tablePath, "shard-00000000");
  String indexDirName = "index_" + uuid;
  _path = new Path(_shardPath, indexDirName + ".commit");
  _fileSystem.mkdirs(_path);
  _badRowIdsPath = new Path(_shardPath, indexDirName + ".badrowids");
  _badIndexPath = new Path(_shardPath, indexDirName + ".badindex");
  _inUsePath = new Path(_shardPath, indexDirName + ".inuse");
  Directory commitDirectory = new HdfsDirectory(configuration, _path);
  _mainDirectory = new HdfsDirectory(configuration, _shardPath);
  _fieldManager = tableContext.getFieldManager();
  Analyzer analyzerForIndex = _fieldManager.getAnalyzerForIndex();
  IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, analyzerForIndex);
  // conf.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
  TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
  mergePolicy.setUseCompoundFile(false);
  _commitWriter = new IndexWriter(commitDirectory, conf.clone());
  // Make sure there's an empty index...
  new IndexWriter(_mainDirectory, conf.clone()).close();
  _mainWriter = new IndexWriter(_mainDirectory, conf.clone());
  BufferStore.initNewBuffer(128, 128 * 128);
  _indexImporter = new IndexImporter(_timer, getBlurIndex(shardContext, _mainDirectory), shardContext,
      TimeUnit.MINUTES, 10, 10, null, _mainDirectory);
}
Example 4: testMulipleCommitsAndReopens
import org.apache.lucene.index.TieredMergePolicy; // import the class the method depends on
@Test
public void testMulipleCommitsAndReopens() throws IOException {
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
  conf.setMergeScheduler(new SerialMergeScheduler());
  TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
  mergePolicy.setUseCompoundFile(false);
  Set<String> fileSet = new TreeSet<String>();
  long seed = new Random().nextLong();
  System.out.println("Seed:" + seed);
  Random random = new Random(seed);
  int docCount = 0;
  int passes = 10;
  byte[] segmentsGenContents = null;
  for (int run = 0; run < passes; run++) {
    final FastHdfsKeyValueDirectory directory = new FastHdfsKeyValueDirectory(false, _timer, _configuration,
        new Path(_path, "test_multiple_commits_reopens"));
    if (segmentsGenContents != null) {
      byte[] segmentsGenContentsCurrent = readSegmentsGen(directory);
      assertTrue(Arrays.equals(segmentsGenContents, segmentsGenContentsCurrent));
    }
    assertFiles(fileSet, run, -1, directory);
    assertEquals(docCount, getDocumentCount(directory));
    IndexWriter writer = new IndexWriter(directory, conf.clone());
    int numberOfCommits = random.nextInt(100);
    for (int i = 0; i < numberOfCommits; i++) {
      assertFiles(fileSet, run, i, directory);
      addDocuments(writer, random.nextInt(100));
      // Before Commit
      writer.commit();
      // After Commit
      // Set files after commit
      {
        fileSet.clear();
        List<IndexCommit> listCommits = DirectoryReader.listCommits(directory);
        assertEquals(1, listCommits.size());
        IndexCommit indexCommit = listCommits.get(0);
        fileSet.addAll(indexCommit.getFileNames());
      }
      segmentsGenContents = readSegmentsGen(directory);
    }
    docCount = getDocumentCount(directory);
  }
}
Example 5: GenericBlurRecordWriter
import org.apache.lucene.index.TieredMergePolicy; // import the class the method depends on
public GenericBlurRecordWriter(Configuration configuration, int attemptId, String tmpDirName) throws IOException {
  _configuration = configuration;
  _documentBufferStrategy = BlurOutputFormat.getDocumentBufferStrategy(_configuration);
  _indexLocally = BlurOutputFormat.isIndexLocally(_configuration);
  _optimizeInFlight = BlurOutputFormat.isOptimizeInFlight(_configuration);
  TableDescriptor tableDescriptor = BlurOutputFormat.getTableDescriptor(_configuration);
  int shardCount = tableDescriptor.getShardCount();
  int shardId = attemptId % shardCount;
  Path tableOutput = BlurOutputFormat.getOutputPath(_configuration);
  String shardName = ShardUtil.getShardName(BlurConstants.SHARD_PREFIX, shardId);
  Path indexPath = new Path(tableOutput, shardName);
  _newIndex = new Path(indexPath, tmpDirName);
  _finalDir = new ProgressableDirectory(new HdfsDirectory(_configuration, _newIndex), getProgressable());
  _finalDir.setLockFactory(NoLockFactory.getNoLockFactory());
  TableContext tableContext = TableContext.create(tableDescriptor);
  _fieldManager = tableContext.getFieldManager();
  Analyzer analyzer = _fieldManager.getAnalyzerForIndex();
  _conf = new IndexWriterConfig(LuceneVersionConstant.LUCENE_VERSION, analyzer);
  _conf.setCodec(new Blur024Codec());
  _conf.setSimilarity(tableContext.getSimilarity());
  TieredMergePolicy mergePolicy = (TieredMergePolicy) _conf.getMergePolicy();
  mergePolicy.setUseCompoundFile(false);
  _overFlowConf = _conf.clone();
  if (_indexLocally) {
    String localDirPath = System.getProperty(JAVA_IO_TMPDIR);
    _localPath = new File(localDirPath, UUID.randomUUID().toString() + ".tmp");
    SimpleFSDirectory directory = new SimpleFSDirectory(_localPath);
    _localDir = new ProgressableDirectory(directory, getProgressable());
    _writer = new IndexWriter(_localDir, _conf.clone());
  } else {
    _localPath = null;
    _localDir = null;
    _writer = new IndexWriter(_finalDir, _conf.clone());
  }
}