This article collects typical usage examples of the Java method org.apache.lucene.index.TieredMergePolicy.setSegmentsPerTier. If you are wondering what TieredMergePolicy.setSegmentsPerTier does and how to use it in practice, the curated code examples below should help; you can also refer to the enclosing class, org.apache.lucene.index.TieredMergePolicy, for further details.
The following presents 7 code examples of TieredMergePolicy.setSegmentsPerTier, ordered by popularity by default.
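Before the examples, here is a minimal, self-contained sketch of how setSegmentsPerTier is typically wired into an IndexWriterConfig. It is an illustrative assumption rather than part of any example below: the values (10 segments per tier, maxMergeAtOnce of 10) and the index path "demo-index" are hypothetical, and it assumes a Lucene version in which setMaxMergeAtOnce is still available, as in the examples that follow.
import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SegmentsPerTierDemo {
  public static void main(String[] args) throws Exception {
    // Allow roughly 10 segments per tier before the policy schedules a merge;
    // larger values mean fewer merges but more segments for searches to visit.
    TieredMergePolicy mergePolicy = new TieredMergePolicy();
    mergePolicy.setSegmentsPerTier(10.0);
    mergePolicy.setMaxMergeAtOnce(10); // conventionally kept <= segmentsPerTier

    IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
    iwc.setMergePolicy(mergePolicy);

    try (Directory dir = FSDirectory.open(Paths.get("demo-index"));
         IndexWriter writer = new IndexWriter(dir, iwc)) {
      writer.commit(); // open and close the writer just to show the wiring
    }
  }
}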
Example 1: newSortingMergePolicy
import org.apache.lucene.index.TieredMergePolicy; // import the package/class the method depends on
static MergePolicy newSortingMergePolicy(Sort sort) {
  // usually create a MP with a low merge factor so that many merges happen
  MergePolicy mp;
  int thingToDo = random().nextInt(3);
  if (thingToDo == 0) {
    TieredMergePolicy tmp = newTieredMergePolicy(random());
    final int numSegs = TestUtil.nextInt(random(), 3, 5);
    tmp.setSegmentsPerTier(numSegs);
    tmp.setMaxMergeAtOnce(TestUtil.nextInt(random(), 2, numSegs));
    mp = tmp;
  } else if (thingToDo == 1) {
    LogMergePolicy lmp = newLogMergePolicy(random());
    lmp.setMergeFactor(TestUtil.nextInt(random(), 3, 5));
    mp = lmp;
  } else {
    // just a regular random one from LTC (could be alcoholic etc)
    mp = newMergePolicy();
  }
  // wrap it with a sorting mp
  return new SortingMergePolicy(mp, sort);
}
Example 2: reduceOpenFiles
import org.apache.lucene.index.TieredMergePolicy; // import the package/class the method depends on
/** just tries to configure things to keep the open file
 *  count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setNoCFSRatio(1.0);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setNoCFSRatio(1.0);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    // wtf... shouldnt it be even lower since its 1 by default?!?!
    ((ConcurrentMergeScheduler) ms).setMaxMergesAndThreads(3, 2);
  }
}
Example 3: newTieredMergePolicy
import org.apache.lucene.index.TieredMergePolicy; // import the package/class the method depends on
public static TieredMergePolicy newTieredMergePolicy(Random r) {
  TieredMergePolicy tmp = new TieredMergePolicy();
  if (rarely(r)) {
    tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 2, 9));
    tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 2, 9));
  } else {
    tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 10, 50));
    tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 10, 50));
  }
  if (rarely(r)) {
    tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
  } else {
    tmp.setMaxMergedSegmentMB(r.nextDouble() * 100);
  }
  tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
  tmp.setForceMergeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
  if (rarely(r)) {
    tmp.setSegmentsPerTier(TestUtil.nextInt(r, 2, 20));
  } else {
    tmp.setSegmentsPerTier(TestUtil.nextInt(r, 10, 50));
  }
  configureRandom(r, tmp);
  tmp.setReclaimDeletesWeight(r.nextDouble() * 4);
  return tmp;
}
Example 4: reduceOpenFiles
import org.apache.lucene.index.TieredMergePolicy; // import the package/class the method depends on
/** just tries to configure things to keep the open file
 *  count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setUseCompoundFile(true);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setUseCompoundFile(true);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    ((ConcurrentMergeScheduler) ms).setMaxThreadCount(2);
    ((ConcurrentMergeScheduler) ms).setMaxMergeCount(3);
  }
}
Example 5: newSortingMergePolicy
import org.apache.lucene.index.TieredMergePolicy; // import the package/class the method depends on
static MergePolicy newSortingMergePolicy(Sorter sorter) {
  // create a MP with a low merge factor so that many merges happen
  MergePolicy mp;
  if (random().nextBoolean()) {
    TieredMergePolicy tmp = newTieredMergePolicy(random());
    final int numSegs = _TestUtil.nextInt(random(), 3, 5);
    tmp.setSegmentsPerTier(numSegs);
    tmp.setMaxMergeAtOnce(_TestUtil.nextInt(random(), 2, numSegs));
    mp = tmp;
  } else {
    LogMergePolicy lmp = newLogMergePolicy(random());
    lmp.setMergeFactor(_TestUtil.nextInt(random(), 3, 5));
    mp = lmp;
  }
  // wrap it with a sorting mp
  return new SortingMergePolicy(mp, sorter);
}
Example 6: getInstance
import org.apache.lucene.index.TieredMergePolicy; // import the package/class the method depends on
@Override
public MergePolicy getInstance(Map<String, String> config) throws IOException {
  TieredMergePolicy mergePolicy = new TieredMergePolicy();
  if (config.containsKey(SEGMENTS_PER_TIER)) {
    mergePolicy.setSegmentsPerTier(Double.valueOf(config.get(SEGMENTS_PER_TIER)));
  }
  if (config.containsKey(MAX_MERGE_AT_ONCE)) {
    mergePolicy.setMaxMergeAtOnce(Integer.valueOf(config.get(MAX_MERGE_AT_ONCE)));
  }
  return mergePolicy;
}
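To show how a string-keyed configuration like the one above might drive setSegmentsPerTier, here is a self-contained, hypothetical sketch that parses the same two settings from a Map. The key strings and values are assumptions of this sketch: the excerpt does not show the actual values of the SEGMENTS_PER_TIER and MAX_MERGE_AT_ONCE constants.
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.TieredMergePolicy;

public class TieredMergePolicyConfigDemo {
  // Hypothetical key names; the excerpt above does not reveal the real constants.
  static final String SEGMENTS_PER_TIER = "mergePolicy.segmentsPerTier";
  static final String MAX_MERGE_AT_ONCE = "mergePolicy.maxMergeAtOnce";

  public static void main(String[] args) {
    Map<String, String> config = new HashMap<>();
    config.put(SEGMENTS_PER_TIER, "8");
    config.put(MAX_MERGE_AT_ONCE, "4");

    TieredMergePolicy mergePolicy = new TieredMergePolicy();
    if (config.containsKey(SEGMENTS_PER_TIER)) {
      mergePolicy.setSegmentsPerTier(Double.parseDouble(config.get(SEGMENTS_PER_TIER)));
    }
    if (config.containsKey(MAX_MERGE_AT_ONCE)) {
      mergePolicy.setMaxMergeAtOnce(Integer.parseInt(config.get(MAX_MERGE_AT_ONCE)));
    }
    System.out.println("segmentsPerTier=" + mergePolicy.getSegmentsPerTier()
        + " maxMergeAtOnce=" + mergePolicy.getMaxMergeAtOnce());
  }
}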
Example 7: initIndexWriter
import org.apache.lucene.index.TieredMergePolicy; // import the package/class the method depends on
private void initIndexWriter() throws IOException
{
  Analyzer analyzer = new StandardAnalyzer();
  IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
  iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
  iwc.setMergedSegmentWarmer(
      new SimpleMergedSegmentWarmer(new LoggingInfoStream(Level.FINER)));
  iwc.setReaderPooling(true);
  // iwc.setMergeScheduler(new SerialMergeScheduler());
  ConcurrentMergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
  iwc.setMergeScheduler(mergeScheduler);
  TieredMergePolicy mergePolicy = new TieredMergePolicy();
  mergePolicy.setMaxMergeAtOnce(_maxMergeAtOnce);
  mergePolicy.setSegmentsPerTier(_segmentsPerTier);
  iwc.setMergePolicy(mergePolicy);
  iwc.setInfoStream(new LoggingInfoStream(Level.FINER));
  _writer = new IndexWriter(getDirectory(), iwc);
}