

Java TieredMergePolicy Class Code Examples

This article collects typical code examples for the Java class org.apache.lucene.index.TieredMergePolicy. If you have been wondering how the TieredMergePolicy class works, how to use it, or what real-world usage looks like, the curated examples below may help.


The TieredMergePolicy class belongs to the org.apache.lucene.index package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the system recommend better Java code examples.
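
Before the examples, here is a minimal, hedged sketch (not taken from any of the projects below) of how a TieredMergePolicy is typically configured and attached to an IndexWriterConfig. The setters are the standard Lucene API also used in the examples; the concrete values are illustrative assumptions only, not recommendations:

import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.TieredMergePolicy;

public class TieredMergePolicyExample {
  public static IndexWriterConfig withTieredMergePolicy(IndexWriterConfig iwc) {
    TieredMergePolicy tmp = new TieredMergePolicy();
    tmp.setSegmentsPerTier(10.0);          // how many segments are allowed per tier before a merge is selected
    tmp.setMaxMergeAtOnce(10);             // how many segments may be merged in one "natural" merge
    tmp.setMaxMergedSegmentMB(5 * 1024);   // cap merged segments at roughly 5 GB (illustrative value)
    tmp.setFloorSegmentMB(2.0);            // segments below this size are treated as equally sized
    iwc.setMergePolicy(tmp);               // attach the policy to the writer configuration
    return iwc;
  }
}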

Example 1: getInstance

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
@Override
public MergePolicy getInstance(Map<String, String> params) throws IOException {
  String field = params.get(SORT_FIELD);
  SortField.Type sortFieldType = SortField.Type.DOC;
  if (params.containsKey(SORT_FIELD_TYPE)) {
    sortFieldType = SortField.Type.valueOf(params.get(SORT_FIELD_TYPE).toUpperCase());
  }

  if (sortFieldType == SortField.Type.DOC) {
    throw new IOException(
        "Relying on internal lucene DocIDs is not guaranteed to work, this is only an implementation detail.");
  }

  boolean desc = true;
  if (params.containsKey(SORT_DESC)) {
    try {
      desc = Boolean.valueOf(params.get(SORT_DESC));
    } catch (Exception e) {
      desc = true;
    }
  }
  SortField sortField = new SortField(field, sortFieldType, desc);
  Sort sort = new Sort(sortField);
  return new SortingMergePolicyDecorator(new TieredMergePolicy(), sort);
}
 
Developer: XiaoMi, Project: linden, Lines: 26, Source: SortingMergePolicyFactory.java

Example 2: newSortingMergePolicy

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
static MergePolicy newSortingMergePolicy(Sort sort) {
  // usually create a MP with a low merge factor so that many merges happen
  MergePolicy mp;
  int thingToDo = random().nextInt(3);
  if (thingToDo == 0) {
    TieredMergePolicy tmp = newTieredMergePolicy(random());
    final int numSegs = TestUtil.nextInt(random(), 3, 5);
    tmp.setSegmentsPerTier(numSegs);
    tmp.setMaxMergeAtOnce(TestUtil.nextInt(random(), 2, numSegs));
    mp = tmp;
  } else if (thingToDo == 1) {
    LogMergePolicy lmp = newLogMergePolicy(random());
    lmp.setMergeFactor(TestUtil.nextInt(random(), 3, 5));
    mp = lmp;
  } else {
    // just a regular random one from LTC (could be alcoholic etc)
    mp = newMergePolicy();
  }
  // wrap it with a sorting mp
  return new SortingMergePolicy(mp, sort);
}
 
Developer: europeana, Project: search, Lines: 22, Source: TestSortingMergePolicy.java

Example 3: reduceOpenFiles

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
/** just tries to configure things to keep the open file
 * count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setNoCFSRatio(1.0);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setNoCFSRatio(1.0);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    // wtf... shouldnt it be even lower since its 1 by default?!?!
    ((ConcurrentMergeScheduler) ms).setMaxMergesAndThreads(3, 2);
  }
}
 
Developer: europeana, Project: search, Lines: 22, Source: TestUtil.java

Example 4: newTieredMergePolicy

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
public static TieredMergePolicy newTieredMergePolicy(Random r) {
  TieredMergePolicy tmp = new TieredMergePolicy();
  if (rarely(r)) {
    tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 2, 9));
    tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 2, 9));
  } else {
    tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 10, 50));
    tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 10, 50));
  }
  if (rarely(r)) {
    tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
  } else {
    tmp.setMaxMergedSegmentMB(r.nextDouble() * 100);
  }
  tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
  tmp.setForceMergeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
  if (rarely(r)) {
    tmp.setSegmentsPerTier(TestUtil.nextInt(r, 2, 20));
  } else {
    tmp.setSegmentsPerTier(TestUtil.nextInt(r, 10, 50));
  }
  configureRandom(r, tmp);
  tmp.setReclaimDeletesWeight(r.nextDouble()*4);
  return tmp;
}
 
Developer: europeana, Project: search, Lines: 26, Source: LuceneTestCase.java

Example 5: testTieredMPSolrIndexConfigCreation

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
@Test
public void testTieredMPSolrIndexConfigCreation() throws Exception {
  SolrConfig solrConfig = new SolrConfig("solr" + File.separator
      + "collection1", "solrconfig-tieredmergepolicy.xml", null);
  SolrIndexConfig solrIndexConfig = new SolrIndexConfig(solrConfig, null,
      null);
  assertNotNull(solrIndexConfig);
  IndexSchema indexSchema = IndexSchemaFactory.buildIndexSchema("schema.xml", solrConfig);

  IndexWriterConfig iwc = solrIndexConfig.toIndexWriterConfig(indexSchema);

  assertNotNull("null mp", iwc.getMergePolicy());
  assertTrue("mp is not TMP", iwc.getMergePolicy() instanceof TieredMergePolicy);
  TieredMergePolicy mp = (TieredMergePolicy) iwc.getMergePolicy();
  assertEquals("mp.maxMergeAtOnceExplicit", 19, mp.getMaxMergeAtOnceExplicit());
  assertEquals("mp.segmentsPerTier",9,(int)mp.getSegmentsPerTier());

  assertNotNull("null ms", iwc.getMergeScheduler());
  assertTrue("ms is not CMS", iwc.getMergeScheduler() instanceof ConcurrentMergeScheduler);
  ConcurrentMergeScheduler ms = (ConcurrentMergeScheduler)  iwc.getMergeScheduler();
  assertEquals("ms.maxMergeCount", 987, ms.getMaxMergeCount());
  assertEquals("ms.maxThreadCount", 42, ms.getMaxThreadCount());

}
 
Developer: europeana, Project: search, Lines: 25, Source: SolrIndexConfigTest.java

Example 6: testDefaults

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
@Test
public void testDefaults() throws Exception {

  SolrConfig sc = new SolrConfig(new SolrResourceLoader("solr/collection1"), "solrconfig-defaults.xml", null);
  SolrIndexConfig sic = sc.indexConfig;
  assertEquals("default ramBufferSizeMB", 100.0D, sic.ramBufferSizeMB, 0.0D);
  assertEquals("default LockType", SolrIndexConfig.LOCK_TYPE_NATIVE, sic.lockType);
  assertEquals("default useCompoundFile", false, sic.useCompoundFile);

  IndexSchema indexSchema = IndexSchemaFactory.buildIndexSchema("schema.xml", solrConfig);
  IndexWriterConfig iwc = sic.toIndexWriterConfig(indexSchema);

  assertNotNull("null mp", iwc.getMergePolicy());
  assertTrue("mp is not TMP", iwc.getMergePolicy() instanceof TieredMergePolicy);

  assertNotNull("null ms", iwc.getMergeScheduler());
  assertTrue("ms is not CMS", iwc.getMergeScheduler() instanceof ConcurrentMergeScheduler);
}
 
Developer: europeana, Project: search, Lines: 19, Source: TestConfig.java

Example 7: testLegacyMergePolicyConfig

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
public void testLegacyMergePolicyConfig() throws Exception {
  final boolean expectCFS 
    = Boolean.parseBoolean(System.getProperty("useCompoundFile"));

  initCore("solrconfig-mergepolicy-legacy.xml","schema-minimal.xml");
  IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());
  assertEquals(expectCFS, iwc.getUseCompoundFile());

  assertEquals("termIndexInteval", 256, iwc.getTermIndexInterval());

  TieredMergePolicy tieredMP = assertAndCast(TieredMergePolicy.class,
                                             iwc.getMergePolicy());

  assertEquals(7, tieredMP.getMaxMergeAtOnce());
  assertEquals(7.0D, tieredMP.getSegmentsPerTier(), 0.0D);
  assertEquals(expectCFS ? 1.0D : 0.0D, tieredMP.getNoCFSRatio(), 0.0D);

  assertCommitSomeNewDocs();
  assertCompoundSegments(h.getCore(), expectCFS);
}
 
Developer: europeana, Project: search, Lines: 21, Source: TestMergePolicyConfig.java

Example 8: reduceOpenFiles

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
/** just tries to configure things to keep the open file
 * count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setUseCompoundFile(true);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setUseCompoundFile(true);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    ((ConcurrentMergeScheduler) ms).setMaxThreadCount(2);
    ((ConcurrentMergeScheduler) ms).setMaxMergeCount(3);
  }
}
 
Developer: pkarmstr, Project: NYBC, Lines: 22, Source: _TestUtil.java

Example 9: testTieredMergePolicyConfig

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
public void testTieredMergePolicyConfig() throws Exception {
  IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getSchema());
  MergePolicy mp = iwc.getMergePolicy();
  assertTrue(mp instanceof TieredMergePolicy);
  TieredMergePolicy tieredMP = (TieredMergePolicy) mp;

  // mp-specific setter
  assertEquals(19, tieredMP.getMaxMergeAtOnceExplicit());
  
  // make sure we apply compoundFile and mergeFactor
  assertEquals(false, tieredMP.getUseCompoundFile());
  assertEquals(7, tieredMP.getMaxMergeAtOnce());
  
  // make sure we overrode segmentsPerTier (split from maxMergeAtOnce out of mergeFactor)
  assertEquals(9D, tieredMP.getSegmentsPerTier(), 0.001);
  
  // make sure we overrode noCFSRatio (useless because we disabled useCompoundFile,
  // but just to make sure it works)
  assertEquals(1.0D, tieredMP.getNoCFSRatio(), 0.001);
}
 
Developer: pkarmstr, Project: NYBC, Lines: 21, Source: TestMergePolicyConfig.java

Example 10: createShard

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
private static void createShard(Configuration configuration, int i, Path path, int totalShardCount)
    throws IOException {
  HdfsDirectory hdfsDirectory = new HdfsDirectory(configuration, path);
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
  TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
  mergePolicy.setUseCompoundFile(false);
  IndexWriter indexWriter = new IndexWriter(hdfsDirectory, conf);

  Partitioner<IntWritable, IntWritable> partitioner = new HashPartitioner<IntWritable, IntWritable>();
  int partition = partitioner.getPartition(new IntWritable(i), null, totalShardCount);
  assertEquals(i, partition);

  Document doc = getDoc(i);
  indexWriter.addDocument(doc);
  indexWriter.close();
}
 
Developer: apache, Project: incubator-blur, Lines: 17, Source: TableShardCountCollapserTest.java

Example 11: newSortingMergePolicy

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
static MergePolicy newSortingMergePolicy(Sorter sorter) {
  // create a MP with a low merge factor so that many merges happen
  MergePolicy mp;
  if (random().nextBoolean()) {
    TieredMergePolicy tmp = newTieredMergePolicy(random());
    final int numSegs = _TestUtil.nextInt(random(), 3, 5);
    tmp.setSegmentsPerTier(numSegs);
    tmp.setMaxMergeAtOnce(_TestUtil.nextInt(random(), 2, numSegs));
    mp = tmp;
  } else {
    LogMergePolicy lmp = newLogMergePolicy(random());
    lmp.setMergeFactor(_TestUtil.nextInt(random(), 3, 5));
    mp = lmp;
  }
  // wrap it with a sorting mp
  return new SortingMergePolicy(mp, sorter);
}
 
Developer: jimaguere, Project: Maskana-Gestor-de-Conocimiento, Lines: 18, Source: TestSortingMergePolicy.java

Example 12: testTieredMergePolicySettingsUpdate

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
public void testTieredMergePolicySettingsUpdate() throws IOException {
    IndexSettings indexSettings = indexSettings(Settings.EMPTY);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);

    indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d);

    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0);
    indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001);

    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
    indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1);

    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
    indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT-1);

    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001);
    indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1)).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);

    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getReclaimDeletesWeight(), MergePolicyConfig.DEFAULT_RECLAIM_DELETES_WEIGHT, 0);
    indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING.getKey(), MergePolicyConfig.DEFAULT_RECLAIM_DELETES_WEIGHT + 1).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getReclaimDeletesWeight(), MergePolicyConfig.DEFAULT_RECLAIM_DELETES_WEIGHT + 1, 0);

    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
    indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, 0);

    indexSettings.updateIndexMetaData(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getReclaimDeletesWeight(), MergePolicyConfig.DEFAULT_RECLAIM_DELETES_WEIGHT, 0);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
}
 
Developer: justor, Project: elasticsearch_my, Lines: 41, Source: MergePolicySettingsTests.java

Example 13: MergePolicyConfig

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
public MergePolicyConfig(ESLogger logger, Settings indexSettings) {
    this.logger = logger;
    this.noCFSRatio = parseNoCFSRatio(indexSettings.get(INDEX_COMPOUND_FORMAT, Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO)));
    double forceMergeDeletesPctAllowed = indexSettings.getAsDouble("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED); // percentage
    ByteSizeValue floorSegment = indexSettings.getAsBytesSize("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT);
    int maxMergeAtOnce = indexSettings.getAsInt("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE);
    int maxMergeAtOnceExplicit = indexSettings.getAsInt("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
    // TODO is this really a good default number for max_merge_segment, what happens for large indices, won't they end up with many segments?
    ByteSizeValue maxMergedSegment = indexSettings.getAsBytesSize("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT);
    double segmentsPerTier = indexSettings.getAsDouble("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER);
    double reclaimDeletesWeight = indexSettings.getAsDouble("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT);
    this.mergesEnabled = indexSettings.getAsBoolean(INDEX_MERGE_ENABLED, true);
    if (mergesEnabled == false) {
        logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED);
    }
    maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier);
    mergePolicy.setNoCFSRatio(noCFSRatio);
    mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed);
    mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
    mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
    mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
    mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
    mergePolicy.setSegmentsPerTier(segmentsPerTier);
    mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
    logger.debug("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], reclaim_deletes_weight[{}]",
            forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, reclaimDeletesWeight);
}
 
Developer: baidu, Project: Elasticsearch, Lines: 28, Source: MergePolicyConfig.java

Example 14: getInstance

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
@Override
public MergePolicy getInstance(Map<String, String> config) throws IOException {
  TieredMergePolicy mergePolicy = new TieredMergePolicy();

  if (config.containsKey(SEGMENTS_PER_TIER)) {
    mergePolicy.setSegmentsPerTier(Double.valueOf(config.get(SEGMENTS_PER_TIER)));
  }
  if (config.containsKey(MAX_MERGE_AT_ONCE)) {
    mergePolicy.setMaxMergeAtOnce(Integer.valueOf(config.get(MAX_MERGE_AT_ONCE)));
  }
  return mergePolicy;
}
 
Developer: XiaoMi, Project: linden, Lines: 13, Source: TieredMergePolicyFactory.java

Example 15: beforeClass

import org.apache.lucene.index.TieredMergePolicy; // import the required package/class
@BeforeClass
public static void beforeClass() throws Exception {
  savedFactory = System.getProperty("solr.DirectoryFactory");
  System.setProperty("solr.directoryFactory", "org.apache.solr.core.MockFSDirectoryFactory");
  System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
  System.setProperty("solr.tests.mergePolicy", TieredMergePolicy.class.getName());
  initCore("solrconfig.xml", "schema12.xml");
}
 
Developer: europeana, Project: search, Lines: 9, Source: DirectUpdateHandlerTest.java


Note: The org.apache.lucene.index.TieredMergePolicy class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.