

Java LogMergePolicy.setMergeFactor Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.index.LogMergePolicy.setMergeFactor. If you are wondering how LogMergePolicy.setMergeFactor is used or what it looks like in practice, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.lucene.index.LogMergePolicy.


The following shows 11 code examples of the LogMergePolicy.setMergeFactor method, sorted by popularity by default.
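Before the collected examples, here is a minimal, self-contained sketch of how setMergeFactor is typically wired into an IndexWriterConfig. It assumes a Lucene 5.x/6.x-style API; the class name MergeFactorDemo, the index path, and the sample field are placeholders for illustration, not taken from the examples below.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class MergeFactorDemo {
  public static void main(String[] args) throws Exception {
    // placeholder index location for this sketch
    Directory dir = FSDirectory.open(Paths.get("/tmp/lucene-demo-index"));

    // LogDocMergePolicy is one concrete LogMergePolicy; LogByteSizeMergePolicy works the same way
    LogMergePolicy mergePolicy = new LogDocMergePolicy();
    // merge segments of the same level roughly every 10 segments (10 is also the default)
    mergePolicy.setMergeFactor(10);

    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
    config.setMergePolicy(mergePolicy);

    try (IndexWriter writer = new IndexWriter(dir, config)) {
      Document doc = new Document();
      doc.add(new StringField("id", "1", Field.Store.YES));
      writer.addDocument(doc);
      // further additions trigger merges according to the configured merge factor
    }
  }
}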

Example 1: newSortingMergePolicy

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
static MergePolicy newSortingMergePolicy(Sort sort) {
  // usually create a MP with a low merge factor so that many merges happen
  MergePolicy mp;
  int thingToDo = random().nextInt(3);
  if (thingToDo == 0) {
    TieredMergePolicy tmp = newTieredMergePolicy(random());
    final int numSegs = TestUtil.nextInt(random(), 3, 5);
    tmp.setSegmentsPerTier(numSegs);
    tmp.setMaxMergeAtOnce(TestUtil.nextInt(random(), 2, numSegs));
    mp = tmp;
  } else if (thingToDo == 1) {
    LogMergePolicy lmp = newLogMergePolicy(random());
    lmp.setMergeFactor(TestUtil.nextInt(random(), 3, 5));
    mp = lmp;
  } else {
    // just a regular random one from LTC (could be alcoholic etc)
    mp = newMergePolicy();
  }
  // wrap it with a sorting mp
  return new SortingMergePolicy(mp, sort);
}
 
Developer ID: europeana, Project: search, Lines of code: 22, Source file: TestSortingMergePolicy.java

Example 2: reduceOpenFiles

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
/** just tries to configure things to keep the open file
 * count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setNoCFSRatio(1.0);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setNoCFSRatio(1.0);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    // wtf... shouldnt it be even lower since its 1 by default?!?!
    ((ConcurrentMergeScheduler) ms).setMaxMergesAndThreads(3, 2);
  }
}
 
Developer ID: europeana, Project: search, Lines of code: 22, Source file: TestUtil.java

Example 3: testSubclassConcurrentMergeScheduler

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
public void testSubclassConcurrentMergeScheduler() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(new FailOnlyOnMerge());

  Document doc = new Document();
  Field idField = newStringField("id", "", Field.Store.YES);
  doc.add(idField);
  
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setMergeScheduler(new MyMergeScheduler())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
      .setMergePolicy(newLogMergePolicy()));
  LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
  logMP.setMergeFactor(10);
  for(int i=0;i<20;i++)
    writer.addDocument(doc);

  ((MyMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
  writer.close();
  
  assertTrue(mergeThreadCreated);
  assertTrue(mergeCalled);
  assertTrue(excCalled);
  dir.close();
}
 
Developer ID: europeana, Project: search, Lines of code: 26, Source file: TestMergeSchedulerExternal.java

Example 4: reduceOpenFiles

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
/** just tries to configure things to keep the open file
 * count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setUseCompoundFile(true);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setUseCompoundFile(true);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    ((ConcurrentMergeScheduler) ms).setMaxThreadCount(2);
    ((ConcurrentMergeScheduler) ms).setMaxMergeCount(3);
  }
}
 
Developer ID: pkarmstr, Project: NYBC, Lines of code: 22, Source file: _TestUtil.java

Example 5: testSubclassConcurrentMergeScheduler

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
public void testSubclassConcurrentMergeScheduler() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(new FailOnlyOnMerge());

  Document doc = new Document();
  Field idField = newStringField("id", "", Field.Store.YES);
  doc.add(idField);
  
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergeScheduler(new MyMergeScheduler())
      .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
      .setMergePolicy(newLogMergePolicy()));
  LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
  logMP.setMergeFactor(10);
  for(int i=0;i<20;i++)
    writer.addDocument(doc);

  ((MyMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
  writer.close();
  
  assertTrue(mergeThreadCreated);
  assertTrue(mergeCalled);
  assertTrue(excCalled);
  dir.close();
}
 
Developer ID: pkarmstr, Project: NYBC, Lines of code: 26, Source file: TestMergeSchedulerExternal.java

Example 6: newSortingMergePolicy

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
static MergePolicy newSortingMergePolicy(Sorter sorter) {
  // create a MP with a low merge factor so that many merges happen
  MergePolicy mp;
  if (random().nextBoolean()) {
    TieredMergePolicy tmp = newTieredMergePolicy(random());
    final int numSegs = _TestUtil.nextInt(random(), 3, 5);
    tmp.setSegmentsPerTier(numSegs);
    tmp.setMaxMergeAtOnce(_TestUtil.nextInt(random(), 2, numSegs));
    mp = tmp;
  } else {
    LogMergePolicy lmp = newLogMergePolicy(random());
    lmp.setMergeFactor(_TestUtil.nextInt(random(), 3, 5));
    mp = lmp;
  }
  // wrap it with a sorting mp
  return new SortingMergePolicy(mp, sorter);
}
 
Developer ID: jimaguere, Project: Maskana-Gestor-de-Conocimiento, Lines of code: 18, Source file: TestSortingMergePolicy.java

Example 7: getIndexWriter

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
public static IndexWriter getIndexWriter(String indexPath, boolean create) throws IOException {
    Directory dir = FSDirectory.open(Paths.get(indexPath));
    Analyzer analyzer = new SmartChineseAnalyzer();
    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();
    mergePolicy.setMergeFactor(50);
    mergePolicy.setMaxMergeDocs(5000);
    // apply the policy to the config; without this call the merge factor above has no effect
    iwc.setMergePolicy(mergePolicy);
    if (create){
        iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    } else {
        iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    }
    return new IndexWriter(dir, iwc);
}
 
Developer ID: neal1991, Project: everywhere, Lines of code: 15, Source file: IndexUtil.java

Example 8: newLogMergePolicy

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
public static LogMergePolicy newLogMergePolicy(Random r) {
  LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
  logmp.setCalibrateSizeByDeletes(r.nextBoolean());
  if (rarely(r)) {
    logmp.setMergeFactor(TestUtil.nextInt(r, 2, 9));
  } else {
    logmp.setMergeFactor(TestUtil.nextInt(r, 10, 50));
  }
  configureRandom(r, logmp);
  return logmp;
}
 
Developer ID: europeana, Project: search, Lines of code: 12, Source file: LuceneTestCase.java

Example 9: clearWorkflowInstances

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
@Override
public synchronized boolean clearWorkflowInstances() throws InstanceRepositoryException {
  IndexWriter writer = null;
  try {
      IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
      config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
      LogMergePolicy lmp =new LogDocMergePolicy();
      lmp.setMergeFactor(mergeFactor);
      config.setMergePolicy(lmp);

      writer = new IndexWriter(indexDir, config);
      LOG.log(Level.FINE,
              "LuceneWorkflowEngine: remove all workflow instances");
      writer.deleteDocuments(new Term("myfield", "myvalue"));
  } catch (IOException e) {
      LOG.log(Level.SEVERE, e.getMessage());
      LOG.log(Level.WARNING,
              "Exception removing workflow instances from index: Message: "
                      + e.getMessage());
      throw new InstanceRepositoryException(e.getMessage());
  } finally {
    if (writer != null){
      try{
        writer.close();
      }
      catch(Exception ignore){}
      
      writer = null;
    }

  }
  
  return true;
}
 
Developer ID: apache, Project: oodt, Lines of code: 36, Source file: LuceneWorkflowInstanceRepository.java

Example 10: testOpenIfChangedManySegments

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
@Test
public void testOpenIfChangedManySegments() throws Exception {
  // test openIfChanged() when the taxonomy contains many segments
  Directory dir = newDirectory();
  
  DirectoryTaxonomyWriter writer = new DirectoryTaxonomyWriter(dir) {
    @Override
    protected IndexWriterConfig createIndexWriterConfig(OpenMode openMode) {
      IndexWriterConfig conf = super.createIndexWriterConfig(openMode);
      LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
      lmp.setMergeFactor(2);
      return conf;
    }
  };
  TaxonomyReader reader = new DirectoryTaxonomyReader(writer);
  
  int numRounds = random().nextInt(10) + 10;
  int numCategories = 1; // one for root
  for (int i = 0; i < numRounds; i++) {
    int numCats = random().nextInt(4) + 1;
    for (int j = 0; j < numCats; j++) {
      writer.addCategory(new FacetLabel(Integer.toString(i), Integer.toString(j)));
    }
    numCategories += numCats + 1 /* one for round-parent */;
    TaxonomyReader newtr = TaxonomyReader.openIfChanged(reader);
    assertNotNull(newtr);
    reader.close();
    reader = newtr;
    
    // assert categories
    assertEquals(numCategories, reader.getSize());
    int roundOrdinal = reader.getOrdinal(new FacetLabel(Integer.toString(i)));
    int[] parents = reader.getParallelTaxonomyArrays().parents();
    assertEquals(0, parents[roundOrdinal]); // round's parent is root
    for (int j = 0; j < numCats; j++) {
      int ord = reader.getOrdinal(new FacetLabel(Integer.toString(i), Integer.toString(j)));
      assertEquals(roundOrdinal, parents[ord]); // each category's parent is the round
    }
  }

  reader.close();
  writer.close();
  dir.close();
}
 
Developer ID: europeana, Project: search, Lines of code: 45, Source file: TestDirectoryTaxonomyReader.java

Example 11: testOpenIfChangedManySegments

import org.apache.lucene.index.LogMergePolicy; // import the package/class this method depends on
@Test
public void testOpenIfChangedManySegments() throws Exception {
  // test openIfChanged() when the taxonomy contains many segments
  Directory dir = newDirectory();
  
  DirectoryTaxonomyWriter writer = new DirectoryTaxonomyWriter(dir) {
    @Override
    protected IndexWriterConfig createIndexWriterConfig(OpenMode openMode) {
      IndexWriterConfig conf = super.createIndexWriterConfig(openMode);
      LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
      lmp.setMergeFactor(2);
      return conf;
    }
  };
  TaxonomyReader reader = new DirectoryTaxonomyReader(writer);
  
  int numRounds = random().nextInt(10) + 10;
  int numCategories = 1; // one for root
  for (int i = 0; i < numRounds; i++) {
    int numCats = random().nextInt(4) + 1;
    for (int j = 0; j < numCats; j++) {
      writer.addCategory(new CategoryPath(Integer.toString(i), Integer.toString(j)));
    }
    numCategories += numCats + 1 /* one for round-parent */;
    TaxonomyReader newtr = TaxonomyReader.openIfChanged(reader);
    assertNotNull(newtr);
    reader.close();
    reader = newtr;
    
    // assert categories
    assertEquals(numCategories, reader.getSize());
    int roundOrdinal = reader.getOrdinal(new CategoryPath(Integer.toString(i)));
    int[] parents = reader.getParallelTaxonomyArrays().parents();
    assertEquals(0, parents[roundOrdinal]); // round's parent is root
    for (int j = 0; j < numCats; j++) {
      int ord = reader.getOrdinal(new CategoryPath(Integer.toString(i), Integer.toString(j)));
      assertEquals(roundOrdinal, parents[ord]); // each category's parent is the round
    }
  }

  reader.close();
  writer.close();
  dir.close();
}
 
Developer ID: pkarmstr, Project: NYBC, Lines of code: 45, Source file: TestDirectoryTaxonomyReader.java


Note: The org.apache.lucene.index.LogMergePolicy.setMergeFactor method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.