当前位置: 首页>>代码示例>>Java>>正文


Java RatioBasedCompactionPolicy类代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy的典型用法代码示例。如果您正苦于以下问题:Java RatioBasedCompactionPolicy类的具体用法?Java RatioBasedCompactionPolicy怎么用?Java RatioBasedCompactionPolicy使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


RatioBasedCompactionPolicy类属于org.apache.hadoop.hbase.regionserver.compactions包,在下文中一共展示了RatioBasedCompactionPolicy类的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testCompactionEmptyHFile

import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; // import the required package/class
/**
 * Verifies that an expired, empty store file is never selected for compaction.
 * Temporarily installs a ScanInfo with a 600-second TTL, marks the single
 * candidate file as empty (zero entries, unset time range), and expects the
 * default ratio-based policy to return an empty selection.
 *
 * @throws IOException if compaction selection fails
 */
public void testCompactionEmptyHFile() throws IOException {
  // Swap in a ScanInfo carrying a 600s TTL; the original is restored at the end.
  ScanInfo savedScanInfo = store.getScanInfo();
  ScanInfo ttlScanInfo = new ScanInfo(savedScanInfo.getConfiguration(), savedScanInfo.getFamily(),
      savedScanInfo.getMinVersions(), savedScanInfo.getMaxVersions(), 600,
      savedScanInfo.getKeepDeletedCells(), savedScanInfo.getTimeToPurgeDeletes(),
      savedScanInfo.getComparator());
  store.setScanInfo(ttlScanInfo);
  // Build one candidate and make it look empty: no entries, unset time range.
  List<StoreFile> storeFiles = sfCreate(0);
  for (StoreFile sf : storeFiles) {
    if (sf instanceof MockStoreFile) {
      MockStoreFile mock = (MockStoreFile) sf;
      mock.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
      mock.setEntries(0);
    }
  }
  // The default (ratio-based) policy must not pick the empty file.
  RatioBasedCompactionPolicy policy =
      (RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy();
  CompactionRequest request = policy.selectCompaction(storeFiles,
      new ArrayList<StoreFile>(), false, false, false);
  assertTrue(request.getFiles().isEmpty());
  store.setScanInfo(savedScanInfo);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:25,代码来源:TestDefaultCompactSelection.java

示例2: testCompactionEmptyHFile

import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; // import the required package/class
/**
 * Verifies that an expired, empty store file is never selected for compaction.
 * Temporarily installs a ScanInfo with a 600-second TTL, marks the single
 * candidate file as empty (zero entries, unset time range), and expects the
 * default ratio-based policy to return an empty selection.
 *
 * @throws IOException if compaction selection fails
 */
public void testCompactionEmptyHFile() throws IOException {
  // Swap in a ScanInfo carrying a 600s TTL; the original is restored at the end.
  ScanInfo savedScanInfo = store.getScanInfo();
  ScanInfo ttlScanInfo = new ScanInfo(savedScanInfo.getFamily(),
      savedScanInfo.getMinVersions(), savedScanInfo.getMaxVersions(), 600,
      savedScanInfo.getKeepDeletedCells(), savedScanInfo.getTimeToPurgeDeletes(),
      savedScanInfo.getComparator());
  store.setScanInfo(ttlScanInfo);
  // Build one candidate and make it look empty: no entries, unset time range.
  List<StoreFile> storeFiles = sfCreate(0);
  for (StoreFile sf : storeFiles) {
    if (sf instanceof MockStoreFile) {
      MockStoreFile mock = (MockStoreFile) sf;
      mock.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
      mock.setEntries(0);
    }
  }
  // The default (ratio-based) policy must not pick the empty file.
  RatioBasedCompactionPolicy policy =
      (RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy();
  CompactionRequest request = policy.selectCompaction(storeFiles,
      new ArrayList<StoreFile>(), false, false, false);
  assertTrue(request.getFiles().isEmpty());
  store.setScanInfo(savedScanInfo);
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:25,代码来源:TestDefaultCompactSelection.java

示例3: testCompactionEmptyHFile

import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; // import the required package/class
/**
 * Verifies that an expired, empty store file is never selected for compaction.
 * Customizes the store's ScanInfo to a 600-second TTL, marks the single
 * candidate file as empty (zero entries, unset SYNC time range), and expects
 * the default ratio-based policy to return an empty selection.
 *
 * @throws IOException if compaction selection fails
 */
@Test
public void testCompactionEmptyHFile() throws IOException {
  // Swap in a ScanInfo carrying a 600s TTL; the original is restored at the end.
  ScanInfo savedScanInfo = store.getScanInfo();
  ScanInfo ttlScanInfo = savedScanInfo.customize(savedScanInfo.getMaxVersions(), 600,
      savedScanInfo.getKeepDeletedCells());
  store.setScanInfo(ttlScanInfo);
  // Build one candidate and make it look empty: no entries, unset time range.
  List<HStoreFile> storeFiles = sfCreate(0);
  for (HStoreFile sf : storeFiles) {
    if (sf instanceof MockHStoreFile) {
      MockHStoreFile mock = (MockHStoreFile) sf;
      mock.setTimeRangeTracker(TimeRangeTracker.create(TimeRangeTracker.Type.SYNC, -1, -1));
      mock.setEntries(0);
    }
  }
  // The default (ratio-based) policy must not pick the empty file.
  RatioBasedCompactionPolicy policy =
      (RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy();
  CompactionRequestImpl request =
      policy.selectCompaction(storeFiles, new ArrayList<>(), false, false, false);
  Assert.assertTrue(request.getFiles().isEmpty());
  store.setScanInfo(savedScanInfo);
}
 
开发者ID:apache,项目名称:hbase,代码行数:24,代码来源:TestDefaultCompactSelection.java

示例4: compactEquals

import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; // import the required package/class
/**
 * Runs the default (ratio-based) compaction selection over {@code candidates}
 * and asserts the selected file sizes match {@code expected}, in order.
 * Also checks the off-peak flag is propagated for non-forced off-peak requests.
 * Resets {@code store.forceMajor} to false before returning.
 *
 * @param candidates store files offered to the policy
 * @param forcemajor whether to force a major compaction
 * @param isOffPeak whether the request is made during off-peak hours
 * @param expected expected sizes of the selected files, in selection order
 * @throws IOException if compaction selection fails
 */
void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak,
    long ... expected)
throws IOException {
  store.forceMajor = forcemajor;
  // Exercise the default compaction policy.
  RatioBasedCompactionPolicy policy =
      (RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy();
  CompactionRequest request = policy.selectCompaction(candidates,
      new ArrayList<StoreFile>(), false, isOffPeak, forcemajor);
  List<StoreFile> selected = new ArrayList<StoreFile>(request.getFiles());
  if (isOffPeak && !forcemajor) {
    assertTrue(request.isOffPeak());
  }
  assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(selected)));
  store.forceMajor = false;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:15,代码来源:TestDefaultCompactSelection.java

示例5: compactEquals

import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; // import the required package/class
/**
 * Runs the default (ratio-based) compaction selection over {@code candidates}
 * and asserts the selected file sizes match {@code expected}, in order.
 * Also checks the off-peak flag is propagated for non-forced off-peak requests.
 * Resets {@code store.forceMajor} to false before returning.
 *
 * @param candidates store files offered to the policy
 * @param forcemajor whether to force a major compaction
 * @param isOffPeak whether the request is made during off-peak hours
 * @param expected expected sizes of the selected files, in selection order
 * @throws IOException if compaction selection fails
 */
void compactEquals(List<HStoreFile> candidates, boolean forcemajor, boolean isOffPeak,
    long... expected) throws IOException {
  store.forceMajor = forcemajor;
  // Exercise the default compaction policy.
  RatioBasedCompactionPolicy policy =
      (RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy();
  CompactionRequestImpl request =
      policy.selectCompaction(candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
  List<HStoreFile> selected = new ArrayList<>(request.getFiles());
  if (isOffPeak && !forcemajor) {
    Assert.assertTrue(request.isOffPeak());
  }
  Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(selected)));
  store.forceMajor = false;
}
 
开发者ID:apache,项目名称:hbase,代码行数:15,代码来源:TestCompactionPolicy.java

示例6: testTimeBasedMajorCompaction

import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; // import the required package/class
/**
 * Verifies time-based (periodic) major compaction: with
 * MAJOR_COMPACTION_PERIOD shortened to 10s (+/- 20% jitter), a regular
 * compaction request issued after the period elapses is upgraded to a major
 * compaction that collapses all store files into one. Also checks that the
 * policy's next-major-compaction time is deterministic for a fixed file set
 * and falls within the configured jitter window.
 */
@Test
public void testTimeBasedMajorCompaction() throws Exception {
  // create 2 storefiles and force a major compaction to reset the time
  int delay = 10 * 1000; // 10 sec
  float jitterPct = 0.20f; // 20%
  conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
  conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);

  HStore s = ((HStore) r.getStore(COLUMN_FAMILY));
  // Push the shortened period/jitter into the store's compaction policy.
  s.storeEngine.getCompactionPolicy().setConf(conf);
  try {
    createStoreFile(r);
    createStoreFile(r);
    r.compact(true);

    // add one more file & verify that a regular compaction won't work
    createStoreFile(r);
    r.compact(false);
    assertEquals(2, s.getStorefilesCount());

    // ensure that major compaction time is deterministic
    RatioBasedCompactionPolicy
        c = (RatioBasedCompactionPolicy)s.storeEngine.getCompactionPolicy();
    Collection<StoreFile> storeFiles = s.getStorefiles();
    long mcTime = c.getNextMajorCompactTime(storeFiles);
    // Repeated queries over the same file set must return the same time.
    for (int i = 0; i < 10; ++i) {
      assertEquals(mcTime, c.getNextMajorCompactTime(storeFiles));
    }

    // ensure that the major compaction time is within the variance
    long jitter = Math.round(delay * jitterPct);
    assertTrue(delay - jitter <= mcTime && mcTime <= delay + jitter);

    // wait until the time-based compaction interval
    Thread.sleep(mcTime);

    // trigger a compaction request and ensure that it's upgraded to major
    r.compact(false);
    assertEquals(1, s.getStorefilesCount());
  } finally {
    // reset the timed compaction settings
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
    // run a major to reset the cache
    createStoreFile(r);
    r.compact(true);
    assertEquals(1, s.getStorefilesCount());
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:50,代码来源:TestMajorCompaction.java

示例7: testTimeBasedMajorCompaction

import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; // import the required package/class
/**
 * Verifies time-based (periodic) major compaction: with
 * MAJOR_COMPACTION_PERIOD shortened to 10s (+/- 20% jitter), a regular
 * compaction request issued after the period elapses is upgraded to a major
 * compaction that collapses all store files into one. Also checks that the
 * policy's next-major-compaction time is deterministic for a fixed file set
 * and falls within the configured jitter window.
 */
@Test
public void testTimeBasedMajorCompaction() throws Exception {
  // create 2 storefiles and force a major compaction to reset the time
  int delay = 10 * 1000; // 10 sec
  float jitterPct = 0.20f; // 20%
  conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
  conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);

  HStore s = ((HStore) r.getStore(COLUMN_FAMILY));
  // Push the shortened period/jitter into the store's compaction policy.
  s.storeEngine.getCompactionPolicy().setConf(conf);
  try {
    createStoreFile(r);
    createStoreFile(r);
    r.compactStores(true);

    // add one more file & verify that a regular compaction won't work
    createStoreFile(r);
    r.compactStores(false);
    assertEquals(2, s.getStorefilesCount());

    // ensure that major compaction time is deterministic
    RatioBasedCompactionPolicy
        c = (RatioBasedCompactionPolicy)s.storeEngine.getCompactionPolicy();
    Collection<StoreFile> storeFiles = s.getStorefiles();
    long mcTime = c.getNextMajorCompactTime(storeFiles);
    // Repeated queries over the same file set must return the same time.
    for (int i = 0; i < 10; ++i) {
      assertEquals(mcTime, c.getNextMajorCompactTime(storeFiles));
    }

    // ensure that the major compaction time is within the variance
    long jitter = Math.round(delay * jitterPct);
    assertTrue(delay - jitter <= mcTime && mcTime <= delay + jitter);

    // wait until the time-based compaction interval
    Thread.sleep(mcTime);

    // trigger a compaction request and ensure that it's upgraded to major
    r.compactStores(false);
    assertEquals(1, s.getStorefilesCount());
  } finally {
    // reset the timed compaction settings
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
    // run a major to reset the cache
    createStoreFile(r);
    r.compactStores(true);
    assertEquals(1, s.getStorefilesCount());
  }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:50,代码来源:TestMajorCompaction.java

示例8: testTimeBasedMajorCompaction

import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; // import the required package/class
/**
 * Verifies time-based (periodic) major compaction: with
 * MAJOR_COMPACTION_PERIOD shortened to 10s (+/- 20% jitter), a regular
 * compaction request issued after the period elapses is upgraded to a major
 * compaction that collapses all store files into one. Also checks that the
 * policy's next-major-compaction time is deterministic for a fixed file set
 * and falls within the configured jitter window.
 */
@Test
public void testTimeBasedMajorCompaction() throws Exception {
  // create 2 storefiles and force a major compaction to reset the time
  int delay = 10 * 1000; // 10 sec
  float jitterPct = 0.20f; // 20%
  conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
  conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);

  HStore s = ((HStore) r.getStore(COLUMN_FAMILY));
  // Push the shortened period/jitter into the store's compaction policy.
  s.storeEngine.getCompactionPolicy().setConf(conf);
  try {
    createStoreFile(r);
    createStoreFile(r);
    r.compact(true);

    // add one more file & verify that a regular compaction won't work
    createStoreFile(r);
    r.compact(false);
    assertEquals(2, s.getStorefilesCount());

    // ensure that major compaction time is deterministic
    RatioBasedCompactionPolicy
        c = (RatioBasedCompactionPolicy)s.storeEngine.getCompactionPolicy();
    Collection<HStoreFile> storeFiles = s.getStorefiles();
    long mcTime = c.getNextMajorCompactTime(storeFiles);
    // Repeated queries over the same file set must return the same time.
    for (int i = 0; i < 10; ++i) {
      assertEquals(mcTime, c.getNextMajorCompactTime(storeFiles));
    }

    // ensure that the major compaction time is within the variance
    long jitter = Math.round(delay * jitterPct);
    assertTrue(delay - jitter <= mcTime && mcTime <= delay + jitter);

    // wait until the time-based compaction interval
    Thread.sleep(mcTime);

    // trigger a compaction request and ensure that it's upgraded to major
    r.compact(false);
    assertEquals(1, s.getStorefilesCount());
  } finally {
    // reset the timed compaction settings
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
    // run a major to reset the cache
    createStoreFile(r);
    r.compact(true);
    assertEquals(1, s.getStorefilesCount());
  }
}
 
开发者ID:apache,项目名称:hbase,代码行数:50,代码来源:TestMajorCompaction.java


注:本文中的org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。