当前位置: 首页>>代码示例>>Java>>正文


Java HBaseAdmin.compact方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.client.HBaseAdmin.compact方法的典型用法代码示例。如果您正苦于以下问题:Java HBaseAdmin.compact方法的具体用法?Java HBaseAdmin.compact怎么用?Java HBaseAdmin.compact使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.client.HBaseAdmin的用法示例。


在下文中一共展示了HBaseAdmin.compact方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: perform

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
@Override
public void perform() throws Exception {
  // Chaos-monkey action: trigger a (possibly major) compaction on the target table.
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  HBaseAdmin admin = util.getHBaseAdmin();
  // majorRatio is a percentage: pick a major compaction with that probability.
  boolean major = RandomUtils.nextInt(100) < majorRatio;

  LOG.info("Performing action: Compact table " + tableName + ", major=" + major);
  try {
    if (major) {
      admin.majorCompact(tableNameBytes);
    } else {
      admin.compact(tableNameBytes);
    }
  } catch (Exception ex) {
    // Failure is tolerated (other chaos actions may race with us), but log the
    // throwable itself so the stack trace is preserved — getMessage() alone can
    // be null and hides the cause.
    LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage(), ex);
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:21,代码来源:CompactTableAction.java

示例2: init

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Seeds the table with three flushed store files — row1 and row2 at timestamp
 * {@code l}, then an overwrite of row1 at the same timestamp — compacts, and
 * verifies that only the newest row1 cell ("version1") survives.
 *
 * @return the open HTable; ownership passes to the caller, who must close it.
 */
private HTable init(HBaseAdmin admin, long l, Scan scan, TableName tableName) throws Exception {
  HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
  putAndFlush(admin, table, tableName, "row1", l, "version0");
  putAndFlush(admin, table, tableName, "row2", l, "version0");
  // Same row/timestamp as the first put: compaction must keep the newer cell.
  putAndFlush(admin, table, tableName, "row1", l, "version1");
  admin.compact(tableName);

  ResultScanner scanner = table.getScanner(scan);
  try {
    Result result = scanner.next();
    List<KeyValue> kvs = result.getColumn(Bytes.toBytes("col"), Bytes.toBytes("q"));
    Assert.assertEquals(1, kvs.size());
    Assert.assertEquals("version1", Bytes.toString(kvs.get(0).getValue()));
  } finally {
    // Always release the scanner, even when an assertion above fails.
    scanner.close();
  }
  return table;
}

/** Writes one cell (row, col:q) with the given timestamp and value, then flushes the table. */
private void putAndFlush(HBaseAdmin admin, HTable table, TableName tableName,
    String row, long ts, String value) throws Exception {
  Put put = new Put(Bytes.toBytes(row));
  put.add(new KeyValue(Bytes.toBytes(row), Bytes.toBytes("col"), Bytes.toBytes("q"), ts,
      Bytes.toBytes(value)));
  table.put(put);
  admin.flush(tableName);
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:28,代码来源:TestScannerWithBulkload.java

示例3: perform

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
@Override
public void perform() throws Exception {
  // Chaos-monkey action: compact one randomly chosen region of the target table.
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  HBaseAdmin admin = util.getHBaseAdmin();
  // majorRatio is a percentage: pick a major compaction with that probability.
  boolean major = RandomUtils.nextInt(100) < majorRatio;

  LOG.info("Performing action: Compact random region of table "
    + tableName + ", major=" + major);
  List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
  if (regions == null || regions.isEmpty()) {
    LOG.info("Table " + tableName + " doesn't have regions to compact");
    return;
  }

  HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
    regions.toArray(new HRegionInfo[regions.size()]));

  try {
    if (major) {
      LOG.debug("Major compacting region " + region.getRegionNameAsString());
      admin.majorCompact(region.getRegionName());
    } else {
      LOG.debug("Compacting region " + region.getRegionNameAsString());
      admin.compact(region.getRegionName());
    }
  } catch (Exception ex) {
    // Failure is tolerated (other chaos actions may race with us), but log the
    // throwable itself so the stack trace is preserved — getMessage() alone can
    // be null and hides the cause.
    LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage(), ex);
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:33,代码来源:CompactRandomRegionOfTableAction.java

示例4: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  // Declared outside the try so they can be closed in the finally block;
  // HBaseAdmin and HTable are Closeable and otherwise leak client connections.
  HBaseAdmin admin = null;
  HTable table = null;
  try {
    util.setJobWithoutMRCluster();
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    admin = new HBaseAdmin(conf);
    table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    // Release client resources before tearing down the mini cluster.
    if (table != null) table.close();
    if (admin != null) admin.close();
    util.shutdownMiniCluster();
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:72,代码来源:TestHFileOutputFormat.java

示例5: testExcludeMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Verifies that a bulk-loaded file flagged with the compaction-exclude marker
 * survives a minor compaction but is merged away by a major compaction.
 */
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  // Declared outside the try so they can be closed in the finally block;
  // HBaseAdmin and HTable are Closeable and otherwise leak client connections.
  HBaseAdmin admin = null;
  HTable table = null;
  try {
    util.setJobWithoutMRCluster();
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getTestFileSystem();
    admin = new HBaseAdmin(conf);
    table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    // Release client resources before tearing down the mini cluster.
    if (table != null) table.close();
    if (admin != null) admin.close();
    util.shutdownMiniCluster();
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:76,代码来源:TestHFileOutputFormat.java

示例6: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  // Declared outside the try so they can be closed in the finally block;
  // HBaseAdmin and HTable are Closeable and otherwise leak client connections.
  HBaseAdmin admin = null;
  HTable table = null;
  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    admin = new HBaseAdmin(conf);
    table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = Store.getStoreHomedir(
        HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDir("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    // Release client resources before tearing down the mini clusters.
    if (table != null) table.close();
    if (admin != null) admin.close();
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:73,代码来源:TestHFileOutputFormat.java

示例7: testExcludeMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Verifies that a bulk-loaded file flagged with the compaction-exclude marker
 * survives a minor compaction but is merged away by a major compaction.
 */
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  Path testDir = util.getDataTestDir("testExcludeMinorCompaction");
  generateRandomStartKeys(5);

  // Declared outside the try so they can be closed in the finally block;
  // HBaseAdmin and HTable are Closeable and otherwise leak client connections.
  HBaseAdmin admin = null;
  HTable table = null;
  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    admin = new HBaseAdmin(conf);
    table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = Store.getStoreHomedir(
        HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME);
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    // Release client resources before tearing down the mini clusters.
    if (table != null) table.close();
    if (admin != null) admin.close();
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:77,代码来源:TestHFileOutputFormat.java

示例8: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  // Declared outside the try so they can be closed in the finally block;
  // HBaseAdmin and HTable are Closeable and otherwise leak client connections.
  HBaseAdmin admin = null;
  HTable table = null;
  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    admin = new HBaseAdmin(conf);
    table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
      FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
          Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    // Release client resources before tearing down the mini clusters.
    if (table != null) table.close();
    if (admin != null) admin.close();
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:73,代码来源:TestHFileOutputFormat2.java

示例9: testExcludeMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Verifies that a bulk-loaded file flagged with the compaction-exclude marker
 * survives a minor compaction but is merged away by a major compaction.
 */
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  // Declared outside the try so they can be closed in the finally block;
  // HBaseAdmin and HTable are Closeable and otherwise leak client connections.
  HBaseAdmin admin = null;
  HTable table = null;
  try {
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    admin = new HBaseAdmin(conf);
    table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
      FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
          Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    // Release client resources before tearing down the mini clusters.
    if (table != null) table.close();
    if (admin != null) admin.close();
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:77,代码来源:TestHFileOutputFormat2.java

示例10: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  // Declared outside the try so they can be closed in the finally block;
  // HBaseAdmin and HTable are Closeable and otherwise leak client connections.
  HBaseAdmin admin = null;
  HTable table = null;
  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    admin = new HBaseAdmin(conf);
    table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    // Release client resources before tearing down the mini clusters.
    if (table != null) table.close();
    if (admin != null) admin.close();
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:73,代码来源:TestHFileOutputFormat.java

示例11: testExcludeMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Verifies that a bulk-loaded file flagged with the compaction-exclude marker
 * survives a minor compaction but is merged away by a major compaction.
 */
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  // Declared outside the try so they can be closed in the finally block;
  // HBaseAdmin and HTable are Closeable and otherwise leak client connections.
  HBaseAdmin admin = null;
  HTable table = null;
  try {
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    admin = new HBaseAdmin(conf);
    table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    // Release client resources before tearing down the mini clusters.
    if (table != null) table.close();
    if (admin != null) admin.close();
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:77,代码来源:TestHFileOutputFormat.java


注:本文中的org.apache.hadoop.hbase.client.HBaseAdmin.compact方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。