

Java HBaseAdmin.majorCompact Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HBaseAdmin.majorCompact. If you are wondering how HBaseAdmin.majorCompact is used in practice, the curated examples below may help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.hbase.client.HBaseAdmin.


Fifteen code examples of HBaseAdmin.majorCompact are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
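Before the numbered examples, here is a minimal, self-contained sketch of the most common call: requesting a major compaction of a whole table. This is a sketch only; it assumes a reachable cluster whose configuration (hbase-site.xml) is on the classpath, and the table name "my_table" is a hypothetical placeholder. It uses the pre-1.0 HBaseAdmin client API, matching the examples below. Note that majorCompact merely queues the compaction and returns immediately, which is why many of the examples afterwards poll the region server's compaction queue to detect completion.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class MajorCompactSketch {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml / hbase-default.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf); // pre-1.0 API, deprecated in later releases
    try {
      // Asynchronously requests a major compaction of every region of the table.
      // "my_table" is a placeholder; substitute a real table name.
      admin.majorCompact("my_table");
    } finally {
      admin.close();
    }
  }
}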

Example 1: compactAndWait

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
private void compactAndWait() throws IOException, InterruptedException {
  LOG.debug("Compacting table " + tableName);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  admin.majorCompact(tableName);

  // Wait up to 500 ms for the compaction to start.
  final long maxWaitTime = System.currentTimeMillis() + 500;
  boolean cont;
  do {
    cont = rs.compactSplitThread.getCompactionQueueSize() == 0;
    Threads.sleep(1);
  } while (cont && System.currentTimeMillis() < maxWaitTime);

  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(1);
  }
  LOG.debug("Compaction queue size reached 0, continuing");
}
 
Author: fengchen8086, Project: ditb, Lines of code: 20, Source file: TestChangingEncoding.java

Example 2: perform

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
@Override
public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  HBaseAdmin admin = util.getHBaseAdmin();
  boolean major = RandomUtils.nextInt(100) < majorRatio;

  LOG.info("Performing action: Compact table " + tableName + ", major=" + major);
  try {
    if (major) {
      admin.majorCompact(tableNameBytes);
    } else {
      admin.compact(tableNameBytes);
    }
  } catch (Exception ex) {
    LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
 
Author: tenggyut, Project: HIndex, Lines of code: 21, Source file: CompactTableAction.java

Example 3: loadTest

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
@Test(timeout=TIMEOUT_MS)
public void loadTest() throws Exception {
  HBaseAdmin admin = new HBaseAdmin(conf);

  compression = Compression.Algorithm.GZ; // used for table setup
  super.loadTest();

  HColumnDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
  HTable t = new HTable(this.conf, TABLE);
  assertAllOnLine(t);

  admin.disableTable(TABLE);
  admin.modifyColumn(TABLE, hcd);

  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);

  System.err.println("\nNew column descriptor: " +
      getColumnDesc(admin) + "\n");

  // The table may not have all regions online yet. Assert they are online
  // before moving on to the major compaction.
  assertAllOnLine(t);

  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE.getName());
  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }

  System.err.println("\nDone with the test, shutting down the cluster\n");
}
 
Author: fengchen8086, Project: ditb, Lines of code: 37, Source file: TestLoadAndSwitchEncodeOnDisk.java

Example 4: compactAndBlockUntilDone

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
public static void compactAndBlockUntilDone(HBaseAdmin admin, HRegionServer rs, byte[] regionName)
    throws IOException, InterruptedException {
  log("Compacting region: " + Bytes.toStringBinary(regionName));
  admin.majorCompact(regionName);
  log("blocking until compaction is complete: " + Bytes.toStringBinary(regionName));
  Threads.sleepWithoutInterrupt(500);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 11, Source file: TestEndToEndSplitTransaction.java

Example 5: loadTest

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
@Test(timeout=TIMEOUT_MS)
public void loadTest() throws Exception {
  HBaseAdmin admin = new HBaseAdmin(conf);

  compression = Compression.Algorithm.GZ; // used for table setup
  super.loadTest();

  HColumnDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " +
      hcd + "\n");
  admin.disableTable(TABLE);
  hcd.setEncodeOnDisk(false);
  admin.modifyColumn(TABLE, hcd);

  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);

  System.err.println("\nNew column descriptor: " +
      getColumnDesc(admin) + "\n");

  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE);
  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }

  System.err.println("\nDone with the test, shutting down the cluster\n");
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 32, Source file: TestLoadAndSwitchEncodeOnDisk.java

Example 6: compactTable

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
private void compactTable(String tableName)
    throws IOException, InterruptedException {
  AbstractHBaseUtils hbaseUtils = dt.getHbaseUtils();
  HBaseAdmin admin = new HBaseAdmin(hbaseUtils.getHbcfg());
  admin.flush(tableName);
  admin.majorCompact(tableName);
  admin.close();
}
 
Author: dkmfbk, Project: knowledgestore, Lines of code: 10, Source file: HBaseLowlevelUtilities.java

Example 7: perform

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
@Override
public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  HBaseAdmin admin = util.getHBaseAdmin();
  boolean major = RandomUtils.nextInt(100) < majorRatio;

  LOG.info("Performing action: Compact random region of table "
    + tableName + ", major=" + major);
  List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
  if (regions == null || regions.isEmpty()) {
    LOG.info("Table " + tableName + " doesn't have regions to compact");
    return;
  }

  HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
    regions.toArray(new HRegionInfo[regions.size()]));

  try {
    if (major) {
      LOG.debug("Major compacting region " + region.getRegionNameAsString());
      admin.majorCompact(region.getRegionName());
    } else {
      LOG.debug("Compacting region " + region.getRegionNameAsString());
      admin.compact(region.getRegionName());
    }
  } catch (Exception ex) {
    LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
 
Author: tenggyut, Project: HIndex, Lines of code: 33, Source file: CompactRandomRegionOfTableAction.java

Example 8: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
/**
 * This test covers the scenario reported in HBASE-6901:
 * all files are bulk loaded and excluded from minor compaction.
 * Without the fix for HBASE-6901, an ArrayIndexOutOfBoundsException
 * would be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.setJobWithoutMRCluster();
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniCluster();
  }
}
 
Author: fengchen8086, Project: ditb, Lines of code: 72, Source file: TestHFileOutputFormat.java

Example 9: testExcludeMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.setJobWithoutMRCluster();
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getTestFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniCluster();
  }
}
 
Author: fengchen8086, Project: ditb, Lines of code: 76, Source file: TestHFileOutputFormat.java

Example 10: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
/**
 * This test covers the scenario reported in HBASE-6901:
 * all files are bulk loaded and excluded from minor compaction.
 * Without the fix for HBASE-6901, an ArrayIndexOutOfBoundsException
 * would be thrown.
 */
@Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = Store.getStoreHomedir(
        HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDir("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 73, Source file: TestHFileOutputFormat.java

Example 11: testExcludeMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  Path testDir = util.getDataTestDir("testExcludeMinorCompaction");
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = Store.getStoreHomedir(
        HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME);
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 77, Source file: TestHFileOutputFormat.java

Example 12: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
/**
 * This test covers the scenario reported in HBASE-6901:
 * all files are bulk loaded and excluded from minor compaction.
 * Without the fix for HBASE-6901, an ArrayIndexOutOfBoundsException
 * would be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
      FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
          Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Author: grokcoder, Project: pbase, Lines of code: 73, Source file: TestHFileOutputFormat2.java

Example 13: testExcludeMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
      FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
          Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Author: grokcoder, Project: pbase, Lines of code: 77, Source file: TestHFileOutputFormat2.java

Example 14: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
/**
 * This test covers the scenario reported in HBASE-6901:
 * all files are bulk loaded and excluded from minor compaction.
 * Without the fix for HBASE-6901, an ArrayIndexOutOfBoundsException
 * would be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Author: grokcoder, Project: pbase, Lines of code: 73, Source file: TestHFileOutputFormat.java

Example 15: testExcludeMinorCompaction

import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class this method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Author: grokcoder, Project: pbase, Lines of code: 77, Source file: TestHFileOutputFormat.java


Note: The org.apache.hadoop.hbase.client.HBaseAdmin.majorCompact method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please follow the license of the corresponding project. Do not reproduce without permission.