

Java HTableDescriptor.setCompactionEnabled Method Code Examples

This article collects typical usages of the Java method org.apache.hadoop.hbase.HTableDescriptor.setCompactionEnabled. If you are wondering what HTableDescriptor.setCompactionEnabled does, how to call it, or where to find real-world usages, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The following presents 7 code examples of the HTableDescriptor.setCompactionEnabled method, sorted by popularity by default.
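Before the examples, here is a minimal usage sketch of the method itself. It is an illustration, not taken from any of the projects below: the Admin handle named admin and the table name "myTable" are assumptions. setCompactionEnabled(boolean) only records the flag in the table descriptor; it takes effect once the descriptor is applied, for example via Admin.modifyTable (which is what the ModifyTableProcedure tests below exercise internally) or when the descriptor is used to create a table.

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Minimal sketch (HBase 1.x API): assumes an existing Admin handle "admin"
// and an existing table; "myTable" is a placeholder name.
TableName tableName = TableName.valueOf("myTable");
HTableDescriptor htd = new HTableDescriptor(admin.getTableDescriptor(tableName));
htd.setCompactionEnabled(false);   // suspend automatic compactions for this table
admin.modifyTable(tableName, htd); // apply the modified descriptor
// ... bulk load or maintenance work, then restore the default:
htd.setCompactionEnabled(true);
admin.modifyTable(tableName, htd);

As the examples below show, the flag is typically toggled together with other descriptor changes (adding or removing column families, setting region replication) so that a single ModifyTableProcedure applies them all.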

Example 1: testRecoveryAndDoubleExecutionOnline

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout = 60000)
public void testRecoveryAndDoubleExecutionOnline() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
  final String cf2 = "cf2";
  final String cf3 = "cf3";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, "cf1", cf3);

  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Modify multiple properties of the table.
  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(cf2));
  htd.removeFamily(cf3.getBytes());

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and execute the step twice
  int numberOfSteps = ModifyTableState.values().length;
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
    ModifyTableState.values());

  // Validate descriptor
  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
  assertEquals(2, currentHtd.getFamiliesKeys().size());
  assertTrue(currentHtd.hasFamily(cf2.getBytes()));
  assertFalse(currentHtd.hasFamily(cf3.getBytes()));

  // cf2 should be added; cf3 should be removed
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, "cf1", cf2);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 41, Source: TestModifyTableProcedure.java

Example 2: testRollbackAndDoubleExecutionOnline

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionOnline() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionOnline");
  final String familyName = "cf2";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, "cf1");

  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(familyName));

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and rollback the step twice
  int numberOfSteps = ModifyTableState.values().length - 4; // failing in the middle of proc
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
    procExec,
    procId,
    numberOfSteps,
    ModifyTableState.values());

  // cf2 should not be present
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, "cf1");
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 34, Source: TestModifyTableProcedure.java

Example 3: testRollbackAndDoubleExecutionOffline

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionOffline() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionOffline");
  final String familyName = "cf2";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, "cf1");
  UTIL.getHBaseAdmin().disableTable(tableName);

  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(familyName));
  htd.setRegionReplication(3);

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and rollback the step twice
  int numberOfSteps = ModifyTableState.values().length - 4; // failing in the middle of proc
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
    procExec,
    procId,
    numberOfSteps,
    ModifyTableState.values());

  // cf2 should not be present
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, "cf1");
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 37, Source: TestModifyTableProcedure.java

Example 4: testRollbackAndDoubleExecutionAfterPONR

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionAfterPONR() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR");
  final String familyToAddName = "cf2";
  final String familyToRemove = "cf1";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, familyToRemove);
  UTIL.getHBaseAdmin().disableTable(tableName);

  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd.setCompactionEnabled(!htd.isCompactionEnabled());
  htd.addFamily(new HColumnDescriptor(familyToAddName));
  htd.removeFamily(familyToRemove.getBytes());
  htd.setRegionReplication(3);

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Failing after MODIFY_TABLE_DELETE_FS_LAYOUT should not trigger the rollback.
  // NOTE: the 5 (ordinal of MODIFY_TABLE_DELETE_FS_LAYOUT + 1) is hardcoded,
  //       so revisit this test whenever a new step is added.
  int numberOfSteps = 5;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR(
    procExec,
    procId,
    numberOfSteps,
    ModifyTableState.values());

  // "cf2" should be added and "cf1" should be removed
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, false, familyToAddName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 40, Source: TestModifyTableProcedure.java

Example 5: testRecoveryAndDoubleExecutionOffline

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout=60000)
public void testRecoveryAndDoubleExecutionOffline() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline");
  final String cf2 = "cf2";
  final String cf3 = "cf3";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, "cf1", cf3);
  UTIL.getHBaseAdmin().disableTable(tableName);

  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Modify multiple properties of the table.
  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(cf2));
  htd.removeFamily(cf3.getBytes());
  htd.setRegionReplication(3);

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and execute the step twice
  int numberOfSteps = ModifyTableState.values().length;
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
    procExec,
    procId,
    numberOfSteps,
    ModifyTableState.values());

  // Validate descriptor
  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
  assertEquals(2, currentHtd.getFamiliesKeys().size());

  // cf2 should be added; cf3 should be removed
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, false, "cf1", cf2);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 45, Source: TestModifyTableProcedure.java

Example 6: testThroughputTuning

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/**
 * Test the tuning task of {@link PressureAwareCompactionThroughputController}
 */
@Test
public void testThroughputTuning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
    20L * 1024 * 1024);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
    10L * 1024 * 1024);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 4);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 6);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareCompactionThroughputController.class.getName());
  conf.setInt(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD,
    1000);
  TEST_UTIL.startMiniCluster(1);
  Connection conn = ConnectionFactory.createConnection(conf);
  try {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(family));
    htd.setCompactionEnabled(false);
    TEST_UTIL.getHBaseAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(tableName);
    HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
    PressureAwareCompactionThroughputController throughputController =
        (PressureAwareCompactionThroughputController) regionServer.compactSplitThread
            .getCompactionThroughputController();
    assertEquals(10L * 1024 * 1024, throughputController.maxThroughput, EPSILON);
    Table table = conn.getTable(tableName);
    for (int i = 0; i < 5; i++) {
      table.put(new Put(Bytes.toBytes(i)).add(family, qualifier, new byte[0]));
      TEST_UTIL.flush(tableName);
    }
    Thread.sleep(2000);
    assertEquals(15L * 1024 * 1024, throughputController.maxThroughput, EPSILON);

    table.put(new Put(Bytes.toBytes(5)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    Thread.sleep(2000);
    assertEquals(20L * 1024 * 1024, throughputController.maxThroughput, EPSILON);

    table.put(new Put(Bytes.toBytes(6)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    Thread.sleep(2000);
    assertEquals(Double.MAX_VALUE, throughputController.maxThroughput, EPSILON);

    conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
      NoLimitCompactionThroughputController.class.getName());
    regionServer.compactSplitThread.onConfigurationChange(conf);
    assertTrue(throughputController.isStopped());
    assertTrue(regionServer.compactSplitThread.getCompactionThroughputController() instanceof NoLimitCompactionThroughputController);
  } finally {
    conn.close();
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 62, Source: TestCompactionWithThroughputController.java

Example 7: testGetCompactionPressureForStripedStore

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/**
 * Test the logic by which compaction pressure is calculated for a striped store.
 */
@Test
public void testGetCompactionPressureForStripedStore() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName());
  conf.setBoolean(StripeStoreConfig.FLUSH_TO_L0_KEY, false);
  conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, 2);
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 4);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 12);
  TEST_UTIL.startMiniCluster(1);
  Connection conn = ConnectionFactory.createConnection(conf);
  try {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(family));
    htd.setCompactionEnabled(false);
    TEST_UTIL.getHBaseAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(tableName);
    HStore store = (HStore) getStoreWithName(tableName);
    assertEquals(0, store.getStorefilesCount());
    assertEquals(0.0, store.getCompactionPressure(), EPSILON);
    Table table = conn.getTable(tableName);
    for (int i = 0; i < 4; i++) {
      table.put(new Put(Bytes.toBytes(i)).add(family, qualifier, new byte[0]));
      table.put(new Put(Bytes.toBytes(100 + i)).add(family, qualifier, new byte[0]));
      TEST_UTIL.flush(tableName);
    }
    assertEquals(8, store.getStorefilesCount());
    assertEquals(0.0, store.getCompactionPressure(), EPSILON);

    table.put(new Put(Bytes.toBytes(4)).add(family, qualifier, new byte[0]));
    table.put(new Put(Bytes.toBytes(104)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    assertEquals(10, store.getStorefilesCount());
    assertEquals(0.5, store.getCompactionPressure(), EPSILON);

    table.put(new Put(Bytes.toBytes(5)).add(family, qualifier, new byte[0]));
    table.put(new Put(Bytes.toBytes(105)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    assertEquals(12, store.getStorefilesCount());
    assertEquals(1.0, store.getCompactionPressure(), EPSILON);

    table.put(new Put(Bytes.toBytes(6)).add(family, qualifier, new byte[0]));
    table.put(new Put(Bytes.toBytes(106)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    assertEquals(14, store.getStorefilesCount());
    assertEquals(2.0, store.getCompactionPressure(), EPSILON);
  } finally {
    conn.close();
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 54, Source: TestCompactionWithThroughputController.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.setCompactionEnabled method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.