This page collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.setCompactionEnabled. If you are wondering what HTableDescriptor.setCompactionEnabled does or how to use it, the curated code samples below should help. You can also explore further usage examples of its declaring class, org.apache.hadoop.hbase.HTableDescriptor.
The sections below present 7 code examples of HTableDescriptor.setCompactionEnabled, ordered by popularity by default.
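Before diving into the examples, here is a minimal sketch of the method's role: setCompactionEnabled(false) records in the table descriptor that automatic compactions should be skipped for the table, and the setting takes effect when the descriptor is created or re-applied through the Admin API. This is a hedged, self-contained illustration assuming an HBase 1.x-style client (where HTableDescriptor is current API); the table and family names are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionToggleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tableName = TableName.valueOf("demo_table"); // hypothetical table name
      // Create the table with automatic compactions disabled.
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("cf"));
      htd.setCompactionEnabled(false);
      admin.createTable(htd);
      // Later: re-enable compactions by re-applying a modified descriptor.
      HTableDescriptor current = new HTableDescriptor(admin.getTableDescriptor(tableName));
      current.setCompactionEnabled(true);
      admin.modifyTable(tableName, current);
    }
  }
}

Several of the test examples below rely on exactly this pattern: they disable compactions so that flushed store files accumulate predictably, or toggle the flag to verify that a table-modification procedure persists it.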
Example 1: testRecoveryAndDoubleExecutionOnline
import org.apache.hadoop.hbase.HTableDescriptor; // import for the method's declaring class
@Test(timeout = 60000)
public void testRecoveryAndDoubleExecutionOnline() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
  final String cf2 = "cf2";
  final String cf3 = "cf3";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // Create the table.
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
      procExec, tableName, null, "cf1", cf3);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Modify multiple properties of the table.
  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(cf2));
  htd.removeFamily(cf3.getBytes());

  // Start the Modify procedure and kill the executor.
  long procId = procExec.submitProcedure(
      new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and execute each step twice.
  int numberOfSteps = ModifyTableState.values().length;
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
      ModifyTableState.values());

  // Validate the descriptor: cf2 should have been added and cf3 removed.
  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
  assertEquals(2, currentHtd.getFamiliesKeys().size());
  assertTrue(currentHtd.hasFamily(cf2.getBytes()));
  assertFalse(currentHtd.hasFamily(cf3.getBytes()));
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
      tableName, regions, "cf1", cf2);
}
Example 2: testRollbackAndDoubleExecutionOnline
import org.apache.hadoop.hbase.HTableDescriptor; // import for the method's declaring class
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionOnline() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionOnline");
  final String familyName = "cf2";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // Create the table.
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
      procExec, tableName, null, "cf1");
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd.setCompactionEnabled(!htd.isCompactionEnabled());
  htd.addFamily(new HColumnDescriptor(familyName));

  // Start the Modify procedure and kill the executor.
  long procId = procExec.submitProcedure(
      new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and roll back each step twice, failing in the middle of the procedure.
  int numberOfSteps = ModifyTableState.values().length - 4;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
      procExec, procId, numberOfSteps, ModifyTableState.values());

  // cf2 should not be present.
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
      tableName, regions, "cf1");
}
Example 3: testRollbackAndDoubleExecutionOffline
import org.apache.hadoop.hbase.HTableDescriptor; // import for the method's declaring class
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionOffline() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionOffline");
  final String familyName = "cf2";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // Create the table, then take it offline.
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
      procExec, tableName, null, "cf1");
  UTIL.getHBaseAdmin().disableTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd.setCompactionEnabled(!htd.isCompactionEnabled());
  htd.addFamily(new HColumnDescriptor(familyName));
  htd.setRegionReplication(3);

  // Start the Modify procedure and kill the executor.
  long procId = procExec.submitProcedure(
      new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and roll back each step twice, failing in the middle of the procedure.
  int numberOfSteps = ModifyTableState.values().length - 4;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
      procExec, procId, numberOfSteps, ModifyTableState.values());

  // cf2 should not be present.
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
      tableName, regions, "cf1");
}
Example 4: testRollbackAndDoubleExecutionAfterPONR
import org.apache.hadoop.hbase.HTableDescriptor; // import for the method's declaring class
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionAfterPONR() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR");
  final String familyToAddName = "cf2";
  final String familyToRemove = "cf1";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // Create the table, then take it offline.
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
      procExec, tableName, null, familyToRemove);
  UTIL.getHBaseAdmin().disableTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd.setCompactionEnabled(!htd.isCompactionEnabled());
  htd.addFamily(new HColumnDescriptor(familyToAddName));
  htd.removeFamily(familyToRemove.getBytes());
  htd.setRegionReplication(3);

  // Start the Modify procedure and kill the executor.
  long procId = procExec.submitProcedure(
      new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Failing after MODIFY_TABLE_DELETE_FS_LAYOUT should not trigger the rollback.
  // NOTE: the 5 (ordinal of MODIFY_TABLE_DELETE_FS_LAYOUT plus one) is hardcoded,
  // so revisit this test whenever a new step is added to the procedure.
  int numberOfSteps = 5;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR(
      procExec, procId, numberOfSteps, ModifyTableState.values());

  // "cf2" should be added and "cf1" should be removed.
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
      tableName, regions, false, familyToAddName);
}
Example 5: testRecoveryAndDoubleExecutionOffline
import org.apache.hadoop.hbase.HTableDescriptor; // import for the method's declaring class
@Test(timeout = 60000)
public void testRecoveryAndDoubleExecutionOffline() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline");
  final String cf2 = "cf2";
  final String cf3 = "cf3";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // Create the table, then take it offline.
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
      procExec, tableName, null, "cf1", cf3);
  UTIL.getHBaseAdmin().disableTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Modify multiple properties of the table.
  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(cf2));
  htd.removeFamily(cf3.getBytes());
  htd.setRegionReplication(3);

  // Start the Modify procedure and kill the executor.
  long procId = procExec.submitProcedure(
      new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and execute each step twice.
  int numberOfSteps = ModifyTableState.values().length;
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
      procExec, procId, numberOfSteps, ModifyTableState.values());

  // Validate the descriptor: cf2 should have been added and cf3 removed.
  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
  assertEquals(2, currentHtd.getFamiliesKeys().size());
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
      tableName, regions, false, "cf1", cf2);
}
Example 6: testThroughputTuning
import org.apache.hadoop.hbase.HTableDescriptor; // import for the method's declaring class
/**
 * Test the tuning task of {@link PressureAwareCompactionThroughputController}.
 */
@Test
public void testThroughputTuning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setLong(
      PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
      20L * 1024 * 1024);
  conf.setLong(
      PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
      10L * 1024 * 1024);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 4);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 6);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
      PressureAwareCompactionThroughputController.class.getName());
  conf.setInt(
      PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD,
      1000);
  TEST_UTIL.startMiniCluster(1);
  Connection conn = ConnectionFactory.createConnection(conf);
  try {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(family));
    htd.setCompactionEnabled(false);
    TEST_UTIL.getHBaseAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(tableName);
    HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
    PressureAwareCompactionThroughputController throughputController =
        (PressureAwareCompactionThroughputController) regionServer.compactSplitThread
            .getCompactionThroughputController();
    assertEquals(10L * 1024 * 1024, throughputController.maxThroughput, EPSILON);
    Table table = conn.getTable(tableName);
    for (int i = 0; i < 5; i++) {
      table.put(new Put(Bytes.toBytes(i)).add(family, qualifier, new byte[0]));
      TEST_UTIL.flush(tableName);
    }
    Thread.sleep(2000);
    assertEquals(15L * 1024 * 1024, throughputController.maxThroughput, EPSILON);
    table.put(new Put(Bytes.toBytes(5)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    Thread.sleep(2000);
    assertEquals(20L * 1024 * 1024, throughputController.maxThroughput, EPSILON);
    table.put(new Put(Bytes.toBytes(6)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    Thread.sleep(2000);
    assertEquals(Double.MAX_VALUE, throughputController.maxThroughput, EPSILON);
    conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
        NoLimitCompactionThroughputController.class.getName());
    regionServer.compactSplitThread.onConfigurationChange(conf);
    assertTrue(throughputController.isStopped());
    assertTrue(regionServer.compactSplitThread.getCompactionThroughputController()
        instanceof NoLimitCompactionThroughputController);
  } finally {
    conn.close();
    TEST_UTIL.shutdownMiniCluster();
  }
}
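The asserted limits follow a simple interpolation performed by the controller's periodic tune task. The helper below is a hedged reconstruction of that rule, not an actual HBase method (the name tunedMaxThroughput is hypothetical): compaction pressure rises linearly from 0 at hbase.hstore.compaction.min (4 here) to 1 at hbase.hstore.blockingStoreFiles (6 here), the throughput cap is interpolated between the configured lower and higher bounds, and once pressure exceeds 1 the cap is lifted entirely.

// Hypothetical helper mirroring the tuning behavior the assertions above exercise.
static double tunedMaxThroughput(int storefiles, int minToCompact, int blockingFiles,
    double lowerBound, double higherBound) {
  // Pressure rises linearly from 0 (at the compaction-min count) to 1 (at the blocking count).
  double pressure = (storefiles - minToCompact) / (double) (blockingFiles - minToCompact);
  if (pressure > 1.0) {
    return Double.MAX_VALUE; // past the blocking threshold: stop throttling compactions
  }
  if (pressure < 0.0) {
    pressure = 0.0; // below the compaction minimum: throttle hardest
  }
  return lowerBound + (higherBound - lowerBound) * pressure; // linear interpolation
}

With min = 4, blocking = 6, and bounds of 10 and 20 MB/s, this yields 15 MB/s at 5 store files, 20 MB/s at 6, and no limit at 7, matching the three assertions after each flush-and-sleep cycle.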
Example 7: testGetCompactionPressureForStripedStore
import org.apache.hadoop.hbase.HTableDescriptor; // import for the method's declaring class
/**
 * Test the logic by which we calculate compaction pressure for a striped store.
 */
@Test
public void testGetCompactionPressureForStripedStore() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName());
  conf.setBoolean(StripeStoreConfig.FLUSH_TO_L0_KEY, false);
  conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, 2);
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 4);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 12);
  TEST_UTIL.startMiniCluster(1);
  Connection conn = ConnectionFactory.createConnection(conf);
  try {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(family));
    htd.setCompactionEnabled(false);
    TEST_UTIL.getHBaseAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(tableName);
    HStore store = (HStore) getStoreWithName(tableName);
    assertEquals(0, store.getStorefilesCount());
    assertEquals(0.0, store.getCompactionPressure(), EPSILON);
    Table table = conn.getTable(tableName);
    for (int i = 0; i < 4; i++) {
      table.put(new Put(Bytes.toBytes(i)).add(family, qualifier, new byte[0]));
      table.put(new Put(Bytes.toBytes(100 + i)).add(family, qualifier, new byte[0]));
      TEST_UTIL.flush(tableName);
    }
    assertEquals(8, store.getStorefilesCount());
    assertEquals(0.0, store.getCompactionPressure(), EPSILON);
    table.put(new Put(Bytes.toBytes(4)).add(family, qualifier, new byte[0]));
    table.put(new Put(Bytes.toBytes(104)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    assertEquals(10, store.getStorefilesCount());
    assertEquals(0.5, store.getCompactionPressure(), EPSILON);
    table.put(new Put(Bytes.toBytes(5)).add(family, qualifier, new byte[0]));
    table.put(new Put(Bytes.toBytes(105)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    assertEquals(12, store.getStorefilesCount());
    assertEquals(1.0, store.getCompactionPressure(), EPSILON);
    table.put(new Put(Bytes.toBytes(6)).add(family, qualifier, new byte[0]));
    table.put(new Put(Bytes.toBytes(106)).add(family, qualifier, new byte[0]));
    TEST_UTIL.flush(tableName);
    assertEquals(14, store.getStorefilesCount());
    assertEquals(2.0, store.getCompactionPressure(), EPSILON);
  } finally {
    conn.close();
    TEST_UTIL.shutdownMiniCluster();
  }
}