This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HBaseAdmin.majorCompact. If you have been wondering what HBaseAdmin.majorCompact does, how to call it, or what real uses of it look like, the curated code examples here may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.HBaseAdmin.
The sections below show 15 code examples of HBaseAdmin.majorCompact, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Java code examples.
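Before the examples, here is a minimal, self-contained sketch of calling HBaseAdmin.majorCompact directly. It is not taken from any example below; it assumes an HBase 0.94/0.98-era client (where HBaseAdmin is still the public admin API) with an hbase-site.xml on the classpath, and the table name "mytable" is purely illustrative. Note that majorCompact is asynchronous: it only queues the compaction request.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class MajorCompactSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Queue a major compaction of every region of the table (returns immediately).
      admin.majorCompact("mytable");

      // Alternatively, target one region at a time by its region name.
      List<HRegionInfo> regions = admin.getTableRegions(Bytes.toBytes("mytable"));
      for (HRegionInfo region : regions) {
        admin.majorCompact(region.getRegionName());
      }
    } finally {
      admin.close();
    }
  }
}

Because the call returns before the compaction actually runs, the examples below wait for completion by polling the region server's compaction queue size or the number of store files in the store directory.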
Example 1: compactAndWait
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
private void compactAndWait() throws IOException, InterruptedException {
  LOG.debug("Compacting table " + tableName);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  admin.majorCompact(tableName);
  // Waiting for the compaction to start, at least .5s.
  final long maxWaitime = System.currentTimeMillis() + 500;
  boolean cont;
  do {
    cont = rs.compactSplitThread.getCompactionQueueSize() == 0;
    Threads.sleep(1);
  } while (cont && System.currentTimeMillis() < maxWaitime);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(1);
  }
  LOG.debug("Compaction queue size reached 0, continuing");
}
Example 2: perform
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  HBaseAdmin admin = util.getHBaseAdmin();
  boolean major = RandomUtils.nextInt(100) < majorRatio;
  LOG.info("Performing action: Compact table " + tableName + ", major=" + major);
  try {
    if (major) {
      admin.majorCompact(tableNameBytes);
    } else {
      admin.compact(tableNameBytes);
    }
  } catch (Exception ex) {
    LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
Example 3: loadTest
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Test(timeout=TIMEOUT_MS)
public void loadTest() throws Exception {
  HBaseAdmin admin = new HBaseAdmin(conf);
  compression = Compression.Algorithm.GZ; // used for table setup
  super.loadTest();
  HColumnDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
  HTable t = new HTable(this.conf, TABLE);
  assertAllOnLine(t);
  admin.disableTable(TABLE);
  admin.modifyColumn(TABLE, hcd);
  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);
  System.err.println("\nNew column descriptor: " +
      getColumnDesc(admin) + "\n");
  // The table may not have all regions on line yet. Assert online before
  // moving to major compact.
  assertAllOnLine(t);
  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE.getName());
  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }
  System.err.println("\nDone with the test, shutting down the cluster\n");
}
Example 4: compactAndBlockUntilDone
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
public static void compactAndBlockUntilDone(HBaseAdmin admin, HRegionServer rs, byte[] regionName)
    throws IOException, InterruptedException {
  log("Compacting region: " + Bytes.toStringBinary(regionName));
  admin.majorCompact(regionName);
  log("blocking until compaction is complete: " + Bytes.toStringBinary(regionName));
  Threads.sleepWithoutInterrupt(500);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }
}
Example 5: loadTest
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Test(timeout=TIMEOUT_MS)
public void loadTest() throws Exception {
  HBaseAdmin admin = new HBaseAdmin(conf);
  compression = Compression.Algorithm.GZ; // used for table setup
  super.loadTest();
  HColumnDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " +
      hcd + "\n");
  admin.disableTable(TABLE);
  hcd.setEncodeOnDisk(false);
  admin.modifyColumn(TABLE, hcd);
  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);
  System.err.println("\nNew column descriptor: " +
      getColumnDesc(admin) + "\n");
  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE);
  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }
  System.err.println("\nDone with the test, shutting down the cluster\n");
}
Example 6: compactTable
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
private void compactTable(String tableName)
    throws IOException, InterruptedException
{
  AbstractHBaseUtils hbaseUtils = dt.getHbaseUtils();
  HBaseAdmin admin = new HBaseAdmin(hbaseUtils.getHbcfg());
  admin.flush(tableName);
  admin.majorCompact(tableName);
  admin.close();
}
Example 7: perform
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  HBaseAdmin admin = util.getHBaseAdmin();
  boolean major = RandomUtils.nextInt(100) < majorRatio;
  LOG.info("Performing action: Compact random region of table "
      + tableName + ", major=" + major);
  List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
  if (regions == null || regions.isEmpty()) {
    LOG.info("Table " + tableName + " doesn't have regions to compact");
    return;
  }
  HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
      regions.toArray(new HRegionInfo[regions.size()]));
  try {
    if (major) {
      LOG.debug("Major compacting region " + region.getRegionNameAsString());
      admin.majorCompact(region.getRegionName());
    } else {
      LOG.debug("Compacting region " + region.getRegionNameAsString());
      admin.compact(region.getRegionName());
    }
  } catch (Exception ex) {
    LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
Example 8: testExcludeAllFromMinorCompaction
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  try {
    util.setJobWithoutMRCluster();
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));
    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);
    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }
    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));
    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);
    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }
    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
  } finally {
    util.shutdownMiniCluster();
  }
}
Example 9: testExcludeMinorCompaction
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  try {
    util.setJobWithoutMRCluster();
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getTestFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));
    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);
    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    runIncrementalPELoad(conf, table, testDir);
    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));
    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);
    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }
    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
  } finally {
    util.shutdownMiniCluster();
  }
}
Example 10: testExcludeAllFromMinorCompaction
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));
    // deep inspection: get the StoreFile dir
    final Path storePath = Store.getStoreHomedir(
        HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);
    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDir("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }
    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));
    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);
    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }
    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
Example 11: testExcludeMinorCompaction
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  Path testDir = util.getDataTestDir("testExcludeMinorCompaction");
  generateRandomStartKeys(5);
  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));
    // deep inspection: get the StoreFile dir
    final Path storePath = Store.getStoreHomedir(
        HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);
    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME);
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);
    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));
    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);
    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }
    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
Example 12: testExcludeAllFromMinorCompaction
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));
    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
            Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);
    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }
    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));
    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);
    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }
    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
Example 13: testExcludeMinorCompaction
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  try {
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));
    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
            Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);
    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);
    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));
    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);
    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }
    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
Example 14: testExcludeAllFromMinorCompaction
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));
    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);
    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }
    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));
    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);
    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }
    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
Example 15: testExcludeMinorCompaction
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  try {
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));
    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);
    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);
    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));
    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);
    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }
    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}