This page collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster. If you are wondering what HBaseTestingUtility.shutdownMiniHBaseCluster does, how to call it, or what real usages look like, the curated examples below may help. You can also explore further usages of its declaring class, org.apache.hadoop.hbase.HBaseTestingUtility.
Below are 4 code examples of HBaseTestingUtility.shutdownMiniHBaseCluster, sorted by popularity by default.
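Before the examples, here is a minimal, illustrative sketch (not taken from the examples below) of where shutdownMiniHBaseCluster fits in a test's lifecycle: it stops only the HBase portion of the mini cluster, while the backing mini DFS and ZooKeeper stay up, which is exactly what the snapshot tests below rely on. The class name is hypothetical, and the overloads used are assumptions based on the HBase 1.x-style APIs that appear in these examples (e.g. getHBaseAdmin(), startMiniHBaseCluster(int, int)); treat it as a sketch rather than a canonical recipe.
import org.apache.hadoop.hbase.HBaseTestingUtility;
// Hypothetical test harness class, for illustration only.
public class ShutdownMiniHBaseClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();             // starts mini DFS, ZooKeeper and HBase
    try {
      // ... exercise the live cluster: create tables, take snapshots, etc. ...
      util.shutdownMiniHBaseCluster();   // stops HBase only; DFS and ZooKeeper keep running
      // ... read snapshot/HFile data directly, with no region servers running ...
      util.startMiniHBaseCluster(1, 1);  // bring HBase back up: 1 master, 1 region server
    } finally {
      util.shutdownMiniCluster();        // tears down the whole mini cluster
    }
  }
}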
Example 1: doTestWithMapReduce
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class this method depends on
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {
  // create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);
  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }
  try {
    // create the job
    JobConf jobConf = new JobConf(util.getConfiguration());
    jobConf.setJarByClass(util.getClass());
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
        TestTableSnapshotInputFormat.class);
    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
        TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, jobConf, true, tableDir);
    jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    jobConf.setNumReduceTasks(1);
    jobConf.setOutputFormat(NullOutputFormat.class);
    RunningJob job = JobClient.runJob(jobConf);
    Assert.assertTrue(job.isSuccessful());
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
Example 2: testScanner
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class this method depends on
private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions,
    boolean shutdownCluster) throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testScanner");
  try {
    createTableAndSnapshot(util, tableName, snapshotName, numRegions);
    if (shutdownCluster) {
      util.shutdownMiniHBaseCluster();
    }
    Path restoreDir = util.getDataTestDirOnTestFS(snapshotName);
    Scan scan = new Scan(bbb, yyy); // limit the scan
    TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir,
        snapshotName, scan);
    verifyScanner(scanner, bbb, yyy);
    scanner.close();
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
      tearDownCluster();
    }
  }
}
Example 3: doTestWithMapReduce
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class this method depends on
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {
  // create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);
  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }
  try {
    // create the job
    Job job = new Job(util.getConfiguration());
    Scan scan = new Scan(startRow, endRow); // limit the scan
    job.setJarByClass(util.getClass());
    TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
        TestTableSnapshotInputFormat.class);
    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
        scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, job, true, tableDir);
    job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    job.setNumReduceTasks(1);
    job.setOutputFormatClass(NullOutputFormat.class);
    Assert.assertTrue(job.waitForCompletion(true));
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
Example 4: testShouldCheckMasterFailOverWhenMETAIsInOpenedState
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class this method depends on
@Test (timeout=180000)
public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
    throws Exception {
  LOG.info("Starting testShouldCheckMasterFailOverWhenMETAIsInOpenedState");
  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;
  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.master.info.port", -1);
  conf.setBoolean("hbase.assignment.usezk", true);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  // Find regionserver carrying meta.
  List<RegionServerThread> regionServerThreads =
      cluster.getRegionServerThreads();
  Region metaRegion = null;
  HRegionServer metaRegionServer = null;
  for (RegionServerThread regionServerThread : regionServerThreads) {
    HRegionServer regionServer = regionServerThread.getRegionServer();
    metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
    regionServer.abort("");
    if (null != metaRegion) {
      metaRegionServer = regionServer;
      break;
    }
  }
  TEST_UTIL.shutdownMiniHBaseCluster();
  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw =
      HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
          metaRegion, metaRegionServer.getServerName());
  LOG.info("Starting cluster for second time");
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  while (!master.isInitialized()) {
    Thread.sleep(100);
  }
  // Failover should be completed, now wait for no RIT
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  zkw.close();
  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}