This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.deleteTable. If you have been wondering what exactly HBaseTestingUtility.deleteTable does and how to use it, the curated examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.HBaseTestingUtility.
Seven code examples of HBaseTestingUtility.deleteTable are shown below, ordered by popularity by default.
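Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: deleteTable is called defensively inside a try/catch to clear any leftover table before creating a fresh one, and again during cleanup once the test is done. The table name and the FAMILY column-family constant below are hypothetical placeholders introduced for this sketch only, not part of the examples that follow.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteTableSketch {
  // hypothetical column family used only for this sketch
  private static final byte[] FAMILY = Bytes.toBytes("f1");

  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    TableName tableName = TableName.valueOf("deleteTableSketch");
    try {
      util.deleteTable(tableName); // clear any leftover table from a previous run
    } catch (Exception ex) {
      // ignore: the table may not exist yet
    }
    util.createTable(tableName, FAMILY);
    try {
      // ... exercise the table under test ...
    } finally {
      util.deleteTable(tableName); // disables the table if needed, then drops it
      util.shutdownMiniCluster();
    }
  }
}

Because deleteTable disables the table before dropping it, the examples below can call it unconditionally in their finally blocks.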
Example 1: testWithMockedMapReduce
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the class the method depends on
@Override
protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
    int numRegions, int expectedNumSplits) throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testWithMockedMapReduce");
  try {
    createTableAndSnapshot(
      util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);

    JobConf job = new JobConf(util.getConfiguration());
    Path tmpTableDir = util.getRandomDir();

    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
      COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, job, false, tmpTableDir);

    // mapred doesn't support start and end keys? o.O
    verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());
  } finally {
    util.getHBaseAdmin().deleteSnapshot(snapshotName);
    util.deleteTable(tableName);
    tearDownCluster();
  }
}
Example 2: testWithMockedMapReduce
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the class the method depends on
@Override
public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
    int numRegions, int expectedNumSplits) throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testWithMockedMapReduce");
  try {
    createTableAndSnapshot(
      util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);

    Job job = new Job(util.getConfiguration());
    Path tmpTableDir = util.getRandomDir();
    Scan scan = new Scan(getStartRow(), getEndRow()); // limit the scan

    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
      scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, job, false, tmpTableDir);

    verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());
  } finally {
    util.getHBaseAdmin().deleteSnapshot(snapshotName);
    util.deleteTable(tableName);
    tearDownCluster();
  }
}
Example 3: doTestWithMapReduce
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the class the method depends on
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {
  // create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    JobConf jobConf = new JobConf(util.getConfiguration());

    jobConf.setJarByClass(util.getClass());
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
      TestTableSnapshotInputFormat.class);

    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
      TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, jobConf, true, tableDir);

    jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    jobConf.setNumReduceTasks(1);
    jobConf.setOutputFormat(NullOutputFormat.class);

    RunningJob job = JobClient.runJob(jobConf);
    Assert.assertTrue(job.isSuccessful());
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
Example 4: createTableAndSnapshot
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the class the method depends on
public static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
    String snapshotName, int numRegions) throws Exception {
  try {
    util.deleteTable(tableName);
  } catch (Exception ex) {
    // ignore: the table may not exist yet
  }

  if (numRegions > 1) {
    // bbb and yyy are byte[] row-key boundaries defined elsewhere in the test class
    util.createTable(tableName, FAMILIES, 1, bbb, yyy, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getHBaseAdmin();

  // put some stuff in the table
  HTable table = new HTable(util.getConfiguration(), tableName);
  util.loadTable(table, FAMILIES);

  Path rootDir = FSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());

  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
    Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);

  // load different values
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);

  // cause flush to create new files in the region
  admin.flush(tableName);
  table.close();
}
Example 5: testScanner
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the class the method depends on
private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions,
    boolean shutdownCluster) throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testScanner");
  try {
    createTableAndSnapshot(util, tableName, snapshotName, numRegions);
    if (shutdownCluster) {
      util.shutdownMiniHBaseCluster();
    }

    Path restoreDir = util.getDataTestDirOnTestFS(snapshotName);
    Scan scan = new Scan(bbb, yyy); // limit the scan

    TableSnapshotScanner scanner = new TableSnapshotScanner(util.getConfiguration(), restoreDir,
      snapshotName, scan);

    verifyScanner(scanner, bbb, yyy);
    scanner.close();
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
      tearDownCluster();
    }
  }
}
Example 6: doTestWithMapReduce
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the class the method depends on
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {
  // create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    Job job = new Job(util.getConfiguration());
    Scan scan = new Scan(startRow, endRow); // limit the scan

    job.setJarByClass(util.getClass());
    TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
      TestTableSnapshotInputFormat.class);

    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
      scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, job, true, tableDir);

    job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    job.setNumReduceTasks(1);
    job.setOutputFormatClass(NullOutputFormat.class);

    Assert.assertTrue(job.waitForCompletion(true));
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
Example 7: createTableAndSnapshot
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the class the method depends on
protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, int numRegions)
    throws Exception {
  try {
    util.deleteTable(tableName);
  } catch (Exception ex) {
    // ignore: the table may not exist yet
  }

  if (numRegions > 1) {
    util.createTable(tableName, FAMILIES, 1, startRow, endRow, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getHBaseAdmin();

  // put some stuff in the table
  HTable table = new HTable(util.getConfiguration(), tableName);
  util.loadTable(table, FAMILIES);

  Path rootDir = FSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());

  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
    Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);

  // load different values
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);

  // cause flush to create new files in the region
  admin.flush(tableName);
  table.close();
}