本文整理匯總了Java中org.apache.hadoop.hbase.client.HBaseAdmin.isTableEnabled方法的典型用法代碼示例。如果您正苦於以下問題:Java HBaseAdmin.isTableEnabled方法的具體用法?Java HBaseAdmin.isTableEnabled怎麽用?Java HBaseAdmin.isTableEnabled使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.hbase.client.HBaseAdmin
的用法示例。
在下文中一共展示了HBaseAdmin.isTableEnabled方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: deleteTable
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Deletes {@code tablename} in preparation for the next test: disables it first
 * (if enabled), waits for the disable to complete, then drops it.
 *
 * @param tablename name of the table to delete
 * @throws IOException if any admin operation fails
 */
void deleteTable(String tablename) throws IOException {
  HBaseAdmin admin = new HBaseAdmin(conf);
  try {
    admin.getConnection().clearRegionCache();
    byte[] tbytes = Bytes.toBytes(tablename);
    if (admin.isTableEnabled(tbytes)) {
      admin.disableTableAsync(tbytes);
    }
    // disableTableAsync returns immediately; poll until the disable completes.
    // NOTE(review): no upper bound on the wait — callers should use a test timeout.
    while (!admin.isTableDisabled(tbytes)) {
      try {
        Thread.sleep(250);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // fix: restore interrupt status
        e.printStackTrace();
        fail("Interrupted when trying to disable table " + tablename);
      }
    }
    admin.deleteTable(tbytes);
  } finally {
    admin.close(); // fix: the admin (and its connection resources) was leaked
  }
}
示例2: createTableAndColumn
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Creates {@code table} with a single column family {@code columnFamily},
 * dropping any pre-existing table of the same name first.
 *
 * @param conf cluster configuration used to build the admin connection
 * @param table table name to (re)create
 * @param columnFamily the single column family for the new table
 * @throws IOException if any admin operation fails
 */
public static void createTableAndColumn(Configuration conf,
                                        String table,
                                        byte[] columnFamily)
    throws IOException {
  HBaseAdmin hbase = new HBaseAdmin(conf);
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    HColumnDescriptor meta = new HColumnDescriptor(columnFamily);
    desc.addFamily(meta);
    // Drop any pre-existing table; it must be disabled before deletion.
    if (hbase.tableExists(table)) {
      if (hbase.isTableEnabled(table)) {
        hbase.disableTable(table);
      }
      hbase.deleteTable(table);
    }
    hbase.createTable(desc);
  } finally {
    hbase.close(); // fix: the admin was never closed (connection leak)
  }
}
示例3: deleteTable
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Deletes {@code tablename} in preparation for the next test: disables it first
 * (if enabled), waits up to 30 seconds for the disable to complete, then drops it.
 *
 * @param tablename table to delete
 * @throws IOException if any admin operation fails
 */
void deleteTable(TableName tablename) throws IOException {
  HBaseAdmin admin = new HBaseAdmin(conf);
  try {
    admin.getConnection().clearRegionCache();
    if (admin.isTableEnabled(tablename)) {
      admin.disableTableAsync(tablename);
    }
    // disableTableAsync returns immediately; poll (bounded) until disabled.
    long totalWait = 0;
    long maxWait = 30 * 1000;
    long sleepTime = 250;
    while (!admin.isTableDisabled(tablename)) {
      try {
        Thread.sleep(sleepTime);
        totalWait += sleepTime;
        if (totalWait >= maxWait) {
          fail("Waited too long for table to be disabled + " + tablename);
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // fix: restore interrupt status
        e.printStackTrace();
        fail("Interrupted when trying to disable table " + tablename);
      }
    }
    admin.deleteTable(tablename);
  } finally {
    admin.close(); // fix: the admin (and its connection resources) was leaked
  }
}
示例4: before
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
@Before
public void before() throws Exception {
  // Start from a clean slate: drop any leftover TABLE from a previous run.
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(TABLE)) {
    if (admin.isTableEnabled(TABLE)) {
      admin.disableTable(TABLE);
    }
    admin.deleteTable(TABLE);
  }
  // Recreate the table with three families, each retaining up to 3 versions.
  HTableDescriptor descriptor = new HTableDescriptor(TABLE);
  descriptor.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
  descriptor.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
  descriptor.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
  admin.createTable(descriptor);
  // Seed the fixture rows: ROW_1 with one cell, ROW_2 with three cells
  // across two families/timestamps.
  HTable table = null;
  try {
    table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
    Put firstRow = new Put(ROW_1);
    firstRow.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
    table.put(firstRow);
    Put secondRow = new Put(ROW_2);
    secondRow.add(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1);
    secondRow.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2);
    secondRow.add(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
    table.put(secondRow);
    table.flushCommits();
  } finally {
    if (table != null) {
      table.close();
    }
  }
  // Point the REST client wrapper at the freshly populated table.
  remoteTable = new RemoteHTable(
      new Client(new Cluster().add("localhost",
          REST_TEST_UTIL.getServletPort())),
      TEST_UTIL.getConfiguration(), TABLE);
}
示例5: doQuarantineTest
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Runs hbck with HFile-quarantine options against {@code table} and verifies the
 * corruption-checker counters, then re-enables the table to prove the repair worked.
 * Tests that use this should have a timeout, because this method could potentially
 * wait forever (the enable-poll loop below is unbounded).
 *
 * @param table    table to corrupt-check and repair
 * @param hbck     the HBaseFsck instance to drive
 * @param check    expected number of HFiles checked
 * @param corrupt  expected number of corrupted HFiles
 * @param fail     expected number of check failures
 * @param quar     expected number of quarantined HFiles
 * @param missing  expected number of missing HFiles
 */
private void doQuarantineTest(String table, HBaseFsck hbck, int check, int corrupt, int fail,
    int quar, int missing) throws Exception {
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    TEST_UTIL.getHBaseAdmin().flush(table); // flush is async.
    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    TEST_UTIL.getHBaseAdmin().disableTable(table);
    String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission", table};
    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
    try {
      HBaseFsck res = hbck.exec(exec, args);
      HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
      // fix: JUnit convention is assertEquals(expected, actual) — args were reversed,
      // which produced misleading failure messages.
      assertEquals(check, hfcc.getHFilesChecked());
      assertEquals(corrupt, hfcc.getCorrupted().size());
      assertEquals(fail, hfcc.getFailures().size());
      assertEquals(quar, hfcc.getQuarantined().size());
      assertEquals(missing, hfcc.getMissing().size());
    } finally {
      exec.shutdown(); // fix: executor was created but never shut down (thread leak)
    }
    // its been fixed, verify that we can enable
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    admin.enableTableAsync(table);
    while (!admin.isTableEnabled(table)) {
      try {
        Thread.sleep(250);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // fix: restore interrupt status
        e.printStackTrace();
        fail("Interrupted when trying to enable table " + table);
      }
    }
  } finally {
    deleteTable(table);
  }
}
示例6: validateTable
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Validates that {@code tableName} refers to existing, enabled table(s).
 * A delimiter-separated list requires every listed table to exist.
 * Exits the JVM if the zookeeper quorum cannot be reached.
 *
 * @throws InvalidTableException if the table does not exist or is not enabled
 */
public static void validateTable(HBaseAdmin admin, String tableName) throws IOException, InterruptedException {
  if (tableName.equals(Args.ALL_TABLES)) return;
  boolean tableExists = false;
  try {
    if (tableName.contains(Constant.TABLE_DELIMITER)) {
      String[] tables = tableName.split(Constant.TABLE_DELIMITER);
      // fix: the original overwrote tableExists on each iteration, so only the
      // LAST table in the list was actually checked. Require ALL to exist.
      tableExists = tables.length > 0;
      for (String table : tables) {
        tableExists = tableExists && admin.tableExists(table);
      }
    } else {
      tableExists = admin.listTables(tableName).length > 0;
    }
  } catch (Exception e) {
    // Admin calls fail fast when the quorum is wrong; report and bail out.
    Thread.sleep(1000);
    System.out.println();
    System.out.println(admin.getConfiguration().get("hbase.zookeeper.quorum") + " is invalid zookeeper quorum");
    System.exit(1);
  }
  if (tableExists) {
    try {
      if (!admin.isTableEnabled(tableName)) {
        throw new InvalidTableException("Table is not enabled.");
      }
    } catch (Exception ignore) {
      // best-effort: isTableEnabled may not accept a delimited list; tolerate it
    }
  } else {
    throw new InvalidTableException("Table does not exist.");
  }
}
示例7: before
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
@Before
public void before() throws Exception {
// Drop any leftover TABLE from a previous run so each test starts clean.
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(TABLE)) {
if (admin.isTableEnabled(TABLE)) admin.disableTable(TABLE);
admin.deleteTable(TABLE);
}
// Recreate the table with three families, each retaining up to 3 versions.
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
admin.createTable(htd);
// Seed fixture rows: ROW_1 gets one cell, ROW_2 gets three cells across
// two families/timestamps.
HTable table = null;
try {
table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
Put put = new Put(ROW_1);
put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
table.put(put);
put = new Put(ROW_2);
put.add(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1);
put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2);
put.add(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
table.put(put);
table.flushCommits();
} finally {
if (null != table) table.close();
}
// Point the REST client wrapper at the freshly populated table.
remoteTable = new RemoteHTable(
new Client(new Cluster().add("localhost",
REST_TEST_UTIL.getServletPort())),
TEST_UTIL.getConfiguration(), TABLE);
}
示例8: modifyTable
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Switches TEST_BENCHMARK's split policy to {@link ConstantSizeRegionSplitPolicy},
 * disabling the table around the schema change and re-enabling it afterwards.
 */
@Test
public void modifyTable() throws Exception {
  String TABLE_NAME = "TEST_BENCHMARK";
  Configuration configuration = createConfiguration();
  HBaseAdmin hbaseAdmin = createHBaseAdmin(configuration);
  try {
    HTableDescriptor htd = hbaseAdmin.getTableDescriptor(Bytes
        .toBytes(TABLE_NAME));
    HTableDescriptor newHtd = new HTableDescriptor(htd);
    newHtd.setValue(HTableDescriptor.SPLIT_POLICY,
        ConstantSizeRegionSplitPolicy.class.getName());
    // The table must be disabled before modifyTable; remember whether we did it.
    boolean disabled = false;
    if (hbaseAdmin.isTableEnabled(TABLE_NAME)) {
      hbaseAdmin.disableTable(TABLE_NAME);
      disabled = true;
    }
    try {
      hbaseAdmin.modifyTable(Bytes.toBytes(TABLE_NAME), newHtd);
    } finally {
      // fix: re-enable even if modifyTable throws, so the table is not left disabled
      if (disabled) {
        hbaseAdmin.enableTable(TABLE_NAME);
      }
    }
    System.out.println(newHtd);
  } finally {
    hbaseAdmin.close(); // fix: the admin was never closed (connection leak)
  }
}
示例9: validateTable
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Validates that {@code tableName} exists and is enabled.
 * The wildcard {@code Args.ALL_TABLES} is always accepted.
 *
 * @throws InvalidTableException if the table is missing or disabled
 */
protected static void validateTable(HBaseAdmin admin, String tableName) throws IOException, InterruptedException {
  if (tableName.equals(Args.ALL_TABLES)) return;
  // Guard clauses: existence first, then enabled state.
  if (!admin.tableExists(tableName)) {
    throw new InvalidTableException("Table does not exist.");
  }
  if (!admin.isTableEnabled(tableName)) {
    throw new InvalidTableException("Table is not enabled.");
  }
}
示例10: deleteTable
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Drops the TEST_BENCHMARK table, disabling it first when needed.
 */
@Test
public void deleteTable() throws Exception {
  final String tableName = "TEST_BENCHMARK";
  Configuration config = createConfiguration();
  HBaseAdmin admin = createHBaseAdmin(config);
  // A table has to be disabled before it can be deleted.
  if (admin.isTableEnabled(tableName)) {
    admin.disableTable(tableName);
  }
  admin.deleteTable(tableName); // TableNotFoundException
  System.out.println("delete table");
}
示例11: tearDown
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Removes the test table (disabling it first when enabled) and restores the cluster.
 */
@After
public void tearDown() throws IOException {
  LOG.info("Cleaning up after test.");
  HBaseAdmin admin = util.getHBaseAdmin();
  boolean present = admin.tableExists(TABLE_NAME);
  if (present) {
    if (admin.isTableEnabled(TABLE_NAME)) {
      admin.disableTable(TABLE_NAME);
    }
    admin.deleteTable(TABLE_NAME);
  }
  LOG.info("Restoring cluster.");
  util.restoreCluster();
  LOG.info("Cluster restored.");
}
示例12: testSplitBeforeSettingSplittingInZK
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Drives a region split that is forced to fail around the creation of the
 * SPLITTING znode, and asserts the znode state plus a successful rollback.
 *
 * @param nodeCreated true: fail AFTER the splitting znode is created (transition
 *                    throws), so the znode should still exist until rollback;
 *                    false: fail DURING znode creation, so no znode should exist.
 */
private void testSplitBeforeSettingSplittingInZK(boolean nodeCreated) throws Exception {
final byte[] tableName = Bytes.toBytes("testSplitBeforeSettingSplittingInZK");
// NOTE(review): this admin is never closed — acceptable in a test, but leaky.
HBaseAdmin admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());
// Create table then get the single region for our new table.
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("cf"));
admin.createTable(htd);
// Poll up to ~10s for the region to come online.
List<HRegion> regions = null;
for (int i=0; i<100; i++) {
regions = cluster.getRegions(tableName);
if (regions.size() > 0) break;
Thread.sleep(100);
}
int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
// Build a split transaction mocked to fail at the chosen point.
SplitTransaction st = null;
if (nodeCreated) {
st = new MockedSplitTransaction(regions.get(0), null) {
@Override
int transitionNodeSplitting(ZooKeeperWatcher zkw, HRegionInfo parent,
ServerName serverName, int version) throws KeeperException, IOException {
throw new TransitionToSplittingFailedException();
}
};
} else {
st = new MockedSplitTransaction(regions.get(0), null) {
@Override
void createNodeSplitting(ZooKeeperWatcher zkw, HRegionInfo region, ServerName serverName)
throws KeeperException, IOException {
throw new SplittingNodeCreationFailedException ();
}
};
}
String node = ZKAssign.getNodeName(regionServer.getZooKeeper(), regions.get(0)
.getRegionInfo().getEncodedName());
// make sure the client is uptodate
regionServer.getZooKeeper().sync(node);
// Wait (bounded) for the master to delete any leftover OPENED znode.
for (int i = 0; i < 100; i++) {
// We expect the znode to be deleted by this time. Here the znode could be in OPENED state and the
// master has not yet deleted the znode.
if (ZKUtil.checkExists(regionServer.getZooKeeper(), node) != -1) {
Thread.sleep(100);
}
}
try {
st.execute(regionServer, regionServer);
} catch (IOException e) {
// check for the specific instance in case the Split failed due to the existence of the znode in OPENED state.
// This will at least make the test to fail;
if (nodeCreated) {
assertTrue("Should be instance of TransitionToSplittingFailedException",
e instanceof TransitionToSplittingFailedException);
} else {
assertTrue("Should be instance of CreateSplittingNodeFailedException",
e instanceof SplittingNodeCreationFailedException );
}
node = ZKAssign.getNodeName(regionServer.getZooKeeper(), regions.get(0)
.getRegionInfo().getEncodedName());
// make sure the client is uptodate
regionServer.getZooKeeper().sync(node);
// nodeCreated => the SPLITTING znode must exist; otherwise it must not.
if (nodeCreated) {
assertFalse(ZKUtil.checkExists(regionServer.getZooKeeper(), node) == -1);
} else {
assertTrue(ZKUtil.checkExists(regionServer.getZooKeeper(), node) == -1);
}
// Rollback must succeed and must remove the znode in both scenarios.
assertTrue(st.rollback(regionServer, regionServer));
assertTrue(ZKUtil.checkExists(regionServer.getZooKeeper(), node) == -1);
}
// Cleanup: drop the test table if it survived.
if (admin.isTableAvailable(tableName) && admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
}
示例13: testShouldClearRITWhenNodeFoundInSplittingState
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Forces a split to fail while creating the split directory, verifies the region
 * shows up in the master's regions-in-transition map in SPLITTING state, then
 * rolls back and asserts both the znode and the RIT entry are cleared.
 */
@Test
public void testShouldClearRITWhenNodeFoundInSplittingState() throws Exception {
final byte[] tableName = Bytes.toBytes("testShouldClearRITWhenNodeFoundInSplittingState");
// NOTE(review): admin.close() below only runs when the table is still
// available/enabled — on other paths the admin leaks. Left as-is here.
HBaseAdmin admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());
// Create table then get the single region for our new table.
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("cf"));
admin.createTable(htd);
// Poll up to ~10s for the region to come online.
for (int i = 0; cluster.getRegions(tableName).size() == 0 && i < 100; i++) {
Thread.sleep(100);
}
assertTrue("Table not online", cluster.getRegions(tableName).size() != 0);
HRegion region = cluster.getRegions(tableName).get(0);
int regionServerIndex = cluster.getServerWith(region.getRegionName());
HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
// Mock the split so createSplitDir always fails with an IOException.
SplitTransaction st = null;
st = new MockedSplitTransaction(region, null) {
@Override
void createSplitDir(FileSystem fs, Path splitdir) throws IOException {
throw new IOException("");
}
};
try {
st.execute(regionServer, regionServer);
} catch (IOException e) {
// The SPLITTING znode must have been created before the failure.
String node = ZKAssign.getNodeName(regionServer.getZooKeeper(), region
.getRegionInfo().getEncodedName());
assertFalse(ZKUtil.checkExists(regionServer.getZooKeeper(), node) == -1);
// Wait (bounded) for the master to register the region in transition.
AssignmentManager am = cluster.getMaster().getAssignmentManager();
for (int i = 0; !am.getRegionsInTransition().containsKey(
region.getRegionInfo().getEncodedName())
&& i < 100; i++) {
Thread.sleep(200);
}
assertTrue("region is not in transition "+region,
am.getRegionsInTransition().containsKey(region.getRegionInfo().getEncodedName()));
RegionState regionState = am.getRegionsInTransition().get(region.getRegionInfo()
.getEncodedName());
assertTrue(regionState.getState() == RegionState.State.SPLITTING);
// Rolling back must delete the znode and eventually clear the RIT entry.
assertTrue(st.rollback(regionServer, regionServer));
assertTrue(ZKUtil.checkExists(regionServer.getZooKeeper(), node) == -1);
for (int i=0; am.getRegionsInTransition().containsKey(region.getRegionInfo().getEncodedName()) && i<100; i++) {
// Just in case the nodeDeleted event did not get executed.
Thread.sleep(200);
}
assertFalse("region is still in transition",
am.getRegionsInTransition().containsKey(region.getRegionInfo().getEncodedName()));
}
// Cleanup: drop the test table if it survived.
if (admin.isTableAvailable(tableName) && admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
admin.close();
}
}
示例14: loadingClassFromLibDirInJar
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Verifies that coprocessor classes packaged inside a {@code libPrefix} directory
 * of an outer jar on HDFS are loaded onto every region of a freshly created table,
 * including per-coprocessor configuration values (k1/k2/k3).
 *
 * @param libPrefix directory prefix inside the outer jar where the inner
 *                  coprocessor jars are placed
 */
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
FileSystem fs = cluster.getFileSystem();
// Build two coprocessor jars and nest them inside outer.jar under libPrefix.
File innerJarFile1 = buildCoprocessorJar(cpName1);
File innerJarFile2 = buildCoprocessorJar(cpName2);
File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
ClassLoaderTestHelper.addJarFilesToJar(
outerJarFile, libPrefix, innerJarFile1, innerJarFile2);
// copy the jars into dfs
fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
new Path(fs.getUri().toString() + Path.SEPARATOR));
String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
outerJarFile.getName();
assertTrue("Copy jar file to HDFS failed.",
fs.exists(new Path(jarFileOnHDFS)));
LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
// create a table that references the coprocessors
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
htd.addFamily(new HColumnDescriptor("test"));
// without configuration values
htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
"|" + Coprocessor.PRIORITY_USER);
// with configuration values
htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
"|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
// Drop any stale copy of the table before creating it fresh.
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
if (admin.tableExists(tableName)) {
if (admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);
}
admin.deleteTable(tableName);
}
admin.createTable(htd);
waitForTable(htd.getTableName());
// verify that the coprocessors were loaded
boolean found1 = false, found2 = false, found2_k1 = false,
found2_k2 = false, found2_k3 = false;
MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
// Scan every online region of the table on RS 0 for both coprocessor envs.
for (HRegion region:
hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
if (region.getRegionNameAsString().startsWith(tableName)) {
CoprocessorEnvironment env;
env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
if (env != null) {
found1 = true;
}
env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
if (env != null) {
found2 = true;
// The k1/k2/k3 settings from COPROCESSOR$2 must reach the env config.
Configuration conf = env.getConfiguration();
found2_k1 = conf.get("k1") != null;
found2_k2 = conf.get("k2") != null;
found2_k3 = conf.get("k3") != null;
}
}
}
assertTrue("Class " + cpName1 + " was missing on a region", found1);
assertTrue("Class " + cpName2 + " was missing on a region", found2);
assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
}
示例15: addColumnFamily
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Adds three identically-configured column families ("seq", "id", "info") to
 * TEST_BENCHMARK, disabling the table before the schema change and re-enabling
 * it afterwards.
 */
@Test
public void addColumnFamily() throws Exception {
  String TABLE_NAME = "TEST_BENCHMARK";
  Configuration configuration = createConfiguration();
  HBaseAdmin hbaseAdmin = createHBaseAdmin(configuration);
  // The table must be disabled before adding column families.
  if (hbaseAdmin.isTableEnabled(TABLE_NAME)) {
    hbaseAdmin.disableTable(TABLE_NAME);
  }
  // fix: the three families used byte-identical configuration stanzas —
  // deduplicated into a single helper and loop.
  for (String familyName : new String[] { "seq", "id", "info" }) {
    hbaseAdmin.addColumn(TABLE_NAME, newColumnFamily(familyName)); // TableNotDisabledException if still enabled
    System.out.println("add column family [" + familyName + "]");
  }
  // Re-enable the table once the schema change is complete.
  if (hbaseAdmin.isTableDisabled(TABLE_NAME)) {
    hbaseAdmin.enableTable(TABLE_NAME);
  }
}

/**
 * Builds the shared column-family descriptor: block cache on, in-memory,
 * a single retained version, and a TTL of 432000 seconds (5 days).
 */
private static HColumnDescriptor newColumnFamily(String name) {
  HColumnDescriptor hcd = new HColumnDescriptor(name);
  hcd.setBlockCacheEnabled(true);
  hcd.setInMemory(true);
  hcd.setMaxVersions(1);
  hcd.setMinVersions(0);
  hcd.setTimeToLive(432000); // seconds (5 days)
  return hcd;
}