本文整理匯總了Java中org.apache.hadoop.hbase.client.HConnectionManager.deleteConnection方法的典型用法代碼示例。如果您正苦於以下問題:Java HConnectionManager.deleteConnection方法的具體用法?Java HConnectionManager.deleteConnection怎麽用?Java HConnectionManager.deleteConnection使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.hbase.client.HConnectionManager
的用法示例。
在下文中一共展示了HConnectionManager.deleteConnection方法的10個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: after
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
/**
 * Per-test cleanup: removes the root region location from ZooKeeper so that
 * later tests start from a clean zk state, then drops the cached (doctored)
 * HBase connection and closes the watcher.
 */
@After public void after() {
  try {
    // Later tests presume a fresh zk state, so clear the root location.
    RootLocationEditor.deleteRootLocation(this.watcher);
  } catch (KeeperException e) {
    LOG.warn("Unable to delete root location", e);
  }
  // Remove the doctored connection so it cannot leak into subsequent tests.
  HConnectionManager.deleteConnection(UTIL.getConfiguration());
  this.watcher.close();
}
示例2: stop
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
/**
 * Stops the catalog tracker if one is running, then releases the cached
 * HBase connection associated with this instance's configuration.
 *
 * @param why reason for stopping (unused here; required by the interface)
 */
@Override
public void stop(String why) {
  if (ct != null) {
    ct.stop();
  }
  if (connection != null) {
    HConnectionManager.deleteConnection(connection.getConfiguration());
  }
}
示例3: stop
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
/**
 * Idempotent shutdown: closes the ZooKeeper watcher (if any) and deletes the
 * cached connection for this configuration. Subsequent calls are no-ops.
 *
 * @param why reason for stopping (unused here; required by the interface)
 */
@Override
public void stop(String why) {
  if (stopped) {
    return;
  }
  stopped = true;
  if (zkw != null) {
    LOG.info("Stopping " + zkw);
    zkw.close();
  }
  // NOTE(review): this connection is never explicitly acquired or used here,
  // but the cleanup is kept to match the original behavior.
  HConnectionManager.deleteConnection(getConf());
}
示例4: after
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
/**
 * Per-test cleanup: removes the hbase:meta location from ZooKeeper so that
 * later tests start from a clean zk state, then drops the cached (doctored)
 * HBase connection and closes the watcher.
 */
@After public void after() {
  try {
    // Later tests presume a fresh zk state, so clear the meta location.
    MetaRegionTracker.deleteMetaLocation(this.watcher);
  } catch (KeeperException e) {
    LOG.warn("Unable to delete hbase:meta location", e);
  }
  // Remove the doctored connection so it cannot leak into subsequent tests.
  HConnectionManager.deleteConnection(UTIL.getConfiguration());
  this.watcher.close();
}
示例5: stop
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
/**
 * Idempotent shutdown: closes the ZooKeeper watcher held by the helper (if
 * any) and deletes the cached connection. Subsequent calls are no-ops.
 *
 * @param why reason for stopping (unused here; required by the interface)
 */
@Override
public void stop(String why) {
  if (stopped) {
    return;
  }
  stopped = true;
  if (zkHelper != null) {
    LOG.info("Stopping " + zkHelper.getZookeeperWatcher());
    zkHelper.getZookeeperWatcher().close();
  }
  // NOTE(review): this connection is never explicitly acquired or used here,
  // but the cleanup is kept to match the original behavior.
  HConnectionManager.deleteConnection(this.conf, true);
}
示例6: testMetaRebuild
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
/**
 * End-to-end check that hbck can rebuild hbase:meta from the filesystem:
 * wipe meta, verify fsck reports the damage, rebuild against the offline
 * cluster, restart, and confirm tables and row counts are intact.
 */
@Test(timeout = 120000)
public void testMetaRebuild() throws Exception {
  wipeOutMeta();
  // Confirm meta is actually empty before attempting the rebuild.
  assertEquals(0, scanMeta());
  assertErrors(doFsck(conf, false),
      new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED, });
  // Would like to also check the number of tables, but that takes a while
  // to time out. The rebuild must run against a fully stopped cluster.
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();
  HConnectionManager.deleteConnection(conf);
  // Rebuild the meta table from scratch.
  HBaseFsck hbck = new HBaseFsck(conf);
  assertTrue(hbck.rebuildMeta(false));
  // Bring the mini cluster back up; tables seem enabled by default.
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.restartHBaseCluster(3);
  ZooKeeperWatcher zkWatcher = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
  LOG.info("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkWatcher);
  LOG.info("No more RIT in ZK, now doing final test verification");
  // Everything should be healthy again.
  assertEquals(4, scanMeta());
  HTableDescriptor[] tables = TEST_UTIL.getHBaseAdmin().listTables();
  LOG.info("Tables present after restart: " + Arrays.toString(tables));
  assertEquals(1, tables.length);
  assertErrors(doFsck(conf, false), new ERROR_CODE[] {});
  LOG.info("Table " + table + " has " + tableRowCount(conf, table)
      + " entries.");
  assertEquals(16, tableRowCount(conf, table));
}
示例7: tearDownAfter
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
/**
 * Per-test cleanup: shuts down the mini cluster, then removes the cached
 * connection keyed by {@code conf} so it cannot leak into the next test.
 */
@After
public void tearDownAfter() throws Exception {
// Stop the mini cluster started during setup.
TEST_UTIL.shutdownMiniCluster();
// Drop the connection cached under this configuration.
HConnectionManager.deleteConnection(conf);
}
示例8: testMetaRebuild
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
/**
 * End-to-end check that hbck can rebuild hbase:meta from the filesystem:
 * wipe meta, verify fsck reports the damage, rebuild against the offline
 * cluster, restart, re-enable the table, and confirm tables and row counts
 * are intact.
 */
@Test(timeout = 120000)
public void testMetaRebuild() throws Exception {
  wipeOutMeta();
  // Confirm meta really is damaged before attempting the rebuild.
  assertEquals(1, scanMeta());
  assertErrors(doFsck(conf, false),
      new ERROR_CODE[] {
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
  // Would like to also check the number of tables, but that takes a while
  // to time out. The rebuild must run against a fully stopped cluster.
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();
  HConnectionManager.deleteConnection(conf);
  // Rebuild the meta table from scratch.
  HBaseFsck hbck = new HBaseFsck(conf);
  assertTrue(hbck.rebuildMeta(false));
  // Bring the mini cluster back up and re-enable the table.
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.restartHBaseCluster(3);
  TEST_UTIL.getHBaseAdmin().enableTable(table);
  ZooKeeperWatcher zkWatcher = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
  LOG.info("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkWatcher);
  LOG.info("No more RIT in ZK, now doing final test verification");
  // Everything should be healthy again.
  assertEquals(5, scanMeta());
  HTableDescriptor[] tables = TEST_UTIL.getHBaseAdmin().listTables();
  LOG.info("Tables present after restart: " + Arrays.toString(tables));
  assertEquals(1, tables.length);
  assertErrors(doFsck(conf, false), new ERROR_CODE[] {});
  LOG.info("Table " + table + " has " + tableRowCount(conf, table)
      + " entries.");
  assertEquals(16, tableRowCount(conf, table));
}
示例9: testMetaRebuild
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
/**
 * End-to-end check that hbck can rebuild hbase:meta from the filesystem:
 * wipe meta, verify fsck reports the damage, rebuild against the offline
 * cluster, restart, and confirm tables and row counts are intact.
 */
@Test(timeout = 120000)
public void testMetaRebuild() throws Exception {
  wipeOutMeta();
  // Confirm meta really is damaged before attempting the rebuild.
  assertEquals(1, scanMeta());
  assertErrors(doFsck(conf, false),
      new ERROR_CODE[] {
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
  // Would like to also check the number of tables, but that takes a while
  // to time out. The rebuild must run against a fully stopped cluster.
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();
  HConnectionManager.deleteConnection(conf);
  // Rebuild the meta table from scratch.
  HBaseFsck hbck = new HBaseFsck(conf);
  assertTrue(hbck.rebuildMeta(false));
  // Bring the mini cluster back up; tables seem enabled by default.
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.restartHBaseCluster(3);
  ZooKeeperWatcher zkWatcher = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
  LOG.info("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkWatcher);
  LOG.info("No more RIT in ZK, now doing final test verification");
  // Everything should be healthy again.
  assertEquals(5, scanMeta());
  HTableDescriptor[] tables = TEST_UTIL.getHBaseAdmin().listTables();
  LOG.info("Tables present after restart: " + Arrays.toString(tables));
  assertEquals(1, tables.length);
  assertErrors(doFsck(conf, false), new ERROR_CODE[] {});
  LOG.info("Table " + table + " has " + tableRowCount(conf, table)
      + " entries.");
  assertEquals(16, tableRowCount(conf, table));
}
示例10: destroy
import org.apache.hadoop.hbase.client.HConnectionManager; //導入方法依賴的package包/類
@SuppressWarnings("deprecation")
public void destroy() {
if (deleteConnection) {
HConnectionManager.deleteConnection(getObject());
}
}