This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HConnectionManager.getConnection. If you are unsure what HConnectionManager.getConnection does, how to call it, or what it looks like in practice, the curated method examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.client.HConnectionManager.
Below are 15 code examples of HConnectionManager.getConnection, sorted by popularity by default.
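Before the individual examples, here is a minimal sketch of the common pattern. HConnectionManager.getConnection(conf) returns a shared connection cached per Configuration, so callers typically release it with deleteConnection rather than closing it outright. This is a hedged sketch assuming a 0.96-era client API (older releases take an extra boolean on deleteConnection); the table name is hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;

public class GetConnectionSketch {
  public static void main(String[] args) throws IOException {
    // Reads hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // Returns a cached connection shared by all callers that pass an
    // equivalent Configuration.
    HConnection connection = HConnectionManager.getConnection(conf);
    try {
      HTableInterface table = connection.getTable("my_table"); // hypothetical table name
      try {
        // ... issue Gets/Puts/Scans against the table ...
      } finally {
        table.close();
      }
    } finally {
      // Decrements the shared connection's reference count.
      HConnectionManager.deleteConnection(conf);
    }
  }
}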
Example 1: ReplicationAdmin
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
/**
 * Constructor that creates a connection to the local ZooKeeper ensemble.
 * @param conf Configuration to use
 * @throws IOException if an internal replication error occurs
 * @throws RuntimeException if replication isn't enabled.
 */
public ReplicationAdmin(Configuration conf) throws IOException {
  if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY,
      HConstants.REPLICATION_ENABLE_DEFAULT)) {
    throw new RuntimeException("hbase.replication isn't true, please " +
        "enable it in order to use replication");
  }
  this.connection = HConnectionManager.getConnection(conf);
  ZooKeeperWatcher zkw = createZooKeeperWatcher();
  try {
    this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf, this.connection);
    this.replicationPeers.init();
    this.replicationQueuesClient =
        ReplicationFactory.getReplicationQueuesClient(zkw, conf, this.connection);
    this.replicationQueuesClient.init();
  } catch (ReplicationException e) {
    throw new IOException("Error initializing the replication admin client.", e);
  }
}
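A hedged sketch of how this constructor is typically driven. listPeers() and close() are as in the 0.96-era ReplicationAdmin API; setting hbase.replication in code here is only for illustration, since it is normally enabled in hbase-site.xml:

Configuration conf = HBaseConfiguration.create();
conf.setBoolean("hbase.replication", true); // normally configured in hbase-site.xml
ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
try {
  // List configured replication peers (peer id -> cluster key).
  System.out.println(replicationAdmin.listPeers());
} finally {
  replicationAdmin.close();
}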
Example 2: main
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
  if (args.length == 0) {
    System.out.println("MajorCompactTable {TableName}");
    return;
  }
  String tableName = args[0];
  Configuration conf = HBaseConfiguration.create(new Configuration());
  HConnection hConnection = HConnectionManager.getConnection(conf);
  HBaseAdmin admin = new HBaseAdmin(hConnection);
  admin.majorCompact(Bytes.toBytes(tableName));
  admin.close();
}
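Note that majorCompact only queues an asynchronous compaction request and returns before the compaction actually runs. A slightly more defensive variant of the body above (a sketch, not the original tool) closes the admin even when the request fails:

String tableName = args[0];
Configuration conf = HBaseConfiguration.create();
HBaseAdmin admin = new HBaseAdmin(HConnectionManager.getConnection(conf));
try {
  admin.majorCompact(Bytes.toBytes(tableName)); // asynchronous request
} finally {
  admin.close();
}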
Example 3: getConnectionByTableName
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
private HConnection getConnectionByTableName(final TableName tableName) throws IOException {
  HConnection hconn = this.tableNameToHConnectionMap.get(tableName);
  if (hconn == null) {
    synchronized (this.tableNameToHConnectionMap) {
      hconn = this.tableNameToHConnectionMap.get(tableName);
      if (hconn == null) {
        hconn = HConnectionManager.getConnection(conf);
        this.tableNameToHConnectionMap.put(tableName, hconn);
      }
    }
  }
  return hconn;
}
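This is check-then-act caching with a synchronized double check. Because getConnection itself returns the same shared connection for an equivalent Configuration, the cache can also be written lock-free with ConcurrentHashMap. A hedged sketch, keeping the field name from the example:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

private final ConcurrentMap<TableName, HConnection> tableNameToHConnectionMap =
    new ConcurrentHashMap<TableName, HConnection>();

private HConnection getConnectionByTableName(final TableName tableName) throws IOException {
  HConnection hconn = tableNameToHConnectionMap.get(tableName);
  if (hconn == null) {
    hconn = HConnectionManager.getConnection(conf);
    // A racing thread may have inserted first; since getConnection returns
    // the same shared instance for an equivalent conf, either value is fine.
    HConnection existing = tableNameToHConnectionMap.putIfAbsent(tableName, hconn);
    if (existing != null) {
      hconn = existing;
    }
  }
  return hconn;
}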
Example 4: ReplicationAdmin
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
/**
 * Constructor that creates a connection to the local ZooKeeper ensemble.
 * @param conf Configuration to use
 * @throws IOException if the connection to ZK cannot be made
 * @throws RuntimeException if replication isn't enabled.
 */
public ReplicationAdmin(Configuration conf) throws IOException {
  if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY, false)) {
    throw new RuntimeException("hbase.replication isn't true, please " +
        "enable it in order to use replication");
  }
  this.connection = HConnectionManager.getConnection(conf);
  ZooKeeperWatcher zkw = this.connection.getZooKeeperWatcher();
  try {
    this.replicationZk = new ReplicationZookeeper(this.connection, conf, zkw);
  } catch (KeeperException e) {
    throw new IOException("Unable setup the ZooKeeper connection", e);
  }
}
Example 5: ServerManager
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
ServerManager(final Server master, final MasterServices services,
    final boolean connect) throws ZooKeeperConnectionException {
  this.master = master;
  this.services = services;
  Configuration c = master.getConfiguration();
  maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
  warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
  this.deadservers = new DeadServer();
  this.connection = connect ? HConnectionManager.getConnection(c) : null;
}
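Both skew thresholds read above are ordinary configuration keys, with values in milliseconds and the defaults shown in the constructor. A deployment wanting stricter clock checks could tighten them; a hedged illustration:

Configuration c = HBaseConfiguration.create();
c.setLong("hbase.master.maxclockskew", 10000);    // reject region servers skewed by more than 10 s
c.setLong("hbase.master.warningclockskew", 3000); // log a warning above 3 s of skew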
Example 6: testClientSessionExpired
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
/**
 * See HBASE-1232 and http://wiki.apache.org/hadoop/ZooKeeper/FAQ#4.
 * @throws IOException
 * @throws InterruptedException
 */
// fails frequently, disabled for now, see HBASE-6406
// @Test
public void testClientSessionExpired()
    throws Exception {
  LOG.info("testClientSessionExpired");
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  new HTable(c, HConstants.META_TABLE_NAME).close();
  HConnection connection = HConnectionManager.getConnection(c);
  ZooKeeperWatcher connectionZK = connection.getZooKeeperWatcher();
  TEST_UTIL.expireSession(connectionZK, false);
  // provoke session expiration by doing something with ZK
  ZKUtil.dump(connectionZK);
  // Check that the old ZK connection is closed, which means we did expire
  System.err.println("ZooKeeper should have timed out");
  LOG.info("state=" + connectionZK.getRecoverableZooKeeper().getState());
  Assert.assertTrue(connectionZK.getRecoverableZooKeeper().getState().
      equals(States.CLOSED));
  // Check that the client recovered
  ZooKeeperWatcher newConnectionZK = connection.getZooKeeperWatcher();
  States state = newConnectionZK.getRecoverableZooKeeper().getState();
  LOG.info("state=" + state);
  Assert.assertTrue(state.equals(States.CONNECTED) || state.equals(States.CONNECTING));
}
Example 7: ServerManager
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
@SuppressWarnings("deprecation")
ServerManager(final Server master, final MasterServices services,
    final boolean connect) throws IOException {
  this.master = master;
  this.services = services;
  Configuration c = master.getConfiguration();
  maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
  warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
  this.connection = connect ? HConnectionManager.getConnection(c) : null;
}
Example 8: init
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
/**
 * Instantiation method used by region servers
 *
 * @param conf configuration to use
 * @param fs file system to use
 * @param manager replication manager to ping to
 * @param stopper the Stoppable used to stop the region server
 * @param peerClusterZnode the name of our znode
 * @throws IOException
 */
public void init(final Configuration conf, final FileSystem fs,
    final ReplicationSourceManager manager, final ReplicationQueues replicationQueues,
    final ReplicationPeers replicationPeers, final Stoppable stopper,
    final String peerClusterZnode, final UUID clusterId) throws IOException {
  this.stopper = stopper;
  this.conf = conf;
  this.replicationQueueSizeCapacity =
      this.conf.getLong("replication.source.size.capacity", 1024*1024*64);
  this.replicationQueueNbCapacity =
      this.conf.getInt("replication.source.nb.capacity", 25000);
  this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 10);
  this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier",
      maxRetriesMultiplier * maxRetriesMultiplier);
  this.queue =
      new PriorityBlockingQueue<Path>(
          conf.getInt("hbase.regionserver.maxlogs", 32),
          new LogsComparator());
  // TODO: This connection is replication specific or we should make it particular to
  // replication and make replication specific settings such as compression or codec to use
  // passing Cells.
  this.conn = HConnectionManager.getConnection(conf);
  this.replicationQueues = replicationQueues;
  this.replicationPeers = replicationPeers;
  this.manager = manager;
  this.sleepForRetries =
      this.conf.getLong("replication.source.sleepforretries", 1000);
  this.fs = fs;
  this.metrics = new MetricsSource(peerClusterZnode);
  this.repLogReader = new ReplicationHLogReaderManager(this.fs, this.conf);
  this.clusterId = clusterId;
  this.peerClusterZnode = peerClusterZnode;
  this.replicationQueueInfo = new ReplicationQueueInfo(peerClusterZnode);
  // ReplicationQueueInfo parses the peerId out of the znode for us
  this.peerId = this.replicationQueueInfo.getPeerId();
  this.replicationSinkMgr = new ReplicationSinkManager(conn, peerId, replicationPeers, conf);
  this.logQueueWarnThreshold = this.conf.getInt("replication.source.log.queue.warn", 2);
}
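All the tuning values read in init() come straight from the Configuration, so a deployment can override them without code changes. A hedged illustration of setting the knobs named above (the keys are taken verbatim from the example; the values are arbitrary):

Configuration conf = HBaseConfiguration.create();
conf.setLong("replication.source.size.capacity", 32 * 1024 * 1024); // cap a shipped batch at 32 MB
conf.setInt("replication.source.nb.capacity", 10000);               // or at 10,000 edits, whichever first
conf.setInt("replication.source.maxretriesmultiplier", 10);         // backoff multiplier on failures
conf.setLong("replication.source.sleepforretries", 1000);           // base sleep between retries (ms)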
Example 9: TableStats
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
public TableStats(String tableName) throws Exception {
  Logger.getRootLogger().setLevel(Level.ERROR);
  this.tableName = tableName;
  Configuration conf = HBaseConfiguration.create(new Configuration());
  hdfs = FileSystem.get(new Configuration());
  hConnection = HConnectionManager.getConnection(conf);
  admin = new HBaseAdmin(hConnection);
  tableDescriptor = admin.getTableDescriptor(Bytes.toBytes(tableName));
  regions = admin.getTableRegions(Bytes.toBytes(tableName));
  table = new HTable(conf, Bytes.toBytes(tableName));
}
Example 10: SmartTableMajorCompact
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
public SmartTableMajorCompact(String tableName) throws Exception {
  Logger.getRootLogger().setLevel(Level.ERROR);
  this.tableName = tableName;
  Configuration conf = HBaseConfiguration.create(new Configuration());
  hdfs = FileSystem.get(new Configuration());
  hConnection = HConnectionManager.getConnection(conf);
  admin = new HBaseAdmin(hConnection);
  tableDescriptor = admin.getTableDescriptor(Bytes.toBytes(tableName));
  regions = admin.getTableRegions(Bytes.toBytes(tableName));
  table = new HTable(conf, Bytes.toBytes(tableName));
}
Example 11: testClientSessionExpired
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
/**
 * See HBASE-1232 and http://wiki.apache.org/hadoop/ZooKeeper/FAQ#4.
 * @throws IOException
 * @throws InterruptedException
 */
// fails frequently, disabled for now, see HBASE-6406
//@Test
public void testClientSessionExpired() throws Exception {
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  // We don't want to share the connection as we will check its state
  c.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "1111");
  HConnection connection = HConnectionManager.getConnection(c);
  ZooKeeperWatcher connectionZK = getZooKeeperWatcher(connection);
  LOG.info("ZooKeeperWatcher= 0x" + Integer.toHexString(
      connectionZK.hashCode()));
  LOG.info("getRecoverableZooKeeper= 0x" + Integer.toHexString(
      connectionZK.getRecoverableZooKeeper().hashCode()));
  LOG.info("session=" + Long.toHexString(
      connectionZK.getRecoverableZooKeeper().getSessionId()));
  TEST_UTIL.expireSession(connectionZK);
  LOG.info("Before using zkw state=" +
      connectionZK.getRecoverableZooKeeper().getState());
  // provoke session expiration by doing something with ZK
  try {
    connectionZK.getRecoverableZooKeeper().getZooKeeper().exists(
        "/1/1", false);
  } catch (KeeperException ignored) {
  }
  // Check that the old ZK connection is closed, which means we did expire
  States state = connectionZK.getRecoverableZooKeeper().getState();
  LOG.info("After using zkw state=" + state);
  LOG.info("session=" + Long.toHexString(
      connectionZK.getRecoverableZooKeeper().getSessionId()));
  // It's asynchronous, so we may have to wait a little...
  final long limit1 = System.currentTimeMillis() + 3000;
  while (System.currentTimeMillis() < limit1 && state != States.CLOSED) {
    state = connectionZK.getRecoverableZooKeeper().getState();
  }
  LOG.info("After using zkw loop=" + state);
  LOG.info("ZooKeeper should have timed out");
  LOG.info("session=" + Long.toHexString(
      connectionZK.getRecoverableZooKeeper().getSessionId()));
  // It's surprising, but sometimes we can still be in connected state.
  // As this is known (even if not understood), we don't make the test fail
  // for this reason.
  // Assert.assertTrue("state=" + state, state == States.CLOSED);
  // Check that the client recovered
  ZooKeeperWatcher newConnectionZK = getZooKeeperWatcher(connection);
  States state2 = newConnectionZK.getRecoverableZooKeeper().getState();
  LOG.info("After new get state=" + state2);
  // As it's an asynchronous event we may get the same ZKW if it's not
  // yet invalidated. Hence this loop.
  final long limit2 = System.currentTimeMillis() + 3000;
  while (System.currentTimeMillis() < limit2 &&
      state2 != States.CONNECTED && state2 != States.CONNECTING) {
    newConnectionZK = getZooKeeperWatcher(connection);
    state2 = newConnectionZK.getRecoverableZooKeeper().getState();
  }
  LOG.info("After new get state loop=" + state2);
  Assert.assertTrue(
      state2 == States.CONNECTED || state2 == States.CONNECTING);
  connection.close();
}
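The two busy-wait loops above share the same poll-until-state-or-timeout shape. As a sketch (not part of the original test), the first loop could be factored into a helper like the one below; the second loop would additionally need to refresh the watcher on each iteration:

// Hedged helper: poll the watcher until it reaches one of the expected
// states or the timeout elapses; returns the last observed state.
private static States waitForState(ZooKeeperWatcher zkw, long timeoutMs,
    States... expected) throws InterruptedException {
  final long deadline = System.currentTimeMillis() + timeoutMs;
  States state = zkw.getRecoverableZooKeeper().getState();
  while (System.currentTimeMillis() < deadline) {
    for (States s : expected) {
      if (state == s) {
        return state;
      }
    }
    Thread.sleep(50);
    state = zkw.getRecoverableZooKeeper().getState();
  }
  return state;
}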
Example 12: testMergeTable
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
/**
 * Test merge.
 * Hand-makes regions of a mergeable size and adds the hand-made regions to
 * hand-made meta. The hand-made regions are created offline. We then start
 * up the mini cluster, disable the hand-made table, and start in on merging.
 * @throws Exception
 */
@Test (timeout=300000) public void testMergeTable() throws Exception {
  // Table we are manually creating offline.
  HTableDescriptor desc = new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf(Bytes.toBytes("test")));
  desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
  // Set maximum region size down.
  UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 64L * 1024L * 1024L);
  // Make it so we don't split.
  UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit", 0);
  // Startup hdfs. It's in here we'll be putting our manually made regions.
  UTIL.startMiniDFSCluster(1);
  // Create hdfs hbase rootdir.
  Path rootdir = UTIL.createRootDir();
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  if (fs.exists(rootdir)) {
    if (fs.delete(rootdir, true)) {
      LOG.info("Cleaned up existing " + rootdir);
    }
  }
  // Now create three data regions: The first is too large to merge since it
  // will be > 64 MB in size. The second two will be smaller and will be
  // selected for merging.
  // To ensure that the first region is larger than 64MB we need to write at
  // least 65536 rows. We will make certain by writing 70000
  byte [] row_70001 = Bytes.toBytes("row_70001");
  byte [] row_80001 = Bytes.toBytes("row_80001");
  // Create regions and populate them at same time. Create the tabledir
  // for them first.
  new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir).createTableDescriptor(desc);
  HRegion [] regions = {
    createRegion(desc, null, row_70001, 1, 70000, rootdir),
    createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
    createRegion(desc, row_80001, null, 80001, 11000, rootdir)
  };
  // Now create the root and meta regions and insert the data regions
  // created above into hbase:meta
  setupMeta(rootdir, regions);
  try {
    LOG.info("Starting mini zk cluster");
    UTIL.startMiniZKCluster();
    LOG.info("Starting mini hbase cluster");
    UTIL.startMiniHBaseCluster(1, 1);
    Configuration c = new Configuration(UTIL.getConfiguration());
    Connection connection = HConnectionManager.getConnection(c);
    List<HRegionInfo> originalTableRegions =
        MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(), connection,
            desc.getTableName());
    LOG.info("originalTableRegions size=" + originalTableRegions.size() +
        "; " + originalTableRegions);
    Admin admin = new HBaseAdmin(c);
    admin.disableTable(desc.getTableName());
    HMerge.merge(c, FileSystem.get(c), desc.getTableName());
    List<HRegionInfo> postMergeTableRegions =
        MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(), connection,
            desc.getTableName());
    LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() +
        "; " + postMergeTableRegions);
    assertTrue("originalTableRegions=" + originalTableRegions.size() +
        ", postMergeTableRegions=" + postMergeTableRegions.size(),
        postMergeTableRegions.size() < originalTableRegions.size());
    LOG.info("Done with merge");
  } finally {
    UTIL.shutdownMiniCluster();
    LOG.info("After cluster shutdown");
  }
}
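The two configuration calls at the top of the test are what make the hand-made regions mergeable without the cluster splitting them again. The same knobs can be set in any test or tool; a hedged illustration, with the keys and values taken verbatim from the test:

Configuration conf = HBaseConfiguration.create();
conf.setLong(HConstants.HREGION_MAX_FILESIZE, 64L * 1024L * 1024L); // 64 MB max region size
conf.setInt("hbase.regionserver.regionSplitLimit", 0);              // disable region splitting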
Example 13: init
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
/**
 * Instantiation method used by region servers
 *
 * @param conf configuration to use
 * @param fs file system to use
 * @param manager replication manager to ping to
 * @param stopper the Stoppable used to stop the region server
 * @param replicating the atomic boolean that starts/stops replication
 * @param peerClusterZnode the name of our znode
 * @throws IOException
 */
public void init(final Configuration conf,
    final FileSystem fs,
    final ReplicationSourceManager manager,
    final Stoppable stopper,
    final AtomicBoolean replicating,
    final String peerClusterZnode)
    throws IOException {
  this.stopper = stopper;
  this.conf = conf;
  this.replicationQueueSizeCapacity =
      this.conf.getLong("replication.source.size.capacity", 1024*1024*64);
  this.replicationQueueNbCapacity =
      this.conf.getInt("replication.source.nb.capacity", 25000);
  this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 10);
  this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier",
      maxRetriesMultiplier * maxRetriesMultiplier);
  this.queue =
      new PriorityBlockingQueue<Path>(
          conf.getInt("hbase.regionserver.maxlogs", 32),
          new LogsComparator());
  this.conn = HConnectionManager.getConnection(conf);
  this.zkHelper = manager.getRepZkWrapper();
  this.ratio = this.conf.getFloat("replication.source.ratio", 0.1f);
  this.currentPeers = new ArrayList<ServerName>();
  this.random = new Random();
  this.replicating = replicating;
  this.manager = manager;
  this.sleepForRetries =
      this.conf.getLong("replication.source.sleepforretries", 1000);
  this.fs = fs;
  this.metrics = new ReplicationSourceMetrics(peerClusterZnode);
  this.repLogReader = new ReplicationHLogReaderManager(this.fs, this.conf);
  try {
    this.clusterId = zkHelper.getUUIDForCluster(zkHelper.getZookeeperWatcher());
  } catch (KeeperException ke) {
    throw new IOException("Could not read cluster id", ke);
  }
  // Finally look if this is a recovered queue
  this.checkIfQueueRecovered(peerClusterZnode);
}
Example 14: getMasterAdmin
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
@Override
public HMasterInterface getMasterAdmin() throws IOException {
  HConnection conn = HConnectionManager.getConnection(conf);
  return conn.getMaster();
}
Example 15: testMasterOpsWhileSplitting
import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class the method depends on
@Test
public void testMasterOpsWhileSplitting() throws Exception {
  byte[] tableName = Bytes.toBytes("TestSplit");
  byte[] familyName = Bytes.toBytes("fam");
  HTable ht = TEST_UTIL.createTable(tableName, familyName);
  TEST_UTIL.loadTable(ht, familyName);
  ht.close();
  HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  byte[] firstRow = Bytes.toBytes("aaa");
  byte[] splitRow = Bytes.toBytes("lll");
  byte[] lastRow = Bytes.toBytes("zzz");
  HConnection con = HConnectionManager
      .getConnection(TEST_UTIL.getConfiguration());
  // this will also cache the region
  byte[] regionName = con.locateRegion(tableName, splitRow).getRegionInfo()
      .getRegionName();
  HRegion region = server.getRegion(regionName);
  SplitTransaction split = new SplitTransaction(region, splitRow);
  split.prepare();
  // 1. phase I
  PairOfSameType<HRegion> regions = split.createDaughters(server, server);
  assertFalse(test(con, tableName, firstRow, server));
  assertFalse(test(con, tableName, lastRow, server));
  // passing null as services prevents the final step
  // 2. most of phase II
  split.openDaughters(server, null, regions.getFirst(), regions.getSecond());
  assertFalse(test(con, tableName, firstRow, server));
  assertFalse(test(con, tableName, lastRow, server));
  // 3. finish phase II
  // note that this replicates some code from SplitTransaction
  // 2nd daughter first
  server.postOpenDeployTasks(regions.getSecond(), server.getCatalogTracker(), true);
  // Add to online regions
  server.addToOnlineRegions(regions.getSecond());
  // THIS is the crucial point:
  // the 2nd daughter was added, so querying before the split key should fail.
  assertFalse(test(con, tableName, firstRow, server));
  // past the split key is ok.
  assertTrue(test(con, tableName, lastRow, server));
  // first daughter second
  server.postOpenDeployTasks(regions.getFirst(), server.getCatalogTracker(), true);
  // Add to online regions
  server.addToOnlineRegions(regions.getFirst());
  assertTrue(test(con, tableName, firstRow, server));
  assertTrue(test(con, tableName, lastRow, server));
  // 4. phase III
  split.transitionZKNode(server, server, regions.getFirst(),
      regions.getSecond());
  assertTrue(test(con, tableName, firstRow, server));
  assertTrue(test(con, tableName, lastRow, server));
}