本文整理汇总了Java中org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation类的典型用法代码示例。如果您正苦于以下问题:Java HConnectionImplementation类的具体用法?Java HConnectionImplementation怎么用?Java HConnectionImplementation使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
HConnectionImplementation类属于org.apache.hadoop.hbase.client.HConnectionManager包,在下文中一共展示了HConnectionImplementation类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: abortingHConnectionRemovesItselfFromHCM
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
@Test
public void abortingHConnectionRemovesItselfFromHCM() throws Exception {
  // Snapshot the currently registered connections so we can restore them afterwards.
  Map<HConnectionKey, HConnectionImplementation> savedInstances =
      new HashMap<HConnectionKey, HConnectionImplementation>(HConnectionManager.HBASE_INSTANCES);
  HConnectionManager.HBASE_INSTANCES.clear();
  try {
    HConnection connection = HConnectionManager.getConnection(TEST_UTIL.getConfiguration());
    connection.abort("test abortingHConnectionRemovesItselfFromHCM", new Exception(
        "test abortingHConnectionRemovesItselfFromHCM"));
    // An aborted connection must deregister itself, so a fresh lookup yields a new instance.
    Assert.assertNotSame(connection,
        HConnectionManager.getConnection(TEST_UTIL.getConfiguration()));
  } finally {
    // Restore the connection registry exactly as we found it.
    HConnectionManager.HBASE_INSTANCES.clear();
    HConnectionManager.HBASE_INSTANCES.putAll(savedInstances);
  }
}
示例2: testRegionCaching
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Verifies that deleting a cached region location, keyed by the first row of a
 * region, really removes that entry from the connection's location cache.
 * @throws Exception
 */
@Test
public void testRegionCaching() throws Exception {
  HTable table = TEST_UTIL.createTable(TABLE_NAME, FAM_NAM);
  TEST_UTIL.createMultiRegions(table, FAM_NAM);
  // Writing a row populates the location cache for ROW's region.
  Put put = new Put(ROW);
  put.add(FAM_NAM, ROW, ROW);
  table.put(put);
  HConnectionManager.HConnectionImplementation connection =
      (HConnectionManager.HConnectionImplementation) table.getConnection();
  assertNotNull(connection.getCachedLocation(TABLE_NAME, ROW));
  connection.deleteCachedLocation(TABLE_NAME, ROW);
  // After the delete, the cache must not return a location for ROW any more.
  HRegionLocation stale = connection.getCachedLocation(TABLE_NAME, ROW);
  assertNull("What is this location?? " + stale, stale);
  table.close();
}
示例3: abortingHConnectionRemovesItselfFromHCM
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
@Test
public void abortingHConnectionRemovesItselfFromHCM() throws Exception {
  // Snapshot the currently registered connections so we can restore them afterwards.
  Map<HConnectionKey, HConnectionImplementation> savedInstances =
      new HashMap<HConnectionKey, HConnectionImplementation>(
          HConnectionManager.CONNECTION_INSTANCES);
  HConnectionManager.CONNECTION_INSTANCES.clear();
  try {
    HConnection connection = HConnectionManager.getConnection(TEST_UTIL.getConfiguration());
    connection.abort("test abortingHConnectionRemovesItselfFromHCM", new Exception(
        "test abortingHConnectionRemovesItselfFromHCM"));
    // An aborted connection must deregister itself, so a fresh lookup yields a new instance.
    Assert.assertNotSame(connection,
        HConnectionManager.getConnection(TEST_UTIL.getConfiguration()));
  } finally {
    // Restore the connection registry exactly as we found it.
    HConnectionManager.CONNECTION_INSTANCES.clear();
    HConnectionManager.CONNECTION_INSTANCES.putAll(savedInstances);
  }
}
示例4: getMockedConnection
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Get a Mocked {@link HConnection} that goes with the passed <code>conf</code>
 * configuration instance. Minimally the mock will return
 * <code>conf</code> when {@link HConnection#getConfiguration()} is invoked.
 * Be sure to shutdown the connection when done by calling
 * {@link HConnectionManager#deleteConnection(Configuration, boolean)} else it
 * will stick around; this is probably not what you want.
 * @param conf configuration
 * @return HConnection object for <code>conf</code>
 * @throws ZooKeeperConnectionException
 */
public static HConnection getMockedConnection(final Configuration conf)
throws ZooKeeperConnectionException {
HConnectionKey connectionKey = new HConnectionKey(conf);
synchronized (HConnectionManager.HBASE_INSTANCES) {
HConnectionImplementation connection =
HConnectionManager.HBASE_INSTANCES.get(connectionKey);
if (connection == null) {
// No connection registered for this key yet: register a Mockito mock that only
// answers getConfiguration(); every other method returns Mockito defaults.
connection = Mockito.mock(HConnectionImplementation.class);
Mockito.when(connection.getConfiguration()).thenReturn(conf);
HConnectionManager.HBASE_INSTANCES.put(connectionKey, connection);
}
return connection;
}
}
示例5: getSpiedConnection
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Get a Mockito spied-upon {@link HConnection} that goes with the passed
 * <code>conf</code> configuration instance.
 * Be sure to shutdown the connection when done by calling
 * {@link HConnectionManager#deleteConnection(Configuration, boolean)} else it
 * will stick around; this is probably not what you want.
 * @param conf configuration
 * @return HConnection object for <code>conf</code>
 * @throws ZooKeeperConnectionException
 * @see http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)
 */
public static HConnection getSpiedConnection(final Configuration conf)
    throws ZooKeeperConnectionException {
  HConnectionKey key = new HConnectionKey(conf);
  synchronized (HConnectionManager.HBASE_INSTANCES) {
    HConnectionImplementation existing = HConnectionManager.HBASE_INSTANCES.get(key);
    if (existing != null) {
      return existing;
    }
    // Nothing cached for this key: build a real implementation, wrap it in a
    // Mockito spy, and register it so later lookups reuse the same spy.
    HConnectionImplementation spied =
        Mockito.spy(new HConnectionImplementation(conf, true, null));
    HConnectionManager.HBASE_INSTANCES.put(key, spied);
    return spied;
  }
}
示例6: setNumTries
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Overwrites the private final "numTries" field of the given connection via
 * reflection and returns the value it held before.
 * NOTE(review): stripping FINAL by writing Field.modifiers works on classic
 * JDKs but is rejected on Java 12+ — confirm the target JDK for this test.
 */
private int setNumTries(HConnectionImplementation hci, int newVal) throws Exception {
  Field target = hci.getClass().getDeclaredField("numTries");
  target.setAccessible(true);
  // Clear the FINAL bit on the field so that set() below is permitted.
  Field modifiersField = Field.class.getDeclaredField("modifiers");
  modifiersField.setAccessible(true);
  modifiersField.setInt(target, target.getModifiers() & ~Modifier.FINAL);
  int previous = ((Integer) target.get(hci)).intValue();
  target.set(hci, newVal);
  return previous;
}
示例7: getMockedConnection
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Get a Mocked {@link HConnection} that goes with the passed <code>conf</code>
 * configuration instance. Minimally the mock will return
 * <code>conf</code> when {@link HConnection#getConfiguration()} is invoked.
 * Be sure to shutdown the connection when done by calling
 * {@link HConnectionManager#deleteConnection(Configuration)} else it
 * will stick around; this is probably not what you want.
 * @param conf configuration
 * @return HConnection object for <code>conf</code>
 * @throws ZooKeeperConnectionException
 */
public static HConnection getMockedConnection(final Configuration conf)
throws ZooKeeperConnectionException {
HConnectionKey connectionKey = new HConnectionKey(conf);
synchronized (HConnectionManager.CONNECTION_INSTANCES) {
HConnectionImplementation connection =
HConnectionManager.CONNECTION_INSTANCES.get(connectionKey);
if (connection == null) {
// No connection registered for this key yet: register a Mockito mock that only
// answers getConfiguration(); every other method returns Mockito defaults.
connection = Mockito.mock(HConnectionImplementation.class);
Mockito.when(connection.getConfiguration()).thenReturn(conf);
HConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection);
}
return connection;
}
}
示例8: getSpiedConnection
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Get a Mockito spied-upon {@link HConnection} that goes with the passed
 * <code>conf</code> configuration instance.
 * Be sure to shutdown the connection when done by calling
 * {@link HConnectionManager#deleteConnection(Configuration)} else it
 * will stick around; this is probably not what you want.
 * @param conf configuration
 * @return HConnection object for <code>conf</code>
 * @throws IOException if the underlying connection cannot be created
 * @see <a href="http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)">Mockito.spy</a>
 */
public static HConnection getSpiedConnection(final Configuration conf)
throws IOException {
HConnectionKey connectionKey = new HConnectionKey(conf);
synchronized (HConnectionManager.CONNECTION_INSTANCES) {
HConnectionImplementation connection =
HConnectionManager.CONNECTION_INSTANCES.get(connectionKey);
if (connection == null) {
// Nothing cached for this key: build a real implementation, wrap it in a
// Mockito spy, and register it so later lookups reuse the same spy.
connection = Mockito.spy(new HConnectionImplementation(conf, true));
HConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection);
}
return connection;
}
}
示例9: getSpiedConnection
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Get a Mockito spied-upon {@link HConnection} that goes with the passed
 * <code>conf</code> configuration instance.
 * Be sure to shutdown the connection when done by calling
 * {@link HConnectionManager#deleteConnection(Configuration, boolean)} else it
 * will stick around; this is probably not what you want.
 * @param conf configuration
 * @return HConnection object for <code>conf</code>
 * @throws ZooKeeperConnectionException
 * @see http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)
 */
public static HConnection getSpiedConnection(final Configuration conf)
    throws ZooKeeperConnectionException {
  HConnectionKey key = new HConnectionKey(conf);
  synchronized (HConnectionManager.HBASE_INSTANCES) {
    HConnectionImplementation existing = HConnectionManager.HBASE_INSTANCES.get(key);
    if (existing != null) {
      return existing;
    }
    // Nothing cached for this key: build a real implementation, wrap it in a
    // Mockito spy, and register it so later lookups reuse the same spy.
    HConnectionImplementation spied = Mockito.spy(new HConnectionImplementation(conf, true));
    HConnectionManager.HBASE_INSTANCES.put(key, spied);
    return spied;
  }
}
示例10: testClusterConnection
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Verifies thread-pool ownership rules for connections and tables: a pool
 * passed to createConnection/getTable is used as-is and never shut down by the
 * connection, while an on-demand internal pool is created lazily and shut down
 * when its connection closes.
 */
@Test
public void testClusterConnection() throws IOException {
// Externally-owned pool, handed explicitly to con2 and to getTable below.
ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1, 5, TimeUnit.SECONDS,
new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("test-hcm"));
HConnection con1 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
HConnection con2 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration(), otherPool);
// make sure the internally created ExecutorService is the one passed
assertTrue(otherPool == ((HConnectionImplementation) con2).getCurrentBatchPool());
String tableName = "testClusterConnection";
TEST_UTIL.createTable(tableName.getBytes(), FAM_NAM).close();
HTable t = (HTable) con1.getTable(tableName, otherPool);
// make sure passing a pool to the getTable does not trigger creation of an
// internal pool
assertNull("Internal Thread pool should be null",
((HConnectionImplementation) con1).getCurrentBatchPool());
// table should use the pool passed
assertTrue(otherPool == t.getPool());
t.close();
t = (HTable) con2.getTable(tableName);
// table should use the connection's internal pool (otherPool, passed at creation)
assertTrue(otherPool == t.getPool());
t.close();
t = (HTable) con2.getTable(Bytes.toBytes(tableName));
// try other API too
assertTrue(otherPool == t.getPool());
t.close();
t = (HTable) con1.getTable(tableName);
ExecutorService pool = ((HConnectionImplementation) con1).getCurrentBatchPool();
// make sure an internal pool was created
assertNotNull("An internal Thread pool should have been created", pool);
// and that the table is using it
assertTrue(t.getPool() == pool);
t.close();
t = (HTable) con1.getTable(tableName);
// still using the *same* internal pool
assertTrue(t.getPool() == pool);
t.close();
con1.close();
// if the pool was created on demand it should be closed upon connection
// close
assertTrue(pool.isShutdown());
con2.close();
// if the pool is passed, it is not closed
assertFalse(otherPool.isShutdown());
otherPool.shutdownNow();
}
示例11: testClusterConnection
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Verifies thread-pool ownership rules for connections and tables: a pool
 * passed to createConnection/getTable is used as-is and never shut down by the
 * connection, while an on-demand internal pool is created lazily and shut down
 * when its connection closes.
 */
@Test
public void testClusterConnection() throws IOException {
// Externally-owned pool, handed explicitly to con2 and to getTable below.
ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1,
5, TimeUnit.SECONDS,
new SynchronousQueue<Runnable>(),
Threads.newDaemonThreadFactory("test-hcm"));
HConnection con1 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
HConnection con2 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration(), otherPool);
// make sure the internally created ExecutorService is the one passed
assertTrue(otherPool == ((HConnectionImplementation)con2).getCurrentBatchPool());
String tableName = "testClusterConnection";
TEST_UTIL.createTable(tableName.getBytes(), FAM_NAM).close();
HTable t = (HTable)con1.getTable(tableName, otherPool);
// make sure passing a pool to the getTable does not trigger creation of an internal pool
assertNull("Internal Thread pool should be null", ((HConnectionImplementation)con1).getCurrentBatchPool());
// table should use the pool passed
assertTrue(otherPool == t.getPool());
t.close();
t = (HTable)con2.getTable(tableName);
// table should use the connection's internal pool (otherPool, passed at creation)
assertTrue(otherPool == t.getPool());
t.close();
t = (HTable)con2.getTable(Bytes.toBytes(tableName));
// try other API too
assertTrue(otherPool == t.getPool());
t.close();
t = (HTable)con2.getTable(TableName.valueOf(tableName));
// try other API too
assertTrue(otherPool == t.getPool());
t.close();
t = (HTable)con1.getTable(tableName);
ExecutorService pool = ((HConnectionImplementation)con1).getCurrentBatchPool();
// make sure an internal pool was created
assertNotNull("An internal Thread pool should have been created", pool);
// and that the table is using it
assertTrue(t.getPool() == pool);
t.close();
t = (HTable)con1.getTable(tableName);
// still using the *same* internal pool
assertTrue(t.getPool() == pool);
t.close();
con1.close();
// if the pool was created on demand it should be closed upon connection close
assertTrue(pool.isShutdown());
con2.close();
// if the pool is passed, it is not closed
assertFalse(otherPool.isShutdown());
otherPool.shutdownNow();
}
示例12: testClusterStatus
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Checks that the connection's cluster-status listener learns about a dead
 * region server: after the server hosting the test region aborts, getClient()
 * for that server must throw RegionServerStoppedException (the expected
 * exception of this test).
 */
@Ignore ("Fails in IDEs: HBASE-9042") @Test(expected = RegionServerStoppedException.class)
public void testClusterStatus() throws Exception {
TableName tn =
TableName.valueOf("testClusterStatus");
byte[] cf = "cf".getBytes();
byte[] rk = "rk1".getBytes();
JVMClusterUtil.RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer();
rs.waitForServerOnline();
final ServerName sn = rs.getRegionServer().getServerName();
HTable t = TEST_UTIL.createTable(tn, cf);
TEST_UTIL.waitTableAvailable(tn.getName());
// Wait out any regions in transition before moving the test region.
while(TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
getRegionStates().isRegionsInTransition()){
Thread.sleep(1);
}
final HConnectionImplementation hci = (HConnectionImplementation)t.getConnection();
// Keep moving the region until it lands on the freshly started server sn.
while (t.getRegionLocation(rk).getPort() != sn.getPort()){
TEST_UTIL.getHBaseAdmin().move(t.getRegionLocation(rk).getRegionInfo().
getEncodedNameAsBytes(), Bytes.toBytes(sn.toString()));
while(TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
getRegionStates().isRegionsInTransition()){
Thread.sleep(1);
}
// Drop the stale cached location so the next getRegionLocation re-reads it.
hci.clearRegionCache(tn);
}
Assert.assertNotNull(hci.clusterStatusListener);
TEST_UTIL.assertRegionOnServer(t.getRegionLocation(rk).getRegionInfo(), sn, 20000);
Put p1 = new Put(rk);
p1.add(cf, "qual".getBytes(), "val".getBytes());
t.put(p1);
// Kill the server hosting the region; master and listener should both notice.
rs.getRegionServer().abort("I'm dead");
// We want the status to be updated. That's a least 10 second
TEST_UTIL.waitFor(40000, 1000, true, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return TEST_UTIL.getHBaseCluster().getMaster().getServerManager().
getDeadServers().isDeadServer(sn);
}
});
TEST_UTIL.waitFor(40000, 1000, true, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return hci.clusterStatusListener.isDeadServer(sn);
}
});
t.close();
hci.getClient(sn); // will throw an exception: RegionServerStoppedException
}
示例13: testConnectionCut
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Test that the connection to the dead server is cut immediately when we receive the
 * notification.
 * A get is blocked server-side by BlockingFilter while a helper thread marks
 * the server dead; the get must then fail fast (not with a socket timeout).
 * @throws Exception
 */
@Test
public void testConnectionCut() throws Exception {
String tableName = "HCM-testConnectionCut";
TEST_UTIL.createTable(tableName.getBytes(), FAM_NAM).close();
boolean previousBalance = TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, true);
Configuration c2 = new Configuration(TEST_UTIL.getConfiguration());
// We want to work on a separate connection.
c2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
// Single retry and a long RPC timeout: a fast failure can only come from the
// dead-server notification, not from retries or timeouts.
c2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
c2.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 30 * 1000);
HTable table = new HTable(c2, tableName);
Put p = new Put(FAM_NAM);
p.add(FAM_NAM, FAM_NAM, FAM_NAM);
table.put(p);
final HConnectionImplementation hci = (HConnectionImplementation)table.getConnection();
final HRegionLocation loc = table.getRegionLocation(FAM_NAM);
Get get = new Get(FAM_NAM);
Assert.assertNotNull(table.get(get));
get = new Get(FAM_NAM);
get.setFilter(new BlockingFilter());
// This thread will mark the server as dead while we're waiting during a get.
Thread t = new Thread() {
@Override
public void run() {
synchronized (syncBlockingFilter) {
try {
// Wait until the filter signals that the get is blocked server-side.
syncBlockingFilter.wait();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
hci.clusterStatusListener.deadServerHandler.newDead(loc.getServerName());
}
};
t.start();
try {
table.get(get);
Assert.fail();
} catch (IOException expected) {
LOG.debug("Received: " + expected);
// Failure must come from the cut connection, not a socket timeout,
// and must arrive while the filter is still blocking.
Assert.assertFalse(expected instanceof SocketTimeoutException);
Assert.assertFalse(syncBlockingFilter.get());
} finally {
syncBlockingFilter.set(true);
t.join();
HConnectionManager.getConnection(c2).close();
TEST_UTIL.getHBaseAdmin().setBalancerRunning(previousBalance, true);
}
table.close();
}
示例14: testCacheSeqNums
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; //导入依赖的package包/类
/**
 * Test that stale cache updates don't override newer cached values.
 * Updates reported by the cached server itself (or with no distinct source)
 * always win; updates from another source only win with a higher seqNum.
 */
@Test(timeout = 60000)
public void testCacheSeqNums() throws Exception{
HTable table = TEST_UTIL.createTable(TABLE_NAME2, FAM_NAM);
TEST_UTIL.createMultiRegions(table, FAM_NAM);
// Writing a row populates the location cache for ROW's region.
Put put = new Put(ROW);
put.add(FAM_NAM, ROW, ROW);
table.put(put);
HConnectionManager.HConnectionImplementation conn =
(HConnectionManager.HConnectionImplementation)table.getConnection();
HRegionLocation location = conn.getCachedLocation(TABLE_NAME2, ROW);
assertNotNull(location);
// A "source" server that differs from the cached one (port - 1).
HRegionLocation anySource = new HRegionLocation(location.getRegionInfo(), ServerName.valueOf(
location.getHostname(), location.getPort() - 1, 0L));
// Same server as already in cache reporting - overwrites any value despite seqNum.
int nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), location,
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
location = conn.getCachedLocation(TABLE_NAME2, ROW);
Assert.assertEquals(nextPort, location.getPort());
// No source specified - same.
nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), location,
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
location = conn.getCachedLocation(TABLE_NAME2, ROW);
Assert.assertEquals(nextPort, location.getPort());
// Higher seqNum - overwrites lower seqNum.
nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), anySource,
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() + 1);
location = conn.getCachedLocation(TABLE_NAME2, ROW);
Assert.assertEquals(nextPort, location.getPort());
// Lower seqNum - does not overwrite higher seqNum.
nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), anySource,
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
location = conn.getCachedLocation(TABLE_NAME2, ROW);
// Port unchanged from the previous update: the stale report was ignored.
Assert.assertEquals(nextPort - 1, location.getPort());
table.close();
}