This article collects typical usage examples of the Java class org.apache.hadoop.hbase.RegionLocations. If you are wondering what RegionLocations is for, how to use it, or are simply looking for working samples, the curated code examples below should help.
The RegionLocations class belongs to the org.apache.hadoop.hbase package. Fifteen code examples of the class are shown below, sorted by popularity.
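Before the individual examples, here is a minimal sketch of the most common way client code obtains and inspects a RegionLocations instance. It is an illustrative sketch only: the table name "demo_table" and the row key are placeholders, and it assumes an HBase 1.x client where casting the connection to ClusterConnection exposes locateRegion(TableName, byte[], boolean, boolean), the same call used throughout the examples below.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationsQuickStart {
  public static void main(String[] args) throws Exception {
    // Assumption: casting to ClusterConnection exposes locateRegion(), as in HBase 1.x.
    try (ClusterConnection conn =
        (ClusterConnection) ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Locate the region (and all of its replicas) that hosts the given row.
      RegionLocations locations = conn.locateRegion(
          TableName.valueOf("demo_table"),   // hypothetical table name
          Bytes.toBytes("row-0001"),         // hypothetical row key
          true,   // useCache
          true);  // retry
      // One HRegionLocation per replica; index 0 is the primary.
      for (HRegionLocation loc : locations.getRegionLocations()) {
        if (loc != null) {
          System.out.println("replica " + loc.getRegionInfo().getReplicaId()
              + " -> " + loc.getServerName());
        }
      }
    }
  }
}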
Example 1: getRegionServer
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
/**
 * Returns the {@link ServerName} from catalog table {@link Result}
 * where the region is transitioning. It should be the same as
 * {@link HRegionInfo#getServerName(Result)} if the server is at OPEN state.
 * @param r Result to pull the transitioning server name from
 * @return A ServerName instance or {@link HRegionInfo#getServerName(Result)}
 * if necessary fields not found or empty.
 */
static ServerName getRegionServer(final Result r, int replicaId) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) {
    RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
    if (locations != null) {
      HRegionLocation location = locations.getRegionLocation(replicaId);
      if (location != null) {
        return location.getServerName();
      }
    }
    return null;
  }
  return ServerName.parseServerName(Bytes.toString(cell.getValueArray(),
      cell.getValueOffset(), cell.getValueLength()));
}
Example 2: replicateUsingCallable
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
private void replicateUsingCallable(ClusterConnection connection, Queue<Entry> entries)
    throws IOException, RuntimeException {
  Entry entry;
  while ((entry = entries.poll()) != null) {
    byte[] row = entry.getEdit().getCells().get(0).getRow();
    RegionLocations locations = connection.locateRegion(tableName, row, true, true);
    RegionReplicaReplayCallable callable = new RegionReplicaReplayCallable(connection,
        RpcControllerFactory.instantiate(connection.getConfiguration()),
        table.getName(), locations.getRegionLocation(1),
        locations.getRegionLocation(1).getRegionInfo(), row, Lists.newArrayList(entry),
        new AtomicLong());
    RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(
        connection.getConfiguration());
    factory.<ReplicateWALEntryResponse> newCaller().callWithRetries(callable, 10000);
  }
}
Example 3: testHBaseFsckWithFewerMetaReplicas
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
@Test
public void testHBaseFsckWithFewerMetaReplicas() throws Exception {
  ClusterConnection c = (ClusterConnection) ConnectionFactory.createConnection(
      TEST_UTIL.getConfiguration());
  RegionLocations rl = c.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
      false, false);
  HBaseFsckRepair.closeRegionSilentlyAndWait(c,
      rl.getRegionLocation(1).getServerName(), rl.getRegionLocation(1).getRegionInfo());
  // check that problem exists
  HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION});
  // fix the problem
  hbck = doFsck(TEST_UTIL.getConfiguration(), true);
  // run hbck again to make sure we don't see any errors
  hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[]{});
}
Example 4: testHBaseFsckWithFewerMetaReplicaZnodes
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
@Test
public void testHBaseFsckWithFewerMetaReplicaZnodes() throws Exception {
  ClusterConnection c = (ClusterConnection) ConnectionFactory.createConnection(
      TEST_UTIL.getConfiguration());
  RegionLocations rl = c.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
      false, false);
  HBaseFsckRepair.closeRegionSilentlyAndWait(c,
      rl.getRegionLocation(2).getServerName(), rl.getRegionLocation(2).getRegionInfo());
  ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(2));
  // check that problem exists
  HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION});
  // fix the problem
  hbck = doFsck(TEST_UTIL.getConfiguration(), true);
  // run hbck again to make sure we don't see any errors
  hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[]{});
}
Example 5: testShutdownOfReplicaHolder
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
@Test
public void testShutdownOfReplicaHolder() throws Exception {
  // checks that when the server holding a meta replica is shut down, the meta replica
  // can be recovered
  RegionLocations rl = ConnectionManager.getConnectionInternal(TEST_UTIL.getConfiguration()).
      locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true);
  HRegionLocation hrl = rl.getRegionLocation(1);
  ServerName oldServer = hrl.getServerName();
  TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer);
  int i = 0;
  do {
    LOG.debug("Waiting for the replica " + hrl.getRegionInfo() + " to come up");
    Thread.sleep(30000); // wait for the detection/recovery
    rl = ConnectionManager.getConnectionInternal(TEST_UTIL.getConfiguration()).
        locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true);
    hrl = rl.getRegionLocation(1);
    i++;
  } while ((hrl == null || hrl.getServerName().equals(oldServer)) && i < 3);
  assertTrue(i != 3);
}
Example 6: testLocations
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
@Test
public void testLocations() throws Exception {
  byte[] b1 = "testLocations".getBytes();
  openRegion(hriSecondary);
  ClusterConnection hc = (ClusterConnection) HTU.getHBaseAdmin().getConnection();
  try {
    hc.clearRegionCache();
    RegionLocations rl = hc.locateRegion(table.getName(), b1, false, false);
    Assert.assertEquals(2, rl.size());
    rl = hc.locateRegion(table.getName(), b1, true, false);
    Assert.assertEquals(2, rl.size());
    hc.clearRegionCache();
    rl = hc.locateRegion(table.getName(), b1, true, false);
    Assert.assertEquals(2, rl.size());
    rl = hc.locateRegion(table.getName(), b1, false, false);
    Assert.assertEquals(2, rl.size());
  } finally {
    closeRegion(hriSecondary);
  }
}
Example 7: printLocations
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
private void printLocations(Result r) {
  if (r == null) {
    LOG.info("FAILED FOR null Result");
    return;
  }
  LOG.info("FAILED FOR " + resultToString(r) + " Stale " + r.isStale());
  if (r.getRow() == null) {
    return;
  }
  RegionLocations rl = null;
  try {
    rl = ((ClusterConnection) connection).locateRegion(tableName, r.getRow(), true, true);
  } catch (IOException e) {
    LOG.warn("Couldn't get locations for row " + Bytes.toString(r.getRow()));
  }
  if (rl == null) {
    // locateRegion failed; nothing to print
    return;
  }
  for (HRegionLocation h : rl.getRegionLocations()) {
    LOG.info("LOCATION " + h);
  }
}
Example 8: allTableRegions
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
/**
 * Lists all of the table regions currently in META.
 * @param connection
 * @param tableName
 * @return Map of all user-space regions to servers
 * @throws IOException
 */
public static NavigableMap<HRegionInfo, ServerName> allTableRegions(
    Connection connection, final TableName tableName) throws IOException {
  final NavigableMap<HRegionInfo, ServerName> regions =
      new TreeMap<HRegionInfo, ServerName>();
  MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {
    @Override
    public boolean processRowInternal(Result result) throws IOException {
      RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
      if (locations == null) return true;
      for (HRegionLocation loc : locations.getRegionLocations()) {
        if (loc != null) {
          HRegionInfo regionInfo = loc.getRegionInfo();
          regions.put(new UnmodifyableHRegionInfo(regionInfo), loc.getServerName());
        }
      }
      return true;
    }
  };
  metaScan(connection, visitor, tableName);
  return regions;
}
Example 9: listTableRegionLocations
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
/**
 * Lists table regions and locations grouped by region range from META.
 */
public static List<RegionLocations> listTableRegionLocations(Configuration conf,
    Connection connection, final TableName tableName) throws IOException {
  final List<RegionLocations> regions = new ArrayList<RegionLocations>();
  MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {
    @Override
    public boolean processRowInternal(Result result) throws IOException {
      RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
      if (locations == null) return true;
      regions.add(locations);
      return true;
    }
  };
  metaScan(connection, visitor, tableName);
  return regions;
}
Example 10: prepare
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
/**
 * Two responsibilities
 * - if the call is already completed (by another replica) stops the retries.
 * - set the location to the right region, depending on the replica.
 */
@Override
public void prepare(final boolean reload) throws IOException {
  if (controller.isCanceled()) return;
  if (Thread.interrupted()) {
    throw new InterruptedIOException();
  }
  if (reload || location == null) {
    RegionLocations rl = getRegionLocations(false, id, cConnection, tableName, get.getRow());
    location = id < rl.size() ? rl.getRegionLocation(id) : null;
  }
  if (location == null || location.getServerName() == null) {
    // With this exception, there will be a retry. The location can be null for a replica
    // when the table is created or after a split.
    throw new HBaseIOException("There is no location for replica id #" + id);
  }
  ServerName dest = location.getServerName();
  setStub(cConnection.getClient(dest));
}
Example 11: cacheLocation
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
/**
 * Put a newly discovered HRegionLocation into the cache.
 * @param tableName The table name.
 * @param locations the new locations
 */
public void cacheLocation(final TableName tableName, final RegionLocations locations) {
  byte[] startKey = locations.getRegionLocation().getRegionInfo().getStartKey();
  ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
  RegionLocations oldLocation = tableLocations.putIfAbsent(startKey, locations);
  boolean isNewCacheEntry = (oldLocation == null);
  if (isNewCacheEntry) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Cached location: " + locations);
    }
    addToCachedServers(locations);
    return;
  }
  // merge old and new locations and add it to the cache
  // Meta record might be stale - some (probably the same) server has closed the region
  // with later seqNum and told us about the new location.
  RegionLocations mergedLocation = oldLocation.mergeLocations(locations);
  boolean replaced = tableLocations.replace(startKey, oldLocation, mergedLocation);
  if (replaced && LOG.isTraceEnabled()) {
    LOG.trace("Merged cached locations: " + mergedLocation);
  }
  addToCachedServers(locations);
}
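The merge step above relies on RegionLocations.mergeLocations(), which keeps, per replica, the entry with the newer seqNum so a stale meta row cannot overwrite fresher knowledge. A minimal sketch of that behaviour is shown below; the region, server names, and sequence numbers are made up purely for illustration.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;

// Hypothetical region and servers, just to illustrate the merge.
HRegionInfo region = new HRegionInfo(TableName.valueOf("demo_table"));
ServerName oldServer = ServerName.valueOf("rs-old.example.com", 16020, 1L);
ServerName newServer = ServerName.valueOf("rs-new.example.com", 16020, 2L);

// Cached entry with seqNum 10 vs. a fresher meta row with seqNum 20.
RegionLocations cached = new RegionLocations(new HRegionLocation(region, oldServer, 10));
RegionLocations fromMeta = new RegionLocations(new HRegionLocation(region, newServer, 20));

// The entry with the higher seqNum wins, so the merged result points at rs-new.
RegionLocations merged = cached.mergeLocations(fromMeta);
System.out.println(merged.getRegionLocation().getServerName());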
Example 12: getTableLocations
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
/**
 * @param tableName
 * @return Map of cached locations for passed <code>tableName</code>
 */
private ConcurrentNavigableMap<byte[], RegionLocations>
    getTableLocations(final TableName tableName) {
  // find the map of cached locations for this table
  ConcurrentNavigableMap<byte[], RegionLocations> result;
  result = this.cachedRegionLocations.get(tableName);
  // if tableLocations for this table isn't built yet, make one
  if (result == null) {
    result = new CopyOnWriteArrayMap<>(Bytes.BYTES_COMPARATOR);
    ConcurrentNavigableMap<byte[], RegionLocations> old =
        this.cachedRegionLocations.putIfAbsent(tableName, result);
    if (old != null) {
      return old;
    }
  }
  return result;
}
Example 13: clearCache
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
/**
 * Delete a cached location, no matter what it is. Called when we were told to not use cache.
 * @param tableName tableName
 * @param row
 */
public void clearCache(final TableName tableName, final byte[] row, int replicaId) {
  ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
  boolean removed = false;
  RegionLocations regionLocations = getCachedLocation(tableName, row);
  if (regionLocations != null) {
    HRegionLocation toBeRemoved = regionLocations.getRegionLocation(replicaId);
    RegionLocations updatedLocations = regionLocations.remove(replicaId);
    if (updatedLocations != regionLocations) {
      byte[] startKey = regionLocations.getRegionLocation().getRegionInfo().getStartKey();
      if (updatedLocations.isEmpty()) {
        removed = tableLocations.remove(startKey, regionLocations);
      } else {
        removed = tableLocations.replace(startKey, regionLocations, updatedLocations);
      }
    }
    if (removed && LOG.isTraceEnabled() && toBeRemoved != null) {
      LOG.trace("Removed " + toBeRemoved + " from cache");
    }
  }
}
Example 14: addCallsForOtherReplicas
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
private void addCallsForOtherReplicas(
    ResultBoundedCompletionService<Pair<Result[], ScannerCallable>> cs, RegionLocations rl,
    int min, int max) {
  if (scan.getConsistency() == Consistency.STRONG) {
    return; // not scheduling on other replicas for strong consistency
  }
  for (int id = min; id <= max; id++) {
    if (currentScannerCallable.id == id) {
      continue; // this was already scheduled earlier
    }
    ScannerCallable s = currentScannerCallable.getScannerCallableForReplica(id);
    setStartRowForReplicaCallable(s);
    outstandingCallables.add(s);
    RetryingRPC retryingOnReplica = new RetryingRPC(s);
    cs.submit(retryingOnReplica, scannerTimeout, id);
  }
}
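addCallsForOtherReplicas() only schedules extra replica scanners when the scan is not strongly consistent. As a hedged companion to the Get example above, the sketch below shows the scan-side setting that enables this fan-out; table is an assumed Table handle and the column family name is a placeholder.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a TIMELINE scan may be answered by region replicas, which is what
// triggers the replica fan-out shown in addCallsForOtherReplicas() above.
static void timelineScan(Table table) throws IOException {
  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("cf"));        // hypothetical column family
  scan.setConsistency(Consistency.TIMELINE);  // STRONG (the default) skips replicas
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      // Rows served by a secondary replica are flagged as stale.
      System.out.println(Bytes.toString(r.getRow()) + " stale=" + r.isStale());
    }
  }
}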
Example 15: locateRegions
import org.apache.hadoop.hbase.RegionLocations; // import the required package/class
@Override
public List<HRegionLocation> locateRegions(final TableName tableName,
    final boolean useCache, final boolean offlined) throws IOException {
  NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(this, tableName);
  final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
  for (HRegionInfo regionInfo : regions.keySet()) {
    RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
    if (list != null) {
      for (HRegionLocation loc : list.getRegionLocations()) {
        if (loc != null) {
          locations.add(loc);
        }
      }
    }
  }
  return locations;
}