This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HRegionInfo.getStartKey. If you are wondering what HRegionInfo.getStartKey does, how to use it, or where to find concrete examples, the curated method samples below may help. You can also explore further usage examples of the enclosing class org.apache.hadoop.hbase.HRegionInfo.
Below are 15 code examples of the HRegionInfo.getStartKey method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
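Before diving into the examples, here is a minimal, self-contained sketch (our own illustration, not one of the 15 examples below) of the typical way to obtain HRegionInfo instances and read their start keys through the HBase 1.x Admin API; the table name "my_table" is a placeholder.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class GetStartKeyDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // Each region covers the half-open key range [startKey, endKey); the first
      // region of a table has an empty start key, the last an empty end key.
      for (HRegionInfo hri : admin.getTableRegions(TableName.valueOf("my_table"))) {
        System.out.println(Bytes.toStringBinary(hri.getStartKey()) + " .. "
            + Bytes.toStringBinary(hri.getEndKey()));
      }
    }
  }
}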
Example 1: prepare
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
/**
* Performs checks on the split inputs.
* @return <code>true</code> if the region is splittable, else
* <code>false</code> if it is not (e.g. it's already closed, etc.).
*/
public boolean prepare() throws IOException {
if (!this.parent.isSplittable()) return false;
// Split key can be null if this region is unsplittable; i.e. has refs.
if (this.splitrow == null) return false;
HRegionInfo hri = this.parent.getRegionInfo();
parent.prepareToSplit();
// Check splitrow.
byte [] startKey = hri.getStartKey();
byte [] endKey = hri.getEndKey();
if (Bytes.equals(startKey, splitrow) ||
!this.parent.getRegionInfo().containsRow(splitrow)) {
LOG.info("Split row is not inside region key range or is equal to " +
"startkey: " + Bytes.toStringBinary(this.splitrow));
return false;
}
long rid = getDaughterRegionIdTimestamp(hri);
this.hri_a = new HRegionInfo(hri.getTable(), startKey, this.splitrow, false, rid);
this.hri_b = new HRegionInfo(hri.getTable(), this.splitrow, endKey, false, rid);
transition(SplitTransactionPhase.PREPARED);
return true;
}
Example 2: validateFromSnapshotFromMeta
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
private void validateFromSnapshotFromMeta(HBaseTestingUtility util, TableName table,
int numRegions, int numReplica, Connection connection) throws IOException {
SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(
connection);
snapshot.initialize();
Map<HRegionInfo, ServerName> regionToServerMap = snapshot.getRegionToRegionServerMap();
assert(regionToServerMap.size() == numRegions * numReplica + 1); //'1' for the namespace
Map<ServerName, List<HRegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverToRegionMap.entrySet()) {
if (entry.getKey().equals(util.getHBaseCluster().getMaster().getServerName())) {
continue;
}
List<HRegionInfo> regions = entry.getValue();
Set<byte[]> setOfStartKeys = new HashSet<byte[]>();
for (HRegionInfo region : regions) {
byte[] startKey = region.getStartKey();
if (region.getTable().equals(table)) {
setOfStartKeys.add(startKey); //ignore other tables
LOG.info("--STARTKEY " + new String(startKey)+"--");
}
}
// the number of startkeys will be equal to the number of regions hosted in each server
// (each server will be hosting one replica of a region)
assertEquals(numRegions, setOfStartKeys.size());
}
}
Example 3: verifyBounds
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
private void verifyBounds(List<byte[]> expectedBounds, TableName tableName)
throws Exception {
// Get region boundaries from the cluster and verify their endpoints
final Configuration conf = UTIL.getConfiguration();
final int numRegions = expectedBounds.size()-1;
final HTable hTable = new HTable(conf, tableName);
final Map<HRegionInfo, ServerName> regionInfoMap = hTable.getRegionLocations();
assertEquals(numRegions, regionInfoMap.size());
for (Map.Entry<HRegionInfo, ServerName> entry: regionInfoMap.entrySet()) {
final HRegionInfo regionInfo = entry.getKey();
byte[] regionStart = regionInfo.getStartKey();
byte[] regionEnd = regionInfo.getEndKey();
// This region's start key should be one of the region boundaries
int startBoundaryIndex = indexOfBytes(expectedBounds, regionStart);
assertNotSame(-1, startBoundaryIndex);
// This region's end key should be the region boundary that comes
// after the starting boundary.
byte[] expectedRegionEnd = expectedBounds.get(
startBoundaryIndex+1);
assertEquals(0, Bytes.compareTo(regionEnd, expectedRegionEnd));
}
hTable.close();
}
Example 4: verifyTableRegions
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
void verifyTableRegions(Set<HRegionInfo> regions) {
log("Verifying " + regions.size() + " regions: " + regions);
byte[][] startKeys = new byte[regions.size()][];
byte[][] endKeys = new byte[regions.size()][];
int i=0;
for (HRegionInfo region : regions) {
startKeys[i] = region.getStartKey();
endKeys[i] = region.getEndKey();
i++;
}
Pair<byte[][], byte[][]> keys = new Pair<byte[][], byte[][]>(startKeys, endKeys);
verifyStartEndKeys(keys);
}
Example 5: blockUntilRegionIsOpened
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
public static void blockUntilRegionIsOpened(Configuration conf, long timeout, HRegionInfo hri)
throws IOException, InterruptedException {
log("blocking until region is opened for reading:" + hri.getRegionNameAsString());
long start = System.currentTimeMillis();
try (Connection conn = ConnectionFactory.createConnection(conf);
Table table = conn.getTable(hri.getTable())) {
byte[] row = hri.getStartKey();
// Check for null/empty row. If we find one, use a key that is likely to be in first region.
if (row == null || row.length <= 0) row = new byte[] { '0' };
Get get = new Get(row);
while (System.currentTimeMillis() - start < timeout) {
try {
table.get(get);
break;
} catch (IOException ex) {
// wait some more
}
Threads.sleep(10);
}
}
}
Example 6: cloneRegionInfo
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
public static HRegionInfo cloneRegionInfo(TableName tableName, HRegionInfo snapshotRegionInfo) {
HRegionInfo regionInfo = new HRegionInfo(tableName,
snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(),
snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
regionInfo.setOffline(snapshotRegionInfo.isOffline());
return regionInfo;
}
Example 7: getRowForRegion
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
public static byte[] getRowForRegion(HRegionInfo hri) {
byte[] startKey = hri.getStartKey();
if (startKey.length == 0) {
// empty row key is not allowed in mutations because it is both the start key and the end key
// we return the smallest byte[] that is bigger (in lex comparison) than byte[0].
return new byte[] {0};
}
return startKey;
}
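The comment in Example 7 relies on how HBase orders byte arrays: {0} is the smallest array that sorts after the empty array under unsigned lexicographic comparison. A quick standalone check of that claim (our own sketch, not part of the example):
import org.apache.hadoop.hbase.util.Bytes;

public class SmallestRowCheck {
  public static void main(String[] args) {
    byte[] empty = new byte[0];
    byte[] zero = new byte[] { 0 };
    // Bytes.compareTo treats bytes as unsigned and compares lexicographically.
    System.out.println(Bytes.compareTo(empty, zero) < 0);               // true: {} < {0}
    System.out.println(Bytes.compareTo(zero, new byte[] { 0, 0 }) < 0); // true: {0} < {0, 0}
  }
}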
Example 8: getMergedRegionInfo
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
/**
* Get the merged region info for the two specified regions
* @param a merging region A
* @param b merging region B
* @return the merged region info
*/
public static HRegionInfo getMergedRegionInfo(final HRegionInfo a,
final HRegionInfo b) {
long rid = EnvironmentEdgeManager.currentTime();
// The region id is a timestamp. The merged region's id can't be less than that of the
// merging regions, else it will sort to the wrong location in hbase:meta
if (rid < a.getRegionId() || rid < b.getRegionId()) {
LOG.warn("Clock skew; merging regions id are " + a.getRegionId()
+ " and " + b.getRegionId() + ", but current time here is " + rid);
rid = Math.max(a.getRegionId(), b.getRegionId()) + 1;
}
byte[] startKey = null;
byte[] endKey = null;
// Choose the smaller as start key
if (a.compareTo(b) <= 0) {
startKey = a.getStartKey();
} else {
startKey = b.getStartKey();
}
// Choose the bigger as end key
if (Bytes.equals(a.getEndKey(), HConstants.EMPTY_BYTE_ARRAY)
|| (!Bytes.equals(b.getEndKey(), HConstants.EMPTY_BYTE_ARRAY)
&& Bytes.compareTo(a.getEndKey(), b.getEndKey()) > 0)) {
endKey = a.getEndKey();
} else {
endKey = b.getEndKey();
}
// Merged region is sorted between two merging regions in META
HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTable(), startKey,
endKey, false, rid);
return mergedRegionInfo;
}
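As a hedged illustration of Example 8 (the table name and keys are made up, and we assume getMergedRegionInfo is reachable from the calling scope), merging the first half of a table with an open-ended second half yields a region spanning the whole key space:
// Illustrative only; HConstants.EMPTY_BYTE_ARRAY marks an open-ended boundary.
HRegionInfo a = new HRegionInfo(TableName.valueOf("demo"),
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("m"));
HRegionInfo b = new HRegionInfo(TableName.valueOf("demo"),
    Bytes.toBytes("m"), HConstants.EMPTY_BYTE_ARRAY);
HRegionInfo merged = getMergedRegionInfo(a, b);
// merged.getStartKey() and merged.getEndKey() are both empty:
// the merged region covers the entire table.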
Example 9: rowIsInRange
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
public static boolean rowIsInRange(HRegionInfo info, final byte[] row, final int offset,
final short length) {
return ((info.getStartKey().length == 0) || (
Bytes.compareTo(info.getStartKey(), 0, info.getStartKey().length, row, offset, length)
<= 0)) && ((info.getEndKey().length == 0) || (
Bytes.compareTo(info.getEndKey(), 0, info.getEndKey().length, row, offset, length) > 0));
}
Example 10: split
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
@VisibleForTesting
public void split(final ServerName sn, final HRegionInfo hri,
byte[] splitPoint) throws IOException {
if (hri.getStartKey() != null && splitPoint != null &&
Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
throw new IOException("should not give a split key that equals the start key!");
}
PayloadCarryingRpcController controller = rpcControllerFactory.newController();
controller.setPriority(hri.getTable());
// TODO: this does not do retries, it should. Set priority and timeout in controller
AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
ProtobufUtil.split(controller, admin, hri, splitPoint);
}
Example 11: compareRegionInfosWithoutReplicaId
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
private static int compareRegionInfosWithoutReplicaId(HRegionInfo regionInfoA,
HRegionInfo regionInfoB) {
int result = regionInfoA.getTable().compareTo(regionInfoB.getTable());
if (result != 0) {
return result;
}
// Compare start keys.
result = Bytes.compareTo(regionInfoA.getStartKey(), regionInfoB.getStartKey());
if (result != 0) {
return result;
}
// Compare end keys.
result = Bytes.compareTo(regionInfoA.getEndKey(), regionInfoB.getEndKey());
if (result != 0) {
if (regionInfoA.getStartKey().length != 0
&& regionInfoA.getEndKey().length == 0) {
return 1; // regionInfoA is the last region
}
if (regionInfoB.getStartKey().length != 0
&& regionInfoB.getEndKey().length == 0) {
return -1; // regionInfoB is the last region
}
return result;
}
// regionId is usually milli timestamp -- this defines older stamps
// to be "smaller" than newer stamps in sort order.
if (regionInfoA.getRegionId() > regionInfoB.getRegionId()) {
return 1;
} else if (regionInfoA.getRegionId() < regionInfoB.getRegionId()) {
return -1;
}
return 0;
}
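A hedged sketch of how the comparator in Example 11 could be applied (assuming the private method is made accessible for the sake of the example): sorting places a region with an empty start key before a region of the same table with an empty end key.
import java.util.ArrayList;
import java.util.List;

List<HRegionInfo> list = new ArrayList<>();
list.add(new HRegionInfo(TableName.valueOf("demo"),
    Bytes.toBytes("m"), HConstants.EMPTY_BYTE_ARRAY)); // last region ("m" .. "")
list.add(new HRegionInfo(TableName.valueOf("demo"),
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("m"))); // first region ("" .. "m")
list.sort((x, y) -> compareRegionInfosWithoutReplicaId(x, y));
// After sorting, list.get(0) is the first region ("" .. "m").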
Example 12: getStartEndKeys
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
@VisibleForTesting
Pair<byte[][], byte[][]> getStartEndKeys(List<RegionLocations> regions) {
final byte[][] startKeyList = new byte[regions.size()][];
final byte[][] endKeyList = new byte[regions.size()][];
for (int i = 0; i < regions.size(); i++) {
HRegionInfo region = regions.get(i).getRegionLocation().getRegionInfo();
startKeyList[i] = region.getStartKey();
endKeyList[i] = region.getEndKey();
}
return new Pair<>(startKeyList, endKeyList);
}
Example 13: checkTableInfo
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
void checkTableInfo(TableInfoModel model) {
assertEquals(model.getName(), TABLE.getNameAsString());
Iterator<TableRegionModel> regions = model.getRegions().iterator();
assertTrue(regions.hasNext());
while (regions.hasNext()) {
TableRegionModel region = regions.next();
boolean found = false;
for (HRegionLocation e: regionMap) {
HRegionInfo hri = e.getRegionInfo();
String hriRegionName = hri.getRegionNameAsString();
String regionName = region.getName();
if (hriRegionName.equals(regionName)) {
found = true;
byte[] startKey = hri.getStartKey();
byte[] endKey = hri.getEndKey();
ServerName serverName = e.getServerName();
InetSocketAddress sa =
new InetSocketAddress(serverName.getHostname(), serverName.getPort());
String location = sa.getHostName() + ":" +
Integer.valueOf(sa.getPort());
assertEquals(hri.getRegionId(), region.getId());
assertTrue(Bytes.equals(startKey, region.getStartKey()));
assertTrue(Bytes.equals(endKey, region.getEndKey()));
assertEquals(location, region.getLocation());
break;
}
}
assertTrue(found);
}
}
Example 14: checkRegionBoundaries
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
public void checkRegionBoundaries() {
try {
ByteArrayComparator comparator = new ByteArrayComparator();
List<HRegionInfo> regions = MetaScanner.listAllRegions(getConf(), connection, false);
final RegionBoundariesInformation currentRegionBoundariesInformation =
new RegionBoundariesInformation();
Path hbaseRoot = FSUtils.getRootDir(getConf());
for (HRegionInfo regionInfo : regions) {
Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
// For each region, get the start and stop key from the META and compare them to the
// same information from the Stores.
Path path = new Path(tableDir, regionInfo.getEncodedName());
FileSystem fs = path.getFileSystem(getConf());
FileStatus[] files = fs.listStatus(path);
// For all the column families in this region...
byte[] storeFirstKey = null;
byte[] storeLastKey = null;
for (FileStatus file : files) {
String fileName = file.getPath().toString();
fileName = fileName.substring(fileName.lastIndexOf("/") + 1);
if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) {
FileStatus[] storeFiles = fs.listStatus(file.getPath());
// For all the stores in this column family.
for (FileStatus storeFile : storeFiles) {
HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), new CacheConfig(
getConf()), getConf());
if ((reader.getFirstKey() != null)
&& ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
reader.getFirstKey()) > 0))) {
storeFirstKey = reader.getFirstKey();
}
if ((reader.getLastKey() != null)
&& ((storeLastKey == null) || (comparator.compare(storeLastKey,
reader.getLastKey())) < 0)) {
storeLastKey = reader.getLastKey();
}
reader.close();
}
}
}
currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
currentRegionBoundariesInformation.metaFirstKey = null;
if (currentRegionBoundariesInformation.metaLastKey.length == 0)
currentRegionBoundariesInformation.metaLastKey = null;
// For a region to be correct, we need the META start key to be smaller or equal to the
// smallest start key from all the stores, and the start key from the next META entry to
// be bigger than the last key from all the current stores. First region start key is null;
// Last region end key is null; some regions can be empty and not have any store.
boolean valid = true;
// Checking start key.
if ((currentRegionBoundariesInformation.storesFirstKey != null)
&& (currentRegionBoundariesInformation.metaFirstKey != null)) {
valid = valid
&& comparator.compare(currentRegionBoundariesInformation.storesFirstKey,
currentRegionBoundariesInformation.metaFirstKey) >= 0;
}
// Checking stop key.
if ((currentRegionBoundariesInformation.storesLastKey != null)
&& (currentRegionBoundariesInformation.metaLastKey != null)) {
valid = valid
&& comparator.compare(currentRegionBoundariesInformation.storesLastKey,
currentRegionBoundariesInformation.metaLastKey) < 0;
}
if (!valid) {
errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
tablesInfo.get(regionInfo.getTable()));
LOG.warn("Region's boundaries not alligned between stores and META for:");
LOG.warn(currentRegionBoundariesInformation);
}
}
} catch (IOException e) {
LOG.error(e);
}
}
Example 15: testInterClusterReplication
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
@Test (timeout=120000)
public void testInterClusterReplication() throws Exception {
final String id = "testInterClusterReplication";
List<HRegion> regions = utility1.getHBaseCluster().getRegions(tableName);
int totEdits = 0;
// Make sure edits are spread across regions because we do region based batching
// before shipping edits.
for(HRegion region: regions) {
HRegionInfo hri = region.getRegionInfo();
byte[] row = hri.getStartKey();
for (int i = 0; i < 100; i++) {
if (row.length > 0) {
Put put = new Put(row);
put.addColumn(famName, row, row);
region.put(put);
totEdits++;
}
}
}
admin.addPeer(id,
new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf2))
.setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()),
null);
final int numEdits = totEdits;
Waiter.waitFor(conf1, 30000, new Waiter.ExplainingPredicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return InterClusterReplicationEndpointForTest.replicateCount.get() == numEdits;
}
@Override
public String explainFailure() throws Exception {
String failure = "Failed to replicate all edits, expected = " + numEdits
+ " replicated = " + InterClusterReplicationEndpointForTest.replicateCount.get();
return failure;
}
});
admin.removePeer("testInterClusterReplication");
utility1.deleteTableData(tableName);
}