This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HRegionInfo.getRegionName. If you are wondering what HRegionInfo.getRegionName does in practice, or how to use it, the curated method examples below may help. You can also explore further usage of the enclosing class, org.apache.hadoop.hbase.HRegionInfo.
The following shows 10 code examples of HRegionInfo.getRegionName, sorted by popularity by default.
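Before diving into the examples, here is a minimal, hedged sketch of calling the method through the client API (assuming an HBase 1.x-era API and a running cluster; the table name "t1" and the class name are illustrative, not from the examples below):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class GetRegionNameSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // List the regions of a (hypothetical) table "t1" and print each
      // full region name: <table>,<startKey>,<regionId>.<encodedName>.
      for (HRegionInfo region : admin.getTableRegions(TableName.valueOf("t1"))) {
        byte[] regionName = region.getRegionName();
        System.out.println(Bytes.toStringBinary(regionName));
      }
    }
  }
}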
Example 1: testGetRegion

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

@Test (timeout=300000)
public void testGetRegion() throws Exception {
  // We use an actual HBaseAdmin instance instead of going through the Admin
  // interface here because this test makes use of an internal HBA method (TODO: Fix.).
  HBaseAdmin rawAdmin = new HBaseAdmin(TEST_UTIL.getConfiguration());

  final TableName tableName = TableName.valueOf("testGetRegion");
  LOG.info("Started " + tableName);
  HTable t = TEST_UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);

  HRegionLocation regionLocation = t.getRegionLocation("mmm");
  HRegionInfo region = regionLocation.getRegionInfo();
  byte[] regionName = region.getRegionName();
  Pair<HRegionInfo, ServerName> pair = rawAdmin.getRegion(regionName);
  assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
  pair = rawAdmin.getRegion(region.getEncodedNameAsBytes());
  assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
}
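Note the two lookups at the end: the full region name returned by getRegionName() and the shorter encoded name from getEncodedNameAsBytes() both resolve to the same HRegionInfo/ServerName pair.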
Example 2: removeRegionFromMeta

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Removing region: " + regioninfo + " from " + meta);
  }
  Delete delete = new Delete(regioninfo.getRegionName(),
      System.currentTimeMillis());
  meta.delete(delete);
}
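The Delete above works because the full region name doubles as the region's row key in the catalog table. As an illustration, the following hedged sketch (a hypothetical helper, HBase 1.x-era API, not part of the example above) reads a region's hbase:meta row back using the same key:

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public class MetaRowReader {
  // Hypothetical helper: fetch the catalog row for a region, keyed by
  // the full region name returned by HRegionInfo.getRegionName().
  public static Result readMetaRow(Connection connection, HRegionInfo regionInfo)
      throws IOException {
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      Get get = new Get(regionInfo.getRegionName());
      return meta.get(get);
    }
  }
}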
Example 3: setUp

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

@BeforeClass
public static void setUp() throws Exception {
  conf = hbaseTestingUtility.getConfiguration();
  hbaseTestingUtility.startMiniCluster(1, 1);
  t1 = hbaseTestingUtility.createTable(TABLE1, COLUMN_FAMILY1);
  @SuppressWarnings("deprecation")
  HRegionInfo firstHRI = t1.getRegionLocations().keySet().iterator().next();
  r1name = firstHRI.getRegionName();
  rs1 = hbaseTestingUtility.getHBaseCluster().getRegionServer(
      hbaseTestingUtility.getHBaseCluster().getServerWith(r1name));
  r1 = rs1.getRegion(r1name);
}
Example 4: toRegionEventDescriptor

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

public static RegionEventDescriptor toRegionEventDescriptor(
    EventType eventType, HRegionInfo hri, long seqId, ServerName server,
    Map<byte[], List<Path>> storeFiles) {
  final byte[] tableNameAsBytes = hri.getTable().getName();
  final byte[] encodedNameAsBytes = hri.getEncodedNameAsBytes();
  final byte[] regionNameAsBytes = hri.getRegionName();
  return toRegionEventDescriptor(eventType,
      tableNameAsBytes,
      encodedNameAsBytes,
      regionNameAsBytes,
      seqId,
      server,
      storeFiles);
}
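A hedged usage sketch follows (the hri and server variables, the sequence id of 1L, and the empty store-file map are assumptions for illustration; the event type enum is assumed to be WALProtos.RegionEventDescriptor.EventType):

// Sketch only: build a REGION_OPEN event descriptor for a region.
Map<byte[], List<Path>> storeFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
RegionEventDescriptor desc = toRegionEventDescriptor(
    RegionEventDescriptor.EventType.REGION_OPEN, hri, 1L, server, storeFiles);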
Example 5: checkRegionBoundaries

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

public void checkRegionBoundaries() {
  try {
    ByteArrayComparator comparator = new ByteArrayComparator();
    List<HRegionInfo> regions = MetaScanner.listAllRegions(getConf(), connection, false);
    final RegionBoundariesInformation currentRegionBoundariesInformation =
        new RegionBoundariesInformation();
    Path hbaseRoot = FSUtils.getRootDir(getConf());
    for (HRegionInfo regionInfo : regions) {
      Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
      currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
      // For each region, get the start and stop keys from META and compare them to the
      // same information from the stores.
      Path path = new Path(tableDir, regionInfo.getEncodedName());
      FileSystem fs = path.getFileSystem(getConf());
      FileStatus[] files = fs.listStatus(path);
      // For all the column families in this region...
      byte[] storeFirstKey = null;
      byte[] storeLastKey = null;
      for (FileStatus file : files) {
        String fileName = file.getPath().toString();
        fileName = fileName.substring(fileName.lastIndexOf("/") + 1);
        if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) {
          FileStatus[] storeFiles = fs.listStatus(file.getPath());
          // For all the store files in this column family...
          for (FileStatus storeFile : storeFiles) {
            HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(),
                new CacheConfig(getConf()), getConf());
            if ((reader.getFirstKey() != null)
                && ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
                    reader.getFirstKey()) > 0))) {
              storeFirstKey = reader.getFirstKey();
            }
            if ((reader.getLastKey() != null)
                && ((storeLastKey == null) || (comparator.compare(storeLastKey,
                    reader.getLastKey())) < 0)) {
              storeLastKey = reader.getLastKey();
            }
            reader.close();
          }
        }
      }
      currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
      currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
      currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
      currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
      if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
        currentRegionBoundariesInformation.metaFirstKey = null;
      if (currentRegionBoundariesInformation.metaLastKey.length == 0)
        currentRegionBoundariesInformation.metaLastKey = null;
      // For a region to be correct, we need the META start key to be smaller than or
      // equal to the smallest start key from all the stores, and the start key from
      // the next META entry to be bigger than the last key from all the current stores.
      // The first region's start key is null; the last region's end key is null; some
      // regions can be empty and not have any store.
      boolean valid = true;
      // Checking start key.
      if ((currentRegionBoundariesInformation.storesFirstKey != null)
          && (currentRegionBoundariesInformation.metaFirstKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesFirstKey,
                currentRegionBoundariesInformation.metaFirstKey) >= 0;
      }
      // Checking stop key.
      if ((currentRegionBoundariesInformation.storesLastKey != null)
          && (currentRegionBoundariesInformation.metaLastKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesLastKey,
                currentRegionBoundariesInformation.metaLastKey) < 0;
      }
      if (!valid) {
        errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with region boundaries",
            tablesInfo.get(regionInfo.getTable()));
        LOG.warn("Region's boundaries not aligned between stores and META for:");
        LOG.warn(currentRegionBoundariesInformation);
      }
    }
  } catch (IOException e) {
    LOG.error(e);
  }
}
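In short, a region passes the check when META's start key is less than or equal to the smallest first key found across its store files, and the largest last key across the store files is strictly smaller than META's end key; null keys (the first region's start, the last region's end, or an empty region with no stores) simply skip the corresponding comparison.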
Example 6: testHbckWithExcessReplica

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

@Test
public void testHbckWithExcessReplica() throws Exception {
  TableName table = TableName.valueOf("testHbckWithExcessReplica");
  try {
    setupTableWithRegionReplica(table, 2);
    TEST_UTIL.getHBaseAdmin().flush(table.getName());
    assertNoErrors(doFsck(conf, false));
    assertEquals(ROWKEYS.length, countRows());
    // The next few lines inject a location in meta for a replica, and then
    // ask the master to assign the replica (the meta entry needs to be injected
    // for the master to treat the assignment request as valid; the master
    // checks that the region is valid either from its memory or from meta).
    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    List<HRegionInfo> regions = TEST_UTIL.getHBaseAdmin().getTableRegions(table);
    byte[] startKey = Bytes.toBytes("B");
    byte[] endKey = Bytes.toBytes("C");
    byte[] metaKey = null;
    HRegionInfo newHri = null;
    for (HRegionInfo h : regions) {
      if (Bytes.compareTo(h.getStartKey(), startKey) == 0 &&
          Bytes.compareTo(h.getEndKey(), endKey) == 0 &&
          h.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
        metaKey = h.getRegionName();
        // Create an hri with replicaId 2 (we already have replicas with replicaIds 0 and 1).
        newHri = RegionReplicaUtil.getRegionInfoForReplica(h, 2);
        break;
      }
    }
    Put put = new Put(metaKey);
    ServerName sn = TEST_UTIL.getHBaseAdmin().getClusterStatus().getServers()
        .toArray(new ServerName[0])[0];
    // Add a location with replicaId 2 (we already have replicas with replicaIds 0 and 1).
    MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, 2);
    meta.put(put);
    meta.flushCommits();
    // Assign the new replica.
    HBaseFsckRepair.fixUnassigned((HBaseAdmin)TEST_UTIL.getHBaseAdmin(), newHri);
    HBaseFsckRepair.waitUntilAssigned((HBaseAdmin)TEST_UTIL.getHBaseAdmin(), newHri);
    // Now reset the meta row to its original value.
    Delete delete = new Delete(metaKey);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(2));
    delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(2));
    delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(2));
    meta.delete(delete);
    meta.flushCommits();
    meta.close();
    // Check that the problem exists.
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.NOT_IN_META});
    // Fix the problem.
    hbck = doFsck(conf, true);
    // Run hbck again to make sure we don't see any errors.
    hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[]{});
  } finally {
    cleanupTable(table);
  }
}
Example 7: testRegionShouldNotBeDeployed

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

/**
 * The region is not deployed when the table is disabled.
 */
@Test (timeout=180000)
public void testRegionShouldNotBeDeployed() throws Exception {
  TableName table = TableName.valueOf("tableRegionShouldNotBeDeployed");
  try {
    LOG.info("Starting testRegionShouldNotBeDeployed.");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    assertTrue(cluster.waitForActiveAndReadyMaster());
    byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
        Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
    HTableDescriptor htdDisabled = new HTableDescriptor(table);
    htdDisabled.addFamily(new HColumnDescriptor(FAM));
    // Write the .tableinfo
    FSTableDescriptors fstd = new FSTableDescriptors(conf);
    fstd.createTableDescriptor(htdDisabled);
    List<HRegionInfo> disabledRegions =
        TEST_UTIL.createMultiRegionsInMeta(conf, htdDisabled, SPLIT_KEYS);
    // Let's just assign everything to the first RS.
    HRegionServer hrs = cluster.getRegionServer(0);
    // Create region files.
    admin.disableTable(table);
    admin.enableTable(table);
    // Disable the table and close its regions.
    admin.disableTable(table);
    HRegionInfo region = disabledRegions.remove(0);
    byte[] regionName = region.getRegionName();
    // The region should not be assigned currently.
    assertTrue(cluster.getServerWith(regionName) == -1);
    // Directly open a region on a region server.
    // If going through AM/ZK, the region won't be opened.
    // Even if it is opened, AM will close it, which causes
    // flakiness in this test.
    HRegion r = HRegion.openHRegion(
        region, htdDisabled, hrs.getWAL(region), conf);
    hrs.addToOnlineRegions(r);
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.SHOULD_NOT_BE_DEPLOYED });
    // Fix this fault.
    doFsck(conf, true);
    // Check the result.
    assertNoErrors(doFsck(conf, false));
  } finally {
    admin.enableTable(table);
    cleanupTable(table);
  }
}
Example 8: testLingeringSplitParent

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

/**
 * A split parent in meta, in hdfs, and not deployed.
 */
@Test (timeout=180000)
public void testLingeringSplitParent() throws Exception {
  TableName table = TableName.valueOf("testLingeringSplitParent");
  Table meta = null;
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    // Make sure data is in the regions; if it were only in the WAL there could be data loss.
    admin.flush(table);
    HRegionLocation location = tbl.getRegionLocation("B");
    // Delete one region from meta, but not hdfs, and unassign it.
    deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
        Bytes.toBytes("C"), true, true, false);
    // Create a new meta entry to fake it as a split parent.
    meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
    HRegionInfo hri = location.getRegionInfo();
    HRegionInfo a = new HRegionInfo(tbl.getName(),
        Bytes.toBytes("B"), Bytes.toBytes("BM"));
    HRegionInfo b = new HRegionInfo(tbl.getName(),
        Bytes.toBytes("BM"), Bytes.toBytes("C"));
    hri.setOffline(true);
    hri.setSplit(true);
    MetaTableAccessor.addRegionToMeta(meta, hri, a, b);
    meta.close();
    admin.flush(TableName.META_TABLE_NAME);
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] {
        ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN });
    // Regular repair cannot fix a lingering split parent.
    hbck = doFsck(conf, true);
    assertErrors(hbck, new ERROR_CODE[] {
        ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN });
    assertFalse(hbck.shouldRerun());
    hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] {
        ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN });
    // Fix the lingering split parent.
    hbck = new HBaseFsck(conf, hbfsckExecutorService);
    hbck.connect();
    hbck.setDisplayFullReport(); // i.e. -details
    hbck.setTimeLag(0);
    hbck.setFixSplitParents(true);
    hbck.onlineHbck();
    assertTrue(hbck.shouldRerun());
    hbck.close();
    Get get = new Get(hri.getRegionName());
    Result result = meta.get(get);
    assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY,
        HConstants.SPLITA_QUALIFIER).isEmpty());
    assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY,
        HConstants.SPLITB_QUALIFIER).isEmpty());
    admin.flush(TableName.META_TABLE_NAME);
    // Fix the other issues.
    doFsck(conf, true);
    // Check that all are fixed.
    assertNoErrors(doFsck(conf, false));
    assertEquals(ROWKEYS.length, countRows());
  } finally {
    cleanupTable(table);
    IOUtils.closeQuietly(meta);
  }
}
Example 9: testValidLingeringSplitParent

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

/**
 * Tests that LINGERING_SPLIT_PARENT is not erroneously reported for
 * valid cases where the daughters are there.
 */
@Test (timeout=180000)
public void testValidLingeringSplitParent() throws Exception {
  TableName table = TableName.valueOf("testLingeringSplitParent");
  Table meta = null;
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    // Make sure data is in the regions; if it were only in the WAL there could be data loss.
    admin.flush(table);
    HRegionLocation location = tbl.getRegionLocation(Bytes.toBytes("B"));
    meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
    HRegionInfo hri = location.getRegionInfo();
    // Do a regular split.
    byte[] regionName = location.getRegionInfo().getRegionName();
    admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
    TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true);
    // TODO: fixHdfsHoles does not work against splits, since the parent dir lingers on
    // for some time until children references are deleted. HBCK erroneously sees this as
    // overlapping regions.
    HBaseFsck hbck = doFsck(
        conf, true, true, false, false, false, true, true, true, false, false, false, null);
    assertErrors(hbck, new ERROR_CODE[] {}); // no LINGERING_SPLIT_PARENT reported
    // Assert that the split hbase:meta entry is still there.
    Get get = new Get(hri.getRegionName());
    Result result = meta.get(get);
    assertNotNull(result);
    assertNotNull(MetaTableAccessor.getHRegionInfo(result));
    assertEquals(ROWKEYS.length, countRows());
    // Assert that we still have the split regions.
    assertEquals(tbl.getStartKeys().length, SPLITS.length + 1 + 1); // SPLITS + 1 is # of regions pre-split
    assertNoErrors(doFsck(conf, false));
  } finally {
    cleanupTable(table);
    IOUtils.closeQuietly(meta);
  }
}
Example 10: FlushRegionCallable

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on

public FlushRegionCallable(ClusterConnection connection,
    RpcControllerFactory rpcControllerFactory, HRegionInfo regionInfo,
    boolean writeFlushWalMarker) {
  this(connection, rpcControllerFactory, regionInfo.getTable(), regionInfo.getRegionName(),
      regionInfo.getStartKey(), writeFlushWalMarker);
}