This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HRegionInfo.setSplit. If you are wondering what HRegionInfo.setSplit does, how to call it, or want to see it in real code, the curated samples below may help. You can also explore other usages of the enclosing class, org.apache.hadoop.hbase.HRegionInfo.
Six code examples of HRegionInfo.setSplit are shown below, sorted by popularity by default.
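Before the examples, here is a minimal sketch of the pattern most of them share: copy a region's HRegionInfo, mark the copy offline and split, and write it back to hbase:meta as a Put. The method name markParentAsSplit and the meta Table handle are hypothetical; the MetaTableAccessor and HRegionInfo calls are the same ones used in the examples below (HBase 1.x API).

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Minimal sketch (not one of the examples below): flag a region as a split parent in meta.
// "meta" is assumed to be an already-open Table handle for hbase:meta.
void markParentAsSplit(Table meta, HRegionInfo parent) throws IOException {
  // Work on a copy so the original HRegionInfo is left untouched.
  HRegionInfo copyOfParent = new HRegionInfo(parent);
  copyOfParent.setOffline(true); // the parent no longer serves requests
  copyOfParent.setSplit(true);   // mark it as a split parent
  Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
  meta.put(putParent);           // persist the updated region info in hbase:meta
}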
Example 1: resetSplitParent
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
* Reset the split parent region info in meta table
*/
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);
  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
  mutations.add(p);
  meta.mutateRow(mutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META");
}
Example 2: offlineParentInMetaAndputMetaEntries
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
private void offlineParentInMetaAndputMetaEntries(HConnection hConnection,
    HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
    ServerName serverName, List<Mutation> metaEntries, int regionReplication)
    throws IOException {
  List<Mutation> mutations = metaEntries;
  HRegionInfo copyOfParent = new HRegionInfo(parent);
  copyOfParent.setOffline(true);
  copyOfParent.setSplit(true);
  // Put for parent
  Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
  MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
  mutations.add(putParent);
  // Puts for daughters
  Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
  Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
  addLocation(putA, serverName, 1); // these are new regions, openSeqNum = 1 is fine.
  addLocation(putB, serverName, 1);
  mutations.add(putA);
  mutations.add(putB);
  // Add empty locations for region replicas of daughters so that number of replicas can be
  // cached whenever the primary region is looked up from meta
  for (int i = 1; i < regionReplication; i++) {
    addEmptyLocation(putA, i);
    addEmptyLocation(putB, i);
  }
  MetaTableAccessor.mutateMetaTable(hConnection, mutations);
}
Example 3: testWALArchiving
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Tests WAL archiving by adding data, doing flushing/rolling and checking that we archive
 * old logs and don't archive "live logs" (that is, logs with un-flushed entries).
 * <p>
 * This is what it does:
 * It creates two regions, and does a series of inserts along with log rolling.
 * Whenever a WAL is rolled, HLogBase checks previous WALs for archiving. A WAL is eligible
 * for archiving if all the regions that have entries in that WAL file have flushed past
 * their maximum sequence id in that WAL file.
 * <p>
 * @throws IOException
 */
@Test
public void testWALArchiving() throws IOException {
  LOG.debug("testWALArchiving");
  HTableDescriptor table1 =
      new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
  HTableDescriptor table2 =
      new HTableDescriptor(TableName.valueOf("t2")).addFamily(new HColumnDescriptor("row"));
  final Configuration localConf = new Configuration(conf);
  localConf.set(WALFactory.WAL_PROVIDER, DefaultWALProvider.class.getName());
  final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
  try {
    final WAL wal = wals.getWAL(UNSPECIFIED_REGION);
    assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(wal));
    HRegionInfo hri1 =
        new HRegionInfo(table1.getTableName(), HConstants.EMPTY_START_ROW,
            HConstants.EMPTY_END_ROW);
    HRegionInfo hri2 =
        new HRegionInfo(table2.getTableName(), HConstants.EMPTY_START_ROW,
            HConstants.EMPTY_END_ROW);
    // ensure that we don't split the regions.
    hri1.setSplit(false);
    hri2.setSplit(false);
    // variables to mock region sequenceIds.
    // start with the testing logic: insert a waledit, and roll writer
    addEdits(wal, hri1, table1, 1);
    wal.rollWriter();
    // assert that the wal is rolled
    assertEquals(1, DefaultWALProvider.getNumRolledLogFiles(wal));
    // add edits in the second wal file, and roll writer.
    addEdits(wal, hri1, table1, 1);
    wal.rollWriter();
    // assert that the wal is rolled
    assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(wal));
    // add a waledit to table1, and flush the region.
    addEdits(wal, hri1, table1, 3);
    flushRegion(wal, hri1.getEncodedNameAsBytes(), table1.getFamiliesKeys());
    // roll log; all old logs should be archived.
    wal.rollWriter();
    assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(wal));
    // add an edit to table2, and roll writer
    addEdits(wal, hri2, table2, 1);
    wal.rollWriter();
    assertEquals(1, DefaultWALProvider.getNumRolledLogFiles(wal));
    // add edits for table1, and roll writer
    addEdits(wal, hri1, table1, 2);
    wal.rollWriter();
    assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(wal));
    // add edits for table2, and flush hri1.
    addEdits(wal, hri2, table2, 2);
    flushRegion(wal, hri1.getEncodedNameAsBytes(), table2.getFamiliesKeys());
    // the log : region-sequenceId map is
    // log1: region2 (unflushed)
    // log2: region1 (flushed)
    // log3: region2 (unflushed)
    // roll the writer; log2 should be archived.
    wal.rollWriter();
    assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(wal));
    // flush region2, and all logs should be archived.
    addEdits(wal, hri2, table2, 2);
    flushRegion(wal, hri2.getEncodedNameAsBytes(), table2.getFamiliesKeys());
    wal.rollWriter();
    assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(wal));
  } finally {
    if (wals != null) {
      wals.close();
    }
  }
}
Example 4: testLingeringSplitParent
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
* A split parent in meta, in hdfs, and not deployed
*/
@Test (timeout=180000)
public void testLingeringSplitParent() throws Exception {
  TableName table =
      TableName.valueOf("testLingeringSplitParent");
  Table meta = null;
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    // make sure data in regions, if in wal only there is no data loss
    admin.flush(table);
    HRegionLocation location = tbl.getRegionLocation("B");
    // Delete one region from meta, but not hdfs, unassign it.
    deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
        Bytes.toBytes("C"), true, true, false);
    // Create a new meta entry to fake it as a split parent.
    meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
    HRegionInfo hri = location.getRegionInfo();
    HRegionInfo a = new HRegionInfo(tbl.getName(),
        Bytes.toBytes("B"), Bytes.toBytes("BM"));
    HRegionInfo b = new HRegionInfo(tbl.getName(),
        Bytes.toBytes("BM"), Bytes.toBytes("C"));
    hri.setOffline(true);
    hri.setSplit(true);
    MetaTableAccessor.addRegionToMeta(meta, hri, a, b);
    meta.close();
    admin.flush(TableName.META_TABLE_NAME);
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] {
        ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN });
    // regular repair cannot fix lingering split parent
    hbck = doFsck(conf, true);
    assertErrors(hbck, new ERROR_CODE[] {
        ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN });
    assertFalse(hbck.shouldRerun());
    hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] {
        ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN });
    // fix lingering split parent
    hbck = new HBaseFsck(conf, hbfsckExecutorService);
    hbck.connect();
    hbck.setDisplayFullReport(); // i.e. -details
    hbck.setTimeLag(0);
    hbck.setFixSplitParents(true);
    hbck.onlineHbck();
    assertTrue(hbck.shouldRerun());
    hbck.close();
    Get get = new Get(hri.getRegionName());
    Result result = meta.get(get);
    assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY,
        HConstants.SPLITA_QUALIFIER).isEmpty());
    assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY,
        HConstants.SPLITB_QUALIFIER).isEmpty());
    admin.flush(TableName.META_TABLE_NAME);
    // fix other issues
    doFsck(conf, true);
    // check that all are fixed
    assertNoErrors(doFsck(conf, false));
    assertEquals(ROWKEYS.length, countRows());
  } finally {
    cleanupTable(table);
    IOUtils.closeQuietly(meta);
  }
}
Example 5: testMasterRestartAtRegionSplitPendingCatalogJanitor
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Verifies HBASE-5806. Here the case is that splitting has completed, but the master is
 * killed and restarted before the CatalogJanitor (CJ) can remove the parent region.
 * @throws IOException
 * @throws InterruptedException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test (timeout = 300000)
public void testMasterRestartAtRegionSplitPendingCatalogJanitor()
    throws IOException, InterruptedException, NodeExistsException,
    KeeperException, ServiceException {
  final TableName tableName = TableName
      .valueOf("testMasterRestartAtRegionSplitPendingCatalogJanitor");
  // Create table then get the single region for our new table.
  HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
  List<HRegion> regions = cluster.getRegions(tableName);
  HRegionInfo hri = getAndCheckSingleTableRegion(regions);
  int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
  // Turn off balancer so it doesn't cut in and mess up our placements.
  this.admin.setBalancerRunning(false, true);
  // Turn off the meta scanner so it doesn't remove the parent on us.
  cluster.getMaster().setCatalogJanitorEnabled(false);
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(t.getConfiguration(),
      "testMasterRestartAtRegionSplitPendingCatalogJanitor", new UselessTestAbortable());
  try {
    // Add a bit of load to the table so it is splittable.
    TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY, false);
    // Get region pre-split.
    HRegionServer server = cluster.getRegionServer(tableRegionIndex);
    printOutRegions(server, "Initial regions: ");
    this.admin.split(hri.getRegionNameAsString());
    checkAndGetDaughters(tableName);
    // Assert the ephemeral node is up in zk.
    String path = ZKAssign.getNodeName(zkw, hri.getEncodedName());
    Stat stats = zkw.getRecoverableZooKeeper().exists(path, false);
    LOG.info("EPHEMERAL NODE BEFORE SERVER ABORT, path=" + path + ", stats="
        + stats);
    String node = ZKAssign.getNodeName(zkw, hri.getEncodedName());
    Stat stat = new Stat();
    byte[] data = ZKUtil.getDataNoWatch(zkw, node, stat);
    // ZKUtil.create
    for (int i = 0; data != null && i < 60; i++) {
      Thread.sleep(1000);
      data = ZKUtil.getDataNoWatch(zkw, node, stat);
    }
    assertNull("Waited too long for ZK node to be removed: " + node, data);
    MockMasterWithoutCatalogJanitor master = abortAndWaitForMaster();
    this.admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());
    // Update the region to be offline and split, so that HRegionInfo#equals
    // returns true in checking rebuilt region states map.
    hri.setOffline(true);
    hri.setSplit(true);
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    assertTrue("Split parent should be in SPLIT state",
        regionStates.isRegionInState(hri, State.SPLIT));
    ServerName regionServerOfRegion = regionStates.getRegionServerOfRegion(hri);
    assertTrue(regionServerOfRegion == null);
  } finally {
    this.admin.setBalancerRunning(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
    t.close();
    zkw.close();
  }
}
Example 6: preSplitBeforePONR
import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
@Override
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
    byte[] splitKey, List<Mutation> metaEntries) throws IOException {
  RegionCoprocessorEnvironment environment = ctx.getEnvironment();
  HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
  List<Region> onlineRegions =
      rs.getOnlineRegions(TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2"));
  Region region = onlineRegions.get(0);
  for (Region r : onlineRegions) {
    if (r.getRegionInfo().containsRow(splitKey)) {
      region = r;
      break;
    }
  }
  st = new SplitTransactionImpl((HRegion) region, splitKey);
  if (!st.prepare()) {
    LOG.error("Prepare for the table " + region.getTableDesc().getNameAsString()
        + " failed. So returning null. ");
    ctx.bypass();
    return;
  }
  ((HRegion) region).forceSplit(splitKey);
  daughterRegions = st.stepsBeforePONR(rs, rs, false);
  HRegionInfo copyOfParent = new HRegionInfo(region.getRegionInfo());
  copyOfParent.setOffline(true);
  copyOfParent.setSplit(true);
  // Put for parent
  Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
  MetaTableAccessor.addDaughtersToPut(putParent, daughterRegions.getFirst().getRegionInfo(),
      daughterRegions.getSecond().getRegionInfo());
  metaEntries.add(putParent);
  // Puts for daughters
  Put putA = MetaTableAccessor.makePutFromRegionInfo(
      daughterRegions.getFirst().getRegionInfo());
  Put putB = MetaTableAccessor.makePutFromRegionInfo(
      daughterRegions.getSecond().getRegionInfo());
  st.addLocation(putA, rs.getServerName(), 1);
  st.addLocation(putB, rs.getServerName(), 1);
  metaEntries.add(putA);
  metaEntries.add(putB);
}