本文整理汇总了Java中org.apache.hadoop.hbase.catalog.MetaEditor.updateRegionLocation方法的典型用法代码示例。如果您正苦于以下问题:Java MetaEditor.updateRegionLocation方法的具体用法?Java MetaEditor.updateRegionLocation怎么用?Java MetaEditor.updateRegionLocation使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.catalog.MetaEditor
的用法示例。
在下文中一共展示了MetaEditor.updateRegionLocation方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testOpenClosingRegion
import org.apache.hadoop.hbase.catalog.MetaEditor; //导入方法依赖的package包/类
/**
 * Verifies that an open request for a region already marked as closing on the
 * region server is rejected with a {@link RegionAlreadyInTransitionException}.
 */
@Test
public void testOpenClosingRegion() throws Exception {
  Assert.assertTrue(getRS().getRegion(regionName).isAvailable());
  byte[] encodedName = hri.getEncodedNameAsBytes();
  try {
    // Meta was re-opened, so part of its data was lost; restore this
    // region's location row before driving the open request.
    ServerName serverName = getRS().getServerName();
    MetaEditor.updateRegionLocation(getRS().catalogTracker, hri, serverName,
        getRS().getRegion(regionName).getOpenSeqNum());
    // Pretend the region is closing; state is cleared in the finally block.
    getRS().regionsInTransitionInRS.put(encodedName, Boolean.FALSE);
    AdminProtos.OpenRegionRequest openRequest =
        RequestConverter.buildOpenRegionRequest(serverName, hri, 0, null);
    getRS().rpcServices.openRegion(null, openRequest);
    Assert.fail("The closing region should not be opened");
  } catch (ServiceException se) {
    Assert.assertTrue("The region should be already in transition",
        se.getCause() instanceof RegionAlreadyInTransitionException);
  } finally {
    getRS().regionsInTransitionInRS.remove(encodedName);
  }
}
示例2: postOpenDeployTasks
import org.apache.hadoop.hbase.catalog.MetaEditor; //导入方法依赖的package包/类
/**
 * Runs the post-open chores for a freshly deployed region: requests a
 * compaction for any store that needs one, then publishes the region's new
 * location to ZooKeeper (ROOT), META, or the catalog row as appropriate.
 *
 * @param r the region just opened
 * @param ct catalog tracker used for META updates
 * @param daughter true if this region is a daughter of a split
 * @throws KeeperException on ZooKeeper failure
 * @throws IOException on catalog update failure
 */
@Override
public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct, final boolean daughter)
throws KeeperException, IOException {
  checkOpen();
  LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() + ", daughter="
      + daughter);
  // Compact any store holding references or too many files.
  for (Store store : r.getStores().values()) {
    if (store.hasReferences() || store.needsCompaction()) {
      getCompactionRequester().requestCompaction(r, store, "Opening Region", null);
    }
  }
  // Publish the new location: ZK for ROOT, META row otherwise.
  HRegionInfo info = r.getRegionInfo();
  if (info.isRootRegion()) {
    RootLocationEditor.setRootLocation(getZooKeeper(), this.serverNameFromMasterPOV);
  } else if (info.isMetaRegion()) {
    MetaEditor.updateMetaLocation(ct, info, this.serverNameFromMasterPOV);
  } else if (daughter) {
    // Daughter of a split: rewrite the whole row, not just the location.
    MetaEditor.addDaughter(ct, info, this.serverNameFromMasterPOV);
  } else {
    MetaEditor.updateRegionLocation(ct, info, this.serverNameFromMasterPOV);
  }
  LOG.info("Done with post open deploy task for region=" + r.getRegionNameAsString()
      + ", daughter=" + daughter);
}
示例3: postOpenDeployTasks
import org.apache.hadoop.hbase.catalog.MetaEditor; //导入方法依赖的package包/类
/**
 * Completes deployment of a newly opened region: schedules system compactions
 * where needed, records the recovering region's last flushed sequence id in
 * ZK, then publishes the region location (ZK for META, catalog row otherwise)
 * together with its open sequence number.
 *
 * @param r the region just opened
 * @param ct catalog tracker used for META updates
 * @throws KeeperException on ZooKeeper failure
 * @throws IOException on catalog update failure
 */
@Override
public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
throws KeeperException, IOException {
  checkOpen();
  LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString());
  // Compact any store holding references or too many files.
  for (Store store : r.getStores().values()) {
    if (store.hasReferences() || store.needsCompaction()) {
      this.compactSplitThread.requestSystemCompaction(r, store, "Opening Region");
    }
  }
  long openSeqNum = r.getOpenSeqNum();
  if (openSeqNum == HConstants.NO_SEQNUM) {
    // A successfully opened region should always yield a sequence number.
    LOG.error("No sequence number found when opening " + r.getRegionNameAsString());
    openSeqNum = 0;
  }
  // Update flushed sequence id of a recovering region in ZK.
  updateRecoveringRegionLastFlushedSequenceId(r);
  // Publish the new location: ZK for META, catalog row otherwise.
  if (r.getRegionInfo().isMetaRegion()) {
    MetaRegionTracker.setMetaLocation(getZooKeeper(),
        this.serverNameFromMasterPOV);
  } else {
    MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
        this.serverNameFromMasterPOV, openSeqNum);
  }
  LOG.info("Finished post open deploy task for " + r.getRegionNameAsString());
}
示例4: postOpenDeployTasks
import org.apache.hadoop.hbase.catalog.MetaEditor; //导入方法依赖的package包/类
/**
 * Runs post-open chores for a freshly deployed region: requests compactions
 * for stores that need them, then publishes the region's new location to
 * ZooKeeper (ROOT), META, or the region's catalog row.
 *
 * @param r the region just opened
 * @param ct catalog tracker used for META updates
 * @param daughter true if this region is a daughter of a split
 * @throws KeeperException on ZooKeeper failure
 * @throws IOException on catalog update failure
 */
@Override
public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
final boolean daughter)
throws KeeperException, IOException {
checkOpen();
LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() +
", daughter=" + daughter);
// Do checks to see if we need to compact (references or too many files)
for (Store s : r.getStores().values()) {
if (s.hasReferences() || s.needsCompaction()) {
getCompactionRequester().requestCompaction(r, s, "Opening Region", null);
}
}
// Update ZK, ROOT or META
if (r.getRegionInfo().isRootRegion()) {
// ROOT's location lives in ZooKeeper, not in a catalog table.
RootLocationEditor.setRootLocation(getZooKeeper(),
this.serverNameFromMasterPOV);
} else if (r.getRegionInfo().isMetaRegion()) {
MetaEditor.updateMetaLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
} else {
if (daughter) {
// If daughter of a split, update whole row, not just location.
MetaEditor.addDaughter(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
} else {
MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
}
}
LOG.info("Done with post open deploy task for region=" +
r.getRegionNameAsString() + ", daughter=" + daughter);
}
示例5: postOpenDeployTasks
import org.apache.hadoop.hbase.catalog.MetaEditor; //导入方法依赖的package包/类
/**
 * Runs post-open chores for a freshly deployed region: requests compactions
 * for stores that need them, then publishes the region's new location to
 * ZooKeeper (ROOT), META, or the region's catalog row.
 * NOTE(review): unlike the sibling variants, this version performs no
 * checkOpen() guard before doing its work.
 *
 * @param r the region just opened
 * @param ct catalog tracker used for META updates
 * @param daughter true if this region is a daughter of a split
 * @throws KeeperException on ZooKeeper failure
 * @throws IOException on catalog update failure
 */
@Override
public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
final boolean daughter)
throws KeeperException, IOException {
LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() +
", daughter=" + daughter);
// Do checks to see if we need to compact (references or too many files)
for (Store s : r.getStores().values()) {
if (s.hasReferences() || s.needsCompaction()) {
getCompactionRequester().requestCompaction(r, s, "Opening Region");
}
}
// Update ZK, ROOT or META
if (r.getRegionInfo().isRootRegion()) {
// ROOT's location lives in ZooKeeper, not in a catalog table.
RootLocationEditor.setRootLocation(getZooKeeper(),
this.serverNameFromMasterPOV);
} else if (r.getRegionInfo().isMetaRegion()) {
MetaEditor.updateMetaLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
} else {
if (daughter) {
// If daughter of a split, update whole row, not just location.
MetaEditor.addDaughter(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
} else {
MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
}
}
LOG.info("Done with post open deploy task for region=" +
r.getRegionNameAsString() + ", daughter=" + daughter);
}
示例6: postOpenDeployTasks
import org.apache.hadoop.hbase.catalog.MetaEditor; //导入方法依赖的package包/类
/**
 * Completes deployment of a newly opened region: schedules system compactions
 * where needed, records the recovering region's last flushed sequence id in
 * ZK, then publishes the region location (ZK for META, catalog row otherwise)
 * together with its open sequence number.
 *
 * @param r the region just opened
 * @param ct catalog tracker used for META updates
 * @throws KeeperException on ZooKeeper failure
 * @throws IOException on catalog update failure
 */
@Override
public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
throws KeeperException, IOException {
  rpcServices.checkOpen();
  LOG.info("Post open deploy tasks for " + r.getRegionNameAsString());
  // Compact any store holding references or too many files.
  for (Store store : r.getStores().values()) {
    if (store.hasReferences() || store.needsCompaction()) {
      this.compactSplitThread.requestSystemCompaction(r, store, "Opening Region");
    }
  }
  long openSeqNum = r.getOpenSeqNum();
  if (openSeqNum == HConstants.NO_SEQNUM) {
    // A successfully opened region should always yield a sequence number.
    LOG.error("No sequence number found when opening " + r.getRegionNameAsString());
    openSeqNum = 0;
  }
  // Update flushed sequence id of a recovering region in ZK.
  updateRecoveringRegionLastFlushedSequenceId(r);
  // Publish the new location: ZK for META, catalog row otherwise.
  if (r.getRegionInfo().isMetaRegion()) {
    MetaRegionTracker.setMetaLocation(getZooKeeper(), serverName);
  } else {
    MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
        this.serverName, openSeqNum);
  }
  LOG.debug("Finished post open deploy task for " + r.getRegionNameAsString());
}
示例7: postOpenDeployTasks
import org.apache.hadoop.hbase.catalog.MetaEditor; //导入方法依赖的package包/类
/**
 * Runs post-open chores for a freshly deployed region: requests compactions
 * for stores that need them, then publishes the region's new location to
 * ZooKeeper (ROOT via RootRegionTracker), META, or the region's catalog row.
 *
 * @param r the region just opened
 * @param ct catalog tracker used for META updates
 * @param daughter true if this region is a daughter of a split
 * @throws KeeperException on ZooKeeper failure
 * @throws IOException on catalog update failure
 */
@Override
public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
final boolean daughter)
throws KeeperException, IOException {
checkOpen();
LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() +
", daughter=" + daughter);
// Do checks to see if we need to compact (references or too many files)
for (Store s : r.getStores().values()) {
if (s.hasReferences() || s.needsCompaction()) {
getCompactionRequester().requestCompaction(r, s, "Opening Region");
}
}
// Update ZK, ROOT or META
if (r.getRegionInfo().isRootRegion()) {
// ROOT's location lives in ZooKeeper, not in a catalog table.
RootRegionTracker.setRootLocation(getZooKeeper(),
this.serverNameFromMasterPOV);
} else if (r.getRegionInfo().isMetaRegion()) {
MetaEditor.updateMetaLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
} else {
if (daughter) {
// If daughter of a split, update whole row, not just location.
MetaEditor.addDaughter(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
} else {
MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
}
}
LOG.info("Done with post open deploy task for region=" +
r.getRegionNameAsString() + ", daughter=" + daughter);
}
示例8: testWhenRegionsAreNotAssignedAccordingToMeta
import org.apache.hadoop.hbase.catalog.MetaEditor; //导入方法依赖的package包/类
/**
 * Corrupts META by pointing half of a table's regions at a bogus server, then
 * runs SecondaryIndexColocator and asserts every region of the table is still
 * online on some live region server afterwards.
 */
@Test(timeout = 180000)
public void testWhenRegionsAreNotAssignedAccordingToMeta() throws Exception {
  String table = "testWhenRegionsAreNotAssignedAccordingToMeta";
  TableName tableName = TableName.valueOf(table);
  HTableDescriptor htd = new HTableDescriptor(tableName);
  // Fixed: use the string literal directly instead of new String("cf").
  htd.addFamily(new HColumnDescriptor("cf"));
  // Ten single-byte split keys 'A'..'J' yield eleven regions.
  byte[][] splits = new byte[10][];
  char c = 'A';
  for (int i = 0; i < 10; i++) {
    byte[] b = { (byte) c };
    splits[i] = b;
    c++;
  }
  admin.createTable(htd, splits);
  // A server that is not part of the mini cluster, so META is now wrong.
  ServerName sn = ServerName.valueOf("example.org", 1234, 5678);
  HMaster master = UTIL.getMiniHBaseCluster().getMaster(0);
  List<HRegionInfo> tableRegions = admin.getTableRegions(Bytes.toBytes(table));
  List<HRegion> hRegions = UTIL.getMiniHBaseCluster().getRegions(Bytes.toBytes(table));
  // Point the first five regions at the bogus server in META.
  for (int i = 0; i < 5; i++) {
    MetaEditor.updateRegionLocation(master.getCatalogTracker(), tableRegions.get(i), sn, hRegions
        .get(i).getOpenSeqNum());
  }
  SecondaryIndexColocator colocator = new SecondaryIndexColocator(UTIL.getConfiguration());
  colocator.setUp();
  // Expected to detect and repair the META/assignment mismatch.
  colocator.checkForCoLocationInconsistency();
  List<RegionServerThread> serverThreads =
      UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
  List<HRegionServer> rs = new ArrayList<HRegionServer>();
  for (RegionServerThread regionServerThread : serverThreads) {
    rs.add(regionServerThread.getRegionServer());
  }
  // Collect every region of the table that is online on any live server.
  List<HRegionInfo> onlineregions = new ArrayList<HRegionInfo>();
  for (HRegionServer hrs : rs) {
    List<HRegion> regions = hrs.getOnlineRegions(tableName);
    for (HRegion region : regions) {
      onlineregions.add(region.getRegionInfo());
    }
  }
  boolean regionOffline = false;
  for (HRegionInfo hri : tableRegions) {
    if (!onlineregions.contains(hri)) {
      regionOffline = true;
      break;
    }
  }
  // Fixed typo in the assertion message ("assiged" -> "assigned").
  assertFalse("All the regions with wrong META info should be assigned to some online server.",
      regionOffline);
}
示例9: testWhenRegionsAreNotAssignedAccordingToMeta
import org.apache.hadoop.hbase.catalog.MetaEditor; //导入方法依赖的package包/类
/**
 * Corrupts META by pointing half of a table's regions at a bogus server, then
 * runs SecondaryIndexColocator and asserts every region of the table is still
 * online on some live region server afterwards.
 */
@Test(timeout = 180000)
public void testWhenRegionsAreNotAssignedAccordingToMeta() throws Exception {
  String table = "testWhenRegionsAreNotAssignedAccordingToMeta";
  // Fixed: HBaseAdmin was never closed (connection leak); close it in finally.
  HBaseAdmin admin = new HBaseAdmin(UTIL.getConfiguration());
  try {
    HTableDescriptor htd = new HTableDescriptor(table);
    // Fixed: use the string literal directly instead of new String("cf").
    htd.addFamily(new HColumnDescriptor("cf"));
    // Ten single-byte split keys 'A'..'J' yield eleven regions.
    byte[][] splits = new byte[10][];
    char c = 'A';
    for (int i = 0; i < 10; i++) {
      byte[] b = { (byte) c };
      splits[i] = b;
      c++;
    }
    admin.createTable(htd, splits);
    ZKAssign.blockUntilNoRIT(HBaseTestingUtility.getZooKeeperWatcher(UTIL));
    // A server that is not part of the mini cluster, so META is now wrong.
    ServerName sn = new ServerName("example.org", 1234, 5678);
    HMaster master = UTIL.getMiniHBaseCluster().getMaster(0);
    List<HRegionInfo> tableRegions = admin.getTableRegions(Bytes.toBytes(table));
    // Point the first five regions at the bogus server in META.
    for (int i = 0; i < 5; i++) {
      MetaEditor.updateRegionLocation(master.getCatalogTracker(), tableRegions.get(i), sn);
    }
    SecondaryIndexColocator colocator = new SecondaryIndexColocator(UTIL.getConfiguration());
    colocator.setUp();
    // Expected to detect and repair the META/assignment mismatch.
    colocator.checkForCoLocationInconsistency();
    List<RegionServerThread> serverThreads =
        UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
    List<HRegionServer> rs = new ArrayList<HRegionServer>();
    for (RegionServerThread regionServerThread : serverThreads) {
      rs.add(regionServerThread.getRegionServer());
    }
    // Collect every region that is online on any live server.
    Set<HRegionInfo> onlineregions = new HashSet<HRegionInfo>();
    for (HRegionServer hrs : rs) {
      onlineregions.addAll(hrs.getOnlineRegions());
    }
    boolean regionOffline = false;
    for (HRegionInfo hri : tableRegions) {
      if (!onlineregions.contains(hri)) {
        regionOffline = true;
        break;
      }
    }
    // Fixed typo in the assertion message ("assiged" -> "assigned").
    Assert.assertFalse(
        "All the regions with wrong META info should be assigned to some online server.",
        regionOffline);
  } finally {
    admin.close();
  }
}