本文整理匯總了Java中org.apache.hadoop.hbase.HRegionInfo.getReplicaId方法的典型用法代碼示例。如果您正苦於以下問題:Java HRegionInfo.getReplicaId方法的具體用法?Java HRegionInfo.getReplicaId怎麼用?Java HRegionInfo.getReplicaId使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.hbase.HRegionInfo
的用法示例。
在下文中一共展示了HRegionInfo.getReplicaId方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: getWAL
import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
@Override public WAL getWAL(HRegionInfo regionInfo) throws IOException {
  // hbase:meta (primary replica only) gets a dedicated WAL and a dedicated
  // roller; every other region shares the regular WAL and the regular roller.
  final WAL selected;
  LogRoller rollerForWal = walRoller;
  if (regionInfo == null) {
    // No region supplied: hand out the catch-all WAL.
    selected = walFactory.getWAL(UNSPECIFIED_REGION);
  } else if (regionInfo.isMetaTable()
      && regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
    rollerForWal = ensureMetaWALRoller();
    selected = walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes());
  } else {
    selected = walFactory.getWAL(regionInfo.getEncodedNameAsBytes());
  }
  // Register the WAL with whichever roller is responsible for it.
  rollerForWal.addWAL(selected);
  return selected;
}
示例2: RegionReplicaReplayCallable
import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * Builds a replay callable that ships the given WAL entries to the replica
 * identified by {@code regionInfo}.
 */
public RegionReplicaReplayCallable(ClusterConnection connection,
    RpcControllerFactory rpcControllerFactory, TableName tableName,
    HRegionLocation location, HRegionInfo regionInfo, byte[] row, List<Entry> entries,
    AtomicLong skippedEntries) {
  // Target the RPC at the same replica id as the region we are replaying to.
  super(connection, rpcControllerFactory, location, tableName, row, regionInfo.getReplicaId());
  this.initialEncodedRegionName = regionInfo.getEncodedNameAsBytes();
  this.entries = entries;
  this.skippedEntries = skippedEntries;
}
示例3: addServer
import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/** Records that {@code hri} was seen deployed on {@code server}. */
public synchronized void addServer(HRegionInfo hri, ServerName server) {
  OnlineEntry sighting = new OnlineEntry();
  sighting.hri = hri;
  sighting.hsa = server;
  deployedEntries.add(sighting);
  deployedOn.add(server);
  // Remember the replicaId observed in the cluster, and resolve the
  // primary (default replica) HRI that this deployment corresponds to.
  deployedReplicaId = hri.getReplicaId();
  primaryHRIForDeployedReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(hri);
}
示例4: postWALWrite
import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
@Override
public void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
// Only keep edits written by the primary replica of the table under test;
// secondary replicas would produce duplicate entries.
// Use the named constant rather than the magic literal 0, matching the
// replicaId comparisons elsewhere in this codebase.
if (logKey.getTablename().equals(tableName)
    && info.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
entries.add(new Entry(logKey, logEdit));
}
}
示例5: shouldBeOnMaster
import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * Check if a region belongs to some small system table.
 * If so, the primary replica may be expected to be put on the master regionserver.
 *
 * @param region the region to examine
 * @return true only for the primary (default) replica of a table configured
 *         to live on the master regionserver
 */
public boolean shouldBeOnMaster(HRegionInfo region) {
  // Secondary replicas never belong on the master.
  if (region.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
    return false;
  }
  return tablesOnMaster.contains(region.getTable().getNameAsString());
}
示例6: testHbckWithExcessReplica
import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * Verifies that hbck detects a region replica that is deployed in the
 * cluster but has no corresponding location in hbase:meta, reports it as
 * NOT_IN_META, and can repair the inconsistency.
 */
@Test
public void testHbckWithExcessReplica() throws Exception {
TableName table =
TableName.valueOf("testHbckWithExcessReplica");
try {
// Create a table with 2 region replicas and confirm hbck starts clean.
setupTableWithRegionReplica(table, 2);
TEST_UTIL.getHBaseAdmin().flush(table.getName());
assertNoErrors(doFsck(conf, false));
assertEquals(ROWKEYS.length, countRows());
// the next few lines inject a location in meta for a replica, and then
// asks the master to assign the replica (the meta needs to be injected
// for the master to treat the request for assignment as valid; the master
// checks the region is valid either from its memory or meta)
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
List<HRegionInfo> regions = TEST_UTIL.getHBaseAdmin().getTableRegions(table);
byte[] startKey = Bytes.toBytes("B");
byte[] endKey = Bytes.toBytes("C");
byte[] metaKey = null;
HRegionInfo newHri = null;
// Locate the primary (default replica) region spanning [B, C); its meta
// row key is where the extra replica's location will be injected.
for (HRegionInfo h : regions) {
if (Bytes.compareTo(h.getStartKey(), startKey) == 0 &&
Bytes.compareTo(h.getEndKey(), endKey) == 0 &&
h.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
metaKey = h.getRegionName();
//create a hri with replicaId as 2 (since we already have replicas with replicaid 0 and 1)
newHri = RegionReplicaUtil.getRegionInfoForReplica(h, 2);
break;
}
}
Put put = new Put(metaKey);
ServerName sn = TEST_UTIL.getHBaseAdmin().getClusterStatus().getServers()
.toArray(new ServerName[0])[0];
//add a location with replicaId as 2 (since we already have replicas with replicaid 0 and 1)
MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, 2);
meta.put(put);
meta.flushCommits();
// assign the new replica
HBaseFsckRepair.fixUnassigned((HBaseAdmin)TEST_UTIL.getHBaseAdmin(), newHri);
HBaseFsckRepair.waitUntilAssigned((HBaseAdmin)TEST_UTIL.getHBaseAdmin(), newHri);
// now reset the meta row to its original value: delete replica 2's
// server/startcode/seqnum columns so the replica stays deployed but is
// no longer present in meta
Delete delete = new Delete(metaKey);
delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(2));
delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(2));
delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(2));
meta.delete(delete);
meta.flushCommits();
meta.close();
// check that problem exists
HBaseFsck hbck = doFsck(conf, false);
assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.NOT_IN_META});
// fix the problem
hbck = doFsck(conf, true);
// run hbck again to make sure we don't see any errors
hbck = doFsck(conf, false);
assertErrors(hbck, new ERROR_CODE[]{});
} finally {
cleanupTable(table);
}
}
示例7: isDefaultReplica
import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * @param hri the region to test
 * @return true if this region is a default replica for the region
 */
public static boolean isDefaultReplica(HRegionInfo hri) {
  return DEFAULT_REPLICA_ID == hri.getReplicaId();
}