This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.HRegion. If you are wondering what the HRegion class is for, or how to use HRegion in practice, the curated code examples below may help.
The HRegion class belongs to the org.apache.hadoop.hbase.regionserver package. Fifteen code examples of the HRegion class are shown below, sorted by popularity by default.
Example 1: updateMeta
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
@Override
protected void updateMeta(final byte [] oldRegion1,
    final byte [] oldRegion2,
    HRegion newRegion)
    throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    // Null guard: latestRegion is set to null on the first match, so the second
    // iteration would otherwise throw a NullPointerException.
    if (latestRegion != null
        && Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
      latestRegion = null;
    }
    Delete delete = new Delete(regionsToDelete[r]);
    table.delete(delete);
    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }
  newRegion.getRegionInfo().setOffline(true);
  MetaTableAccessor.addRegionToMeta(table, newRegion.getRegionInfo());
  if (LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: "
        + Bytes.toStringBinary(newRegion.getRegionInfo().getRegionName()));
  }
}
Example 2: startSplitTransaction
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
/**
 * Creates a new ephemeral node in the PENDING_SPLIT state for the specified region. The node is
 * created ephemeral so that it goes away if the regionserver dies mid-split.
 * <p>
 * Does not transition nodes from other states. If a node already exists for this region, an
 * Exception will be thrown.
 * @param parent region to be created as offline
 * @param serverName server event originates from
 * @param hri_a first daughter region
 * @param hri_b second daughter region
 * @throws IOException
 */
@Override
public void startSplitTransaction(HRegion parent, ServerName serverName, HRegionInfo hri_a,
    HRegionInfo hri_b) throws IOException {
  HRegionInfo region = parent.getRegionInfo();
  try {
    LOG.debug(watcher.prefix("Creating ephemeral node for " + region.getEncodedName()
        + " in PENDING_SPLIT state"));
    byte[] payload = HRegionInfo.toDelimitedByteArray(hri_a, hri_b);
    RegionTransition rt =
        RegionTransition.createRegionTransition(RS_ZK_REQUEST_REGION_SPLIT,
          region.getRegionName(), serverName, payload);
    String node = ZKAssign.getNodeName(watcher, region.getEncodedName());
    if (!ZKUtil.createEphemeralNodeAndWatch(watcher, node, rt.toByteArray())) {
      throw new IOException("Failed create of ephemeral " + node);
    }
  } catch (KeeperException e) {
    throw new IOException("Failed creating PENDING_SPLIT znode on "
        + parent.getRegionInfo().getRegionNameAsString(), e);
  }
}
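As a hedged usage sketch (not part of the original example): the two daughter HRegionInfos are typically derived from the parent's key range and a chosen split row. The names splitRow and coordination below are hypothetical stand-ins for the caller's context.

// Derive the daughters from the parent's key range, splitting at splitRow (assumed byte[]).
HRegionInfo parentInfo = parent.getRegionInfo();
HRegionInfo hri_a = new HRegionInfo(parentInfo.getTable(), parentInfo.getStartKey(), splitRow);
HRegionInfo hri_b = new HRegionInfo(parentInfo.getTable(), splitRow, parentInfo.getEndKey());
// coordination refers (hypothetically) to the object implementing startSplitTransaction above.
coordination.startSplitTransaction(parent, serverName, hri_a, hri_b);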
Example 3: initHRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
Region initHRegion(TableName tableName, String callingMethod,
    Configuration conf, Class<?> [] implClasses, byte [][] families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (byte [] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // The following is a hack: manually create a coprocessor host and attach it to the region.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
    Coprocessor c = host.findCoprocessor(implClass.getName());
    assertNotNull(c);
  }
  // Here we have to call preOpen and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
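A hedged usage sketch for the helper above; the table name is arbitrary, and SimpleRegionObserver stands in for any RegionObserver test coprocessor (assumed to be available on the classpath):

byte[][] families = { Bytes.toBytes("f1") };
Region region = initHRegion(TableName.valueOf("testCpLoading"), "testCpLoading",
    conf, new Class<?>[] { SimpleRegionObserver.class }, families);
// ... exercise the region, then release it:
((HRegion) region).close();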
Example 4: testRegionCoprocessorHostTableLoadingDisabled
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
@Test
public void testRegionCoprocessorHostTableLoadingDisabled() throws Exception {
  Configuration conf = new Configuration(CONF);
  conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true); // if defaults change
  conf.setBoolean(CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY, false);
  HRegion region = mock(HRegion.class);
  when(region.getRegionInfo()).thenReturn(REGIONINFO);
  when(region.getTableDesc()).thenReturn(TABLEDESC);
  RegionServerServices rsServices = mock(RegionServerServices.class);
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(region, rsServices, conf);
  assertTrue("System coprocessors should have been loaded",
      systemCoprocessorLoaded.get());
  assertFalse("Table coprocessors should not have been loaded",
      tableCoprocessorLoaded.get());
}
Example 5: internalGetTopBlockLocation
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
/**
 * Returns an ordered list of hosts that are hosting the blocks for this
 * region. The weight of each host is the sum of the block lengths of all
 * files on that host, so the first host in the list is the server which holds
 * the most bytes of the given region's HFiles.
 *
 * @param region region
 * @return ordered list of hosts holding blocks of the specified region
 */
protected HDFSBlocksDistribution internalGetTopBlockLocation(HRegionInfo region) {
  try {
    HTableDescriptor tableDescriptor = getTableDescriptor(region.getTable());
    if (tableDescriptor != null) {
      HDFSBlocksDistribution blocksDistribution =
          HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
      return blocksDistribution;
    }
  } catch (IOException ioe) {
    LOG.warn("IOException during HDFSBlocksDistribution computation for region = "
        + region.getEncodedName(), ioe);
  }
  return new HDFSBlocksDistribution();
}
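A hedged follow-up showing how the returned distribution might be consumed; getTopHosts() is part of HDFSBlocksDistribution in current HBase releases, but verify it against your version:

HDFSBlocksDistribution dist = internalGetTopBlockLocation(region);
List<String> hostsByWeight = dist.getTopHosts(); // ordered by descending block weight
if (!hostsByWeight.isEmpty()) {
  // the server holding the most bytes of the region's HFiles
  String bestHost = hostsByWeight.get(0);
}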
Example 6: handleRegionStartKeyNotEmpty
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
/**
 * This is a special-case hole -- when the first region of a table is
 * missing from META, HBase doesn't acknowledge the existence of the
 * table.
 */
@Override
public void handleRegionStartKeyNotEmpty(HbckInfo next) throws IOException {
  errors.reportError(ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
      "First region should start with an empty key. Creating a new " +
      "region and regioninfo in HDFS to plug the hole.",
      getTableInfo(), next);
  HTableDescriptor htd = getTableInfo().getHTD();
  // from special EMPTY_START_ROW to next region's startKey
  HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
      HConstants.EMPTY_START_ROW, next.getStartKey());
  // TODO test
  HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
  LOG.info("Table region start key was not empty. Created new empty region: "
      + newRegion + " " + region);
  fixes++;
}
Example 7: handleHoleInRegionChain
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
/**
 * There is a hole in the hdfs regions that violates the table integrity
 * rules. Create a new empty region that patches the hole.
 */
@Override
public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey) throws IOException {
  errors.reportError(
      ERROR_CODE.HOLE_IN_REGION_CHAIN,
      "There is a hole in the region chain between "
          + Bytes.toStringBinary(holeStartKey) + " and "
          + Bytes.toStringBinary(holeStopKey)
          + ". Creating a new regioninfo and region "
          + "dir in hdfs to plug the hole.");
  HTableDescriptor htd = getTableInfo().getHTD();
  HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
  HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
  LOG.info("Plugged hole by creating new empty region: " + newRegion + " " + region);
  fixes++;
}
Example 8: createRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
/**
 * Create a new region on the specified file-system.
 * @param conf {@link Configuration}
 * @param rootDir Root directory for the HBase instance
 * @param tableDir table directory
 * @param hTableDescriptor description of the table
 * @param newRegion {@link HRegionInfo} that describes the region to create
 * @param task {@link RegionFillTask} custom code to populate the region after creation
 * @throws IOException
 */
public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
    final Path tableDir, final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion,
    final RegionFillTask task) throws IOException {
  // 1. Create HRegion
  HRegion region = HRegion.createHRegion(newRegion,
      rootDir, tableDir, conf, hTableDescriptor, null,
      false, true);
  try {
    // 2. Custom user code to interact with the created region
    if (task != null) {
      task.fillRegion(region);
    }
  } finally {
    // 3. Close the new region to flush it to disk. Close the log file too.
    region.close();
  }
  return region.getRegionInfo();
}
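A hedged usage sketch: the RegionFillTask callback from the signature above lets the caller seed the region before it is flushed and closed. conf, rootDir, tableDir, htd, and newRegion are assumed to be in scope:

HRegionInfo created = createRegion(conf, rootDir, tableDir, htd, newRegion,
    new RegionFillTask() {
      @Override
      public void fillRegion(HRegion region) throws IOException {
        // Seed one row; createRegion closes (and thereby flushes) the region afterwards.
        Put put = new Put(Bytes.toBytes("row0"));
        put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        region.put(put);
      }
    });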
Example 9: getRegionArchiveDir
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
/**
 * Get the archive directory for a given region under the specified table
 * @param rootDir the root directory of the HBase instance. Cannot be null.
 * @param tableName the table name. Cannot be null.
 * @param regiondir the path to the region directory. Cannot be null.
 * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
 *         should not be archived
 */
public static Path getRegionArchiveDir(Path rootDir,
    TableName tableName,
    Path regiondir) {
  // get the archive directory for the table
  Path archiveDir = getTableArchivePath(rootDir, tableName);
  // then add the region path under the archive
  String encodedRegionName = regiondir.getName();
  return HRegion.getRegionDir(archiveDir, encodedRegionName);
}
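A hedged usage sketch; FSUtils.getRootDir and HRegion.getRegionDir(rootDir, info) are standard (if deprecated) HBase utilities, while conf and regionInfo are assumed to be in scope:

Path rootDir = FSUtils.getRootDir(conf);                    // e.g. hdfs://.../hbase
Path regionDir = HRegion.getRegionDir(rootDir, regionInfo); // the live region directory
// Resolve where this region's files would be moved on archival:
Path archiveDir = getRegionArchiveDir(rootDir, regionInfo.getTable(), regionDir);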
Example 10: reopenRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
Region reopenRegion(final Region closedRegion, Class<?> ... implClasses)
    throws IOException {
  //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Region r = HRegion.openHRegion(closedRegion, null);
  // The following is a hack. Currently a coprocessor host
  // is secretly loaded at OpenRegionHandler. We don't really
  // start a region server here, so just manually create the cphost
  // and set it on the region.
  Configuration conf = TEST_UTIL.getConfiguration();
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  ((HRegion)r).setCoprocessorHost(host);
  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
  }
  // We need to manually call preOpen and postOpen here since the
  // above load() is not the real path for CP loading. A CP is
  // expected to be loaded by default from 1) configuration; or 2)
  // HTableDescriptor. If it's loaded after the HRegion is initialized,
  // preOpen() and postOpen() won't be triggered automatically.
  // Here we have to call them explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
Example 11: preprocess
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
public static ConditionTree preprocess(HRegion region, Filter filter, float maxScale) {
  if (filter == null) return null;
  ConditionTree tree = null;
  if (isIndexFilter(region, filter)) {
    System.out.println("preprocess A");
    tree = new ConditionTreeNoneLeafNode(region, (SingleColumnValueFilter) filter, maxScale);
  } else if (filter instanceof FilterList) {
    System.out.println("preprocess B");
    tree = new ConditionTreeNoneLeafNode(region, (FilterList) filter, maxScale);
  }
  // Guard against filters that are neither an index filter nor a FilterList;
  // without this check, tree.isPrune() below would throw a NullPointerException.
  if (tree == null) {
    return null;
  }
  if (tree.isPrune()) {
    System.out.println("return null for prune");
    return null;
  }
  return tree;
}
Example 12: testMergeRegions
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
@Test (timeout=180000)
public void testMergeRegions() throws Exception {
  final TableName tname = TableName.valueOf("testMergeRegions");
  createTestTable(tname);
  try {
    final List<HRegion> regions = TEST_UTIL.getHBaseCluster().findRegionsForTable(tname);
    assertTrue("not enough regions: " + regions.size(), regions.size() >= 2);
    AccessTestAction action = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        ACCESS_CONTROLLER.preMerge(ObserverContext.createAndPrepare(RSCP_ENV, null),
          regions.get(0), regions.get(1));
        return null;
      }
    };
    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
      USER_GROUP_WRITE, USER_GROUP_CREATE);
  } finally {
    deleteTable(TEST_UTIL, tname);
  }
}
Example 13: createHRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
private HRegion createHRegion(byte [] tableName, String callingMethod,
    WAL log, Durability durability)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.setDurability(durability);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  if (FS.exists(path)) {
    if (!FS.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, CONF, htd, log);
}
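A hedged usage sketch for the factory method above; wal is assumed to be an existing WAL instance (e.g. obtained from a WALFactory), and FAMILY is the column-family constant already used in the method:

HRegion region = createHRegion(Bytes.toBytes("testDurability"), "testDurability",
    wal, Durability.ASYNC_WAL);
try {
  Put put = new Put(Bytes.toBytes("row"));
  put.add(FAMILY, Bytes.toBytes("q"), Bytes.toBytes("v"));
  region.put(put);
} finally {
  region.close(); // flushes memstore contents and releases the region
}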
Example 14: testRegionCoprocessorHostAllDisabled
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
@Test
public void testRegionCoprocessorHostAllDisabled() throws Exception {
  Configuration conf = new Configuration(CONF);
  conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, false);
  HRegion region = mock(HRegion.class);
  when(region.getRegionInfo()).thenReturn(REGIONINFO);
  when(region.getTableDesc()).thenReturn(TABLEDESC);
  RegionServerServices rsServices = mock(RegionServerServices.class);
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(region, rsServices, conf);
  assertFalse("System coprocessors should not have been loaded",
      systemCoprocessorLoaded.get());
  assertFalse("Table coprocessors should not have been loaded",
      tableCoprocessorLoaded.get());
}
Example 15: setUp
import org.apache.hadoop.hbase.regionserver.HRegion; // import the required package/class
@Before
public void setUp() throws Exception {
  HTableDescriptor htd = new HTableDescriptor(
      TableName.valueOf(TABLE_NAME_BYTES));
  htd.addFamily(new HColumnDescriptor(FAMILY_NAME_BYTES));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);
  Put put = new Put(ROW_BYTES);
  for (int i = 0; i < 10; i += 2) {
    // puts 0, 2, 4, 6 and 8
    put.add(FAMILY_NAME_BYTES, Bytes.toBytes(QUALIFIER_PREFIX + i), i,
        Bytes.toBytes(VALUE_PREFIX + i));
  }
  this.region.put(put);
  this.region.flush(true);
}
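To round off the setup, a hedged read-back sketch (Get/Result imports from org.apache.hadoop.hbase.client assumed): the single row written above should come back with exactly the five even-numbered qualifiers.

Get get = new Get(ROW_BYTES);
Result result = region.get(get);
assertEquals(5, result.size()); // qualifiers 0, 2, 4, 6 and 8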