This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.HRegion.createHRegion. If you are wondering what exactly HRegion.createHRegion does, how to call it, or where to find examples of it in use, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.HRegion.
Below are 15 code examples of HRegion.createHRegion, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
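Before the examples, here is a minimal, self-contained sketch of the most common overload. The table name, column family, and root directory below are hypothetical placeholders, and the exact set of overloads varies across HBase 1.x releases, so treat this as an illustration rather than a canonical recipe.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class CreateHRegionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Describe a table with one column family ("demo_table" and "cf" are placeholders).
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
    htd.addFamily(new HColumnDescriptor("cf"));
    // One region covering the whole key space (null start and end keys).
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    // Hypothetical root directory; tests usually point this at a temp dir.
    Path rootDir = new Path("/tmp/hbase-demo");
    HRegion region = HRegion.createHRegion(info, rootDir, conf, htd);
    try {
      // ... interact with the region (put, get, scan) ...
    } finally {
      region.close(); // flush the memstore and release resources
    }
  }
}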
Example 1: bootstrap
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
LOG.info("BOOTSTRAP: creating hbase:meta region");
try {
// Bootstrapping: make sure the block cache is off, else one will be
// created here during bootstrap and will need to be cleaned up. Better
// not to create it in the first place, so turn off block caching for
// bootstrap and re-enable it after.
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
setInfoFamilyCachingForMeta(metaDescriptor, false);
HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null, true, true);
setInfoFamilyCachingForMeta(metaDescriptor, true);
HRegion.closeHRegion(meta);
} catch (IOException e) {
e = RemoteExceptionHandler.checkIOException(e);
LOG.error("bootstrap", e);
throw e;
}
}
Example 2: createRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
/**
* Create a new region on the specified file system.
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
* @param tableDir table directory
* @param hTableDescriptor description of the table
* @param newRegion {@link HRegionInfo} that describes the region to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
final Path tableDir, final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion,
final RegionFillTask task) throws IOException {
// 1. Create HRegion
HRegion region = HRegion.createHRegion(newRegion,
rootDir, tableDir, conf, hTableDescriptor, null,
false, true);
try {
// 2. Custom user code to interact with the created region
if (task != null) {
task.fillRegion(region);
}
} finally {
// 3. Close the new region to flush to disk. Close log file too.
region.close();
}
return region.getRegionInfo();
}
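As a hedged usage sketch of the helper above: the method name, table, and row below are illustrative, and conf, rootDir, and tableDir are assumed to be in scope (e.g. fields of the enclosing class). RegionFillTask is the callback interface invoked between region creation and close.

// Hypothetical call site for the createRegion helper above.
HRegionInfo cloneRegion(final Configuration conf, final Path rootDir, final Path tableDir)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("clone_table"));
  htd.addFamily(new HColumnDescriptor("cf"));
  HRegionInfo hri = new HRegionInfo(htd.getTableName(), null, null, false);
  return createRegion(conf, rootDir, tableDir, htd, hri, new RegionFillTask() {
    @Override
    public void fillRegion(HRegion region) throws IOException {
      // Populate the region before it is closed and flushed to disk.
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      region.put(put);
    }
  });
}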
Example 3: setUp
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
@Before
public void setUp() throws Exception {
HTableDescriptor htd = new HTableDescriptor(
TableName.valueOf(TABLE_NAME_BYTES));
htd.addFamily(new HColumnDescriptor(FAMILY_NAME_BYTES));
HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
TEST_UTIL.getConfiguration(), htd);
Put put = new Put(ROW_BYTES);
for (int i = 0; i < 10; i += 2) {
// puts 0, 2, 4, 6 and 8
put.add(FAMILY_NAME_BYTES, Bytes.toBytes(QUALIFIER_PREFIX + i), i,
Bytes.toBytes(VALUE_PREFIX + i));
}
this.region.put(put);
this.region.flush(true);
}
Example 4: initHRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
Region initHRegion (TableName tableName, String callingMethod,
Configuration conf, Class<?> [] implClasses, byte [][] families)
throws IOException {
HTableDescriptor htd = new HTableDescriptor(tableName);
for(byte [] family : families) {
htd.addFamily(new HColumnDescriptor(family));
}
HRegionInfo info = new HRegionInfo(tableName, null, null, false);
Path path = new Path(DIR + callingMethod);
HRegion r = HRegion.createHRegion(info, path, conf, htd);
// The following piece is a hack: manually create the coprocessor host,
// since no region server is started here to load it.
RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
r.setCoprocessorHost(host);
for (Class<?> implClass : implClasses) {
host.load(implClass, Coprocessor.PRIORITY_USER, conf);
Coprocessor c = host.findCoprocessor(implClass.getName());
assertNotNull(c);
}
// Here we have to call pre and postOpen explicitly.
host.preOpen();
host.postOpen();
return r;
}
Example 5: createHRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
private HRegion createHRegion (byte [] tableName, String callingMethod,
WAL log, Durability durability)
throws IOException {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
htd.setDurability(durability);
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
Path path = new Path(DIR + callingMethod);
if (FS.exists(path)) {
if (!FS.delete(path, true)) {
throw new IOException("Failed delete of " + path);
}
}
return HRegion.createHRegion(info, path, CONF, htd, log);
}
Example 6: initHRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
Region initHRegion(byte[] tableName, String callingMethod, Configuration conf,
byte[]... families) throws IOException {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
for (byte[] family : families) {
htd.addFamily(new HColumnDescriptor(family));
}
HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
Path path = new Path(DIR + callingMethod);
HRegion r = HRegion.createHRegion(info, path, conf, htd);
// The following piece is a hack: a coprocessorHost is normally
// loaded secretly by OpenRegionHandler. We don't actually start a
// region server here, so just manually create the cphost
// and set it on the region.
RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
r.setCoprocessorHost(host);
return r;
}
Example 7: createNewMeta
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
/**
* This borrows code from MasterFileSystem.bootstrap()
*
* @return an open hbase:meta HRegion
*/
private HRegion createNewMeta() throws IOException {
Path rootdir = FSUtils.getRootDir(getConf());
Configuration c = getConf();
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor);
MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
return meta;
}
Example 8: createRegionAndWAL
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
/**
* Create a region with its own WAL. Be sure to call
* {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
*/
public static HRegion createRegionAndWAL(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor htd, boolean initialize)
throws IOException {
WAL wal = createWal(conf, rootDir, info);
return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
}
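A sketch of how a test might pair this helper with the cleanup call mentioned in its javadoc; info, rootDir, conf, and htd are assumed to be in scope.

// Sketch: typical create/cleanup pairing in a test.
HRegion region = createRegionAndWAL(info, rootDir, conf, htd, true);
try {
  // ... exercise the region ...
} finally {
  // Closes the region and shuts down its WAL (including the sync thread).
  HBaseTestingUtility.closeRegionAndWAL(region);
}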
Example 9: createHRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
/**
* Create an HRegion. Be sure to call {@link HBaseTestingUtility#closeRegion(Region)}
* when you're finished with it.
*/
public HRegion createHRegion(
final HRegionInfo info,
final Path rootDir,
final Configuration conf,
final HTableDescriptor htd) throws IOException {
return HRegion.createHRegion(info, rootDir, conf, htd);
}
Example 10: createRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
HRegion createRegion(final HRegionInfo hri, final Path rootdir, final Configuration c,
final HTableDescriptor htd)
throws IOException {
HRegion r = HRegion.createHRegion(hri, rootdir, c, htd);
// The above call to create a region also creates a WAL file. Each
// log-file creation also starts a background thread to do syncing. We
// need to close out this log, else we will have a running thread trying
// to sync the file system continuously, which is ugly when DFS is taken
// away at the end of the test.
HRegion.closeHRegion(r);
return r;
}
Example 11: createRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
private HRegion createRegion(final HTableDescriptor desc,
byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
throws IOException {
HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc);
LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString());
for(int i = firstRow; i < firstRow + nrows; i++) {
Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
put.setDurability(Durability.SKIP_WAL);
put.add(COLUMN_NAME, null, VALUE);
region.put(put);
if (i % 10000 == 0) {
LOG.info("Flushing write #" + i);
region.flush(true);
}
}
HRegion.closeHRegion(region);
return region;
}
Example 12: createNewHRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
public HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey,
byte [] endKey, Configuration conf)
throws IOException {
HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
return HRegion.createHRegion(hri, testDir, conf, desc);
}
Example 13: createMetaRegion
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
/**
* You must call {@link #closeRootAndMeta()} when you are done after calling
* this method; it performs the cleanup.
* @throws IOException
*/
protected void createMetaRegion() throws IOException {
FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf);
meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir, conf,
fsTableDescriptors.get(TableName.META_TABLE_NAME));
}
Example 14: testScanLimitAndOffset
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
/**
* Test, from the client side, a scan with maxResultsPerColumnFamily set
*
* @throws Exception
*/
@Test
public void testScanLimitAndOffset() throws Exception {
//byte [] TABLE = HTestConst.DEFAULT_TABLE_BYTES;
byte [][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2);
byte [][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3);
byte [][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(HTestConst.DEFAULT_TABLE_BYTES));
HRegionInfo info = new HRegionInfo(HTestConst.DEFAULT_TABLE, null, null, false);
for (byte[] family : FAMILIES) {
HColumnDescriptor hcd = new HColumnDescriptor(family);
htd.addFamily(hcd);
}
HRegion region =
HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
try {
Put put;
Scan scan;
Result result;
boolean toLog = true;
List<Cell> kvListExp = new ArrayList<Cell>();
int storeOffset = 1;
int storeLimit = 3;
for (int r = 0; r < ROWS.length; r++) {
put = new Put(ROWS[r]);
for (int c = 0; c < FAMILIES.length; c++) {
for (int q = 0; q < QUALIFIERS.length; q++) {
KeyValue kv = new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1,
HTestConst.DEFAULT_VALUE_BYTES);
put.add(kv);
if (storeOffset <= q && q < storeOffset + storeLimit) {
kvListExp.add(kv);
}
}
}
region.put(put);
}
scan = new Scan();
scan.setRowOffsetPerColumnFamily(storeOffset);
scan.setMaxResultsPerColumnFamily(storeLimit);
RegionScanner scanner = region.getScanner(scan);
List<Cell> kvListScan = new ArrayList<Cell>();
List<Cell> results = new ArrayList<Cell>();
while (scanner.next(results) || !results.isEmpty()) {
kvListScan.addAll(results);
results.clear();
}
result = Result.create(kvListScan);
TestScannersFromClientSide.verifyResult(result, kvListExp, toLog,
"Testing scan with storeOffset and storeLimit");
} finally {
region.close();
}
}
Example 15: testRegionServerAbortionDueToFailureTransitioningToOpened
import org.apache.hadoop.hbase.regionserver.HRegion; // import the class the method depends on
/**
* Test that the OpenRegionHandler can deal with a perceived failure to transition
* to the OPENED state due to intermittent ZooKeeper malfunction.
* @see <a href="https://issues.apache.org/jira/browse/HBASE-9387">HBASE-9387</a>
* @throws IOException
* @throws NodeExistsException
* @throws KeeperException
*/
@Test
public void testRegionServerAbortionDueToFailureTransitioningToOpened()
throws IOException, NodeExistsException, KeeperException {
final Server server = new MockServer(HTU);
final RegionServerServices rss = HTU.createMockRegionServerService();
HTableDescriptor htd = TEST_HTD;
final HRegionInfo hri = TEST_HRI;
HRegion region =
HRegion.createHRegion(hri, HTU.getDataTestDir(), HTU
.getConfiguration(), htd);
assertNotNull(region);
try {
ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager();
csm.initialize(server);
csm.start();
ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd =
new ZkOpenRegionCoordination.ZkOpenRegionDetails();
zkCrd.setServerName(server.getServerName());
ZkOpenRegionCoordination openRegionCoordination =
new ZkOpenRegionCoordination(csm, server.getZooKeeper()) {
@Override
public boolean transitionToOpened(final HRegion r, OpenRegionDetails ord)
throws IOException {
// remove znode simulating intermittent zookeeper connection issue
ZooKeeperWatcher zkw = server.getZooKeeper();
String node = ZKAssign.getNodeName(zkw, hri.getEncodedName());
try {
ZKUtil.deleteNodeFailSilent(zkw, node);
} catch (KeeperException e) {
throw new RuntimeException("Ugh failed delete of " + node, e);
}
// then try to transition to OPENED
return super.transitionToOpened(r, ord);
}
};
OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd,
-1, openRegionCoordination, zkCrd);
rss.getRegionsInTransitionInRS().put(
hri.getEncodedNameAsBytes(), Boolean.TRUE);
// Call process() without first creating the OFFLINE znode in ZK, and see
// whether we get an exception or just a quiet return (the latter is expected).
handler.process();
rss.getRegionsInTransitionInRS().put(
hri.getEncodedNameAsBytes(), Boolean.TRUE);
ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
// Call process again but this time yank the zk znode out from under it
// post OPENING; again will expect it to come back w/o NPE or exception.
handler.process();
} catch (IOException ioe) {
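// Intentionally swallowed: yanking the znode can surface an IOException from
// process(); the test only cares that the region server aborts (asserted below).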
} finally {
HRegion.closeHRegion(region);
}
// The region server is expected to abort because OpenRegionHandler perceived
// the transition to OPENED as failed; this corresponds to the second
// handler.process() call above.
assertTrue("region server should have aborted", server.isAborted());
}