This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.client.HBaseAdmin.split. If you are wondering what HBaseAdmin.split does, how to call it, or what real uses of it look like, the curated code samples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.client.HBaseAdmin.
The following presents 8 code examples of the HBaseAdmin.split method, sorted by popularity by default.
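Before the examples, here is a minimal, self-contained sketch of the basic call pattern. It assumes the same pre-1.0 HBaseAdmin API used throughout the examples below; the table name "my_table" and the split key "row-5000" are placeholders, not taken from any example. Note that split() only submits the request and returns before the split has actually completed, which is why every example below waits or polls afterwards.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class SplitSketch {
  public static void main(String[] args) throws Exception {
    // Connect using the cluster settings found on the classpath (hbase-site.xml).
    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      // Ask the master to split every region of the table at its natural midpoint.
      admin.split("my_table");
      // Or split at an explicit row key; the first argument may also be a single region name.
      admin.split("my_table", "row-5000");
    } finally {
      admin.close();
    }
  }
}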
Example 1: perform
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  HBaseAdmin admin = util.getHBaseAdmin();
  LOG.info("Performing action: Split random region of table " + tableName);
  List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
  if (regions == null || regions.isEmpty()) {
    LOG.info("Table " + tableName + " doesn't have regions to split");
    return;
  }
  HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
      regions.toArray(new HRegionInfo[regions.size()]));
  LOG.debug("Splitting region " + region.getRegionNameAsString());
  try {
    admin.split(region.getRegionName());
  } catch (Exception ex) {
    LOG.warn("Split failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
Example 2: testPersistence
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Test
public void testPersistence() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  AccessControlLists.addUserPermission(conf,
      new UserPermission(Bytes.toBytes("albert"), TEST_TABLE, null,
          (byte[]) null, TablePermission.Action.READ));
  AccessControlLists.addUserPermission(conf,
      new UserPermission(Bytes.toBytes("betty"), TEST_TABLE, null,
          (byte[]) null, TablePermission.Action.READ,
          TablePermission.Action.WRITE));
  AccessControlLists.addUserPermission(conf,
      new UserPermission(Bytes.toBytes("clark"),
          TEST_TABLE, TEST_FAMILY,
          TablePermission.Action.READ));
  AccessControlLists.addUserPermission(conf,
      new UserPermission(Bytes.toBytes("dwight"),
          TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER,
          TablePermission.Action.WRITE));

  // verify permissions survive changes in table metadata
  ListMultimap<String, TablePermission> preperms =
      AccessControlLists.getTablePermissions(conf, TEST_TABLE);

  HTable table = new HTable(conf, TEST_TABLE);
  table.put(new Put(Bytes.toBytes("row1"))
      .add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v1")));
  table.put(new Put(Bytes.toBytes("row2"))
      .add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));

  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.split(TEST_TABLE.getName());

  // wait for split
  Thread.sleep(10000);

  ListMultimap<String, TablePermission> postperms =
      AccessControlLists.getTablePermissions(conf, TEST_TABLE);

  checkMultimapEqual(preperms, postperms);
}
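Example 2 waits a fixed ten seconds for the split to finish. A less brittle alternative, sketched below under the assumption of the same HBaseAdmin API, is to poll getTableRegions() (the call already used in Examples 1 and 6) until the region count reaches the expected value or a deadline passes. The helper name waitForSplit and the 30-second deadline are made up for illustration.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public final class SplitWait {
  /** Hypothetical helper: block until the table reports at least expectedRegions regions. */
  public static void waitForSplit(HBaseAdmin admin, byte[] tableName, int expectedRegions)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + 30 * 1000;
    while (System.currentTimeMillis() < deadline) {
      List<HRegionInfo> regions = admin.getTableRegions(tableName);
      if (regions != null && regions.size() >= expectedRegions) {
        return; // the split has been reported by the master
      }
      Thread.sleep(250);
    }
    throw new IOException("Table did not reach " + expectedRegions + " regions in time");
  }
}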
Example 3: setUpBeforeClass
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  client = new Client(new Cluster().add("localhost",
      REST_TEST_UTIL.getServletPort()));
  context = JAXBContext.newInstance(
      TableModel.class,
      TableInfoModel.class,
      TableListModel.class,
      TableRegionModel.class);
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  admin.createTable(htd);
  HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
  for (byte b1 = 'a'; b1 < 'z'; b1++) {
    for (byte b2 = 'a'; b2 < 'z'; b2++) {
      for (byte b3 = 'a'; b3 < 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.setWriteToWAL(false);
        put.add(famAndQf[0], famAndQf[1], k);
        table.put(put);
      }
    }
  }
  table.flushCommits();
  // get the initial layout (should just be one region)
  Map<HRegionInfo, HServerAddress> m = table.getRegionsInfo();
  assertEquals(m.size(), 1);
  // tell the master to split the table
  admin.split(TABLE);
  // give some time for the split to happen
  long timeout = System.currentTimeMillis() + (15 * 1000);
  while (System.currentTimeMillis() < timeout && m.size() != 2) {
    try {
      Thread.sleep(250);
    } catch (InterruptedException e) {
      LOG.warn(StringUtils.stringifyException(e));
    }
    // check again
    m = table.getRegionsInfo();
  }
  // should have two regions now
  assertEquals(m.size(), 2);
  regionMap = m;
  LOG.info("regions: " + regionMap);
  table.close();
}
Example 4: testValidLingeringSplitParent
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
/**
 * Tests that LINGERING_SPLIT_PARENT is not erroneously reported for
 * valid cases where the daughters are there.
 */
@Test
public void testValidLingeringSplitParent() throws Exception {
  String table = "testLingeringSplitParent";
  HTable meta = null;
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());

    // make sure data in regions, if in hlog only there is no data loss
    TEST_UTIL.getHBaseAdmin().flush(table);
    HRegionLocation location = tbl.getRegionLocation("B");

    meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getName());
    HRegionInfo hri = location.getRegionInfo();

    // do a regular split
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    byte[] regionName = location.getRegionInfo().getRegionName();
    admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
    TestEndToEndSplitTransaction.blockUntilRegionSplit(
        TEST_UTIL.getConfiguration(), 60000, regionName, true);

    // TODO: fixHdfsHoles does not work against splits, since the parent dir lingers on
    // for some time until children references are deleted. HBCK erroneously sees this as
    // overlapping regions
    HBaseFsck hbck = doFsck(conf, true, true, false, false, false, true, true, true, null);
    assertErrors(hbck, new ERROR_CODE[] {}); // no LINGERING_SPLIT_PARENT reported

    // assert that the split META entry is still there.
    Get get = new Get(hri.getRegionName());
    Result result = meta.get(get);
    assertNotNull(result);
    assertNotNull(MetaReader.parseCatalogResult(result).getFirst());

    assertEquals(ROWKEYS.length, countRows());

    // assert that we still have the split regions
    assertEquals(tbl.getStartKeys().length, SPLITS.length + 1 + 1); // SPLITS + 1 is # regions pre-split.
    assertNoErrors(doFsck(conf, false));
  } finally {
    deleteTable(table);
    IOUtils.closeQuietly(meta);
  }
}
Example 5: setUpBeforeClass
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  client = new Client(new Cluster().add("localhost",
      REST_TEST_UTIL.getServletPort()));
  context = JAXBContext.newInstance(
      TableModel.class,
      TableInfoModel.class,
      TableListModel.class,
      TableRegionModel.class);
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
  htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  admin.createTable(htd);
  HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
  for (byte b1 = 'a'; b1 < 'z'; b1++) {
    for (byte b2 = 'a'; b2 < 'z'; b2++) {
      for (byte b3 = 'a'; b3 < 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.setDurability(Durability.SKIP_WAL);
        put.add(famAndQf[0], famAndQf[1], k);
        table.put(put);
      }
    }
  }
  table.flushCommits();
  // get the initial layout (should just be one region)
  Map<HRegionInfo, ServerName> m = table.getRegionLocations();
  assertEquals(m.size(), 1);
  // tell the master to split the table
  admin.split(TABLE);
  // give some time for the split to happen
  long timeout = System.currentTimeMillis() + (15 * 1000);
  while (System.currentTimeMillis() < timeout && m.size() != 2) {
    try {
      Thread.sleep(250);
    } catch (InterruptedException e) {
      LOG.warn(StringUtils.stringifyException(e));
    }
    // check again
    m = table.getRegionLocations();
  }
  // should have two regions now
  assertEquals(m.size(), 2);
  regionMap = m;
  LOG.info("regions: " + regionMap);
  table.close();
}
Example 6: testAssignmentListener
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Test(timeout=60000)
public void testAssignmentListener() throws IOException, InterruptedException {
  AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();

  DummyAssignmentListener listener = new DummyAssignmentListener();
  am.registerListener(listener);
  try {
    final String TABLE_NAME_STR = "testtb";
    final TableName TABLE_NAME = TableName.valueOf(TABLE_NAME_STR);
    final byte[] FAMILY = Bytes.toBytes("cf");

    // Create a new table, with a single region
    LOG.info("Create Table");
    TEST_UTIL.createTable(TABLE_NAME, FAMILY);
    listener.awaitModifications(1);
    assertEquals(1, listener.getLoadCount());
    assertEquals(0, listener.getCloseCount());

    // Add some data
    HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE_NAME);
    try {
      for (int i = 0; i < 10; ++i) {
        byte[] key = Bytes.toBytes("row-" + i);
        Put put = new Put(key);
        put.add(FAMILY, null, key);
        table.put(put);
      }
    } finally {
      table.close();
    }

    // Split the table in two
    LOG.info("Split Table");
    listener.reset();
    admin.split(TABLE_NAME_STR, "row-3");
    listener.awaitModifications(3);
    assertEquals(2, listener.getLoadCount());  // daughters added
    assertEquals(1, listener.getCloseCount()); // parent removed

    // Wait for the Regions to be mergeable
    MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster();
    int mergeable = 0;
    while (mergeable < 2) {
      Thread.sleep(100);
      admin.majorCompact(TABLE_NAME_STR);
      mergeable = 0;
      for (JVMClusterUtil.RegionServerThread regionThread: miniCluster.getRegionServerThreads()) {
        for (HRegion region: regionThread.getRegionServer().getOnlineRegions(TABLE_NAME)) {
          mergeable += region.isMergeable() ? 1 : 0;
        }
      }
    }

    // Merge the two regions
    LOG.info("Merge Regions");
    listener.reset();
    List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
    assertEquals(2, regions.size());
    admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
        regions.get(1).getEncodedNameAsBytes(), true);
    listener.awaitModifications(3);
    assertEquals(1, admin.getTableRegions(TABLE_NAME).size());
    assertEquals(1, listener.getLoadCount());  // new merged region added
    assertEquals(2, listener.getCloseCount()); // daughters removed

    // Delete the table
    LOG.info("Drop Table");
    listener.reset();
    TEST_UTIL.deleteTable(TABLE_NAME);
    listener.awaitModifications(1);
    assertEquals(0, listener.getLoadCount());
    assertEquals(1, listener.getCloseCount());
  } finally {
    am.unregisterListener(listener);
  }
}
Example 7: testValidLingeringSplitParent
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
/**
 * Tests that LINGERING_SPLIT_PARENT is not erroneously reported for
 * valid cases where the daughters are there.
 */
@Test
public void testValidLingeringSplitParent() throws Exception {
  TableName table =
      TableName.valueOf("testLingeringSplitParent");
  HTable meta = null;
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());

    // make sure data in regions, if in hlog only there is no data loss
    TEST_UTIL.getHBaseAdmin().flush(table.getName());
    HRegionLocation location = tbl.getRegionLocation("B");

    meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
    HRegionInfo hri = location.getRegionInfo();

    // do a regular split
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    byte[] regionName = location.getRegionInfo().getRegionName();
    admin.split(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
    TestEndToEndSplitTransaction.blockUntilRegionSplit(
        TEST_UTIL.getConfiguration(), 60000, regionName, true);

    // TODO: fixHdfsHoles does not work against splits, since the parent dir lingers on
    // for some time until children references are deleted. HBCK erroneously sees this as
    // overlapping regions
    HBaseFsck hbck = doFsck(conf, true, true, false, false, false, true, true, true, false, false, null);
    assertErrors(hbck, new ERROR_CODE[] {}); // no LINGERING_SPLIT_PARENT reported

    // assert that the split hbase:meta entry is still there.
    Get get = new Get(hri.getRegionName());
    Result result = meta.get(get);
    assertNotNull(result);
    assertNotNull(HRegionInfo.getHRegionInfo(result));

    assertEquals(ROWKEYS.length, countRows());

    // assert that we still have the split regions
    assertEquals(tbl.getStartKeys().length, SPLITS.length + 1 + 1); // SPLITS + 1 is # regions pre-split.
    assertNoErrors(doFsck(conf, false));
  } finally {
    deleteTable(table);
    IOUtils.closeQuietly(meta);
  }
}
Example 8: testWithFastDiffEncoding
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class the method depends on
@Test(timeout = 180000)
public void testWithFastDiffEncoding() throws Exception {
  HBaseAdmin admin = new IndexAdmin(UTIL.getConfiguration());
  String tableName = "testWithFastDiffEncoding";
  String idxTableName = "testWithFastDiffEncoding_idx";
  HTableDescriptor htd = TestUtils.createIndexedHTableDescriptor(tableName, CF1, "idx1", CF1, COL1);
  admin.createTable(htd);

  HTable ht = new HTable(UTIL.getConfiguration(), tableName);
  HTable hti = new HTable(UTIL.getConfiguration(), idxTableName);

  Put put = new Put("a".getBytes());
  put.add(CF1.getBytes(), COL1.getBytes(), "1".getBytes());
  ht.put(put);
  put = new Put("d".getBytes());
  put.add(CF1.getBytes(), COL1.getBytes(), "1".getBytes());
  ht.put(put);
  put = new Put("k".getBytes());
  put.add(CF1.getBytes(), COL1.getBytes(), "1".getBytes());
  ht.put(put);
  put = new Put("z".getBytes());
  put.add(CF1.getBytes(), COL1.getBytes(), "1".getBytes());
  ht.put(put);

  Delete delete = new Delete("z".getBytes());
  ht.delete(delete);

  admin.flush(tableName);
  admin.flush(idxTableName);

  NavigableMap<HRegionInfo, ServerName> regionLocations = ht.getRegionLocations();
  byte[] regionName = null;
  for (Entry<HRegionInfo, ServerName> e : regionLocations.entrySet()) {
    regionName = e.getKey().getRegionName();
    break;
  }

  // Splitting the single region.
  admin.split(regionName, "e".getBytes());
  // Sleeping so that the compaction can complete.
  // Split will initiate a compaction.
  Thread.sleep(5 * 1000);

  Scan scan = new Scan();
  ResultScanner scanner = hti.getScanner(scan);
  Result res = scanner.next();
  int count = 0;
  while (res != null) {
    count++;
    res = scanner.next();
  }
  assertEquals(3, count);
  admin.close();
}