This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.ModifyRegionUtils. If you have been wondering what ModifyRegionUtils is for and how to use it, the curated examples below may help.
The ModifyRegionUtils class belongs to the org.apache.hadoop.hbase.util package. 15 code examples are shown below, sorted by popularity by default.
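Before the individual examples, here is a minimal, self-contained sketch of the entry point they nearly all share: deriving the region boundaries for a new table from a table descriptor and optional split keys. This is a sketch assuming the HBase 2.x client API used in examples 5 through 10 (the earlier examples use the equivalent HTableDescriptor/createHRegionInfos variant); the table name "demo" and column family "f" are hypothetical, not taken from the examples.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;

public class ModifyRegionUtilsSketch {
  // Compute the RegionInfo[] for a two-region table split at row key "m".
  // Passing null split keys instead yields a single region covering the whole key space.
  static RegionInfo[] regionsForDemoTable() {
    TableDescriptor htd = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))                  // hypothetical table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")) // hypothetical family
        .build();
    byte[][] splitKeys = { Bytes.toBytes("m") }; // regions: [start, "m") and ["m", end)
    return ModifyRegionUtils.createRegionInfos(htd, splitKeys);
  }
}

The resulting array is what the examples below hand to CreateTableProcedure as the new table's region layout.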
Example 1: assignRegions
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
protected static void assignRegions(final MasterProcedureEnv env,
    final TableName tableName, final List<HRegionInfo> regions)
    throws HBaseException, IOException {
  ProcedureSyncWait.waitRegionServers(env);
  final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager();

  // Mark the table as Enabling
  assignmentManager.getTableStateManager().setTableState(tableName,
    ZooKeeperProtos.Table.State.ENABLING);

  // Trigger immediate assignment of the regions in round-robin fashion
  ModifyRegionUtils.assignRegions(assignmentManager, regions);

  // Enable table
  assignmentManager.getTableStateManager()
    .setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
}
Example 2: testCreateExisting
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
@Test(timeout=60000, expected=TableExistsException.class)
public void testCreateExisting() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateExisting");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);

  // create the table
  long procId1 = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  // create another with the same name
  ProcedurePrepareLatch latch2 = new ProcedurePrepareLatch.CompatibilityLatch();
  long procId2 = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions, latch2),
    nonceGroup + 1,
    nonce + 1);

  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));
  ProcedureTestingUtility.waitProcedure(procExec, procId2);
  latch2.await();
}
Example 3: testCreateTwiceWithSameNonce
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
@Test(timeout=60000)
public void testCreateTwiceWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateTwiceWithSameNonce");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);

  // create the table
  long procId1 = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  // create another with the same name
  long procId2 = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));
  ProcedureTestingUtility.waitProcedure(procExec, procId2);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
  assertTrue(procId1 == procId2);
}
Example 4: testRecoveryAndDoubleExecution
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
@Test(timeout=60000)
public void testRecoveryAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");

  // create the table
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Start the Create procedure && kill the executor
  byte[][] splitKeys = null;
  HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
  HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
  long procId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  // Restart the executor and execute the step twice
  // NOTE: the 6 (number of CreateTableState steps) is hardcoded,
  // so you have to look at this test at least once when you add a new step.
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
    procExec, procId, 6, CreateTableState.values());

  MasterProcedureTestingUtility.validateTableCreation(
    UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
}
Example 5: createSystemTable
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
@Override
public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
  if (isStopped()) {
    throw new MasterNotRunningException();
  }

  TableName tableName = tableDescriptor.getTableName();
  if (!(tableName.isSystemTable())) {
    throw new IllegalArgumentException(
      "Only system table creation can use this createSystemTable API");
  }

  RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, null);

  LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);

  // This special create table is called locally to master. Therefore, no RPC means no need
  // to use nonce to detect duplicated RPC call.
  long procId = this.procedureExecutor.submitProcedure(
    new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions));

  return procId;
}
Example 6: testCreateWithFailoverAtStep
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
private void testCreateWithFailoverAtStep(final int step) throws Exception {
  final TableName tableName = TableName.valueOf("testCreateWithFailoverAtStep" + step);

  // create the table
  ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
  ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);

  // Start the Create procedure && kill the executor
  byte[][] splitKeys = null;
  TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
  RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys);
  long procId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
  testRecoveryAndDoubleExecution(UTIL, procId, step);

  MasterProcedureTestingUtility.validateTableCreation(
    UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
}
Example 7: testCreateWithoutColumnFamily
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
@Test
public void testCreateWithoutColumnFamily() throws Exception {
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  final TableName tableName = TableName.valueOf(name.getMethodName());

  // create table with 0 families will fail
  final TableDescriptorBuilder builder =
    TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName));

  // disable sanity check
  builder.setValue("hbase.table.sanity.checks", Boolean.FALSE.toString());
  TableDescriptor htd = builder.build();
  final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null);

  long procId = ProcedureTestingUtility.submitAndWait(procExec,
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
  final Procedure<?> result = procExec.getResult(procId);
  assertEquals(true, result.isFailed());
  Throwable cause = ProcedureTestingUtility.getExceptionCause(result);
  assertTrue("expected DoNotRetryIOException, got " + cause,
    cause instanceof DoNotRetryIOException);
}
Example 8: testCreateExisting
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
@Test(expected=TableExistsException.class)
public void testCreateExisting() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
  final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null);

  // create the table
  long procId1 = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions));

  // create another with the same name
  ProcedurePrepareLatch latch2 = new ProcedurePrepareLatch.CompatibilityLatch();
  long procId2 = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions, latch2));

  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));
  ProcedureTestingUtility.waitProcedure(procExec, procId2);
  latch2.await();
}
Example 9: testRecoveryAndDoubleExecution
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
@Test
public void testRecoveryAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());

  // create the table
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Start the Create procedure && kill the executor
  byte[][] splitKeys = null;
  TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
  RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys);
  long procId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions));

  // Restart the executor and execute the step twice
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
  MasterProcedureTestingUtility.validateTableCreation(getMaster(), tableName, regions, F1, F2);
}
Example 10: testRollbackAndDoubleExecution
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
private void testRollbackAndDoubleExecution(TableDescriptorBuilder builder) throws Exception {
  // create the table
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Start the Create procedure && kill the executor
  final byte[][] splitKeys = new byte[][] {
    Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
  };
  builder.setRegionReplication(3);
  TableDescriptor htd = builder.build();
  RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys);
  long procId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions));

  int numberOfSteps = 0; // failing at pre operation
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps);

  TableName tableName = htd.getTableName();
  MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName);

  // are we able to create the table after a rollback?
  resetProcExecutorTestingKillFlag();
  testSimpleCreate(tableName, splitKeys);
}
Example 11: handleCreateHdfsRegions
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
/**
 * Create the on-disk structure for the table, and return the regions info.
 * @param tableRootDir directory where the table is being created
 * @param tableName name of the table under construction
 * @return the list of regions created
 */
protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
    final TableName tableName) throws IOException {
  return ModifyRegionUtils.createRegions(conf, tableRootDir,
    hTableDescriptor, newRegions, null);
}
Example 12: createTable
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
@Override
public long createTable(
    final HTableDescriptor hTableDescriptor,
    final byte[][] splitKeys,
    final long nonceGroup,
    final long nonce) throws IOException {
  if (isStopped()) {
    throw new MasterNotRunningException();
  }

  String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
  ensureNamespaceExists(namespace);

  HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
  checkInitialized();
  sanityCheckTableDescriptor(hTableDescriptor);

  if (cpHost != null) {
    cpHost.preCreateTable(hTableDescriptor, newRegions);
  }

  LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);

  // TODO: We can handle/merge duplicate requests, and differentiate the case of
  // TableExistsException by saying if the schema is the same or not.
  ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
  long procId = this.procedureExecutor.submitProcedure(
    new CreateTableProcedure(
      procedureExecutor.getEnvironment(), hTableDescriptor, newRegions, latch),
    nonceGroup,
    nonce);
  latch.await();

  if (cpHost != null) {
    cpHost.postCreateTable(hTableDescriptor, newRegions);
  }

  return procId;
}
Example 13: removeHdfsRegions
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
/**
 * Remove specified regions from the file-system, using the archiver.
 */
private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<HRegionInfo> regions)
    throws IOException {
  if (regions == null || regions.size() == 0) return;
  ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
    @Override
    public void editRegion(final HRegionInfo hri) throws IOException {
      HFileArchiver.archiveRegion(conf, fs, hri);
    }
  });
}
Example 14: restoreHdfsRegions
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
/**
 * Restore specified regions by restoring content to the snapshot state.
 */
private void restoreHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<HRegionInfo> regions) throws IOException {
  if (regions == null || regions.size() == 0) return;
  ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
    @Override
    public void editRegion(final HRegionInfo hri) throws IOException {
      restoreRegion(hri, regionManifests.get(hri.getEncodedName()));
    }
  });
}
Example 15: cloneHdfsRegions
import org.apache.hadoop.hbase.util.ModifyRegionUtils; // import the required package/class
/**
 * Clone specified regions. For each region create a new region
 * and create an HFileLink for each hfile.
 */
private HRegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec,
    final Map<String, SnapshotRegionManifest> regionManifests,
    final List<HRegionInfo> regions) throws IOException {
  if (regions == null || regions.size() == 0) return null;

  final Map<String, HRegionInfo> snapshotRegions =
    new HashMap<String, HRegionInfo>(regions.size());

  // clone region info (change embedded tableName with the new one)
  HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
  for (int i = 0; i < clonedRegionsInfo.length; ++i) {
    // clone the region info from the snapshot region info
    HRegionInfo snapshotRegionInfo = regions.get(i);
    clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);

    // add the region name mapping between snapshot and cloned
    String snapshotRegionName = snapshotRegionInfo.getEncodedName();
    String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
    regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
    LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

    // Add mapping between cloned region name and snapshot region info
    snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
  }

  // create the regions on disk
  ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDir,
    tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
      @Override
      public void fillRegion(final HRegion region) throws IOException {
        HRegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
        cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName()));
      }
    });

  return clonedRegionsInfo;
}