本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.SplitTransaction类的典型用法代码示例。如果您正苦于以下问题:Java SplitTransaction类的具体用法?Java SplitTransaction怎么用?Java SplitTransaction使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
SplitTransaction类属于org.apache.hadoop.hbase.regionserver包,在下文中一共展示了SplitTransaction类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: preRollBack
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * Rolls back the index-region split when the user-region split is rolled back.
 * No-op for index tables themselves. If the rollback itself fails, the region
 * server is aborted because the region state would otherwise be inconsistent.
 *
 * @param ctx the coprocessor environment for the region being rolled back
 * @throws IOException declared by the observer contract (not thrown directly here)
 */
@Override
public void preRollBack(ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
  RegionCoprocessorEnvironment environment = ctx.getEnvironment();
  HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
  HRegion region = environment.getRegion();
  String userTableName = region.getTableDesc().getNameAsString();
  if (IndexUtils.isIndexTable(userTableName)) {
    return;
  }
  LOG.trace("Entering preRollBack for the table " + userTableName + " for the region "
      + region.getRegionInfo());
  SplitInfo splitInfo = splitThreadLocal.get();
  // FIX: the thread-local may hold no entry when no split was started on this
  // thread; without this guard getSplitTransaction() below throws a
  // NullPointerException. The sibling preRollBackSplit hook already checks this.
  if (splitInfo == null) {
    return;
  }
  SplitTransaction splitTransaction = splitInfo.getSplitTransaction();
  try {
    if (splitTransaction != null) {
      splitTransaction.rollback(rs, rs);
      LOG.info("preRollBack successfully done for the table " + userTableName
          + " for the region " + region.getRegionInfo());
    }
  } catch (Exception e) {
    // Rollback failed: abort rather than leave a half-split index region.
    LOG.error(
        "Error while rolling back the split failure for index region "
            + splitTransaction.getParent(), e);
    rs.abort("Abort; we got an error during rollback of index");
  }
}
示例2: split
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * Splits region {@code r} at {@code splitRow} and returns the two daughter
 * regions. Fails the enclosing test if the split cannot be prepared or does
 * not produce exactly two daughters.
 */
private Region[] split(final Region r, final byte[] splitRow) throws IOException {
  Region[] daughterRegions = new Region[2];
  SplitTransaction transaction =
      new SplitTransactionFactory(TEST_UTIL.getConfiguration()).create(r, splitRow);
  int filled = 0;
  if (!transaction.prepare()) {
    // test fails.
    assertTrue(false);
  }
  try {
    Server mockServer = Mockito.mock(Server.class);
    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
    PairOfSameType<Region> daughters = transaction.execute(mockServer, null);
    for (Region daughter : daughters) {
      daughterRegions[filled++] = daughter;
    }
  } catch (IOException ioe) {
    LOG.info("Split transaction of " + r.getRegionInfo().getRegionNameAsString() +
        " failed:" + ioe.getMessage());
    assertTrue(false);
  } catch (RuntimeException e) {
    LOG.info("Failed rollback of failed split of " +
        r.getRegionInfo().getRegionNameAsString() + e.getMessage());
  }
  // Exactly two daughters must have been collected for a successful split.
  assertTrue(filled == 2);
  return daughterRegions;
}
示例3: split
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * Splits {@code r} at {@code splitRow} and hands back both daughter regions.
 * Any failure to prepare or execute the split fails the enclosing test.
 */
private HRegion[] split(final HRegion r, final byte[] splitRow)
    throws IOException {
  HRegion[] daughterRegions = new HRegion[2];
  SplitTransaction transaction = new SplitTransaction(r, splitRow);
  int filled = 0;
  if (!transaction.prepare()) {
    // test fails.
    assertTrue(false);
  }
  try {
    Server mockServer = Mockito.mock(Server.class);
    when(mockServer.getConfiguration()).thenReturn(
        TEST_UTIL.getConfiguration());
    PairOfSameType<HRegion> daughters = transaction.execute(mockServer, null);
    for (HRegion daughter : daughters) {
      daughterRegions[filled++] = daughter;
    }
  } catch (IOException ioe) {
    LOG.info("Split transaction of " + r.getRegionNameAsString() +
        " failed:" + ioe.getMessage());
    assertTrue(false);
  } catch (RuntimeException e) {
    LOG.info("Failed rollback of failed split of " +
        r.getRegionNameAsString() + e.getMessage());
  }
  // A successful split yields exactly two daughter regions.
  assertTrue(filled == 2);
  return daughterRegions;
}
示例4: preSplitAfterPONR
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * After the point of no return (PONR) of the user-region split, finishes the
 * split transaction recorded for this thread: opens the daughter regions and
 * completes the transaction's bookkeeping. Past the PONR the split cannot be
 * rolled back, so any failure here aborts the region server.
 */
@Override
public void preSplitAfterPONR(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
  RegionCoprocessorEnvironment env = e.getEnvironment();
  HRegionServer server = (HRegionServer) env.getRegionServerServices();
  HRegion region = env.getRegion();
  String userTableName = region.getTableDesc().getNameAsString();
  String indexTableName = IndexUtils.getIndexTableName(userTableName);
  if (IndexUtils.isIndexTable(userTableName)) {
    return;
  }
  LOG.trace("Entering postSplit for the table " + userTableName + " for the region "
      + region.getRegionInfo());
  // Only tables carrying an index specification need the index-side work.
  if (region.getTableDesc().getValue(Constants.INDEX_SPEC_KEY) == null) {
    return;
  }
  try {
    SplitInfo splitInfo = splitThreadLocal.get();
    if (splitInfo == null) {
      return;
    }
    SplitTransaction transaction = splitInfo.getSplitTransaction();
    PairOfSameType<HRegion> daughters = splitInfo.getDaughters();
    if (transaction != null && daughters != null) {
      transaction.stepsAfterPONR(server, server, daughters);
      LOG.info("Daughter regions are opened and split transaction finished for zknodes for index table "
          + indexTableName + " for the region " + region.getRegionInfo());
    }
  } catch (Exception ex) {
    String msg =
        "Splitting of index region has failed in stepsAfterPONR stage so aborting the server";
    LOG.error(msg, ex);
    server.abort(msg);
  }
}
示例5: preRollBackSplit
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * Rolls the index-region split back when the user-region split is being
 * rolled back. No-op when the table has no indexes or no split was recorded
 * for this thread. Aborts the region server if the rollback itself fails,
 * since the index region would otherwise be left half-split.
 */
@Override
public void preRollBackSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
    throws IOException {
  RegionCoprocessorEnvironment env = ctx.getEnvironment();
  HRegionServer server = (HRegionServer) env.getRegionServerServices();
  HRegion region = env.getRegion();
  HTableDescriptor tableDesc = region.getTableDesc();
  String userTableName = tableDesc.getNameAsString();
  if (isNotIndexedTableDescriptor(tableDesc)) {
    return;
  }
  LOG.trace("Entering preRollBack for the table " + userTableName + " for the region "
      + region.getRegionInfo());
  SplitInfo splitInfo = splitThreadLocal.get();
  if (splitInfo == null) return;
  SplitTransaction transaction = splitInfo.getSplitTransaction();
  if (transaction == null) {
    return;
  }
  try {
    transaction.rollback(server, server);
    LOG.info("preRollBack successfully done for the table " + userTableName
        + " for the region " + region.getRegionInfo());
  } catch (Exception e) {
    LOG.error(
        "Error while rolling back the split failure for index region " + splitInfo.getParent(), e);
    server.abort("Abort; we got an error during rollback of index");
  }
}
示例6: preSplitAfterPONR
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * After the point of no return (PONR) of the user-region split, finishes the
 * index-region split transaction recorded for this thread: opens the daughter
 * regions and completes the transaction's bookkeeping. Past the PONR the split
 * cannot be rolled back, so any failure here aborts the region server.
 *
 * @param ctx the coprocessor environment for the splitting region
 * @throws IOException declared by the observer contract (not thrown directly here)
 */
@Override
public void preSplitAfterPONR(ObserverContext<RegionCoprocessorEnvironment> ctx)
    throws IOException {
  RegionCoprocessorEnvironment environment = ctx.getEnvironment();
  HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
  HRegion region = environment.getRegion();
  String userTableName = region.getTableDesc().getNameAsString();
  String indexTableName = IndexUtils.getIndexTableName(userTableName);
  if (IndexUtils.isIndexTable(userTableName)) {
    return;
  }
  LOG.trace("Entering postSplit for the table " + userTableName + " for the region "
      + region.getRegionInfo());
  IndexManager indexManager = IndexManager.getInstance();
  SplitTransaction splitTransaction = null;
  if (indexManager.getIndicesForTable(userTableName) != null) {
    try {
      SplitInfo splitInfo = splitThreadLocal.get();
      // FIX: no split may have been recorded for this thread. Without this
      // guard, getSplitTransaction() throws NullPointerException, which the
      // catch below treats as a split failure and needlessly aborts the
      // region server. The parallel implementation of this hook has the
      // same check.
      if (splitInfo == null) return;
      splitTransaction = splitInfo.getSplitTransaction();
      PairOfSameType<HRegion> daughters = splitInfo.getDaughters();
      if (splitTransaction != null && daughters != null) {
        splitTransaction.stepsAfterPONR(rs, rs, daughters);
        LOG.info("Daughter regions are opened and split transaction finished"
            + " for zknodes for index table " + indexTableName + " for the region "
            + region.getRegionInfo());
      }
    } catch (Exception ex) {
      // Past the PONR a failure is unrecoverable on this server.
      String msg =
          "Splitting of index region has failed in stepsAfterPONR stage so aborting the server";
      LOG.error(msg, ex);
      rs.abort(msg);
    }
  }
}
示例7: testCleanUpDaughtersNotInMetaAfterFailedSplit
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * Verifies that hbck detects and repairs daughter regions that were created
 * on disk by a split which failed before updating meta: the daughters show
 * up as NOT_IN_META_OR_DEPLOYED, the fix closes the hole, and all original
 * rows remain readable afterwards.
 */
@Test (timeout=180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
TableName table = TableName.valueOf("testCleanUpDaughtersNotInMetaAfterFailedSplit");
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
try {
HTableDescriptor desc = new HTableDescriptor(table);
desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
admin.createTable(desc);
tbl = new HTable(cluster.getConfiguration(), desc.getTableName());
// Seed five rows so the repaired table can be verified by row count below.
for (int i = 0; i < 5; i++) {
Put p1 = new Put(("r" + i).getBytes());
p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
tbl.put(p1);
}
admin.flush(desc.getTableName());
List<HRegion> regions = cluster.getRegions(desc.getTableName());
int serverWith = cluster.getServerWith(regions.get(0).getRegionName());
HRegionServer regionServer = cluster.getRegionServer(serverWith);
cluster.getServerWith(regions.get(0).getRegionName());
// Drive the split only part-way: stepsBeforePONR creates daughter regions
// but stops before the meta update, simulating a failed split.
SplitTransaction st = new SplitTransaction(regions.get(0), Bytes.toBytes("r3"));
st.prepare();
st.stepsBeforePONR(regionServer, regionServer, false);
// Clear the in-transition state and reassign the parent so the cluster
// reflects the inconsistent state hbck is expected to detect.
AssignmentManager am = cluster.getMaster().getAssignmentManager();
Map<String, RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
for (RegionState state : regionsInTransition.values()) {
am.regionOffline(state.getRegion());
}
ZKAssign.deleteNodeFailSilent(regionServer.getZooKeeper(), regions.get(0).getRegionInfo());
Map<HRegionInfo, ServerName> regionsMap = new HashMap<HRegionInfo, ServerName>();
regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
am.assign(regionsMap);
am.waitForAssignment(regions.get(0).getRegionInfo());
// Both daughters should be flagged: present on disk, absent from meta.
HBaseFsck hbck = doFsck(conf, false);
assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
// holes are separate from overlap groups
assertEquals(0, hbck.getOverlapGroups(table).size());
// fix hole
assertErrors(
doFsck(
conf, false, true, false, false, false, false, false, false, false, false, false, null),
new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
// check that hole fixed
assertNoErrors(doFsck(conf, false));
assertEquals(5, countRows());
} finally {
if (tbl != null) {
tbl.close();
tbl = null;
}
cleanupTable(table);
}
}
示例8: SplitInfo
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * Captures the state of an in-progress region split so that later
 * coprocessor hooks (rollback, post-PONR) can act on the same transaction.
 *
 * @param parent the region being split
 * @param pairOfSameType the two daughter regions produced by the split
 * @param st the split transaction driving this split
 */
public SplitInfo(final HRegion parent, final PairOfSameType<HRegion> pairOfSameType,
final SplitTransaction st) {
this.parent = parent;
this.daughterRegions = pairOfSameType;
this.st = st;
}
示例9: getSplitTransaction
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/** Returns the split transaction captured for this split, as supplied at construction. */
public SplitTransaction getSplitTransaction() {
return this.st;
}
示例10: testIndexManagerWithFailedSplitTransaction
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * Verifies IndexManager's region bookkeeping across a failed split: after
 * the split transaction fails and is rolled back, the table's region count
 * must still be 1.
 */
@Test(timeout = 180000)
public void testIndexManagerWithFailedSplitTransaction() throws Exception {
Configuration conf = UTIL.getConfiguration();
conf.setBoolean("hbase.use.secondary.index", true);
String userTableName = "testIndexManagerWithFailedSplitTransaction";
HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
HColumnDescriptor hcd = new HColumnDescriptor("col1");
ihtd.addFamily(hcd);
// Attach a single index on col1:ql so the table is index-managed.
IndexSpecification iSpec = new IndexSpecification("Index1");
iSpec.addIndexColumn(hcd, "ql", ValueType.String, 10);
TableIndices indices = new TableIndices();
indices.addIndex(iSpec);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
admin.createTable(ihtd);
IndexManager manager = IndexManager.getInstance();
int count = manager.getTableRegionCount(userTableName);
Assert.assertEquals(1, count);
// Seed rows so there is data on both sides of the intended split point.
HTable table = new HTable(conf, userTableName);
Put p = null;
for (int i = 0; i < 10; i++) {
p = new Put(Bytes.toBytes("row" + i));
p.add(Bytes.toBytes("col1"), Bytes.toBytes("ql"), Bytes.toBytes("test_val"));
table.put(p);
}
List<HRegion> regions = UTIL.getMiniHBaseCluster().getRegions(Bytes.toBytes(userTableName));
HRegionServer rs = UTIL.getMiniHBaseCluster().getRegionServer(0);
SplitTransaction st = null;
// NOTE(review): MockedSplitTransaction is presumably rigged to make the
// split fail so the rollback path runs — confirm against its definition.
st = new MockedSplitTransaction(regions.get(0), "row5".getBytes());
try {
st.prepare();
st.execute(rs, rs);
} catch (IOException e) {
// Split failed: roll back; the region count must be unchanged below.
st.rollback(rs, rs);
}
count = manager.getTableRegionCount(userTableName);
Assert.assertEquals(1, count);
}
示例11: testIndexManagerWithFailedSplitTransaction
import org.apache.hadoop.hbase.regionserver.SplitTransaction; //导入依赖的package包/类
/**
 * Variant of the failed-split test that forces the failure deterministically
 * by overriding splitStoreFiles to throw IOException, then verifies that
 * after rollback the IndexManager still reports a single region.
 */
@Test(timeout = 180000)
public void testIndexManagerWithFailedSplitTransaction() throws Exception {
HBaseAdmin admin = new HBaseAdmin(UTIL.getConfiguration());
Configuration conf = admin.getConfiguration();
conf.setBoolean("hbase.use.secondary.index", true);
ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
String userTableName = "testIndexManagerWithFailedSplitTransaction";
IndexedHTableDescriptor ihtd = new IndexedHTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("col1");
ihtd.addFamily(hcd);
// Attach a single index on col1:ql so the table is index-managed.
IndexSpecification iSpec = new IndexSpecification("Index1");
iSpec.addIndexColumn(hcd, "ql", ValueType.String, 10);
ihtd.addIndex(iSpec);
admin.createTable(ihtd);
// Wait until no regions are in transition before counting regions.
ZKAssign.blockUntilNoRIT(zkw);
IndexManager manager = IndexManager.getInstance();
int count = manager.getTableRegionCount(userTableName);
Assert.assertEquals(1, count);
// Seed rows so the region has data when the split is attempted.
HTable table = new HTable(conf, userTableName);
Put p = null;
for (int i = 0; i < 10; i++) {
p = new Put(Bytes.toBytes("row" + i));
p.add(Bytes.toBytes("col1"), Bytes.toBytes("ql"), Bytes.toBytes("test_val"));
table.put(p);
}
List<HRegion> regions = UTIL.getMiniHBaseCluster().getRegions(Bytes.toBytes(userTableName));
HRegionServer rs = UTIL.getMiniHBaseCluster().getRegionServer(0);
SplitTransaction st = null;
// The override guarantees the split fails mid-flight with IOException,
// exercising the rollback path below.
st = new MockedSplitTransaction(regions.get(0), null) {
@Override
protected void splitStoreFiles(final Path splitdir, final List<StoreFile> hstoreFilesToSplit)
throws IOException {
throw new IOException();
}
};
// NOTE(review): execute() is invoked without a preceding prepare(), unlike
// the sibling test above — confirm this is valid for this SplitTransaction
// version (a null splitRow is also passed).
try {
st.execute(rs, rs);
} catch (IOException e) {
// Forced failure: roll back; region count must be unchanged below.
st.rollback(rs, rs);
}
count = manager.getTableRegionCount(userTableName);
Assert.assertEquals(1, count);
}