This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress. If you are wondering what MiniBatchOperationInProgress does and how to use it, the selected class code examples below may help.
The MiniBatchOperationInProgress class belongs to the org.apache.hadoop.hbase.regionserver package. 15 code examples of the class are shown below, sorted by popularity by default.
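All of the examples implement RegionObserver hooks, and they only take effect once the coprocessor is attached to a table (or configured region-server wide). As a point of reference, here is a minimal sketch of table-level registration, assuming the HBase 1.x-style HTableDescriptor client API that several of the examples use; the table name demo_table, the column family cf, and the observer class com.example.MyBatchObserver are placeholders for illustration, not taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegisterBatchObserver {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_table"));
      desc.addFamily(new HColumnDescriptor("cf"));
      // Hypothetical observer class; it must be on the region server classpath so that
      // its preBatchMutate/postBatchMutate hooks run around each batch of mutations.
      desc.addCoprocessor("com.example.MyBatchObserver");
      admin.createTable(desc);
    }
  }
}

Alternatively, a region observer can be loaded for every table by listing its class name in hbase.coprocessor.region.classes in hbase-site.xml.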
Example 1: postBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> ctx,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) {
  if (ctx.getEnvironment().getRegion().getTableDesc().getNameAsString()
      .contains("testWalReplayShouldNotSkipAnyRecords")) {
    if (delayPostBatchMutate) {
      try {
        latch.await();
      } catch (InterruptedException e) {
        // The wait was interrupted; log it and continue with the post-batch hook.
        e.printStackTrace();
      }
    }
    super.postBatchMutate(ctx, miniBatchOp);
  } else {
    super.postBatchMutate(ctx, miniBatchOp);
  }
}
Example 2: preBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  Mutation mut = miniBatchOp.getOperation(0);
  if (mut instanceof Delete) {
    List<Cell> cells = mut.getFamilyCellMap().get(test);
    Delete[] deletes = new Delete[] {
        // delete only 2 rows
        new Delete(row1).addColumns(test, dummy, cells.get(0).getTimestamp()),
        new Delete(row2).addColumns(test, dummy, cells.get(0).getTimestamp()),
    };
    LOG.info("Deleting:" + Arrays.toString(deletes));
    miniBatchOp.addOperationsFromCP(0, deletes);
  }
}
Example 3: preBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  if (cellFeaturesEnabled && !compatibleEarlyTermination) {
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    for (int i = 0; i < miniBatchOp.size(); i++) {
      Mutation m = miniBatchOp.getOperation(i);
      if (m.getAttribute(CHECK_COVERING_PERM) != null) {
        // We have a failure with table, cf and q perm checks and now giving a chance for cell
        // perm check
        OpType opType;
        if (m instanceof Put) {
          checkForReservedTagPresence(getActiveUser(), m);
          opType = OpType.PUT;
        } else {
          opType = OpType.DELETE;
        }
        AuthResult authResult = null;
        if (checkCoveringPermission(opType, c.getEnvironment(), m.getRow(),
            m.getFamilyCellMap(), m.getTimeStamp(), Action.WRITE)) {
          authResult = AuthResult.allow(opType.toString(), "Covering cell set",
              getActiveUser(), Action.WRITE, table, m.getFamilyCellMap());
        } else {
          authResult = AuthResult.deny(opType.toString(), "Covering cell set",
              getActiveUser(), Action.WRITE, table, m.getFamilyCellMap());
        }
        logResult(authResult);
        if (authorizationEnabled && !authResult.isAllowed()) {
          throw new AccessDeniedException("Insufficient permissions "
              + authResult.toContextString());
        }
      }
    }
  }
}
Example 4: preBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
    final MiniBatchOperationInProgress<Mutation> miniBatchOp)
    throws IOException {
  if (ops.incrementAndGet() % 20000 == 0) {
    LOG.info("Wrote " + ops.get() + " times in region " + regionName);
  }
  for (int i = 0; i < miniBatchOp.size(); i++) {
    miniBatchOp.setOperationStatus(i,
        new OperationStatus(HConstants.OperationStatusCode.SUCCESS));
  }
  c.bypass();
}
Example 5: preBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  RegionCoprocessorEnvironment e = c.getEnvironment();
  assertNotNull(e);
  assertNotNull(e.getRegion());
  assertNotNull(miniBatchOp);
  ctPreBatchMutate.incrementAndGet();
}
Example 6: postBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void postBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
    final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  RegionCoprocessorEnvironment e = c.getEnvironment();
  assertNotNull(e);
  assertNotNull(e.getRegion());
  assertNotNull(miniBatchOp);
  ctPostBatchMutate.incrementAndGet();
}
Example 7: preBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
    final MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp)
    throws IOException {
  if (ops.incrementAndGet() % 20000 == 0) {
    LOG.info("Wrote " + ops.get() + " times in region " + regionName);
  }
  for (int i = 0; i < miniBatchOp.size(); i++) {
    miniBatchOp.setOperationStatus(i,
        new OperationStatus(HConstants.OperationStatusCode.SUCCESS));
  }
  c.bypass();
}
Example 8: preBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp) throws IOException {
  RegionCoprocessorEnvironment e = c.getEnvironment();
  assertNotNull(e);
  assertNotNull(e.getRegion());
  assertNotNull(miniBatchOp);
  hadPreBatchMutate = true;
}
Example 9: postBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void postBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
    final MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp) throws IOException {
  RegionCoprocessorEnvironment e = c.getEnvironment();
  assertNotNull(e);
  assertNotNull(e.getRegion());
  assertNotNull(miniBatchOp);
  hadPostBatchMutate = true;
}
Example 10: preBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  if (cellFeaturesEnabled && !compatibleEarlyTermination) {
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    for (int i = 0; i < miniBatchOp.size(); i++) {
      Mutation m = miniBatchOp.getOperation(i);
      if (m.getAttribute(CHECK_COVERING_PERM) != null) {
        // We have a failure with table, cf and q perm checks and now giving a chance for cell
        // perm check
        OpType opType;
        if (m instanceof Put) {
          checkForReservedTagPresence(getActiveUser(), m);
          opType = OpType.PUT;
        } else {
          opType = OpType.DELETE;
        }
        AuthResult authResult = null;
        if (checkCoveringPermission(opType, c.getEnvironment(), m.getRow(), m.getFamilyCellMap(),
            m.getTimeStamp(), Action.WRITE)) {
          authResult = AuthResult.allow(opType.toString(), "Covering cell set", getActiveUser(),
              Action.WRITE, table, m.getFamilyCellMap());
        } else {
          authResult = AuthResult.deny(opType.toString(), "Covering cell set", getActiveUser(),
              Action.WRITE, table, m.getFamilyCellMap());
        }
        logResult(authResult);
        if (!authResult.isAllowed()) {
          throw new AccessDeniedException("Insufficient permissions "
              + authResult.toContextString());
        }
      }
    }
  }
}
Example 11: preBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> ctx,
    final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  HRegionServer rs = (HRegionServer) ctx.getEnvironment().getRegionServerServices();
  HRegion userRegion = ctx.getEnvironment().getRegion();
  HTableDescriptor userTableDesc = userRegion.getTableDesc();
  String tableName = userTableDesc.getNameAsString();
  if (isNotIndexedTableDescriptor(userTableDesc)) {
    return;
  }
  List<IndexSpecification> indices = indexManager.getIndicesForTable(tableName);
  if (indices == null || indices.isEmpty()) {
    LOG.trace("skipping preBatchMutate for the table " + tableName + " as there are no indices");
    return;
  }
  LOG.trace("Entering preBatchMutate for the table " + tableName);
  LOG.trace("Indices for the table " + tableName + " are: " + indices);
  HRegion indexRegion = getIndexTableRegion(tableName, userRegion, rs);
  // Store the resolved index table HRegion in the thread local.
  IndexEdits indexEdits = threadLocal.get();
  indexEdits.indexRegion = indexRegion;
  for (int i = 0; i < miniBatchOp.size(); i++) {
    Mutation m = miniBatchOp.getOperation(i);
    if (m instanceof Put) {
      try {
        prepareIndexMutations(indices, userRegion, m, tableName, indexRegion);
      } catch (IOException e) {
        miniBatchOp.setOperationStatus(i, new OperationStatus(
            OperationStatusCode.SANITY_CHECK_FAILURE, e.getMessage()));
      }
    } else if (m instanceof Delete) {
      prepareIndexMutations(indices, userRegion, m, tableName, indexRegion);
    }
  }
  indexEdits.setUpdateLocked();
  indexRegion.updatesLock();
  LOG.trace("Exiting preBatchMutate for the table " + tableName);
}
Example 12: postBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void postBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> ctx,
    final MiniBatchOperationInProgress<Mutation> miniBatchOp) {
  HTableDescriptor userTableDesc = ctx.getEnvironment().getRegion().getTableDesc();
  String tableName = userTableDesc.getNameAsString();
  if (isNotIndexedTableDescriptor(userTableDesc)) {
    return;
  }
  List<IndexSpecification> indices = indexManager.getIndicesForTable(tableName);
  if (indices == null || indices.isEmpty()) {
    LOG.trace("skipping postBatchMutate for the table " + tableName + " as there are no indices");
    return;
  }
  LOG.trace("Entering postBatchMutate for the table " + tableName);
  IndexEdits indexEdits = threadLocal.get();
  List<Mutation> indexMutations = indexEdits.getIndexMutations();
  if (indexMutations.size() == 0) {
    return;
  }
  HRegion hr = indexEdits.getRegion();
  try {
    hr.batchMutateForIndex(indexMutations.toArray(new Mutation[indexMutations.size()]));
  } catch (IOException e) {
    // TODO Can this happen? If so, the actual put should be reverted and the
    // operation marked as failed.
    LOG.error("Error putting data into the index region", e);
  }
  LOG.trace("Exiting postBatchMutate for the table " + tableName);
}
Example 13: postBatchMutateIndispensably
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> ctx,
    MiniBatchOperationInProgress<Mutation> miniBatchOp, boolean success) throws IOException {
  IndexEdits indexEdits = threadLocal.get();
  if (indexEdits != null) {
    if (indexEdits.isUpdatesLocked()) {
      indexEdits.getRegion().updatesUnlock();
    }
  }
  threadLocal.remove();
}
Example 14: postBatchMutateIndispensably
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void postBatchMutateIndispensably(
    ObserverContext<RegionCoprocessorEnvironment> ctx,
    MiniBatchOperationInProgress<Mutation> miniBatchOp, boolean success)
    throws IOException {
  // Intentionally left empty.
}
Example 15: preBatchMutate
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; // import the dependent package/class
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  if (cellFeaturesEnabled && !compatibleEarlyTermination) {
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    User user = getActiveUser(c);
    for (int i = 0; i < miniBatchOp.size(); i++) {
      Mutation m = miniBatchOp.getOperation(i);
      if (m.getAttribute(CHECK_COVERING_PERM) != null) {
        // We have a failure with table, cf and q perm checks and now giving a chance for cell
        // perm check
        OpType opType;
        if (m instanceof Put) {
          checkForReservedTagPresence(user, m);
          opType = OpType.PUT;
        } else {
          opType = OpType.DELETE;
        }
        AuthResult authResult = null;
        if (checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(),
            m.getFamilyCellMap(), m.getTimeStamp(), Action.WRITE)) {
          authResult = AuthResult.allow(opType.toString(), "Covering cell set",
              user, Action.WRITE, table, m.getFamilyCellMap());
        } else {
          authResult = AuthResult.deny(opType.toString(), "Covering cell set",
              user, Action.WRITE, table, m.getFamilyCellMap());
        }
        AccessChecker.logResult(authResult);
        if (authorizationEnabled && !authResult.isAllowed()) {
          throw new AccessDeniedException("Insufficient permissions "
              + authResult.toContextString());
        }
      }
    }
  }
}