This article collects typical usage examples of the Java class org.apache.hadoop.hbase.coprocessor.ObserverContext. If you are wondering what ObserverContext is for or how it is used in practice, the curated code examples below may help.
The ObserverContext class belongs to the org.apache.hadoop.hbase.coprocessor package. Fifteen code examples of the class are shown below, sorted by popularity by default.
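As background for the examples, the sketch below shows where an ObserverContext typically appears: HBase passes one as the first argument to every coprocessor hook, and the hook uses it to reach its environment (region, table, configuration) or to bypass default processing. This is a minimal illustrative sketch against the HBase 1.x API used throughout this page; the class name ExampleRegionObserver and the "skip-example-check" attribute are made-up placeholders, not taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

// Hypothetical observer, shown only to illustrate how an ObserverContext is consumed.
public class ExampleRegionObserver extends BaseRegionObserver {
  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
      WALEdit edit, Durability durability) throws IOException {
    // The context gives access to the coprocessor environment: region, table, configuration.
    String table = c.getEnvironment().getRegion().getRegionInfo()
        .getTable().getNameAsString();
    // "skip-example-check" is a hypothetical attribute used only for this sketch.
    if (!table.startsWith("hbase:") && put.getAttribute("skip-example-check") != null) {
      // bypass() asks the framework to skip the default processing for this hook.
      c.bypass();
    }
  }
}

The examples that follow exercise the same pattern, either from HBase coprocessors or from tests that construct a context directly via ObserverContext.createAndPrepare(...).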
Example 1: testTableCreate
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Test (timeout=180000)
public void testTableCreate() throws Exception {
AccessTestAction createTable = new AccessTestAction() {
@Override
public Object run() throws Exception {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testnewtable"));
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
ACCESS_CONTROLLER.preCreateTable(ObserverContext.createAndPrepare(CP_ENV, null), htd, null);
return null;
}
};
// verify that superuser can create tables
verifyAllowed(createTable, SUPERUSER, USER_ADMIN, USER_GROUP_CREATE);
// all others should be denied
verifyDenied(createTable, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_ADMIN,
USER_GROUP_READ, USER_GROUP_WRITE);
}
Example 2: postOpen
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
/****************************** Region related hooks ******************************/
@Override
public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
// Read the entire labels table and populate the zk
if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) {
this.labelsRegion = true;
synchronized (this) {
this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors()
.contains(AccessController.class.getName());
}
// If the labels region is still recovering, defer initializing the VisibilityLabelService until recovery completes.
if (!e.getEnvironment().getRegion().isRecovering()) {
initVisibilityLabelService(e.getEnvironment());
}
} else {
checkAuths = e.getEnvironment().getConfiguration()
.getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false);
initVisibilityLabelService(e.getEnvironment());
}
}
Example 3: postInstantiateDeleteTracker
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Override
public DeleteTracker postInstantiateDeleteTracker(
ObserverContext<RegionCoprocessorEnvironment> ctx, DeleteTracker delTracker)
throws IOException {
// Nothing to do if we are not filtering by visibility
if (!authorizationEnabled) {
return delTracker;
}
Region region = ctx.getEnvironment().getRegion();
TableName table = region.getRegionInfo().getTable();
if (table.isSystemTable()) {
return delTracker;
}
// We are creating a new type of delete tracker here which is able to track
// the timestamps and also the visibility tags per cell. The covering cells
// are determined not only based on the delete type and timestamp but also
// on the visibility expression matching.
return new VisibilityScanDeleteTracker();
}
Example 4: testPrepareAndCleanBulkLoad
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Test (timeout=180000)
public void testPrepareAndCleanBulkLoad() throws Exception {
AccessTestAction prepareBulkLoadAction = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.prePrepareBulkLoad(ObserverContext.createAndPrepare(RCP_ENV, null),
null);
return null;
}
};
AccessTestAction cleanupBulkLoadAction = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preCleanupBulkLoad(ObserverContext.createAndPrepare(RCP_ENV, null),
null);
return null;
}
};
verifyAnyCreate(prepareBulkLoadAction);
verifyAnyCreate(cleanupBulkLoadAction);
}
Example 5: preTruncateTable
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Override
public void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName) throws IOException {
requirePermission("truncateTable", tableName, null, null, Action.ADMIN, Action.CREATE);
final Configuration conf = c.getEnvironment().getConfiguration();
User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
List<UserPermission> acls = AccessControlLists.getUserTablePermissions(conf, tableName);
if (acls != null) {
tableAcls.put(tableName, acls);
}
return null;
}
});
}
Example 6: postTruncateTable
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Override
public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName) throws IOException {
final Configuration conf = ctx.getEnvironment().getConfiguration();
User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
List<UserPermission> perms = tableAcls.get(tableName);
if (perms != null) {
for (UserPermission perm : perms) {
AccessControlLists.addUserPermission(conf, perm);
}
}
tableAcls.remove(tableName);
return null;
}
});
}
Example 7: testModifyNamespace
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Test
public void testModifyNamespace() throws Exception {
AccessTestAction modifyNamespace = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preModifyNamespace(ObserverContext.createAndPrepare(CP_ENV, null),
NamespaceDescriptor.create(TEST_NAMESPACE).addConfiguration("abc", "156").build());
return null;
}
};
// modifyNamespace: superuser | global(A) | NS(A)
verifyAllowed(modifyNamespace, SUPERUSER, USER_GLOBAL_ADMIN, USER_GROUP_ADMIN);
verifyDenied(modifyNamespace, USER_GLOBAL_CREATE, USER_GLOBAL_WRITE, USER_GLOBAL_READ,
USER_GLOBAL_EXEC, USER_NS_ADMIN, USER_NS_CREATE, USER_NS_WRITE, USER_NS_READ, USER_NS_EXEC,
USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE);
}
Example 8: testAssign
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Test (timeout=180000)
public void testAssign() throws Exception {
List<HRegionLocation> regions;
try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE)) {
regions = locator.getAllRegionLocations();
}
HRegionLocation location = regions.get(0);
final HRegionInfo hri = location.getRegionInfo();
AccessTestAction action = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preAssign(ObserverContext.createAndPrepare(CP_ENV, null), hri);
return null;
}
};
verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
USER_GROUP_WRITE, USER_GROUP_CREATE);
}
Example 9: slowdownCode
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
private void slowdownCode(final ObserverContext<RegionCoprocessorEnvironment> e) {
if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
CountDownLatch latch = getCdl().get();
try {
if (sleepTime.get() > 0) {
LOG.info("Sleeping for " + sleepTime.get() + " ms");
Thread.sleep(sleepTime.get());
} else if (latch.getCount() > 0) {
LOG.info("Waiting for the counterCountDownLatch");
latch.await(2, TimeUnit.MINUTES); // To help the tests to finish.
if (latch.getCount() > 0) {
throw new RuntimeException("Can't wait more");
}
}
} catch (InterruptedException e1) {
LOG.error(e1);
}
} else {
LOG.info("We're not the primary replicas.");
}
}
Example 10: testMergeRegions
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Test (timeout=180000)
public void testMergeRegions() throws Exception {
final TableName tname = TableName.valueOf("testMergeRegions");
createTestTable(tname);
try {
final List<HRegion> regions = TEST_UTIL.getHBaseCluster().findRegionsForTable(tname);
assertTrue("not enough regions: " + regions.size(), regions.size() >= 2);
AccessTestAction action = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preMerge(ObserverContext.createAndPrepare(RSCP_ENV, null),
regions.get(0), regions.get(1));
return null;
}
};
verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
USER_GROUP_WRITE, USER_GROUP_CREATE);
} finally {
deleteTable(TEST_UTIL, tname);
}
}
Example 11: testMove
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Test (timeout=180000)
public void testMove() throws Exception {
List<HRegionLocation> regions;
try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE)) {
regions = locator.getAllRegionLocations();
}
HRegionLocation location = regions.get(0);
final HRegionInfo hri = location.getRegionInfo();
final ServerName server = location.getServerName();
AccessTestAction action = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preMove(ObserverContext.createAndPrepare(CP_ENV, null),
hri, server, server);
return null;
}
};
verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
USER_GROUP_WRITE, USER_GROUP_CREATE);
}
Example 12: testReplicateLogEntries
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Test (timeout=180000)
public void testReplicateLogEntries() throws Exception {
AccessTestAction replicateLogEntriesAction = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preReplicateLogEntries(ObserverContext.createAndPrepare(RSCP_ENV, null),
null, null);
ACCESS_CONTROLLER.postReplicateLogEntries(ObserverContext.createAndPrepare(RSCP_ENV, null),
null, null);
return null;
}
};
verifyAllowed(replicateLogEntriesAction, SUPERUSER, USER_ADMIN, USER_GROUP_WRITE);
verifyDenied(replicateLogEntriesAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER,
USER_GROUP_READ, USER_GROUP_ADMIN, USER_GROUP_CREATE);
}
Example 13: preFlushScannerOpen
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Override
public InternalScanner preFlushScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c,
Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
if (newTtl != null) {
System.out.println("PreFlush:" + newTtl);
}
Integer newVersions = versions.get(store.getTableName());
ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
HConstants.OLDEST_TIMESTAMP);
}
Example 14: testAddColumn
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Test (timeout=180000)
public void testAddColumn() throws Exception {
final HColumnDescriptor hcd = new HColumnDescriptor("fam_new");
AccessTestAction action = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preAddColumn(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE,
hcd);
return null;
}
};
verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_GROUP_CREATE,
USER_GROUP_ADMIN);
verifyDenied(action, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
}
Example 15: preAppendAfterRowLock
import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the required package/class
@Override
public Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append) throws IOException {
if (append.getAttribute(CHECK_COVERING_PERM) != null) {
// The table, column family and qualifier permission checks failed; now give
// the cell-level permission check a chance.
TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
AuthResult authResult = null;
if (checkCoveringPermission(OpType.APPEND, c.getEnvironment(), append.getRow(),
append.getFamilyCellMap(), HConstants.LATEST_TIMESTAMP, Action.WRITE)) {
authResult = AuthResult.allow(OpType.APPEND.toString(), "Covering cell set",
getActiveUser(), Action.WRITE, table, append.getFamilyCellMap());
} else {
authResult = AuthResult.deny(OpType.APPEND.toString(), "Covering cell set",
getActiveUser(), Action.WRITE, table, append.getFamilyCellMap());
}
logResult(authResult);
if (authorizationEnabled && !authResult.isAllowed()) {
throw new AccessDeniedException("Insufficient permissions " +
authResult.toContextString());
}
}
return null;
}