

Java ObserverContext.getEnvironment Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.coprocessor.ObserverContext.getEnvironment. If you are wondering what ObserverContext.getEnvironment does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.coprocessor.ObserverContext.


The following presents 11 code examples of the ObserverContext.getEnvironment method, sorted by popularity by default.
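Before the collected examples, here is a minimal illustrative sketch of the basic pattern, assuming the HBase 1.x coprocessor API that the examples below also use (the class name ExampleRegionObserver is hypothetical): getEnvironment() returns the RegionCoprocessorEnvironment the observer runs in, from which the hosting Region and the cluster Configuration can be obtained.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;

// Hypothetical observer, shown only to illustrate the getEnvironment() call pattern.
public class ExampleRegionObserver extends BaseRegionObserver {

  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> ctx) {
    // getEnvironment() exposes the coprocessor environment of the hosting region server:
    // the Region this observer is attached to and the cluster Configuration.
    RegionCoprocessorEnvironment env = ctx.getEnvironment();
    Region region = env.getRegion();
    Configuration conf = env.getConfiguration();
    System.out.println("Opened region " + region.getRegionInfo().getRegionNameAsString()
        + ", hbase.rootdir=" + conf.get("hbase.rootdir"));
  }
}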

Example 1: preOpen

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public void preOpen(ObserverContext<RegionCoprocessorEnvironment> e)
    throws IOException {
  RegionCoprocessorEnvironment env = e.getEnvironment();
  final Region region = env.getRegion();
  if (region == null) {
    LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()");
  } else {
    HRegionInfo regionInfo = region.getRegionInfo();
    if (regionInfo.getTable().isSystemTable()) {
      checkSystemOrSuperUser();
    } else {
      requirePermission("preOpen", Action.ADMIN);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: AccessController.java

Example 2: postOpen

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public void postOpen(ObserverContext<RegionCoprocessorEnvironment> c) {
  RegionCoprocessorEnvironment env = c.getEnvironment();
  final Region region = env.getRegion();
  if (region == null) {
    LOG.error("NULL region from RegionCoprocessorEnvironment in postOpen()");
    return;
  }
  if (AccessControlLists.isAclRegion(region)) {
    aclRegion = true;
    // When this region is under recovering state, initialize will be handled by postLogReplay
    if (!region.isRecovering()) {
      try {
        initialize(env);
      } catch (IOException ex) {
        // if we can't obtain permissions, it's better to fail
        // than perform checks incorrectly
        throw new RuntimeException("Failed to initialize permissions cache", ex);
      }
    }
  } else {
    initialized = true;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: AccessController.java

Example 3: preIncrementColumnValue

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
    final byte [] row, final byte [] family, final byte [] qualifier,
    final long amount, final boolean writeToWAL)
    throws IOException {
  // Require WRITE permission to the table, CF, and the KV to be replaced by the
  // incremented value
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
  User user = getActiveUser();
  AuthResult authResult = permissionGranted(OpType.INCREMENT_COLUMN_VALUE, user, env, families,
    Action.WRITE);
  if (!authResult.isAllowed() && cellFeaturesEnabled && !compatibleEarlyTermination) {
    authResult.setAllowed(checkCoveringPermission(OpType.INCREMENT_COLUMN_VALUE, env, row,
      families, HConstants.LATEST_TIMESTAMP, Action.WRITE));
    authResult.setReason("Covering cell set");
  }
  logResult(authResult);
  if (authorizationEnabled && !authResult.isAllowed()) {
    throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
  }
  return -1;
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: AccessController.java

Example 4: prePut

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Put put, final WALEdit edit, final Durability durability)
    throws IOException {
  User user = getActiveUser();
  checkForReservedTagPresence(user, put);

  // Require WRITE permission to the table, CF, or top visible value, if any.
  // NOTE: We don't need to check the permissions for any earlier Puts
  // because we treat the ACLs in each Put as timestamped like any other
  // HBase value. A new ACL in a new Put applies to that Put. It doesn't
  // change the ACL of any previous Put. This allows simple evolution of
  // security policy over time without requiring expensive updates.
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<Cell>> families = put.getFamilyCellMap();
  AuthResult authResult = permissionGranted(OpType.PUT, user, env, families, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      put.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
    }
  }

  // Add cell ACLs from the operation to the cells themselves
  byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
  if (bytes != null) {
    if (cellFeaturesEnabled) {
      addCellPermissions(bytes, put.getFamilyCellMap());
    } else {
      throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: AccessController.java

Example 5: preDelete

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Delete delete, final WALEdit edit, final Durability durability)
    throws IOException {
  // An ACL on a delete is useless, we shouldn't allow it
  if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) {
    throw new DoNotRetryIOException("ACL on delete has no effect: " + delete.toString());
  }
  // Require WRITE permissions on all cells covered by the delete. Unlike
  // for Puts we need to check all visible prior versions, because a major
  // compaction could remove them. If the user doesn't have permission to
  // overwrite any of the visible versions ('visible' defined as not covered
  // by a tombstone already) then we have to disallow this operation.
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<Cell>> families = delete.getFamilyCellMap();
  User user = getActiveUser();
  AuthResult authResult = permissionGranted(OpType.DELETE, user, env, families, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      delete.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " +
        authResult.toContextString());
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: AccessController.java

Example 6: preCheckAndPut

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
    final byte [] row, final byte [] family, final byte [] qualifier,
    final CompareFilter.CompareOp compareOp,
    final ByteArrayComparable comparator, final Put put,
    final boolean result) throws IOException {
  User user = getActiveUser();
  checkForReservedTagPresence(user, put);

  // Require READ and WRITE permissions on the table, CF, and KV to update
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
  AuthResult authResult = permissionGranted(OpType.CHECK_AND_PUT, user, env, families,
    Action.READ, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      put.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " +
        authResult.toContextString());
    }
  }

  byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
  if (bytes != null) {
    if (cellFeaturesEnabled) {
      addCellPermissions(bytes, put.getFamilyCellMap());
    } else {
      throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
    }
  }
  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 35, Source: AccessController.java

Example 7: preCheckAndDelete

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
    final byte [] row, final byte [] family, final byte [] qualifier,
    final CompareFilter.CompareOp compareOp,
    final ByteArrayComparable comparator, final Delete delete,
    final boolean result) throws IOException {
  // An ACL on a delete is useless, we shouldn't allow it
  if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) {
    throw new DoNotRetryIOException("ACL on checkAndDelete has no effect: " +
        delete.toString());
  }
  // Require READ and WRITE permissions on the table, CF, and the KV covered
  // by the delete
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
  User user = getActiveUser();
  AuthResult authResult = permissionGranted(OpType.CHECK_AND_DELETE, user, env, families,
    Action.READ, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      delete.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " +
        authResult.toContextString());
    }
  }
  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: AccessController.java

Example 8: preAppend

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append)
    throws IOException {
  User user = getActiveUser();
  checkForReservedTagPresence(user, append);

  // Require WRITE permission to the table, CF, and the KV to be appended
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<Cell>> families = append.getFamilyCellMap();
  AuthResult authResult = permissionGranted(OpType.APPEND, user, env, families, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      append.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled)  {
      throw new AccessDeniedException("Insufficient permissions " +
        authResult.toContextString());
    }
  }

  byte[] bytes = append.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
  if (bytes != null) {
    if (cellFeaturesEnabled) {
      addCellPermissions(bytes, append.getFamilyCellMap());
    } else {
      throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
    }
  }

  return null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: AccessController.java

Example 9: preIncrement

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Increment increment)
    throws IOException {
  User user = getActiveUser();
  checkForReservedTagPresence(user, increment);

  // Require WRITE permission to the table, CF, and the KV to be replaced by
  // the incremented value
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<Cell>> families = increment.getFamilyCellMap();
  AuthResult authResult = permissionGranted(OpType.INCREMENT, user, env, families,
    Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      increment.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " +
        authResult.toContextString());
    }
  }

  byte[] bytes = increment.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
  if (bytes != null) {
    if (cellFeaturesEnabled) {
      addCellPermissions(bytes, increment.getFamilyCellMap());
    } else {
      throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
    }
  }

  return null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 35, Source: AccessController.java

Example 10: preSplitAfterPONR

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public void preSplitAfterPONR(ObserverContext<RegionCoprocessorEnvironment> ctx)
    throws IOException {
  RegionCoprocessorEnvironment environment = ctx.getEnvironment();
  HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
  st.stepsAfterPONR(rs, rs, daughterRegions, null);
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: TestSplitTransactionOnCluster.java

Example 11: preSplitBeforePONR

import org.apache.hadoop.hbase.coprocessor.ObserverContext; // import the package/class the method depends on
@Override
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
    byte[] splitKey, List<Mutation> metaEntries) throws IOException {
  RegionCoprocessorEnvironment environment = ctx.getEnvironment();
  HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
  List<Region> onlineRegions =
      rs.getOnlineRegions(TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2"));
  Region region = onlineRegions.get(0);
  for (Region r : onlineRegions) {
    if (r.getRegionInfo().containsRow(splitKey)) {
      region = r;
      break;
    }
  }
  st = new SplitTransactionImpl((HRegion) region, splitKey);
  if (!st.prepare()) {
    LOG.error("Prepare for the table " + region.getTableDesc().getNameAsString()
        + " failed. So returning null. ");
    ctx.bypass();
    return;
  }
  ((HRegion)region).forceSplit(splitKey);
  daughterRegions = st.stepsBeforePONR(rs, rs, false);
  HRegionInfo copyOfParent = new HRegionInfo(region.getRegionInfo());
  copyOfParent.setOffline(true);
  copyOfParent.setSplit(true);
  // Put for parent
  Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
  MetaTableAccessor.addDaughtersToPut(putParent, daughterRegions.getFirst().getRegionInfo(),
    daughterRegions.getSecond().getRegionInfo());
  metaEntries.add(putParent);
  // Puts for daughters
  Put putA = MetaTableAccessor.makePutFromRegionInfo(
    daughterRegions.getFirst().getRegionInfo());
  Put putB = MetaTableAccessor.makePutFromRegionInfo(
    daughterRegions.getSecond().getRegionInfo());
  st.addLocation(putA, rs.getServerName(), 1);
  st.addLocation(putB, rs.getServerName(), 1);
  metaEntries.add(putA);
  metaEntries.add(putB);
}
 
Developer: fengchen8086, Project: ditb, Lines: 42, Source: TestSplitTransactionOnCluster.java


Note: The org.apache.hadoop.hbase.coprocessor.ObserverContext.getEnvironment examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult the corresponding project's license before using or redistributing the code; do not reproduce without permission.