

Java RegionCoprocessorEnvironment Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment. If you are unsure what RegionCoprocessorEnvironment is for, how to use it, or what real-world usage looks like, the curated class examples below may help.


The RegionCoprocessorEnvironment class belongs to the org.apache.hadoop.hbase.coprocessor package. A total of 15 code examples of the class are shown below, ordered by popularity.
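Before the harvested examples, here is a minimal, self-contained sketch of how a region observer typically gets hold of a RegionCoprocessorEnvironment, both in start() and inside a hook. It is not taken from any of the projects below; it assumes the HBase 1.x coprocessor API that the examples themselves use (BaseRegionObserver, ObserverContext), and the class name MinimalRegionObserver is purely hypothetical.

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Hypothetical observer, used only to illustrate where RegionCoprocessorEnvironment appears.
public class MinimalRegionObserver extends BaseRegionObserver {

  private static final Log LOG = LogFactory.getLog(MinimalRegionObserver.class);

  @Override
  public void start(CoprocessorEnvironment e) throws IOException {
    // A region observer is started with a RegionCoprocessorEnvironment;
    // the cast can only fail if the class is loaded outside a region server.
    if (!(e instanceof RegionCoprocessorEnvironment)) {
      throw new IllegalArgumentException("Must be loaded on a region server");
    }
    RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
    LOG.info("Started on region "
        + env.getRegion().getRegionInfo().getRegionNameAsString()
        + " of table " + env.getRegion().getRegionInfo().getTable());
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx, Get get,
      List<Cell> results) throws IOException {
    // Every region hook can reach the same environment through the ObserverContext.
    RegionCoprocessorEnvironment env = ctx.getEnvironment();
    LOG.debug("preGetOp on region "
        + env.getRegion().getRegionInfo().getRegionNameAsString()
        + ", system table: " + env.getRegion().getRegionInfo().getTable().isSystemTable());
  }
}

In the examples that follow, observers such as AccessController and VisibilityController are wired up by HBase itself; a custom observer like this sketch would typically be registered through the hbase.coprocessor.region.classes property or attached to a single table with HTableDescriptor#addCoprocessor before its hooks ever receive a RegionCoprocessorEnvironment.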

Example 1: start

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public void start(CoprocessorEnvironment environment) {
  // make sure we are on a region server
  if (!(environment instanceof RegionCoprocessorEnvironment)) {
    throw new IllegalArgumentException(
        "Constraints only act on regions - started in an environment that was not a region");
  }
  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) environment;
  HTableDescriptor desc = env.getRegion().getTableDesc();
  // load all the constraints from the HTD
  try {
    this.constraints = Constraints.getConstraints(desc, classloader);
  } catch (IOException e) {
    throw new IllegalArgumentException(e);
  }

  if (LOG.isInfoEnabled()) {
    LOG.info("Finished loading " + constraints.size()
        + " user Constraints on table: " + desc.getTableName());
  }

}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: ConstraintProcessor.java

Example 2: postOpen

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
/****************************** Region related hooks ******************************/

@Override
public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
  // Read the entire labels table and populate the zk
  if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) {
    this.labelsRegion = true;
    synchronized (this) {
      this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors()
        .contains(AccessController.class.getName());
    }
    // Defer the init of VisibilityLabelService on labels region until it is in recovering state.
    if (!e.getEnvironment().getRegion().isRecovering()) {
      initVisibilityLabelService(e.getEnvironment());
    }
  } else {
    checkAuths = e.getEnvironment().getConfiguration()
        .getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false);
    initVisibilityLabelService(e.getEnvironment());
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 22, Source: VisibilityController.java

Example 3: postScannerNext

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public boolean postScannerNext(ObserverContext<RegionCoprocessorEnvironment> e,
    InternalScanner s, List<Result> results, int limit, boolean hasMore) throws IOException {
  if (checkTagPresence) {
    if (results.size() > 0) {
      // Check tag presence in the 1st cell in 1st Result
      Result result = results.get(0);
      CellScanner cellScanner = result.cellScanner();
      if (cellScanner.advance()) {
        Cell cell = cellScanner.current();
        tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
            cell.getTagsLength());
      }
    }
  }
  return hasMore;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 18, Source: TestTags.java

Example 4: preGetOp

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
                     final Get get, final List<Cell> results) throws IOException {

  if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
    CountDownLatch latch = cdl.get();
    try {
      if (sleepTime.get() > 0) {
        LOG.info("Sleeping for " + sleepTime.get() + " ms");
        Thread.sleep(sleepTime.get());
      } else if (latch.getCount() > 0) {
        LOG.info("Waiting for the counterCountDownLatch");
        latch.await(2, TimeUnit.MINUTES); // To help the tests to finish.
        if (latch.getCount() > 0) {
          throw new RuntimeException("Can't wait more");
        }
      }
    } catch (InterruptedException e1) {
      LOG.error(e1);
    }
  } else {
    LOG.info("We're not the primary replicas.");
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source: TestReplicaWithCluster.java

Example 5: preFlushScannerOpen

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public InternalScanner preFlushScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  if (newTtl != null) {
    System.out.println("PreFlush:" + newTtl);
  }
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
      family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
      HConstants.OLDEST_TIMESTAMP);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: TestCoprocessorScanPolicy.java

Example 6: preIncrementColumnValue

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
    final byte [] row, final byte [] family, final byte [] qualifier,
    final long amount, final boolean writeToWAL)
    throws IOException {
  // Require WRITE permission to the table, CF, and the KV to be replaced by the
  // incremented value
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
  User user = getActiveUser();
  AuthResult authResult = permissionGranted(OpType.INCREMENT_COLUMN_VALUE, user, env, families,
    Action.WRITE);
  if (!authResult.isAllowed() && cellFeaturesEnabled && !compatibleEarlyTermination) {
    authResult.setAllowed(checkCoveringPermission(OpType.INCREMENT_COLUMN_VALUE, env, row,
      families, HConstants.LATEST_TIMESTAMP, Action.WRITE));
    authResult.setReason("Covering cell set");
  }
  logResult(authResult);
  if (authorizationEnabled && !authResult.isAllowed()) {
    throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
  }
  return -1;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 24, Source: AccessController.java

Example 7: preIncrementAfterRowLock

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Increment increment) throws IOException {
  if (increment.getAttribute(CHECK_COVERING_PERM) != null) {
    // We had failure with table, cf and q perm checks and now giving a chance for cell
    // perm check
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    AuthResult authResult = null;
    if (checkCoveringPermission(OpType.INCREMENT, c.getEnvironment(), increment.getRow(),
        increment.getFamilyCellMap(), increment.getTimeRange().getMax(), Action.WRITE)) {
      authResult = AuthResult.allow(OpType.INCREMENT.toString(), "Covering cell set",
          getActiveUser(), Action.WRITE, table, increment.getFamilyCellMap());
    } else {
      authResult = AuthResult.deny(OpType.INCREMENT.toString(), "Covering cell set",
          getActiveUser(), Action.WRITE, table, increment.getFamilyCellMap());
    }
    logResult(authResult);
    if (authorizationEnabled && !authResult.isAllowed()) {
      throw new AccessDeniedException("Insufficient permissions " +
        authResult.toContextString());
    }
  }
  return null;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source: AccessController.java

Example 8: preStoreScannerOpen

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  TableName tn = store.getTableName();
  if (!tn.isSystemTable()) {
    Long newTtl = ttls.get(store.getTableName());
    Integer newVersions = versions.get(store.getTableName());
    ScanInfo oldSI = store.getScanInfo();
    HColumnDescriptor family = store.getFamily();
    ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
        family.getName(), family.getMinVersions(),
        newVersions == null ? family.getMaxVersions() : newVersions,
        newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  } else {
    return s;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 22, Source: TestCoprocessorScanPolicy.java

Example 9: prepareBulkLoad

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public void prepareBulkLoad(RpcController controller,
                                               PrepareBulkLoadRequest request,
                                               RpcCallback<PrepareBulkLoadResponse> done){
  try {
    List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers();

    if(bulkLoadObservers != null) {
      ObserverContext<RegionCoprocessorEnvironment> ctx =
                                         new ObserverContext<RegionCoprocessorEnvironment>();
      ctx.prepare(env);

      for(BulkLoadObserver bulkLoadObserver : bulkLoadObservers) {
        bulkLoadObserver.prePrepareBulkLoad(ctx, request);
      }
    }

    String bulkToken = createStagingDir(baseStagingDir,
        getActiveUser(), ProtobufUtil.toTableName(request.getTableName())).toString();
    done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build());
  } catch (IOException e) {
    ResponseConverter.setControllerException(controller, e);
  }
  done.run(null);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: SecureBulkLoadEndpoint.java

Example 10: prePut

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  byte[] attribute = put.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : put.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag(TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);

        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        ((List<Cell>) updatedCells).add(newKV);
      }
    }
    put.getFamilyCellMap().remove(cf);
    // Update the family map
    put.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: TestReplicationWithTags.java

Example 11: getScanInfo

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
protected ScanInfo getScanInfo(Store store, RegionCoprocessorEnvironment e) {
  byte[] data = ((ZKWatcher)e.getSharedData().get(zkkey)).getData();
  if (data == null) {
    return null;
  }
  ScanInfo oldSI = store.getScanInfo();
  if (oldSI.getTtl() == Long.MAX_VALUE) {
    return null;
  }
  long ttl = Math.max(EnvironmentEdgeManager.currentTime() -
      Bytes.toLong(data), oldSI.getTtl());
  return new ScanInfo(oldSI.getConfiguration(), store.getFamily(), ttl,
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 15, Source: ZooKeeperScanPolicyObserver.java

Example 12: preSplitAfterPONR

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public void preSplitAfterPONR(ObserverContext<RegionCoprocessorEnvironment> ctx)
    throws IOException {
  RegionCoprocessorEnvironment environment = ctx.getEnvironment();
  HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
  st.stepsAfterPONR(rs, rs, daughterRegions, null);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 8, Source: TestSplitTransactionOnCluster.java

Example 13: postScannerNext

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
/**
 * @param s the scanner
 * @param results the result set returned by the region server
 * @param limit the maximum number of results to return
 * @param hasMore the 'has more' indication computed by the region for this batch
 * @return 'has more' indication to give to client
 * @exception IOException if a registered coprocessor hook throws it
 */
public boolean postScannerNext(final InternalScanner s,
    final List<Result> results, final int limit, boolean hasMore)
    throws IOException {
  return execOperationWithResult(hasMore,
      coprocessors.isEmpty() ? null : new RegionOperationWithResult<Boolean>() {
    @Override
    public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      setResult(oserver.postScannerNext(ctx, s, results, limit, getResult()));
    }
  });
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: RegionCoprocessorHost.java

Example 14: preScannerOpen

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
    RegionScanner s) throws IOException {
  if (!initialized) {
    throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!");
  }
  // Nothing to do if authorization is not enabled
  if (!authorizationEnabled) {
    return s;
  }
  Region region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  try {
    authorizations = scan.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  if (authorizations == null) {
    // No Authorizations present for this scan/Get!
    // In case of system tables other than "labels" just scan with out visibility check and
    // filtering. Checking visibility labels for META and NAMESPACE table is not needed.
    TableName table = region.getRegionInfo().getTable();
    if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
      return s;
    }
  }

  Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(region,
      authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = scan.getFilter();
    if (filter != null) {
      scan.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      scan.setFilter(visibilityLabelFilter);
    }
  }
  return s;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 40, Source: VisibilityController.java

Example 15: postScannerClose

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; // import the required package/class
/**
 * @param s the scanner being closed
 * @exception IOException if a registered coprocessor hook throws it
 */
public void postScannerClose(final InternalScanner s) throws IOException {
  execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
    @Override
    public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      oserver.postScannerClose(ctx, s);
    }
  });
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 13, Source: RegionCoprocessorHost.java


Note: The org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors; distribution and use are subject to each project's license. Do not reproduce without permission.