

Java ResponseConverter.setControllerException Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.protobuf.ResponseConverter.setControllerException. If you are wondering what ResponseConverter.setControllerException does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.protobuf.ResponseConverter.


Fifteen code examples of the ResponseConverter.setControllerException method are shown below, ordered by popularity.
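All of these examples share one shape: do the endpoint's work inside a try block, and on IOException record the error on the RpcController with ResponseConverter.setControllerException instead of throwing, then complete the RpcCallback. The sketch below is a minimal illustration of that pattern, not code from any of the projects featured here; MyRequest, MyResponse, and doWork are hypothetical placeholders.

import java.io.IOException;

import org.apache.hadoop.hbase.protobuf.ResponseConverter;

import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;

public void myEndpointMethod(RpcController controller, MyRequest request,
    RpcCallback<MyResponse> done) {
  MyResponse response = null;
  try {
    response = doWork(request);  // hypothetical endpoint business logic
  } catch (IOException e) {
    // Record the failure on the controller; the client-side stub turns it
    // back into an exception (typically wrapped in a ServiceException).
    ResponseConverter.setControllerException(controller, e);
  }
  done.run(response);  // always complete the callback; response stays null on failure
}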

Example 1: prepareBulkLoad

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public void prepareBulkLoad(RpcController controller,
                                               PrepareBulkLoadRequest request,
                                               RpcCallback<PrepareBulkLoadResponse> done){
  try {
    List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers();

    if(bulkLoadObservers != null) {
      ObserverContext<RegionCoprocessorEnvironment> ctx =
                                         new ObserverContext<RegionCoprocessorEnvironment>();
      ctx.prepare(env);

      for(BulkLoadObserver bulkLoadObserver : bulkLoadObservers) {
        bulkLoadObserver.prePrepareBulkLoad(ctx, request);
      }
    }

    String bulkToken = createStagingDir(baseStagingDir,
        getActiveUser(), ProtobufUtil.toTableName(request.getTableName())).toString();
    done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build());
  } catch (IOException e) {
    ResponseConverter.setControllerException(controller, e);
    done.run(null);  // complete the callback with a null response on failure
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: SecureBulkLoadEndpoint.java

Example 2: cleanupBulkLoad

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public void cleanupBulkLoad(RpcController controller,
                            CleanupBulkLoadRequest request,
                            RpcCallback<CleanupBulkLoadResponse> done) {
  try {
    List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers();

    if(bulkLoadObservers != null) {
      ObserverContext<RegionCoprocessorEnvironment> ctx =
                                         new ObserverContext<RegionCoprocessorEnvironment>();
      ctx.prepare(env);

      for(BulkLoadObserver bulkLoadObserver : bulkLoadObservers) {
        bulkLoadObserver.preCleanupBulkLoad(ctx, request);
      }
    }

    fs.delete(new Path(request.getBulkToken()), true);
    done.run(CleanupBulkLoadResponse.newBuilder().build());
  } catch (IOException e) {
    ResponseConverter.setControllerException(controller, e);
    done.run(null);  // complete the callback with a null response on failure
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: SecureBulkLoadEndpoint.java

Example 3: process

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
/**
 * Passes a processor to the region to process multiple rows atomically.
 *
 * RowProcessor implementations should be inner classes of your
 * RowProcessorEndpoint so that the RowProcessor can be class-loaded
 * together with the coprocessor endpoint.
 *
 * See {@code TestRowProcessorEndpoint} for an example.
 *
 * The request carries the information needed to construct the processor
 * (see {@link #constructRowProcessorFromRequest}). The processor object
 * defines the read-modify-write procedure.
 */
@Override
public void process(RpcController controller, ProcessRequest request,
    RpcCallback<ProcessResponse> done) {
  ProcessResponse resultProto = null;
  try {
    RowProcessor<S,T> processor = constructRowProcessorFromRequest(request);
    Region region = env.getRegion();
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE;
    region.processRowsWithLocks(processor, nonceGroup, nonce);
    T result = processor.getResult();
    ProcessResponse.Builder b = ProcessResponse.newBuilder();
    b.setRowProcessorResult(result.toByteString());
    resultProto = b.build();
  } catch (Exception e) {
    ResponseConverter.setControllerException(controller, new IOException(e));
  }
  done.run(resultProto);
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: BaseRowProcessorEndpoint.java

Example 4: callMethod

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
@InterfaceAudience.Private
public void callMethod(Descriptors.MethodDescriptor method,
                       RpcController controller,
                       Message request, Message responsePrototype,
                       RpcCallback<Message> callback) {
  Message response = null;
  try {
    response = callExecService(controller, method, request, responsePrototype);
  } catch (IOException ioe) {
    LOG.warn("Call failed on IOException", ioe);
    ResponseConverter.setControllerException(controller, ioe);
  }
  if (callback != null) {
    callback.run(response);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: CoprocessorRpcChannel.java
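On the caller's side, the RpcController handed to callMethod is usually a ServerRpcController, which stores the IOException recorded by setControllerException so the caller can rethrow it. The following is a hedged sketch of that retrieval, assuming a coprocessor service stub named service and an already-built request; MyResponse is a placeholder type, not part of the examples above.

import java.io.IOException;

import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.ServerRpcController;

ServerRpcController controller = new ServerRpcController();
BlockingRpcCallback<MyResponse> rpcCallback = new BlockingRpcCallback<MyResponse>();

// Invoke the endpoint through the generated service stub (a placeholder here).
service.myEndpointMethod(controller, request, rpcCallback);

MyResponse response = rpcCallback.get();  // null when the server hit an IOException
if (controller.failedOnException()) {
  throw controller.getFailedOn();  // the exact IOException set on the server side
}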

Example 5: callMethod

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public void callMethod(Descriptors.MethodDescriptor method,
                       RpcController controller,
                       Message request, Message responsePrototype,
                       RpcCallback<Message> callback) {
  Message response = null;
  try {
    response = callExecService(method, request, responsePrototype);
  } catch (IOException ioe) {
    LOG.warn("Call failed on IOException", ioe);
    ResponseConverter.setControllerException(controller, ioe);
  }
  if (callback != null) {
    callback.run(response);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 17, Source: CoprocessorRpcChannel.java

Example 6: callMethod

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
@InterfaceAudience.Private
public void callMethod(Descriptors.MethodDescriptor method,
                       RpcController controller,
                       Message request, Message responsePrototype,
                       RpcCallback<Message> callback) {
  Message response = null;
  try {
    response = callExecService(method, request, responsePrototype);
  } catch (IOException ioe) {
    LOG.warn("Call failed on IOException", ioe);
    ResponseConverter.setControllerException(controller, ioe);
  }
  if (callback != null) {
    callback.run(response);
  }
}
 
Developer: grokcoder, Project: pbase, Lines: 18, Source: CoprocessorRpcChannel.java

Example 7: getAuths

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public synchronized void getAuths(RpcController controller, GetAuthsRequest request,
    RpcCallback<GetAuthsResponse> done) {
  byte[] user = request.getUser().toByteArray();
  GetAuthsResponse.Builder response = GetAuthsResponse.newBuilder();
  response.setUser(request.getUser());
  try {
    List<String> labels = getUserAuthsFromLabelsTable(user);
    for (String label : labels) {
      response.addAuth(HBaseZeroCopyByteString.wrap(Bytes.toBytes(label)));
    }
  } catch (IOException e) {
    ResponseConverter.setControllerException(controller, e);
  }
  done.run(response.build());
}
 
Developer: tenggyut, Project: HIndex, Lines: 17, Source: VisibilityController.java

Example 8: process

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
/**
 * Passes a processor to the HRegion to process multiple rows atomically.
 *
 * RowProcessor implementations should be inner classes of your
 * RowProcessorEndpoint so that the RowProcessor can be class-loaded
 * together with the coprocessor endpoint.
 *
 * See {@code TestRowProcessorEndpoint} for an example.
 *
 * The request carries the information needed to construct the processor
 * (see {@link #constructRowProcessorFromRequest}). The processor object
 * defines the read-modify-write procedure.
 */
@Override
public void process(RpcController controller, ProcessRequest request,
    RpcCallback<ProcessResponse> done) {
  ProcessResponse resultProto = null;
  try {
    RowProcessor<S,T> processor = constructRowProcessorFromRequest(request);
    HRegion region = env.getRegion();
    region.processRowsWithLocks(processor);
    T result = processor.getResult();
    ProcessResponse.Builder b = ProcessResponse.newBuilder();
    b.setRowProcessorResult(result.toByteString());
    resultProto = b.build();
  } catch (Exception e) {
    ResponseConverter.setControllerException(controller, new IOException(e));
  }
  done.run(resultProto);
}
 
Developer: cloud-software-foundation, Project: c5, Lines: 31, Source: BaseRowProcessorEndpoint.java

Example 9: cleanupBulkLoad

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public void cleanupBulkLoad(RpcController controller,
                            CleanupBulkLoadRequest request,
                            RpcCallback<CleanupBulkLoadResponse> done) {
  try {
    getAccessController().preCleanupBulkLoad(env);
    fs.delete(createStagingDir(baseStagingDir,
        getActiveUser(),
        env.getRegion().getTableDesc().getTableName(),
        new Path(request.getBulkToken()).getName()),
        true);
    done.run(CleanupBulkLoadResponse.newBuilder().build());
  } catch (IOException e) {
    ResponseConverter.setControllerException(controller, e);
    done.run(null);  // complete the callback with a null response on failure
  }
}
 
Developer: cloud-software-foundation, Project: c5, Lines: 18, Source: SecureBulkLoadEndpoint.java

Example 10: process

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
/**
 * Passes a processor to the HRegion to process multiple rows atomically.
 *
 * RowProcessor implementations should be inner classes of your
 * RowProcessorEndpoint so that the RowProcessor can be class-loaded
 * together with the coprocessor endpoint.
 *
 * See {@code TestRowProcessorEndpoint} for an example.
 *
 * The request carries the information needed to construct the processor
 * (see {@link #constructRowProcessorFromRequest}). The processor object
 * defines the read-modify-write procedure.
 */
@Override
public void process(RpcController controller, ProcessRequest request,
    RpcCallback<ProcessResponse> done) {
  ProcessResponse resultProto = null;
  try {
    RowProcessor<S,T> processor = constructRowProcessorFromRequest(request);
    HRegion region = env.getRegion();
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE;
    region.processRowsWithLocks(processor, nonceGroup, nonce);
    T result = processor.getResult();
    ProcessResponse.Builder b = ProcessResponse.newBuilder();
    b.setRowProcessorResult(result.toByteString());
    resultProto = b.build();
  } catch (Exception e) {
    ResponseConverter.setControllerException(controller, new IOException(e));
  }
  done.run(resultProto);
}
 
Developer: shenli-uiuc, Project: PyroDB, Lines: 33, Source: BaseRowProcessorEndpoint.java

Example 11: getActiveTransactions

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public void getActiveTransactions(RpcController controller,TxnMessage.ActiveTxnRequest request,RpcCallback<TxnMessage.ActiveTxnResponse> done){
    long endTxnId=request.getEndTxnId();
    long startTxnId=request.getStartTxnId();
    try{
        byte[] destTables=null;
        if(request.hasDestinationTables())
            destTables=request.getDestinationTables().toByteArray();
        Source<TxnMessage.Txn> activeTxns=lifecycleStore.getActiveTransactions(destTables,startTxnId,endTxnId);
        TxnMessage.ActiveTxnResponse.Builder response=TxnMessage.ActiveTxnResponse.newBuilder();
        while(activeTxns.hasNext()){
            response.addTxns(activeTxns.next());
        }
        done.run(response.build());
    }catch(IOException e){
        ResponseConverter.setControllerException(controller,e);
    }

}
 
Developer: splicemachine, Project: spliceengine, Lines: 20, Source: TxnLifecycleEndpoint.java

Example 12: getTransaction

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public void getTransaction(RpcController controller,TxnMessage.TxnRequest request,RpcCallback<TxnMessage.Txn> done){
    try{
        long txnId=request.getTxnId();
        boolean isOld = request.hasIsOld() && request.getIsOld();
        TxnMessage.Txn transaction;
        if (isOld) {
            transaction = lifecycleStore.getOldTransaction(txnId);
        } else {
            transaction = lifecycleStore.getTransaction(txnId);
        }
        done.run(transaction);
    }catch(IOException ioe){
        ResponseConverter.setControllerException(controller,ioe);
    }
}
 
Developer: splicemachine, Project: spliceengine, Lines: 17, Source: TxnLifecycleEndpoint.java

Example 13: getActiveTransactionIds

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public void getActiveTransactionIds(RpcController controller,TxnMessage.ActiveTxnRequest request,RpcCallback<TxnMessage.ActiveTxnIdResponse> done){
    long endTxnId=request.getEndTxnId();
    long startTxnId=request.getStartTxnId();
    try{
        byte[] destTables=null;
        if(request.hasDestinationTables())
            destTables=request.getDestinationTables().toByteArray();
        long[] activeTxnIds=lifecycleStore.getActiveTransactionIds(destTables,startTxnId,endTxnId);
        TxnMessage.ActiveTxnIdResponse.Builder response=TxnMessage.ActiveTxnIdResponse.newBuilder();
        //noinspection ForLoopReplaceableByForEach
        for(int i=0;i<activeTxnIds.length;i++){
            response.addActiveTxnIds(activeTxnIds[i]);
        }
        done.run(response.build());
    }catch(IOException e){
        ResponseConverter.setControllerException(controller,e);
    }
}
 
Developer: splicemachine, Project: spliceengine, Lines: 20, Source: TxnLifecycleEndpoint.java

Example 14: getAuthenticationToken

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public void getAuthenticationToken(RpcController controller,
                                   AuthenticationProtos.GetAuthenticationTokenRequest request,
                                   RpcCallback<AuthenticationProtos.GetAuthenticationTokenResponse> done) {
  AuthenticationProtos.GetAuthenticationTokenResponse.Builder response =
      AuthenticationProtos.GetAuthenticationTokenResponse.newBuilder();

  try {
    if (secretManager == null) {
      throw new IOException(
          "No secret manager configured for token authentication");
    }

    User currentUser = RpcServer.getRequestUser();
    UserGroupInformation ugi = null;
    if (currentUser != null) {
      ugi = currentUser.getUGI();
    }
    if (currentUser == null) {
      throw new AccessDeniedException("No authenticated user for request!");
    } else if (!isAllowedDelegationTokenOp(ugi)) {
      LOG.warn("Token generation denied for user="+currentUser.getName()
          +", authMethod="+ugi.getAuthenticationMethod());
      throw new AccessDeniedException(
          "Token generation only allowed for Kerberos authenticated clients");
    }

    Token<AuthenticationTokenIdentifier> token =
        secretManager.generateToken(currentUser.getName());
    response.setToken(ProtobufUtil.toToken(token));
  } catch (IOException ioe) {
    ResponseConverter.setControllerException(controller, ioe);
  }
  done.run(response.build());
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: TokenProvider.java

Example 15: dummyThrow

import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the package/class this method depends on
@Override
public void dummyThrow(RpcController controller,
    DummyRequest request,
    RpcCallback<DummyResponse> done) {
  ResponseConverter.setControllerException(controller, WHAT_TO_THROW);
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: TestRegionServerCoprocessorEndpoint.java
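A test built around this endpoint would assert that the injected WHAT_TO_THROW reaches the client through the controller rather than through the response. Below is a hedged sketch of such an assertion, assuming JUnit and a service stub named service (both placeholders, not taken from this page):

import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.ServerRpcController;

ServerRpcController controller = new ServerRpcController();
BlockingRpcCallback<DummyResponse> callback = new BlockingRpcCallback<DummyResponse>();

service.dummyThrow(controller, DummyRequest.getDefaultInstance(), callback);

assertNull(callback.get());                  // no response is produced on failure
assertTrue(controller.failedOnException());  // WHAT_TO_THROW was recorded on the controller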


Note: The org.apache.hadoop.hbase.protobuf.ResponseConverter.setControllerException method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Consult each project's license before distributing or reusing the code; do not republish without permission.