This article collects typical usage examples of the BlockingRpcCallback.get method from the Java class org.apache.hadoop.hbase.ipc.BlockingRpcCallback. If you are wondering what BlockingRpcCallback.get does, how to call it, or how it is used in practice, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.ipc.BlockingRpcCallback.
The following presents 13 code examples of BlockingRpcCallback.get, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
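All of the examples share the same blocking call pattern: hand a BlockingRpcCallback to an asynchronous protobuf service method, then call get() to wait for the response, and consult a ServerRpcController for any server-side failure. The sketch below distills that pattern into one minimal, self-contained client call. It is assembled from the examples on this page rather than copied from any one of them, and it assumes an HBase 1.x-era client API in which BlockingRpcCallback still lives in org.apache.hadoop.hbase.ipc (later releases deprecate it in favor of CoprocessorRpcUtils.BlockingRpcCallback); the Table handle and the presence of the AuthenticationService (token provider) coprocessor on the region servers are also assumed. The class name WhoAmIClient is illustrative, not part of HBase.

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;

// Illustrative helper class; the name is not part of HBase.
public class WhoAmIClient {
  /**
   * Asks the AuthenticationService coprocessor endpoint who the current user is.
   * The generated stub method is asynchronous, so a BlockingRpcCallback is passed
   * in and callback.get() blocks the calling thread until the response arrives.
   */
  public static String whoAmI(Table table) throws IOException {
    try {
      // Open a coprocessor RPC channel to the region holding the empty start row.
      CoprocessorRpcChannel channel = table.coprocessorService(HConstants.EMPTY_START_ROW);
      AuthenticationProtos.AuthenticationService service =
          ProtobufUtil.newServiceStub(AuthenticationProtos.AuthenticationService.class, channel);

      ServerRpcController controller = new ServerRpcController();
      BlockingRpcCallback<AuthenticationProtos.WhoAmIResponse> callback =
          new BlockingRpcCallback<AuthenticationProtos.WhoAmIResponse>();

      // Fire the asynchronous call; the callback captures the response when the endpoint replies.
      service.whoAmI(controller, AuthenticationProtos.WhoAmIRequest.getDefaultInstance(), callback);

      // get() blocks until the callback's run(response) has been invoked, then returns the response.
      AuthenticationProtos.WhoAmIResponse response = callback.get();
      if (controller.failedOnException()) {
        throw controller.getFailedOn();
      }
      return response == null ? null : response.getUsername();
    } catch (Throwable t) {
      throw new IOException(t);
    }
  }
}

The same skeleton appears in every example below; only the service stub, the request message, and the way the controller's failure state is surfaced (checkFailed() versus failedOnException()/getFailedOn()) change from one snippet to the next.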
Example 1: getAuthenticationToken
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
@Override
public AuthenticationProtos.GetAuthenticationTokenResponse getAuthenticationToken(
    RpcController controller, AuthenticationProtos.GetAuthenticationTokenRequest request)
    throws ServiceException {
  LOG.debug("Authentication token request from " + RpcServer.getRequestUserName());
  // ignore passed in controller -- it's always null
  ServerRpcController serverController = new ServerRpcController();
  BlockingRpcCallback<AuthenticationProtos.GetAuthenticationTokenResponse> callback =
      new BlockingRpcCallback<AuthenticationProtos.GetAuthenticationTokenResponse>();
  getAuthenticationToken(serverController, request, callback);
  try {
    serverController.checkFailed();
    return callback.get();
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
Example 2: whoAmI
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
@Override
public AuthenticationProtos.WhoAmIResponse whoAmI(
    RpcController controller, AuthenticationProtos.WhoAmIRequest request)
    throws ServiceException {
  LOG.debug("whoAmI() request from " + RpcServer.getRequestUserName());
  // ignore passed in controller -- it's always null
  ServerRpcController serverController = new ServerRpcController();
  BlockingRpcCallback<AuthenticationProtos.WhoAmIResponse> callback =
      new BlockingRpcCallback<AuthenticationProtos.WhoAmIResponse>();
  whoAmI(serverController, request, callback);
  try {
    serverController.checkFailed();
    return callback.get();
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
Example 3: getAuthenticationToken
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
@Override
public AuthenticationProtos.GetAuthenticationTokenResponse getAuthenticationToken(
    RpcController controller, AuthenticationProtos.GetAuthenticationTokenRequest request)
    throws ServiceException {
  LOG.debug("Authentication token request from " + RequestContext.getRequestUserName());
  // ignore passed in controller -- it's always null
  ServerRpcController serverController = new ServerRpcController();
  BlockingRpcCallback<AuthenticationProtos.GetAuthenticationTokenResponse> callback =
      new BlockingRpcCallback<AuthenticationProtos.GetAuthenticationTokenResponse>();
  getAuthenticationToken(serverController, request, callback);
  try {
    serverController.checkFailed();
    return callback.get();
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
Example 4: whoAmI
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
@Override
public AuthenticationProtos.WhoAmIResponse whoAmI(
    RpcController controller, AuthenticationProtos.WhoAmIRequest request)
    throws ServiceException {
  LOG.debug("whoAmI() request from " + RequestContext.getRequestUserName());
  // ignore passed in controller -- it's always null
  ServerRpcController serverController = new ServerRpcController();
  BlockingRpcCallback<AuthenticationProtos.WhoAmIResponse> callback =
      new BlockingRpcCallback<AuthenticationProtos.WhoAmIResponse>();
  whoAmI(serverController, request, callback);
  try {
    serverController.checkFailed();
    return callback.get();
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
Example 5: getAuthenticationToken
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
@Override
public AuthenticationProtos.TokenResponse getAuthenticationToken(
    RpcController controller, AuthenticationProtos.TokenRequest request)
    throws ServiceException {
  LOG.debug("Authentication token request from " + RequestContext.getRequestUserName());
  // ignore passed in controller -- it's always null
  ServerRpcController serverController = new ServerRpcController();
  BlockingRpcCallback<AuthenticationProtos.TokenResponse> callback =
      new BlockingRpcCallback<AuthenticationProtos.TokenResponse>();
  getAuthenticationToken(serverController, request, callback);
  try {
    serverController.checkFailed();
    return callback.get();
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
Example 6: whoami
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
@Override
public AuthenticationProtos.WhoAmIResponse whoami(
    RpcController controller, AuthenticationProtos.WhoAmIRequest request)
    throws ServiceException {
  LOG.debug("whoami() request from " + RequestContext.getRequestUserName());
  // ignore passed in controller -- it's always null
  ServerRpcController serverController = new ServerRpcController();
  BlockingRpcCallback<AuthenticationProtos.WhoAmIResponse> callback =
      new BlockingRpcCallback<AuthenticationProtos.WhoAmIResponse>();
  whoami(serverController, request, callback);
  try {
    serverController.checkFailed();
    return callback.get();
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
Example 7: prepareBulkLoad
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
public String prepareBulkLoad(final TableName tableName) throws IOException {
  try {
    CoprocessorRpcChannel channel = table.coprocessorService(HConstants.EMPTY_START_ROW);
    SecureBulkLoadProtos.SecureBulkLoadService instance =
        ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel);
    ServerRpcController controller = new ServerRpcController();
    BlockingRpcCallback<SecureBulkLoadProtos.PrepareBulkLoadResponse> rpcCallback =
        new BlockingRpcCallback<SecureBulkLoadProtos.PrepareBulkLoadResponse>();
    SecureBulkLoadProtos.PrepareBulkLoadRequest request =
        SecureBulkLoadProtos.PrepareBulkLoadRequest.newBuilder()
            .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
    instance.prepareBulkLoad(controller, request, rpcCallback);
    SecureBulkLoadProtos.PrepareBulkLoadResponse response = rpcCallback.get();
    if (controller.failedOnException()) {
      throw controller.getFailedOn();
    }
    return response.getBulkToken();
  } catch (Throwable throwable) {
    throw new IOException(throwable);
  }
}
Example 8: lifecycleAction
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
@Override
public TxnMessage.ActionResponse lifecycleAction(byte[] rowKey, TxnMessage.TxnLifecycleMessage lifecycleMessage) throws IOException {
  TxnMessage.TxnLifecycleService service = getLifecycleService(rowKey);
  ServerRpcController controller = new ServerRpcController();
  BlockingRpcCallback<TxnMessage.ActionResponse> done = new BlockingRpcCallback<>();
  service.lifecycleAction(controller, lifecycleMessage, done);
  dealWithError(controller);
  return done.get();
}
Example 9: getTxn
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
@Override
public TxnMessage.Txn getTxn(byte[] rowKey, TxnMessage.TxnRequest request) throws IOException {
  TxnMessage.TxnLifecycleService service = getLifecycleService(rowKey);
  ServerRpcController controller = new ServerRpcController();
  BlockingRpcCallback<TxnMessage.Txn> done = new BlockingRpcCallback<>();
  service.getTransaction(controller, request, done);
  dealWithError(controller);
  return done.get();
}
Example 10: invoke
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
public BulkWritesResult invoke(BulkWrites write) throws IOException {
  TableName tableName = tableInfoFactory.getTableInfo(this.tableName);
  CoprocessorRpcChannel channel = channelFactory.newChannel(tableName, write.getRegionKey());
  boolean cacheCheck = false;
  try {
    SpliceMessage.SpliceIndexService service = ProtobufUtil.newServiceStub(SpliceMessage.SpliceIndexService.class, channel);
    SpliceMessage.BulkWriteRequest.Builder builder = SpliceMessage.BulkWriteRequest.newBuilder();
    byte[] requestBytes = compressor.compress(write);
    builder.setBytes(ZeroCopyLiteralByteString.wrap(requestBytes));
    SpliceMessage.BulkWriteRequest bwr = builder.build();
    BlockingRpcCallback<SpliceMessage.BulkWriteResponse> doneCallback = new BlockingRpcCallback<>();
    ServerRpcController controller = new ServerRpcController();
    service.bulkWrite(controller, bwr, doneCallback);
    if (controller.failed()) {
      IOException error = controller.getFailedOn();
      clearCacheIfNeeded(error);
      cacheCheck = true;
      if (error != null)
        throw pef.processRemoteException(error);
      else
        throw pef.fromErrorString(controller.errorText());
    }
    SpliceMessage.BulkWriteResponse bulkWriteResponse = doneCallback.get();
    byte[] bytes = bulkWriteResponse.getBytes().toByteArray();
    if (bytes == null || bytes.length <= 0) {
      Logger logger = Logger.getLogger(BulkWriteChannelInvoker.class);
      logger.error("zero-length bytes returned with a null error for encodedString: " + write.getBulkWrites().iterator().next().getEncodedStringName());
    }
    return compressor.decompress(bytes, BulkWritesResult.class);
  } catch (Exception e) {
    if (!cacheCheck) clearCacheIfNeeded(e);
    throw pef.processRemoteException(e);
  }
}
Example 11: bulkLoadHFiles
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
public boolean bulkLoadHFiles(final List<Pair<byte[], String>> familyPaths,
    final Token<?> userToken,
    final String bulkToken,
    final byte[] startRow) throws IOException {
  // we never want to send a batch of HFiles to all regions, thus cannot call
  // HTable#coprocessorService methods that take start and end rowkeys; see HBASE-9639
  try {
    CoprocessorRpcChannel channel = table.coprocessorService(startRow);
    SecureBulkLoadProtos.SecureBulkLoadService instance =
        ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel);
    SecureBulkLoadProtos.DelegationToken protoDT =
        SecureBulkLoadProtos.DelegationToken.newBuilder().build();
    if (userToken != null) {
      protoDT =
          SecureBulkLoadProtos.DelegationToken.newBuilder()
              .setIdentifier(ByteStringer.wrap(userToken.getIdentifier()))
              .setPassword(ByteStringer.wrap(userToken.getPassword()))
              .setKind(userToken.getKind().toString())
              .setService(userToken.getService().toString()).build();
    }
    List<ClientProtos.BulkLoadHFileRequest.FamilyPath> protoFamilyPaths =
        new ArrayList<ClientProtos.BulkLoadHFileRequest.FamilyPath>();
    for (Pair<byte[], String> el : familyPaths) {
      protoFamilyPaths.add(ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder()
          .setFamily(ByteStringer.wrap(el.getFirst()))
          .setPath(el.getSecond()).build());
    }
    SecureBulkLoadProtos.SecureBulkLoadHFilesRequest request =
        SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.newBuilder()
            .setFsToken(protoDT)
            .addAllFamilyPath(protoFamilyPaths)
            .setBulkToken(bulkToken).build();
    ServerRpcController controller = new ServerRpcController();
    BlockingRpcCallback<SecureBulkLoadProtos.SecureBulkLoadHFilesResponse> rpcCallback =
        new BlockingRpcCallback<SecureBulkLoadProtos.SecureBulkLoadHFilesResponse>();
    instance.secureBulkLoadHFiles(controller, request, rpcCallback);
    SecureBulkLoadProtos.SecureBulkLoadHFilesResponse response = rpcCallback.get();
    if (controller.failedOnException()) {
      throw controller.getFailedOn();
    }
    return response.getLoaded();
  } catch (Throwable throwable) {
    throw new IOException(throwable);
  }
}
Example 12: bulkLoadHFiles
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
public boolean bulkLoadHFiles(final List<Pair<byte[], String>> familyPaths,
    final Token<?> userToken,
    final String bulkToken,
    final byte[] startRow) throws IOException {
  // we never want to send a batch of HFiles to all regions, thus cannot call
  // HTable#coprocessorService methods that take start and end rowkeys; see HBASE-9639
  try {
    CoprocessorRpcChannel channel = table.coprocessorService(startRow);
    SecureBulkLoadProtos.SecureBulkLoadService instance =
        ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel);
    SecureBulkLoadProtos.DelegationToken protoDT =
        SecureBulkLoadProtos.DelegationToken.newBuilder().build();
    if (userToken != null) {
      protoDT =
          SecureBulkLoadProtos.DelegationToken.newBuilder()
              .setIdentifier(HBaseZeroCopyByteString.wrap(userToken.getIdentifier()))
              .setPassword(HBaseZeroCopyByteString.wrap(userToken.getPassword()))
              .setKind(userToken.getKind().toString())
              .setService(userToken.getService().toString()).build();
    }
    List<ClientProtos.BulkLoadHFileRequest.FamilyPath> protoFamilyPaths =
        new ArrayList<ClientProtos.BulkLoadHFileRequest.FamilyPath>();
    for (Pair<byte[], String> el : familyPaths) {
      protoFamilyPaths.add(ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder()
          .setFamily(HBaseZeroCopyByteString.wrap(el.getFirst()))
          .setPath(el.getSecond()).build());
    }
    SecureBulkLoadProtos.SecureBulkLoadHFilesRequest request =
        SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.newBuilder()
            .setFsToken(protoDT)
            .addAllFamilyPath(protoFamilyPaths)
            .setBulkToken(bulkToken).build();
    ServerRpcController controller = new ServerRpcController();
    BlockingRpcCallback<SecureBulkLoadProtos.SecureBulkLoadHFilesResponse> rpcCallback =
        new BlockingRpcCallback<SecureBulkLoadProtos.SecureBulkLoadHFilesResponse>();
    instance.secureBulkLoadHFiles(controller, request, rpcCallback);
    SecureBulkLoadProtos.SecureBulkLoadHFilesResponse response = rpcCallback.get();
    if (controller.failedOnException()) {
      throw controller.getFailedOn();
    }
    return response.getLoaded();
  } catch (Throwable throwable) {
    throw new IOException(throwable);
  }
}
Example 13: bulkLoadHFiles
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; // import of the package/class this method depends on
public boolean bulkLoadHFiles(final List<Pair<byte[], String>> familyPaths,
    final Token<?> userToken,
    final String bulkToken,
    final byte[] startRow) throws IOException {
  // we never want to send a batch of HFiles to all regions, thus cannot call
  // HTable#coprocessorService methods that take start and end rowkeys; see HBASE-9639
  try {
    CoprocessorRpcChannel channel = table.coprocessorService(startRow);
    SecureBulkLoadProtos.SecureBulkLoadService instance =
        ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel);
    SecureBulkLoadProtos.DelegationToken protoDT =
        SecureBulkLoadProtos.DelegationToken.newBuilder().build();
    if (userToken != null) {
      protoDT =
          SecureBulkLoadProtos.DelegationToken.newBuilder()
              .setIdentifier(ZeroCopyLiteralByteString.wrap(userToken.getIdentifier()))
              .setPassword(ZeroCopyLiteralByteString.wrap(userToken.getPassword()))
              .setKind(userToken.getKind().toString())
              .setService(userToken.getService().toString()).build();
    }
    List<ClientProtos.BulkLoadHFileRequest.FamilyPath> protoFamilyPaths =
        new ArrayList<ClientProtos.BulkLoadHFileRequest.FamilyPath>();
    for (Pair<byte[], String> el : familyPaths) {
      protoFamilyPaths.add(ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder()
          .setFamily(ZeroCopyLiteralByteString.wrap(el.getFirst()))
          .setPath(el.getSecond()).build());
    }
    SecureBulkLoadProtos.SecureBulkLoadHFilesRequest request =
        SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.newBuilder()
            .setFsToken(protoDT)
            .addAllFamilyPath(protoFamilyPaths)
            .setBulkToken(bulkToken).build();
    ServerRpcController controller = new ServerRpcController();
    BlockingRpcCallback<SecureBulkLoadProtos.SecureBulkLoadHFilesResponse> rpcCallback =
        new BlockingRpcCallback<SecureBulkLoadProtos.SecureBulkLoadHFilesResponse>();
    instance.secureBulkLoadHFiles(controller, request, rpcCallback);
    SecureBulkLoadProtos.SecureBulkLoadHFilesResponse response = rpcCallback.get();
    if (controller.failedOnException()) {
      throw controller.getFailedOn();
    }
    return response.getLoaded();
  } catch (Throwable throwable) {
    throw new IOException(throwable);
  }
}