This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.hbase.protobuf.ResponseConverter. If you are wondering what ResponseConverter is for or how to use it, the curated class code examples below may help.
The ResponseConverter class belongs to the org.apache.hadoop.hbase.protobuf package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
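Before the individual examples, here is a minimal sketch of the pattern nearly all of them share: a protobuf-based coprocessor RPC method that reports failures through ResponseConverter.setControllerException rather than throwing, and always completes its callback. The EchoService request/response names are hypothetical and only illustrate the shape of the pattern.

@Override
public void echo(RpcController controller, EchoRequest request,
    RpcCallback<EchoResponse> done) {
  EchoResponse response = null;
  try {
    response = EchoResponse.newBuilder()
        .setMessage(request.getMessage())
        .build();
  } catch (Exception e) {
    // Attach the error to the controller; the RPC layer surfaces it
    // to the client as a remote exception.
    ResponseConverter.setControllerException(controller, new IOException(e));
  }
  done.run(response); // null on failure
}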
Example 1: prepareBulkLoad
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
public void prepareBulkLoad(RpcController controller,
    PrepareBulkLoadRequest request,
    RpcCallback<PrepareBulkLoadResponse> done) {
  try {
    List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers();
    if (bulkLoadObservers != null) {
      ObserverContext<RegionCoprocessorEnvironment> ctx =
          new ObserverContext<RegionCoprocessorEnvironment>();
      ctx.prepare(env);
      for (BulkLoadObserver bulkLoadObserver : bulkLoadObservers) {
        bulkLoadObserver.prePrepareBulkLoad(ctx, request);
      }
    }
    String bulkToken = createStagingDir(baseStagingDir,
        getActiveUser(), ProtobufUtil.toTableName(request.getTableName())).toString();
    done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build());
  } catch (IOException e) {
    ResponseConverter.setControllerException(controller, e);
    // Complete the callback on the failure path as well; the exception
    // itself travels back to the client via the controller.
    done.run(null);
  }
}
Example 2: cleanupBulkLoad
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
public void cleanupBulkLoad(RpcController controller,
    CleanupBulkLoadRequest request,
    RpcCallback<CleanupBulkLoadResponse> done) {
  try {
    List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers();
    if (bulkLoadObservers != null) {
      ObserverContext<RegionCoprocessorEnvironment> ctx =
          new ObserverContext<RegionCoprocessorEnvironment>();
      ctx.prepare(env);
      for (BulkLoadObserver bulkLoadObserver : bulkLoadObservers) {
        bulkLoadObserver.preCleanupBulkLoad(ctx, request);
      }
    }
    fs.delete(new Path(request.getBulkToken()), true);
    done.run(CleanupBulkLoadResponse.newBuilder().build());
  } catch (IOException e) {
    ResponseConverter.setControllerException(controller, e);
    // Complete the callback on the failure path as well.
    done.run(null);
  }
}
Example 3: sendRegionOpen
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
/**
 * Sends an OPEN RPC to the specified server to open the specified region.
 * <p>
 * Open should not fail, but it can if the server just crashed.
 * <p>
 * @param server server to open the region on
 * @param region region to open
 * @param versionOfOfflineNode version that needs to be present in the offline node
 *          when the RS tries to change the state from OFFLINE to other states.
 * @param favoredNodes favored nodes for the region
 */
public RegionOpeningState sendRegionOpen(final ServerName server,
    HRegionInfo region, int versionOfOfflineNode, List<ServerName> favoredNodes)
    throws IOException {
  AdminService.BlockingInterface admin = getRsAdmin(server);
  if (admin == null) {
    LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
    return RegionOpeningState.FAILED_OPENING;
  }
  OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server,
      region, versionOfOfflineNode, favoredNodes,
      (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
  try {
    OpenRegionResponse response = admin.openRegion(null, request);
    return ResponseConverter.getRegionOpeningState(response);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
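The method returns one of the RegionOpeningState values that ResponseConverter.getRegionOpeningState extracts from the protobuf response. A hedged sketch of how a caller might branch on the result; serverManager and the other locals are assumed to exist in the caller's context:

RegionOpeningState state =
    serverManager.sendRegionOpen(server, region, versionOfOfflineNode, favoredNodes);
switch (state) {
  case OPENED:         // the RS accepted the request and is opening the region
  case ALREADY_OPENED: // the region was already open on that server
    break;
  case FAILED_OPENING: // no RPC connection, or the open was rejected
    // typically retried or re-assigned by the assignment logic
    break;
}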
Example 4: process
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
/**
 * Pass a processor to the region to process multiple rows atomically.
 *
 * The RowProcessor implementations should be inner classes of your
 * RowProcessorEndpoint. This way the RowProcessor can be class-loaded together
 * with the Coprocessor endpoint.
 *
 * See {@code TestRowProcessorEndpoint} for an example.
 *
 * The request contains the information needed to construct the processor
 * (see {@link #constructRowProcessorFromRequest}). The processor object defines
 * the read-modify-write procedure.
 */
@Override
public void process(RpcController controller, ProcessRequest request,
    RpcCallback<ProcessResponse> done) {
  ProcessResponse resultProto = null;
  try {
    RowProcessor<S,T> processor = constructRowProcessorFromRequest(request);
    Region region = env.getRegion();
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE;
    region.processRowsWithLocks(processor, nonceGroup, nonce);
    T result = processor.getResult();
    ProcessResponse.Builder b = ProcessResponse.newBuilder();
    b.setRowProcessorResult(result.toByteString());
    resultProto = b.build();
  } catch (Exception e) {
    ResponseConverter.setControllerException(controller, new IOException(e));
  }
  done.run(resultProto);
}
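The javadoc refers to constructRowProcessorFromRequest without showing it. A minimal sketch of what such a factory method can look like, assuming the request carries the processor's fully qualified class name (ProcessRequest does expose getRowProcessorClassName(); the reflection details here are illustrative rather than the exact HBase implementation):

@SuppressWarnings("unchecked")
RowProcessor<S,T> constructRowProcessorFromRequest(ProcessRequest request)
    throws IOException {
  try {
    // The client serializes the class name of the inner RowProcessor
    // subclass into the request; the endpoint instantiates it reflectively.
    Class<?> cls = Class.forName(request.getRowProcessorClassName());
    return (RowProcessor<S,T>) cls.newInstance();
  } catch (Exception e) {
    throw new IOException(e);
  }
}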
Example 5: getOnlineRegion
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public GetOnlineRegionResponse getOnlineRegion(final RpcController controller,
    final GetOnlineRegionRequest request) throws ServiceException {
  try {
    checkOpen();
    requestCount.increment();
    Map<String, Region> onlineRegions = regionServer.onlineRegions;
    List<HRegionInfo> list = new ArrayList<HRegionInfo>(onlineRegions.size());
    for (Region region : onlineRegions.values()) {
      list.add(region.getRegionInfo());
    }
    Collections.sort(list);
    return ResponseConverter.buildGetOnlineRegionResponse(list);
  } catch (IOException ie) {
    throw new ServiceException(ie);
  }
}
Example 6: callMethod
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
@InterfaceAudience.Private
public void callMethod(Descriptors.MethodDescriptor method,
    RpcController controller,
    Message request, Message responsePrototype,
    RpcCallback<Message> callback) {
  Message response = null;
  try {
    response = callExecService(controller, method, request, responsePrototype);
  } catch (IOException ioe) {
    LOG.warn("Call failed on IOException", ioe);
    ResponseConverter.setControllerException(controller, ioe);
  }
  if (callback != null) {
    callback.run(response);
  }
}
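This callMethod implementation is the channel side of a coprocessor RPC. A hedged sketch of the client code that ends up funnelling through it via an async stub; MyService, MyRequest, and MyResponse are hypothetical generated protobuf types, while Table.coprocessorService, ServerRpcController, and BlockingRpcCallback are real HBase 1.x API:

try (Table table = connection.getTable(tableName)) {
  // Obtain a channel scoped to the region containing rowKey.
  CoprocessorRpcChannel channel = table.coprocessorService(rowKey);
  MyService stub = MyService.newStub(channel);
  ServerRpcController rpcController = new ServerRpcController();
  BlockingRpcCallback<MyResponse> callback = new BlockingRpcCallback<MyResponse>();
  // Dispatches through a callMethod(...) implementation like the one above.
  stub.myCall(rpcController, MyRequest.getDefaultInstance(), callback);
  MyResponse resp = callback.get(); // blocks until the callback runs
  if (rpcController.failedOnException()) {
    throw rpcController.getFailedOn(); // error set via setControllerException
  }
}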
Example 7: process
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
/**
 * Pass a processor to the HRegion to process multiple rows atomically.
 *
 * The RowProcessor implementations should be inner classes of your
 * RowProcessorEndpoint. This way the RowProcessor can be class-loaded together
 * with the Coprocessor endpoint.
 *
 * See {@code TestRowProcessorEndpoint} for an example.
 *
 * The request contains the information needed to construct the processor
 * (see {@link #constructRowProcessorFromRequest}). The processor object defines
 * the read-modify-write procedure.
 */
@Override
public void process(RpcController controller, ProcessRequest request,
    RpcCallback<ProcessResponse> done) {
  ProcessResponse resultProto = null;
  try {
    RowProcessor<S,T> processor = constructRowProcessorFromRequest(request);
    HRegion region = env.getRegion();
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE;
    region.processRowsWithLocks(processor, nonceGroup, nonce);
    T result = processor.getResult();
    ProcessResponse.Builder b = ProcessResponse.newBuilder();
    b.setRowProcessorResult(result.toByteString());
    resultProto = b.build();
  } catch (Exception e) {
    ResponseConverter.setControllerException(controller, new IOException(e));
  }
  done.run(resultProto);
}
Example 8: getOnlineRegion
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public GetOnlineRegionResponse getOnlineRegion(final RpcController controller,
    final GetOnlineRegionRequest request) throws ServiceException {
  try {
    checkOpen();
    requestCount.increment();
    Map<String, HRegion> onlineRegions = regionServer.onlineRegions;
    List<HRegionInfo> list = new ArrayList<HRegionInfo>(onlineRegions.size());
    for (HRegion region : onlineRegions.values()) {
      list.add(region.getRegionInfo());
    }
    Collections.sort(list);
    return ResponseConverter.buildGetOnlineRegionResponse(list);
  } catch (IOException ie) {
    throw new ServiceException(ie);
  }
}
Example 9: call
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
public Result[] call(int timeout) throws IOException {
  if (this.closed) return null;
  if (Thread.interrupted()) {
    throw new InterruptedIOException();
  }
  ScanRequest request = RequestConverter.buildScanRequest(getLocation()
      .getRegionInfo().getRegionName(), getScan(), getCaching(), true);
  ScanResponse response = null;
  PayloadCarryingRpcController controller = controllerFactory.newController();
  try {
    controller.setPriority(getTableName());
    controller.setCallTimeout(timeout);
    response = getStub().scan(controller, request);
    return ResponseConverter.getResults(controller.cellScanner(), response);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
Example 10: callMethod
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
@InterfaceAudience.Private
public void callMethod(Descriptors.MethodDescriptor method,
    RpcController controller,
    Message request, Message responsePrototype,
    RpcCallback<Message> callback) {
  Message response = null;
  try {
    response = callExecService(method, request, responsePrototype);
  } catch (IOException ioe) {
    LOG.warn("Call failed on IOException", ioe);
    ResponseConverter.setControllerException(controller, ioe);
  }
  if (callback != null) {
    callback.run(response);
  }
}
Example 11: getAuths
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
public synchronized void getAuths(RpcController controller, GetAuthsRequest request,
    RpcCallback<GetAuthsResponse> done) {
  byte[] user = request.getUser().toByteArray();
  GetAuthsResponse.Builder response = GetAuthsResponse.newBuilder();
  response.setUser(request.getUser());
  try {
    List<String> labels = getUserAuthsFromLabelsTable(user);
    for (String label : labels) {
      response.addAuth(HBaseZeroCopyByteString.wrap(Bytes.toBytes(label)));
    }
  } catch (IOException e) {
    ResponseConverter.setControllerException(controller, e);
  }
  done.run(response.build());
}
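For context, a hedged client-side sketch of invoking this endpoint through the VisibilityClient helper, assuming an HBase version that still offers the Configuration-based overload (which declares Throwable, hence the broad catch); the user name is made up:

Configuration conf = HBaseConfiguration.create();
try {
  // Invokes the getAuths coprocessor endpoint shown above.
  GetAuthsResponse response = VisibilityClient.getAuths(conf, "some_user");
  for (ByteString auth : response.getAuthList()) {
    System.out.println(Bytes.toString(auth.toByteArray()));
  }
} catch (Throwable t) {
  t.printStackTrace();
}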
Example 12: cleanupBulkLoad
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
public void cleanupBulkLoad(RpcController controller,
    CleanupBulkLoadRequest request,
    RpcCallback<CleanupBulkLoadResponse> done) {
  try {
    getAccessController().preCleanupBulkLoad(env);
    fs.delete(createStagingDir(baseStagingDir,
        getActiveUser(),
        env.getRegion().getTableDesc().getTableName(),
        new Path(request.getBulkToken()).getName()),
        true);
    done.run(CleanupBulkLoadResponse.newBuilder().build());
  } catch (IOException e) {
    ResponseConverter.setControllerException(controller, e);
    // Complete the callback on the failure path as well.
    done.run(null);
  }
}
Example 13: sendRegionOpen
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
/**
 * Sends an OPEN RPC to the specified server to open the specified region.
 * <p>
 * Open should not fail, but it can if the server just crashed.
 * <p>
 * @param server server to open the region on
 * @param region region to open
 * @param versionOfOfflineNode version that needs to be present in the offline node
 *          when the RS tries to change the state from OFFLINE to other states.
 * @param favoredNodes favored nodes for the region
 */
public RegionOpeningState sendRegionOpen(final ServerName server,
    HRegionInfo region, int versionOfOfflineNode, List<ServerName> favoredNodes)
    throws IOException {
  AdminService.BlockingInterface admin = getRsAdmin(server);
  if (admin == null) {
    LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
    return RegionOpeningState.FAILED_OPENING;
  }
  OpenRegionRequest request =
      RequestConverter.buildOpenRegionRequest(server, region, versionOfOfflineNode, favoredNodes);
  try {
    OpenRegionResponse response = admin.openRegion(null, request);
    return ResponseConverter.getRegionOpeningState(response);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
Example 14: getOnlineRegion
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
@Override
@QosPriority(priority = HConstants.HIGH_QOS)
public GetOnlineRegionResponse getOnlineRegion(final RpcController controller,
    final GetOnlineRegionRequest request) throws ServiceException {
  try {
    checkOpen();
    requestCount.increment();
    List<HRegionInfo> list = new ArrayList<HRegionInfo>(onlineRegions.size());
    for (HRegion region : this.onlineRegions.values()) {
      list.add(region.getRegionInfo());
    }
    Collections.sort(list);
    return ResponseConverter.buildGetOnlineRegionResponse(list);
  } catch (IOException ie) {
    throw new ServiceException(ie);
  }
}
Example 15: getSmallScanCallable
import org.apache.hadoop.hbase.protobuf.ResponseConverter; // import the required package/class
static RegionServerCallable<Result[]> getSmallScanCallable(
    final Scan sc, HConnection connection, TableName table, byte[] localStartKey,
    final int cacheNum, final RpcControllerFactory rpcControllerFactory) throws IOException {
  sc.setStartRow(localStartKey);
  RegionServerCallable<Result[]> callable = new RegionServerCallable<Result[]>(
      connection, table, sc.getStartRow()) {
    public Result[] call() throws IOException {
      ScanRequest request = RequestConverter.buildScanRequest(getLocation()
          .getRegionInfo().getRegionName(), sc, cacheNum, true);
      ScanResponse response = null;
      PayloadCarryingRpcController controller = rpcControllerFactory.newController();
      try {
        controller.setPriority(getTableName());
        response = getStub().scan(controller, request);
        return ResponseConverter.getResults(controller.cellScanner(), response);
      } catch (ServiceException se) {
        throw ProtobufUtil.getRemoteException(se);
      }
    }
  };
  return callable;
}
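A hedged usage sketch of the returned callable, executed through the retrying caller as client code of this era typically did; conf, scan, and the other locals are assumed to exist in the surrounding context:

RegionServerCallable<Result[]> callable = getSmallScanCallable(
    scan, connection, tableName, startKey, caching, rpcControllerFactory);
RpcRetryingCaller<Result[]> caller =
    new RpcRetryingCallerFactory(conf).<Result[]>newCaller();
// Runs the one-shot small scan, retrying on recoverable failures.
Result[] results = caller.callWithRetries(callable, operationTimeout);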