本文整理汇总了Java中org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel类的典型用法代码示例。如果您正苦于以下问题:Java CoprocessorRpcChannel类的具体用法?Java CoprocessorRpcChannel怎么用?Java CoprocessorRpcChannel使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
CoprocessorRpcChannel类属于org.apache.hadoop.hbase.ipc包,在下文中一共展示了CoprocessorRpcChannel类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testTokenAuth
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Exercises the token AuthenticationService over a coprocessor RPC channel using the
 * given RPC client implementation: verifies the authenticated username and that the
 * auth method is TOKEN, then verifies that token generation is rejected for clients
 * that did not authenticate via Kerberos.
 *
 * @param rpcImplClass the RpcClient implementation to install for this connection
 * @throws IOException if connecting to the cluster or opening the meta table fails
 * @throws ServiceException on an unexpected RPC failure
 */
private void testTokenAuth(Class<? extends RpcClient> rpcImplClass) throws IOException,
    ServiceException {
  TEST_UTIL.getConfiguration().set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY,
      rpcImplClass.getName());
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Table table = conn.getTable(TableName.META_TABLE_NAME)) {
    CoprocessorRpcChannel rpcChannel = table.coprocessorService(HConstants.EMPTY_START_ROW);
    AuthenticationProtos.AuthenticationService.BlockingInterface service =
        AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
    WhoAmIResponse response = service.whoAmI(null, WhoAmIRequest.getDefaultInstance());
    assertEquals(USERNAME, response.getUsername());
    assertEquals(AuthenticationMethod.TOKEN.name(), response.getAuthMethod());
    try {
      service.getAuthenticationToken(null, GetAuthenticationTokenRequest.getDefaultInstance());
      // BUG FIX: previously the test passed silently when no exception was thrown here.
      throw new AssertionError(
          "getAuthenticationToken should be rejected for a token-authenticated client");
    } catch (ServiceException e) {
      AccessDeniedException exc = (AccessDeniedException) ProtobufUtil.getRemoteException(e);
      assertTrue(exc.getMessage().contains(
          "Token generation only allowed for Kerberos authenticated clients"));
    }
  }
}
示例2: testDoubleScan
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Runs the FriendsOfFriends row-processor endpoint against ROW and checks that the
 * two-hop friend set returned by the server matches the expected members.
 */
@Test
public void testDoubleScan() throws Throwable {
  prepareTestData();
  CoprocessorRpcChannel rpcChannel = table.coprocessorService(ROW);
  RowProcessorService.BlockingInterface stub = RowProcessorService.newBlockingStub(rpcChannel);
  RowProcessorEndpoint.FriendsOfFriendsProcessor processor =
      new RowProcessorEndpoint.FriendsOfFriendsProcessor(ROW, A);
  ProcessResponse rawReply = stub.process(null, RowProcessorClient.getRowProcessorPB(processor));
  FriendsOfFriendsProcessorResponse parsed =
      FriendsOfFriendsProcessorResponse.parseFrom(rawReply.getRowProcessorResult());
  Set<String> actual = new HashSet<String>(parsed.getResultList());
  Set<String> expected = new HashSet<String>(Arrays.asList("d", "e", "f", "g"));
  LOG.debug("row keyvalues:" + stringifyKvs(table.get(new Get(ROW)).listCells()));
  assertEquals(expected, actual);
}
示例3: testTimeout
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Verifies that a row processor exceeding its time budget causes the coprocessor
 * call to fail with an exception on the client side.
 */
@Test
public void testTimeout() throws Throwable {
  prepareTestData();
  CoprocessorRpcChannel rpcChannel = table.coprocessorService(ROW);
  RowProcessorService.BlockingInterface stub = RowProcessorService.newBlockingStub(rpcChannel);
  RowProcessorEndpoint.TimeoutProcessor slowProcessor =
      new RowProcessorEndpoint.TimeoutProcessor(ROW);
  ProcessRequest request = RowProcessorClient.getRowProcessorPB(slowProcessor);
  boolean sawException = false;
  try {
    stub.process(null, request);
  } catch (Exception e) {
    // Any failure here is the expected timeout path.
    sawException = true;
  }
  assertTrue(sawException);
}
示例4: testCoprocessorError
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Calling the endpoint's error() method must surface a ServiceException to the
 * client rather than retrying forever or leaking the table handle.
 */
@Test
public void testCoprocessorError() throws Exception {
  Configuration configuration = new Configuration(util.getConfiguration());
  // Make it not retry forever
  configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  // try-with-resources replaces the manual try/finally so the table is always closed.
  try (Table table = new HTable(configuration, TEST_TABLE)) {
    CoprocessorRpcChannel protocol = table.coprocessorService(ROWS[0]);
    TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service =
        TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(protocol);
    service.error(null, TestProtos.EmptyRequestProto.getDefaultInstance());
    fail("Should have thrown an exception");
  } catch (ServiceException e) {
    // expected: the error() endpoint always fails on the server side
  }
}
示例5: obtainToken
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Obtain and return an authentication token for the current user.
 * The AuthenticationService coprocessor endpoint is reached through the meta table.
 * @param conn The HBase cluster connection
 * @return the authentication token instance
 * @throws IOException if the RPC fails or the table cannot be opened
 */
public static Token<AuthenticationTokenIdentifier> obtainToken(
Connection conn) throws IOException {
Table meta = null;
try {
meta = conn.getTable(TableName.META_TABLE_NAME);
// Open a coprocessor channel on the region holding the empty start row.
CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
AuthenticationProtos.AuthenticationService.BlockingInterface service =
AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
return ProtobufUtil.toToken(response.getToken());
} catch (ServiceException se) {
// NOTE(review): this relies on ProtobufUtil.toIOException(se) rethrowing the cause as
// an IOException (void-throws). If it merely returned the exception, the failure would
// be silently dropped and null returned below — confirm against the ProtobufUtil in use.
ProtobufUtil.toIOException(se);
} finally {
// Always release the meta table handle.
if (meta != null) {
meta.close();
}
}
// dummy return for ServiceException block
return null;
}
示例6: cleanupBulkLoad
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Asks the SecureBulkLoadService endpoint to clean up the staging area identified
 * by the given bulk-load token. Any failure is rethrown wrapped in an IOException.
 */
public void cleanupBulkLoad(final String bulkToken) throws IOException {
  try {
    CoprocessorRpcChannel rpcChannel = table.coprocessorService(HConstants.EMPTY_START_ROW);
    SecureBulkLoadProtos.SecureBulkLoadService stub =
        ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, rpcChannel);
    ServerRpcController controller = new ServerRpcController();
    BlockingRpcCallback<SecureBulkLoadProtos.CleanupBulkLoadResponse> callback =
        new BlockingRpcCallback<SecureBulkLoadProtos.CleanupBulkLoadResponse>();
    SecureBulkLoadProtos.CleanupBulkLoadRequest request = SecureBulkLoadProtos
        .CleanupBulkLoadRequest.newBuilder().setBulkToken(bulkToken).build();
    stub.cleanupBulkLoad(controller, request, callback);
    // The controller records any server-side failure; surface it to the caller.
    if (controller.failedOnException()) {
      throw controller.getFailedOn();
    }
  } catch (Throwable t) {
    throw new IOException(t);
  }
}
示例7: obtainToken
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Obtain and return an authentication token for the current user.
 * The AuthenticationService coprocessor endpoint is reached through the meta table.
 * @param conf The configuration for connecting to the cluster
 * @return the authentication token instance
 * @throws IOException if the RPC fails or the table cannot be opened
 */
public static Token<AuthenticationTokenIdentifier> obtainToken(
Configuration conf) throws IOException {
HTable meta = null;
try {
meta = new HTable(conf, TableName.META_TABLE_NAME);
// Open a coprocessor channel on the region holding the empty start row.
CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
AuthenticationProtos.AuthenticationService.BlockingInterface service =
AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
return ProtobufUtil.toToken(response.getToken());
} catch (ServiceException se) {
// NOTE(review): this relies on ProtobufUtil.toIOException(se) rethrowing the cause as
// an IOException (void-throws). If it merely returned the exception, the failure would
// be silently dropped and null returned below — confirm against the ProtobufUtil in use.
ProtobufUtil.toIOException(se);
} finally {
// Always release the meta table handle.
if (meta != null) {
meta.close();
}
}
// dummy return for ServiceException catch block
return null;
}
示例8: multiMutate
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Performs an atomic multi-Mutate operation against the given table.
 * Only Put and Delete mutations are supported; any other subtype is rejected
 * with a DoNotRetryIOException.
 */
private static void multiMutate(HTable table, byte[] row, Mutation... mutations) throws IOException {
  CoprocessorRpcChannel rpcChannel = table.coprocessorService(row);
  MutateRowsRequest.Builder requestBuilder = MutateRowsRequest.newBuilder();
  for (Mutation m : mutations) {
    MutationType type;
    if (m instanceof Put) {
      type = MutationType.PUT;
    } else if (m instanceof Delete) {
      type = MutationType.DELETE;
    } else {
      throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
          + m.getClass().getName());
    }
    requestBuilder.addMutationRequest(ProtobufUtil.toMutation(type, m));
  }
  MultiRowMutationService.BlockingInterface stub =
      MultiRowMutationService.newBlockingStub(rpcChannel);
  try {
    stub.mutateRows(null, requestBuilder.build());
  } catch (ServiceException ex) {
    // NOTE(review): assumes ProtobufUtil.toIOException rethrows as IOException — confirm.
    ProtobufUtil.toIOException(ex);
  }
}
示例9: testCoprocessorError
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Calling the endpoint's error() method must surface a ServiceException to the
 * client rather than retrying forever or leaking the table handle.
 */
@Test
public void testCoprocessorError() throws Exception {
  Configuration configuration = new Configuration(util.getConfiguration());
  // Make it not retry forever
  configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  // try-with-resources replaces the manual try/finally so the table is always closed.
  try (HTable table = new HTable(configuration, TEST_TABLE)) {
    CoprocessorRpcChannel protocol = table.coprocessorService(ROWS[0]);
    TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service =
        TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(protocol);
    service.error(null, TestProtos.EmptyRequestProto.getDefaultInstance());
    fail("Should have thrown an exception");
  } catch (ServiceException e) {
    // expected: the error() endpoint always fails on the server side
  }
}
示例10: testDoubleScan
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Runs the FriendsOfFriends row-processor endpoint against ROW and checks that the
 * two-hop friend set returned by the server matches the expected members.
 */
@Test
public void testDoubleScan() throws Throwable {
  prepareTestData();
  CoprocessorRpcChannel rpcChannel = table.coprocessorService(ROW);
  RowProcessorService.BlockingInterface stub = RowProcessorService.newBlockingStub(rpcChannel);
  RowProcessorEndpoint.FriendsOfFriendsProcessor processor =
      new RowProcessorEndpoint.FriendsOfFriendsProcessor(ROW, A);
  ProcessResponse rawReply = stub.process(null, RowProcessorClient.getRowProcessorPB(processor));
  FriendsOfFriendsProcessorResponse parsed =
      FriendsOfFriendsProcessorResponse.parseFrom(rawReply.getRowProcessorResult());
  Set<String> actual = new HashSet<>(parsed.getResultList());
  Set<String> expected = new HashSet<>(Arrays.asList("d", "e", "f", "g"));
  LOG.debug("row keyvalues:" + stringifyKvs(table.get(new Get(ROW)).listCells()));
  assertEquals(expected, actual);
}
示例11: testCoprocessorError
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Calling the endpoint's error() method must surface a ServiceException to the
 * client rather than retrying forever or leaking the table handle.
 */
@Test
public void testCoprocessorError() throws Exception {
  Configuration configuration = new Configuration(util.getConfiguration());
  // Make it not retry forever
  configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  // try-with-resources replaces the manual try/finally so the table is always closed.
  try (Table table = util.getConnection().getTable(TEST_TABLE)) {
    CoprocessorRpcChannel protocol = table.coprocessorService(ROWS[0]);
    TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service =
        TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(protocol);
    service.error(null, TestProtos.EmptyRequestProto.getDefaultInstance());
    fail("Should have thrown an exception");
  } catch (ServiceException e) {
    // expected: the error() endpoint always fails on the server side
  }
}
示例12: cleanupBulkLoad
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Asks the SecureBulkLoadService endpoint to clean up the staging area identified
 * by the given bulk-load token. Any failure is rethrown wrapped in an IOException.
 */
public void cleanupBulkLoad(final String bulkToken) throws IOException {
  try {
    CoprocessorRpcChannel rpcChannel = table.coprocessorService(HConstants.EMPTY_START_ROW);
    SecureBulkLoadProtos.SecureBulkLoadService stub =
        ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, rpcChannel);
    ServerRpcController controller = new ServerRpcController();
    CoprocessorRpcUtils.BlockingRpcCallback<CleanupBulkLoadResponse> callback =
        new CoprocessorRpcUtils.BlockingRpcCallback<>();
    CleanupBulkLoadRequest request =
        CleanupBulkLoadRequest.newBuilder().setBulkToken(bulkToken).build();
    stub.cleanupBulkLoad(controller, request, callback);
    // The controller records any server-side failure; surface it to the caller.
    if (controller.failedOnException()) {
      throw controller.getFailedOn();
    }
  } catch (Throwable t) {
    throw new IOException(t);
  }
}
示例13: obtainToken
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Obtain and return an authentication token for the current user.
 * The AuthenticationService coprocessor endpoint is reached through the meta table.
 * @param conn The HBase cluster connection
 * @throws IOException if a remote error or serialization problem occurs.
 * @return the authentication token instance
 */
public static Token<AuthenticationTokenIdentifier> obtainToken(
    Connection conn) throws IOException {
  Table metaTable = null;
  try {
    metaTable = conn.getTable(TableName.META_TABLE_NAME);
    CoprocessorRpcChannel channel = metaTable.coprocessorService(HConstants.EMPTY_START_ROW);
    AuthenticationProtos.AuthenticationService.BlockingInterface authService =
        AuthenticationProtos.AuthenticationService.newBlockingStub(channel);
    AuthenticationProtos.GetAuthenticationTokenResponse reply =
        authService.getAuthenticationToken(null,
            AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
    return toToken(reply.getToken());
  } catch (ServiceException se) {
    // Translate the protobuf service failure into the declared IOException.
    throw ProtobufUtil.handleRemoteException(se);
  } finally {
    // Always release the meta table handle.
    if (metaTable != null) {
      metaTable.close();
    }
  }
}
示例14: test
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Verifies that whoAmI reports the TOKEN-authenticated user and that token
 * generation is rejected for clients that did not authenticate via Kerberos.
 */
@Test
public void test() throws Exception {
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Table table = conn.getTable(TableName.META_TABLE_NAME)) {
    CoprocessorRpcChannel rpcChannel = table.coprocessorService(HConstants.EMPTY_START_ROW);
    AuthenticationProtos.AuthenticationService.BlockingInterface service =
        AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
    WhoAmIResponse response = service.whoAmI(null, WhoAmIRequest.getDefaultInstance());
    assertEquals(USERNAME, response.getUsername());
    assertEquals(AuthenticationMethod.TOKEN.name(), response.getAuthMethod());
    try {
      service.getAuthenticationToken(null, GetAuthenticationTokenRequest.getDefaultInstance());
      // BUG FIX: previously the test passed silently when no exception was thrown here.
      throw new AssertionError(
          "getAuthenticationToken should be rejected for a token-authenticated client");
    } catch (ServiceException e) {
      IOException ioe = ProtobufUtil.getRemoteException(e);
      assertThat(ioe, instanceOf(AccessDeniedException.class));
      assertThat(ioe.getMessage(),
          containsString("Token generation only allowed for Kerberos authenticated clients"));
    }
  }
}
示例15: obtainToken
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; //导入依赖的package包/类
/**
 * Obtain and return an authentication token for the current user.
 * Legacy variant: addresses the meta table by its pre-0.96 literal name ".META."
 * and uses the older TokenRequest/TokenResponse message types.
 * @param conf The configuration for connecting to the cluster
 * @return the authentication token instance
 * @throws IOException if the RPC fails or the table cannot be opened
 */
public static Token<AuthenticationTokenIdentifier> obtainToken(
Configuration conf) throws IOException {
HTable meta = null;
try {
meta = new HTable(conf, ".META.");
// Open a coprocessor channel on the region holding the empty start row.
CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
AuthenticationProtos.AuthenticationService.BlockingInterface service =
AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
AuthenticationProtos.TokenResponse response = service.getAuthenticationToken(null,
AuthenticationProtos.TokenRequest.getDefaultInstance());
return ProtobufUtil.toToken(response.getToken());
} catch (ServiceException se) {
// NOTE(review): this relies on ProtobufUtil.toIOException(se) rethrowing the cause as
// an IOException (void-throws). If it merely returned the exception, the failure would
// be silently dropped and null returned below — confirm against the ProtobufUtil in use.
ProtobufUtil.toIOException(se);
} finally {
// Always release the meta table handle.
if (meta != null) {
meta.close();
}
}
// dummy return for ServiceException catch block
return null;
}