Java RpcClient Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.ipc.RpcClient. If you are unsure what the RpcClient class is for, how to use it, or simply want to see it in working code, the curated examples below should help.


The RpcClient class belongs to the org.apache.hadoop.hbase.ipc package. Fifteen code examples of the class are shown below, ordered by popularity.
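
Before the individual examples, the following minimal sketch shows the lifecycle that most of them assume: create an RpcClient, open a blocking RPC channel to a region server, build a protobuf service stub on top of it, and close the client when finished. The class name, cluster id, host, and port below are placeholder assumptions rather than values taken from any example; the API calls themselves (RpcClientFactory.createClient, createBlockingRpcChannel, close) are the ones exercised in Examples 5 and 7.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcClientFactory;
import org.apache.hadoop.hbase.security.User;

import com.google.protobuf.BlockingRpcChannel;

public class RpcClientLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Placeholder cluster id and region server coordinates -- replace with real values.
    String clusterId = "example-cluster-id";
    ServerName sn = ServerName.valueOf("regionserver.example.com", 16020,
        System.currentTimeMillis());

    // Factory entry point as used in Example 5; the older HIndex-based examples
    // construct RpcClient directly and shut it down with stop() instead of close().
    RpcClient rpcClient = RpcClientFactory.createClient(conf, clusterId);
    try {
      // Blocking channel to the region server; protobuf BlockingInterface stubs
      // (e.g. the AuthenticationService stub in Examples 5 and 7) are built on top of it.
      BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn,
          User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
      // ... build a protobuf service stub over `channel` and issue RPC calls here ...
    } finally {
      rpcClient.close();
    }
  }
}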

Example 1: testTokenAuth

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
private void testTokenAuth(Class<? extends RpcClient> rpcImplClass) throws IOException,
    ServiceException {
  TEST_UTIL.getConfiguration().set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY,
    rpcImplClass.getName());
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Table table = conn.getTable(TableName.META_TABLE_NAME)) {
    CoprocessorRpcChannel rpcChannel = table.coprocessorService(HConstants.EMPTY_START_ROW);
    AuthenticationProtos.AuthenticationService.BlockingInterface service =
        AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
    WhoAmIResponse response = service.whoAmI(null, WhoAmIRequest.getDefaultInstance());
    assertEquals(USERNAME, response.getUsername());
    assertEquals(AuthenticationMethod.TOKEN.name(), response.getAuthMethod());
    try {
      service.getAuthenticationToken(null, GetAuthenticationTokenRequest.getDefaultInstance());
    } catch (ServiceException e) {
      AccessDeniedException exc = (AccessDeniedException) ProtobufUtil.getRemoteException(e);
      assertTrue(exc.getMessage().contains(
        "Token generation only allowed for Kerberos authenticated clients"));
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 22, Source: TestGenerateDelegationToken.java

Example 2: testRpcCallWithEnabledKerberosSaslAuth

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
private void testRpcCallWithEnabledKerberosSaslAuth(Class<? extends RpcClient> rpcImplClass)
    throws Exception {
  String krbKeytab = getKeytabFileForTesting();
  String krbPrincipal = getPrincipalForTesting();

  UserGroupInformation ugi = loginKerberosPrincipal(krbKeytab, krbPrincipal);
  UserGroupInformation ugi2 = UserGroupInformation.getCurrentUser();

  // check that the login user is okay:
  assertSame(ugi, ugi2);
  assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
  assertEquals(krbPrincipal, ugi.getUserName());

  Configuration clientConf = getSecuredConfiguration();
  callRpcService(rpcImplClass, User.create(ugi2), clientConf, false);
}
 
Author: fengchen8086, Project: ditb, Lines: 17, Source: TestSecureRPC.java

Example 3: testRpcFallbackToSimpleAuth

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
public void testRpcFallbackToSimpleAuth(Class<? extends RpcClient> rpcImplClass) throws Exception {
  String krbKeytab = getKeytabFileForTesting();
  String krbPrincipal = getPrincipalForTesting();

  UserGroupInformation ugi = loginKerberosPrincipal(krbKeytab, krbPrincipal);
  assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
  assertEquals(krbPrincipal, ugi.getUserName());

  String clientUsername = "testuser";
  UserGroupInformation clientUgi = UserGroupInformation.createUserForTesting(clientUsername,
      new String[]{clientUsername});

  // check that the client user is insecure
  assertNotSame(ugi, clientUgi);
  assertEquals(AuthenticationMethod.SIMPLE, clientUgi.getAuthenticationMethod());
  assertEquals(clientUsername, clientUgi.getUserName());

  Configuration clientConf = new Configuration();
  clientConf.set(User.HBASE_SECURITY_CONF_KEY, "simple");
  callRpcService(rpcImplClass, User.create(clientUgi), clientConf, true);
}
 
Author: fengchen8086, Project: ditb, Lines: 22, Source: TestSecureRPC.java

Example 4: setUp

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
@Override
public void setUp() throws Exception {
  super.setUp();
  Configuration conf = util.getConfiguration();

  // sanity check cluster
  // TODO: this should reach out to master and verify online state instead
  assertEquals("Master must be configured with StochasticLoadBalancer",
    "org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer",
    conf.get("hbase.master.loadbalancer.class"));
  // TODO: this should reach out to master and verify online state instead
  assertTrue("hbase.regionserver.storefile.refresh.period must be greater than zero.",
    conf.getLong("hbase.regionserver.storefile.refresh.period", 0) > 0);

  // enable client-side settings
  conf.setBoolean(RpcClient.SPECIFIC_WRITE_THREAD, true);
  // TODO: expose these settings to CLI override
  conf.setLong("hbase.client.primaryCallTimeout.get", primaryTimeout);
  conf.setLong("hbase.client.primaryCallTimeout.multiget", primaryTimeout);
}
 
Author: fengchen8086, Project: ditb, Lines: 21, Source: IntegrationTestRegionReplicaPerf.java

Example 5: testTokenAuthentication

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
@Test
public void testTokenAuthentication() throws Exception {
  UserGroupInformation testuser =
      UserGroupInformation.createUserForTesting("testuser", new String[]{"testgroup"});

  testuser.setAuthenticationMethod(
      UserGroupInformation.AuthenticationMethod.TOKEN);
  final Configuration conf = TEST_UTIL.getConfiguration();
  UserGroupInformation.setConfiguration(conf);
  Token<AuthenticationTokenIdentifier> token =
      secretManager.generateToken("testuser");
  LOG.debug("Got token: " + token.toString());
  testuser.addToken(token);

  // verify the server authenticates us as this token user
  testuser.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      Configuration c = server.getConfiguration();
      RpcClient rpcClient = RpcClientFactory.createClient(c, clusterId.toString());
      ServerName sn =
          ServerName.valueOf(server.getAddress().getHostName(), server.getAddress().getPort(),
              System.currentTimeMillis());
      try {
        BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn,
            User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
        AuthenticationProtos.AuthenticationService.BlockingInterface stub =
            AuthenticationProtos.AuthenticationService.newBlockingStub(channel);
        AuthenticationProtos.WhoAmIResponse response =
            stub.whoAmI(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance());
        String myname = response.getUsername();
        assertEquals("testuser", myname);
        String authMethod = response.getAuthMethod();
        assertEquals("TOKEN", authMethod);
      } finally {
        rpcClient.close();
      }
      return null;
    }
  });
}
 
Author: fengchen8086, Project: ditb, Lines: 41, Source: TestTokenAuthentication.java

Example 6: initializeThreads

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
private void initializeThreads() throws IOException {
  // Cache flushing thread.
  this.cacheFlusher = new MemStoreFlusher(conf, this);

  // Compaction thread
  this.compactSplitThread = new CompactSplitThread(this);

  // Background thread to check for compactions; needed if the region has not gotten updates
  // in a while. It will take care of not checking too frequently, on a store-by-store basis.
  this.compactionChecker = new CompactionChecker(this, this.threadWakeFrequency, this);
  this.periodicFlusher = new PeriodicMemstoreFlusher(this.threadWakeFrequency, this);
  // Health checker thread.
  int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ,
    HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
  if (isHealthCheckerConfigured()) {
    healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration());
  }

  this.leases = new Leases(this.threadWakeFrequency);

  // Create the thread to clean the moved regions list
  movedRegionsCleaner = MovedRegionsCleaner.createAndStart(this);

  if (this.nonceManager != null) {
    // Create the chore that cleans up nonces.
    nonceManagerChore = this.nonceManager.createCleanupChore(this);
  }

  // Setup RPC client for master communication
  rpcClient = new RpcClient(conf, clusterId, new InetSocketAddress(
      this.isa.getAddress(), 0));
  this.pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
}
 
Author: tenggyut, Project: HIndex, Lines: 35, Source: HRegionServer.java

Example 7: testTokenAuthentication

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
@Test
public void testTokenAuthentication() throws Exception {
  UserGroupInformation testuser =
      UserGroupInformation.createUserForTesting("testuser", new String[]{"testgroup"});

  testuser.setAuthenticationMethod(
      UserGroupInformation.AuthenticationMethod.TOKEN);
  final Configuration conf = TEST_UTIL.getConfiguration();
  UserGroupInformation.setConfiguration(conf);
  Token<AuthenticationTokenIdentifier> token =
      secretManager.generateToken("testuser");
  LOG.debug("Got token: " + token.toString());
  testuser.addToken(token);

  // verify the server authenticates us as this token user
  testuser.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      Configuration c = server.getConfiguration();
      RpcClient rpcClient = new RpcClient(c, clusterId.toString());
      ServerName sn =
          ServerName.valueOf(server.getAddress().getHostName(), server.getAddress().getPort(),
              System.currentTimeMillis());
      try {
        BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn,
          User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
        AuthenticationProtos.AuthenticationService.BlockingInterface stub =
          AuthenticationProtos.AuthenticationService.newBlockingStub(channel);
        AuthenticationProtos.WhoAmIResponse response =
          stub.whoAmI(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance());
        String myname = response.getUsername();
        assertEquals("testuser", myname);
        String authMethod = response.getAuthMethod();
        assertEquals("TOKEN", authMethod);
      } finally {
        rpcClient.stop();
      }
      return null;
    }
  });
}
 
Author: tenggyut, Project: HIndex, Lines: 41, Source: TestTokenAuthentication.java

Example 8: beforeClass

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
@BeforeClass public static void beforeClass() throws Exception {
  ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
  UTIL.startMiniCluster(slaves);
  HTable t = UTIL.createTable(Bytes.toBytes(TEST_TABLE), Bytes.toBytes(FAMILY));
  UTIL.createMultiRegions(t, Bytes.toBytes(FAMILY));
  UTIL.waitTableEnabled(Bytes.toBytes(TEST_TABLE));
  t.close();
}
 
Author: tenggyut, Project: HIndex, Lines: 11, Source: TestMultiParallel.java

Example 9: newRandomTimeoutRpcClient

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
private static RpcClient newRandomTimeoutRpcClient() {
  return new RpcClient(
      TEST_UTIL.getConfiguration(), TEST_UTIL.getClusterKey()) {
    // Return my own instance, one that does random timeouts
    @Override
    public BlockingRpcChannel createBlockingRpcChannel(ServerName sn,
        User ticket, int rpcTimeout) {
      return new RandomTimeoutBlockingRpcChannel(this, sn, ticket, rpcTimeout);
    }
  };
}
 
Author: tenggyut, Project: HIndex, Lines: 12, Source: TestClientTimeouts.java

Example 10: testNoCodec

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
@Test
public void testNoCodec() {
  Configuration c = new Configuration();
  c.set("hbase.client.default.rpc.codec", "");
  String codec = RpcClient.getDefaultCodec(c);
  assertTrue(codec == null || codec.length() == 0);
}
 
Author: tenggyut, Project: HIndex, Lines: 8, Source: TestFromClientSideNoCodec.java

Example 11: setUpBeforeClass

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
  Configuration conf = TEST_UTIL.getConfiguration();
  // Don't report so often so easier to see other rpcs
  conf.setInt("hbase.regionserver.msginterval", 3 * 10000);
  conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, rpcTimeout);
  conf.setStrings(HConstants.REGION_SERVER_IMPL, RegionServerWithScanTimeout.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, CLIENT_RETRIES_NUMBER);
  conf.setInt(HConstants.HBASE_CLIENT_PAUSE, 1000);
  TEST_UTIL.startMiniCluster(1);
}
 
Author: tenggyut, Project: HIndex, Lines: 15, Source: TestClientScannerRPCTimeout.java

Example 12: setUpBeforeClass

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
/**
 * @throws java.lang.Exception
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      MultiRowMutationEndpoint.class.getName());
  // We need more than one region server in this test
  TEST_UTIL.startMiniCluster(SLAVES);
}
 
Author: tenggyut, Project: HIndex, Lines: 15, Source: TestFromClientSide.java

Example 13: setupCluster

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
/**
 * Setup the config for the cluster
 * @throws Exception on failure
 */
@BeforeClass
public static void setupCluster() throws Exception {
  ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
  setupConf(UTIL.getConfiguration());
  UTIL.startMiniCluster(NUM_RS);
}
 
Author: tenggyut, Project: HIndex, Lines: 13, Source: TestFlushSnapshotFromClient.java

Example 14: setUp

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
  TEST_UTIL.startMiniCluster(1);
  initialize(TEST_UTIL.getConfiguration());
}
 
Author: tenggyut, Project: HIndex, Lines: 9, Source: TestFilterWithScanLimits.java

Example 15: HConnectionImplementation

import org.apache.hadoop.hbase.ipc.RpcClient; // import the required package/class
/**
 * constructor
 * @param conf Configuration object
 * @param managed If true, does not do a full shutdown on close; i.e. cleanup of the connection
 * to zk and shutdown of all services; we just close down the resources this connection was
 * responsible for and decrement usage counters.  It is up to the caller to do the full
 * cleanup.  It is set when we want connection sharing -- reuse of the zk connection,
 * cached region locations, established regionserver connections, etc.  When connections
 * are shared, reference counting is used and full cleanup happens only when there are no
 * more users of the HConnectionImplementation instance.
 */
HConnectionImplementation(Configuration conf, boolean managed,
    ExecutorService pool, User user) throws IOException {
  this(conf);
  this.user = user;
  this.batchPool = pool;
  this.managed = managed;
  this.registry = setupRegistry();
  retrieveClusterId();

  this.rpcClient = new RpcClient(this.conf, this.clusterId);

  // Do we publish the status?
  boolean shouldListen = conf.getBoolean(HConstants.STATUS_PUBLISHED,
      HConstants.STATUS_PUBLISHED_DEFAULT);
  Class<? extends ClusterStatusListener.Listener> listenerClass =
      conf.getClass(ClusterStatusListener.STATUS_LISTENER_CLASS,
          ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS,
          ClusterStatusListener.Listener.class);
  if (shouldListen) {
    if (listenerClass == null) {
      LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
          ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status");
    } else {
      clusterStatusListener = new ClusterStatusListener(
          new ClusterStatusListener.DeadServerHandler() {
            @Override
            public void newDead(ServerName sn) {
              clearCaches(sn);
              rpcClient.cancelConnections(sn.getHostname(), sn.getPort(),
                  new SocketException(sn.getServerName() +
                      " is dead: closing its connection."));
            }
          }, conf, listenerClass);
    }
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 48, Source: HConnectionManager.java


Note: The org.apache.hadoop.hbase.ipc.RpcClient class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors. Refer to each project's license before redistributing or using the code, and do not republish without permission.