

Java NetUtils.getConnectAddress Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.net.NetUtils.getConnectAddress. If you are wondering how NetUtils.getConnectAddress is used in practice, what it is for, or where to find examples, the curated code samples below may help. You can also explore further usage examples of org.apache.hadoop.net.NetUtils, the class this method belongs to.


The sections below present 15 code examples of the NetUtils.getConnectAddress method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
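Before the examples, here is a minimal sketch of what NetUtils.getConnectAddress does: it converts a server's bind address (often a wildcard address such as 0.0.0.0, possibly with an ephemeral port) into an address a client can actually connect to. The sketch uses only the NetUtils calls that appear in the examples on this page; the class name and the sample host/port are hypothetical placeholders. Most examples below use the overloaded NetUtils.getConnectAddress(Server) form, which returns the connect address of a running org.apache.hadoop.ipc.Server and is especially handy when the server was started on port 0.

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class GetConnectAddressSketch {
  public static void main(String[] args) {
    // A server frequently binds to the wildcard address on a configured port.
    InetSocketAddress bindAddr = new InetSocketAddress("0.0.0.0", 8020);

    // getConnectAddress replaces the wildcard host with one a client can
    // actually reach (typically the local host), keeping the port.
    InetSocketAddress connectAddr = NetUtils.getConnectAddress(bindAddr);

    System.out.println("bind address    = " + NetUtils.getHostPortString(bindAddr));
    System.out.println("connect address = " + NetUtils.getHostPortString(connectAddr));
  }
}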

Example 1: registerNodeManager

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Override
public RegisterNodeManagerResponse registerNodeManager(
    RegisterNodeManagerRequest request) throws YarnException,
    IOException {
  NodeId nodeId = request.getNodeId();
  Resource resource = request.getResource();
  LOG.info("Registering " + nodeId.toString());
  // NOTE: this really should be checking against the config value
  InetSocketAddress expected = NetUtils.getConnectAddress(
      conf.getSocketAddr(YarnConfiguration.NM_ADDRESS, null, -1));
  Assert.assertEquals(NetUtils.getHostPortString(expected), nodeId.toString());
  Assert.assertEquals(5 * 1024, resource.getMemory());
  registeredNodes.add(nodeId);

  RegisterNodeManagerResponse response = recordFactory
      .newRecordInstance(RegisterNodeManagerResponse.class);
  response.setContainerTokenMasterKey(createMasterKey());
  response.setNMTokenMasterKey(createMasterKey());
  return response;
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestNodeStatusUpdater.java

Example 2: testVersion2ClientVersion1Server

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test // Compatible new client & old server
public void testVersion2ClientVersion1Server() throws Exception {
  // create a server with two handlers
  TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  Version2Client client = new Version2Client();
  client.ping();
  assertEquals("hello", client.echo("hello"));
  
  // echo(int) is not supported by the server, yet the call still returns 3.
  // This verifies that echo(int) and echo(String) have different method hash codes.
  assertEquals(3, client.echo(3));
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestRPCCompatibility.java

Example 3: startAndGetRPCServerAddress

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private InetSocketAddress startAndGetRPCServerAddress(InetSocketAddress serverAddress) {
  Configuration conf = new Configuration();

  try {
    RPC.setProtocolEngine(conf,
        HAServiceProtocolPB.class, ProtobufRpcEngine.class);
    HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
        new HAServiceProtocolServerSideTranslatorPB(new MockHAProtocolImpl());
    BlockingService haPbService = HAServiceProtocolService
        .newReflectiveBlockingService(haServiceProtocolXlator);

    Server server = new RPC.Builder(conf)
        .setProtocol(HAServiceProtocolPB.class)
        .setInstance(haPbService)
        .setBindAddress(serverAddress.getHostName())
        .setPort(serverAddress.getPort()).build();
    server.start();
    return NetUtils.getConnectAddress(server);
  } catch (IOException e) {
    // The RPC server could not be started; signal failure by returning null.
    return null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: DummyHAService.java

Example 4: testIpcWithServiceClass

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Check service class byte in IPC header is correct on wire.
 */
@Test(timeout=60000)
public void testIpcWithServiceClass() throws IOException {
  // start server
  Server server = new TestServer(5, false);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  server.start();

  // start client
  Client.setConnectTimeout(conf, 10000);

  callAndVerify(server, addr, 0, true);
  // The service class is sent as a single byte on the wire, so the lowest valid value is -128.
  // -128 fits in a byte unchanged, but -129 does not.
  callAndVerify(server, addr, -128, true);
  callAndVerify(server, addr, -129, false);

  // The highest valid service class is 127.
  // 127 fits in a byte unchanged, but 128 does not.
  callAndVerify(server, addr, 127, true);
  callAndVerify(server, addr, 128, false);

  server.stop();
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestIPC.java

Example 5: testVersion0ClientVersion1Server

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test  // old client vs new server
public void testVersion0ClientVersion1Server() throws Exception {
  // create a server with two handlers
  TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  proxy = RPC.getProtocolProxy(
      TestProtocol0.class, TestProtocol0.versionID, addr, conf);

  TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
  proxy0.ping();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 18, Source: TestRPCCompatibility.java

Example 6: testVersion2ClientVersion2Server

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test // equal version client and server
public void testVersion2ClientVersion2Server() throws Exception {
  // create a server with two handlers
  TestImpl2 impl = new TestImpl2();
  server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  Version2Client client = new Version2Client();

  client.ping();
  assertEquals("hello", client.echo("hello"));
  
  // now that echo(int) is supported by the server, echo(int) should return -3
  assertEquals(-3, client.echo(3));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 20, Source: TestRPCCompatibility.java

Example 7: checkServerResponder

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
public void checkServerResponder(final int handlerCount, 
                                final boolean handlerSleep, 
                                final int clientCount,
                                final int callerCount,
                                final int callCount) throws IOException,
                                InterruptedException {
  Server server = new TestServer(handlerCount, handlerSleep);
  server.start();

  InetSocketAddress address = NetUtils.getConnectAddress(server);
  Client[] clients = new Client[clientCount];
  for (int i = 0; i < clientCount; i++) {
    clients[i] = new Client(BytesWritable.class, conf);
  }

  Caller[] callers = new Caller[callerCount];
  for (int i = 0; i < callerCount; i++) {
    callers[i] = new Caller(clients[i % clientCount], address, callCount);
    callers[i].start();
  }
  for (int i = 0; i < callerCount; i++) {
    callers[i].join();
    assertFalse(callers[i].failed);
  }
  for (int i = 0; i < clientCount; i++) {
    clients[i].stop();
  }
  server.stop();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 30, Source: TestIPCServerResponder.java

Example 8: testCallRetryCount

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Test if the rpc server gets the retry count from client.
 */
@Test(timeout=60000)
public void testCallRetryCount() throws IOException {
  final int retryCount = 255;
  // Create a client and explicitly set the call id and retry count
  final Client client = new Client(LongWritable.class, conf);
  Client.setCallIdAndRetryCount(Client.nextCallId(), retryCount);

  // Attach a listener that checks the retry count seen by the server.
  final TestServer server = new TestServer(1, false);
  server.callListener = new Runnable() {
    @Override
    public void run() {
      // the retry count was explicitly set to 255 on the client, so the
      // server side should observe that same value
      Assert.assertEquals(retryCount, Server.getCallRetryCount());
    }
  };

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestIPC.java

Example 9: doDigestRpc

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private void doDigestRpc(Server server, TestTokenSecretManager sm
                         ) throws Exception {
  server.start();

  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
      sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);

  TestSaslProtocol proxy = null;
  try {
    proxy = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, conf);
    AuthMethod authMethod = proxy.getAuthMethod();
    assertEquals(TOKEN, authMethod);
    // QOP must be auth
    assertEquals(expectedQop.saslQop,
                 RPC.getConnectionIdForProxy(proxy).getSaslQop());
    proxy.ping();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestSaslRPC.java

Example 10: testInitialCallRetryCount

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Test if the rpc server gets the default retry count (0) from client.
 */
@Test(timeout=60000)
public void testInitialCallRetryCount() throws IOException {
  // Create a client without explicitly setting a call id or retry count
  final Client client = new Client(LongWritable.class, conf);

  // Attach a listener that checks the retry count seen by the server.
  final TestServer server = new TestServer(1, false);
  server.callListener = new Runnable() {
    @Override
    public void run() {
      // we have not set the retry count for the client, thus on the server
      // side we should see retry count as 0
      Assert.assertEquals(0, Server.getCallRetryCount());
    }
  };

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 31, Source: TestIPC.java

Example 11: testCallIdAndRetry

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Test if
 * (1) the rpc server uses the call id/retry provided by the rpc client, and
 * (2) the rpc client receives the same call id/retry from the rpc server.
 */
@Test(timeout=60000)
public void testCallIdAndRetry() throws IOException {
  final CallInfo info = new CallInfo();

  // Override client to store the call info and check response
  final Client client = new Client(LongWritable.class, conf) {
    @Override
    Call createCall(RpcKind rpcKind, Writable rpcRequest) {
      final Call call = super.createCall(rpcKind, rpcRequest);
      info.id = call.id;
      info.retry = call.retry;
      return call;
    }
    
    @Override
    void checkResponse(RpcResponseHeaderProto header) throws IOException {
      super.checkResponse(header);
      Assert.assertEquals(info.id, header.getCallId());
      Assert.assertEquals(info.retry, header.getRetryCount());
    }
  };

  // Attach a listener that checks the call id and retry count seen by the server.
  final TestServer server = new TestServer(1, false);
  server.callListener = new Runnable() {
    @Override
    public void run() {
      Assert.assertEquals(info.id, Server.getCallId());
      Assert.assertEquals(info.retry, Server.getCallRetryCount());
    }
  };

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: TestIPC.java

Example 12: testSaslResponseOrdering

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test(timeout=10000)
public void testSaslResponseOrdering() throws Exception {
  SecurityUtil.setAuthenticationMethod(
      AuthenticationMethod.TOKEN, conf);
  UserGroupInformation.setConfiguration(conf);

  TestTokenSecretManager sm = new TestTokenSecretManager();
  Server server = setupTestServer(conf, 1, sm);
  try {
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    final UserGroupInformation clientUgi =
        UserGroupInformation.createRemoteUser("client");
    clientUgi.setAuthenticationMethod(AuthenticationMethod.TOKEN);

    TestTokenIdentifier tokenId = new TestTokenIdentifier(
        new Text(clientUgi.getUserName()));
    Token<?> token = new Token<>(tokenId, sm);
    SecurityUtil.setTokenService(token, addr);
    clientUgi.addToken(token);
    clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        final TestRpcService proxy = getClient(addr, conf);
        final ExecutorService executor = Executors.newCachedThreadPool();
        final AtomicInteger count = new AtomicInteger();
        try {
          // queue up a bunch of futures for postponed calls serviced
          // in a random order.
          Future<?>[] futures = new Future<?>[10];
          for (int i=0; i < futures.length; i++) {
            futures[i] = executor.submit(new Callable<Void>(){
              @Override
              public Void call() throws Exception {
                String expect = "future"+count.getAndIncrement();
                String answer = convert(proxy.echoPostponed(null,
                    newEchoRequest(expect)));
                assertEquals(expect, answer);
                return null;
              }
            });
            try {
              // ensures the call is initiated and the response is blocked.
              futures[i].get(100, TimeUnit.MILLISECONDS);
            } catch (TimeoutException te) {
              continue; // expected.
            }
            Assert.fail("future"+i+" did not block");
          }
          // triggers responses to be unblocked in a random order.  having
          // only 1 handler ensures that the prior calls are already
          // postponed.  1 handler also ensures that this call will
          // timeout if the postponing doesn't work (ie. free up handler)
          proxy.sendPostponed(null, newEmptyRequest());
          for (int i=0; i < futures.length; i++) {
            LOG.info("waiting for future"+i);
            futures[i].get();
          }
        } finally {
          RPC.stopProxy(proxy);
          executor.shutdownNow();
        }
        return null;
      }
    });
  } finally {
    server.stop();
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 69, Source: TestSaslRPC.java

Example 13: testProtocolVersionMismatch

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test
public void testProtocolVersionMismatch() throws IOException, ServiceException {
  conf = new Configuration();
  conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
  // Set RPC engine to protobuf RPC engine
  RPC.setProtocolEngine(conf, NewRpcService.class, ProtobufRpcEngine.class);

  // Create server side implementation
  NewServerImpl serverImpl = new NewServerImpl();
  BlockingService service = NewProtobufRpcProto
      .newReflectiveBlockingService(serverImpl);
  // Get RPC server for server side implementation
  server = new RPC.Builder(conf).setProtocol(NewRpcService.class)
      .setInstance(service).setBindAddress(ADDRESS).setPort(PORT).build();
  addr = NetUtils.getConnectAddress(server);

  server.start();

  RPC.setProtocolEngine(conf, OldRpcService.class, ProtobufRpcEngine.class);

  OldRpcService proxy = RPC.getProxy(OldRpcService.class, 0, addr, conf);
  // Verify that exception is thrown if protocolVersion is mismatch between
  // client and server.
  EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
  try {
    proxy.ping(null, emptyRequest);
    fail("Expected an exception to occur as version mismatch.");
  } catch (Exception e) {
    if (! (e.getMessage().contains("version mismatch"))){
      // Exception type is not what we expected, re-throw it.
      throw new IOException(e);
    }
  }

  // Verify that a missing optional field is still compatible in an RPC call.
  RPC.setProtocolEngine(conf, NewerRpcService.class, ProtobufRpcEngine.class);
  NewerRpcService newProxy = RPC.getProxy(NewerRpcService.class, 0, addr,
      conf);
  newProxy.echo(null, emptyRequest);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 42, Source: TestProtoBufRPCCompatibility.java

Example 14: testRealUserIPNotSpecified

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test
public void testRealUserIPNotSpecified() throws IOException {
  final Configuration conf = new Configuration();
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
      getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();

  refreshConf(conf);

  try {
    server.start();

    final InetSocketAddress addr = NetUtils.getConnectAddress(server);

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    String retVal = proxyUserUgi
        .doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            proxy = RPC.getProxy(TestProtocol.class,
                TestProtocol.versionID, addr, conf);
            String ret = proxy.aMethod();
            return ret;
          }
        });

    Assert.fail("The RPC must have failed " + retVal);
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: TestDoAsEffectiveUser.java

Example 15: testUniqueSequentialCallIds

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Tests that client generates a unique sequential call ID for each RPC call,
 * even if multiple threads are using the same client.
 *
 * @throws InterruptedException
 */
@Test(timeout=60000)
public void testUniqueSequentialCallIds()
    throws IOException, InterruptedException {
  int serverThreads = 10, callerCount = 100, perCallerCallCount = 100;
  TestServer server = new TestServer(serverThreads, false);

  // Attach a listener that tracks every call ID received by the server.  This
  // list must be synchronized, because multiple server threads will add to it.
  final List<Integer> callIds = Collections.synchronizedList(
      new ArrayList<Integer>());
  server.callListener = new Runnable() {
    @Override
    public void run() {
      callIds.add(Server.getCallId());
    }
  };

  Client client = new Client(LongWritable.class, conf);

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    SerialCaller[] callers = new SerialCaller[callerCount];
    for (int i = 0; i < callerCount; ++i) {
      callers[i] = new SerialCaller(client, addr, perCallerCallCount);
      callers[i].start();
    }
    for (int i = 0; i < callerCount; ++i) {
      callers[i].join();
      assertFalse(callers[i].failed);
    }
  } finally {
    client.stop();
    server.stop();
  }

  int expectedCallCount = callerCount * perCallerCallCount;
  assertEquals(expectedCallCount, callIds.size());

  // It is not guaranteed that the server executes requests in sequential order
  // of client call ID, so we must sort the call IDs before checking that it
  // contains every expected value.
  Collections.sort(callIds);
  final int startID = callIds.get(0).intValue();
  for (int i = 0; i < expectedCallCount; ++i) {
    assertEquals(startID + i, callIds.get(i).intValue());
  }
}
 
Developer: naver, Project: hadoop, Lines: 54, Source: TestIPC.java


Note: The org.apache.hadoop.net.NetUtils.getConnectAddress method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's License before distributing or using the code, and do not republish without permission.