

Java Server.start Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.ipc.Server.start. If you are wondering what Server.start does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.ipc.Server.


A total of 15 code examples of the Server.start method are shown below, ordered by popularity.
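Before the examples, here is a minimal, self-contained sketch of the pattern they all share: build a Server with RPC.Builder, call start(), read back the bound address with NetUtils.getConnectAddress, and call stop() when finished. This sketch is for illustration only and assumes a Hadoop 2.x+ classpath where RPC.Builder is available; the PingProtocol interface, PingImpl class, and MinimalIpcServerDemo class are hypothetical names introduced here, not part of Hadoop.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;

public class MinimalIpcServerDemo {

  // Hypothetical protocol interface; Hadoop RPC reads the versionID field reflectively.
  public interface PingProtocol {
    long versionID = 1L;
    String ping();
  }

  // Hypothetical implementation that the server dispatches incoming calls to.
  public static class PingImpl implements PingProtocol {
    @Override
    public String ping() {
      return "pong";
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Bind to an ephemeral port (0) so the OS picks a free one.
    Server server = new RPC.Builder(conf)
        .setProtocol(PingProtocol.class)
        .setInstance(new PingImpl())
        .setBindAddress("0.0.0.0")
        .setPort(0)
        .setNumHandlers(1)
        .build();

    try {
      server.start(); // the method covered by this article
      // Clients connect to the address the server actually bound to.
      InetSocketAddress addr = NetUtils.getConnectAddress(server);
      System.out.println("IPC server listening at " + addr);
    } finally {
      server.stop(); // release the port and handler threads
    }
  }
}

As the examples below show, the same start/stop pattern applies when the server is obtained from rpc.getServer(...) or RpcServerFactoryPBImpl.get().getServer(...) instead of RPC.Builder.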

Example 1: startAndGetRPCServerAddress

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
private InetSocketAddress startAndGetRPCServerAddress(InetSocketAddress serverAddress) {
  Configuration conf = new Configuration();

  try {
    RPC.setProtocolEngine(conf,
        HAServiceProtocolPB.class, ProtobufRpcEngine.class);
    HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
        new HAServiceProtocolServerSideTranslatorPB(new MockHAProtocolImpl());
    BlockingService haPbService = HAServiceProtocolService
        .newReflectiveBlockingService(haServiceProtocolXlator);

    Server server = new RPC.Builder(conf)
        .setProtocol(HAServiceProtocolPB.class)
        .setInstance(haPbService)
        .setBindAddress(serverAddress.getHostName())
        .setPort(serverAddress.getPort()).build();
    server.start();
    return NetUtils.getConnectAddress(server);
  } catch (IOException e) {
    return null;
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 23, Source: DummyHAService.java

Example 2: testPbServerFactory

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
private void testPbServerFactory() {
  InetSocketAddress addr = new InetSocketAddress(0);
  Configuration conf = new Configuration();
  ResourceTracker instance = new ResourceTrackerTestImpl();
  Server server = null;
  try {
    server = 
      RpcServerFactoryPBImpl.get().getServer(
          ResourceTracker.class, instance, addr, conf, null, 1);
    server.start();
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create server");
  } finally {
    if (server != null) {
      server.stop();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestYSCRPCFactories.java

Example 3: testNMAuditLoggerWithIP

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
/**
 * Test {@link NMAuditLogger} with IP set.
 */
@Test  
public void testNMAuditLoggerWithIP() throws Exception {
  Configuration conf = new Configuration();
  // start the IPC server
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
      .setPort(0).setNumHandlers(5).setVerbose(true).build();

  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  // Make a client connection and test the audit log
  TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
                         TestProtocol.versionID, addr, conf);
  // Start the testcase
  proxy.ping();

  server.stop();
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestNMAuditLogger.java

Example 4: testPbServerFactory

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
private void testPbServerFactory() {
  InetSocketAddress addr = new InetSocketAddress(0);
  Configuration conf = new Configuration();
  LocalizationProtocol instance = new LocalizationProtocolTestImpl();
  Server server = null;
  try {
    server = 
      RpcServerFactoryPBImpl.get().getServer(
          LocalizationProtocol.class, instance, addr, conf, null, 1);
    server.start();
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create server");
  } finally {
    if (server != null) {
      server.stop();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestRPCFactories.java

Example 5: testInterDNProtocolTimeout

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
/** Test to verify that InterDatanode RPC times out as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    fail ("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestInterDatanodeProtocol.java

Example 6: testAuditLoggerWithIP

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
/**
 * Test {@link AuditLogger} with IP set.
 */
public void testAuditLoggerWithIP() throws Exception {
  Configuration conf = new Configuration();
  // start the IPC server
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
          .setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
          .setPort(0).build();
  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  // Make a client connection and test the audit log
  TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
                         TestProtocol.versionID, addr, conf);
  // Start the testcase
  proxy.ping();

  server.stop();
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestAuditLogger.java

Example 7: testPbServerFactory

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
private void testPbServerFactory() {
  InetSocketAddress addr = new InetSocketAddress(0);
  Configuration conf = new Configuration();
  ApplicationMasterProtocol instance = new AMRMProtocolTestImpl();
  Server server = null;
  try {
    server = 
      RpcServerFactoryPBImpl.get().getServer(
          ApplicationMasterProtocol.class, instance, addr, conf, null, 1);
    server.start();
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create server");
  } finally {
    if (server != null) {
      server.stop();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 20, Source: TestRPCFactories.java

Example 8: serviceStart

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
@Override
protected void serviceStart() throws Exception {
  Configuration conf = getConfig();

  Server server;
  try {
    secretMgr = new ClientToAMTokenSecretManager(
        this.appAttemptId, secretKey);
    server =
        new RPC.Builder(conf)
          .setProtocol(CustomProtocol.class)
          .setNumHandlers(1)
          .setSecretManager(secretMgr)
          .setInstance(this).build();
  } catch (Exception e) {
    throw new YarnRuntimeException(e);
  }
  server.start();
  this.address = NetUtils.getConnectAddress(server);
  super.serviceStart();
}
 
Developer: yncxcw, Project: big-c, Lines: 22, Source: TestClientToAMTokens.java

Example 9: testPbServerFactory

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
private void testPbServerFactory() {
  InetSocketAddress addr = new InetSocketAddress(0);
  Configuration conf = new Configuration();
  MRClientProtocol instance = new MRClientProtocolTestImpl();
  Server server = null;
  try {
    server = 
      RpcServerFactoryPBImpl.get().getServer(
        MRClientProtocol.class, instance, addr, conf, null, 1);
    server.start();
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to crete server");
  } finally {
    if (server != null) {
      server.stop();
    }
  }
}
 
Developer: yncxcw, Project: big-c, Lines: 18, Source: TestRPCFactories.java

Example 10: testRealUserAuthorizationSuccess

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
@Test(timeout=4000)
public void testRealUserAuthorizationSuccess() throws IOException {
  final Configuration conf = new Configuration();
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
      "group1");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();

  refreshConf(conf);
  try {
    server.start();

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);
    checkRemoteUgi(server, realUserUgi, conf);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    checkRemoteUgi(server, proxyUserUgi, conf);
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 33, Source: TestDoAsEffectiveUser.java

Example 11: testRealUserGroupNotSpecified

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
@Test
public void testRealUserGroupNotSpecified() throws IOException {
  final Configuration conf = new Configuration();
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();

  try {
    server.start();

    final InetSocketAddress addr = NetUtils.getConnectAddress(server);

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    String retVal = proxyUserUgi
        .doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
                TestProtocol.versionID, addr, conf);
            String ret = proxy.aMethod();
            return ret;
          }
        });

    Assert.fail("The RPC must have failed " + retVal);
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 40, Source: TestDoAsEffectiveUser.java

Example 12: run

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
@Override
public void run() {
  Configuration conf = new Configuration();
  YarnRPC rpc = YarnRPC.create(conf);
  InetSocketAddress address = new InetSocketAddress(rpcAddress, rpcPort);
  Server server = rpc.getServer(
      TensorFlowCluster.class, this, address, conf, null,
      conf.getInt(YarnConfiguration.RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT,
          YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT));

  server.start();
}
 
Developer: Intel-bigdata, Project: TensorFlowOnYARN, Lines: 13, Source: TFApplicationRpcServer.java

Example 13: testBlockTokenRpc

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
@Test
public void testBlockTokenRpc() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));

  final Server server = createMockDatanode(sm, token, conf);

  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  final UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(block3.toString());
  ticket.addToken(token);

  ClientDatanodeProtocol proxy = null;
  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
        NetUtils.getDefaultSocketFactory(conf));
    assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestBlockToken.java

Example 14: testRealUserSetup

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
@Test(timeout=4000)
public void testRealUserSetup() throws IOException {
  final Configuration conf = new Configuration();
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
      getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(5).setVerbose(true).build();

  refreshConf(conf);
  try {
    server.start();

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);
    checkRemoteUgi(server, realUserUgi, conf);
    
    UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
        PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    checkRemoteUgi(server, proxyUserUgi, conf);
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestDoAsEffectiveUser.java

Example 15: testBlockTokenRpc

import org.apache.hadoop.ipc.Server; // import the package/class the method depends on
@Test
public void testBlockTokenRpc() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));

  final Server server = createMockDatanode(sm, token, conf);

  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  final UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(block3.toString());
  ticket.addToken(token);

  ClientDatanodeProtocol proxy = null;
  try {
    proxy = DFSUtilClient.createClientDatanodeProtocolProxy(addr, ticket, conf,
        NetUtils.getDefaultSocketFactory(conf));
    assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 33, Source: TestBlockToken.java


Note: The org.apache.hadoop.ipc.Server.start examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and the source code copyright remains with the original authors; consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.