This article collects typical usage examples of the Java class org.apache.hadoop.ipc.RPC.Server. If you are unsure what the Server class does, how to use it, or where to find working examples, the curated snippets below should help.
The Server class belongs to the org.apache.hadoop.ipc.RPC package. Nine code examples are shown below, ordered by popularity.
Example 1: JournalNodeRpcServer
import org.apache.hadoop.ipc.RPC.Server; // import the required package/class
JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
  this.jn = jn;
  Configuration confCopy = new Configuration(conf);
  // Ensure that Nagle's algorithm doesn't kick in, which could cause latency issues.
  confCopy.setBoolean("ipc.server.tcpnodelay", true);
  // Reader threads will handle the RPC calls.
  confCopy.setBoolean("ipc.direct.handling", true);
  // Set the number of reader threads; it should be at least the number of
  // served namenodes.
  confCopy.setInt(Server.IPC_SERVER_RPC_READ_THREADS_KEY, confCopy.getInt(
      JournalConfigKeys.DFS_QJOURNAL_IPC_READER_KEY,
      JournalConfigKeys.DFS_QJOURNAL_IPC_READER_DEFAULT));
  InetSocketAddress addr = getAddress(confCopy);
  this.server = RPC.getServer(this, addr.getAddress().getHostAddress(),
      addr.getPort(), HANDLER_COUNT, false, confCopy, false);
  this.conf = confCopy;
}
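Later examples (2, 5, and 8) build servers through the RPC.Builder API rather than the static RPC.getServer overload used here. For comparison, a minimal sketch of an equivalent construction with the Builder; JournalProtocol is a placeholder name for whatever protocol interface this server actually implements, not something shown in the example above:
// Sketch only: equivalent construction via RPC.Builder (Hadoop 2.x+).
// JournalProtocol is a hypothetical protocol interface name.
this.server = new RPC.Builder(confCopy)
    .setProtocol(JournalProtocol.class)   // hypothetical protocol class
    .setInstance(this)                    // the implementing object
    .setBindAddress(addr.getAddress().getHostAddress())
    .setPort(addr.getPort())
    .setNumHandlers(HANDLER_COUNT)
    .setVerbose(false)
    .build();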
Example 2: startServer
import org.apache.hadoop.ipc.RPC.Server; // import the required package/class
private Server startServer(MyOptions opts) throws IOException {
  if (opts.serverThreads <= 0) {
    return null;
  }
  conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,
      opts.serverReaderThreads);

  RPC.Server server;
  // Get an RPC server for the server-side implementation.
  if (opts.rpcEngine == ProtobufRpcEngine.class) {
    // Create the server-side implementation.
    PBServerImpl serverImpl = new PBServerImpl();
    BlockingService service = TestProtobufRpcProto
        .newReflectiveBlockingService(serverImpl);

    server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
        .setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
        .setNumHandlers(opts.serverThreads).setVerbose(false).build();
  } else if (opts.rpcEngine == WritableRpcEngine.class) {
    server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
        .setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
        .setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
        .setVerbose(false).build();
  } else {
    throw new RuntimeException("Bad engine: " + opts.rpcEngine);
  }
  server.start();
  return server;
}
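The matching teardown is not shown above; a minimal sketch using the stop() and join() methods that org.apache.hadoop.ipc.Server provides:
// Sketch: matching teardown for the server returned by startServer().
private void stopServer(Server server) throws InterruptedException {
  if (server != null) {
    server.stop();  // interrupt listener, reader, and handler threads
    server.join();  // block until the server has fully shut down
  }
}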
Example 3: startServer
import org.apache.hadoop.ipc.RPC.Server; // import the required package/class
protected void startServer() throws Exception {
  Configuration conf = getConfig();
  YarnRPC rpc = YarnRPC.create(conf);
  this.server = (Server) rpc.getServer(
      ResourceManagerAdministrationProtocol.class, this, masterServiceBindAddress,
      conf, null,
      conf.getInt(YarnConfiguration.RM_ADMIN_CLIENT_THREAD_COUNT,
          YarnConfiguration.DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT));

  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      false)) {
    refreshServiceAcls(
        getConfiguration(conf,
            YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE),
        RMPolicyProvider.getInstance());
  }

  if (rmContext.isHAEnabled()) {
    RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
        ProtobufRpcEngine.class);
    HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
        new HAServiceProtocolServerSideTranslatorPB(this);
    BlockingService haPbService =
        HAServiceProtocolProtos.HAServiceProtocolService
            .newReflectiveBlockingService(haServiceProtocolXlator);
    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
        HAServiceProtocol.class, haPbService);
  }

  this.server.start();
  conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST,
      YarnConfiguration.RM_ADMIN_ADDRESS,
      YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
      server.getListenerAddress());
}
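The ACL-refresh branch above only runs when service-level authorization is switched on. A minimal configuration sketch; in practice this property is usually set in core-site.xml rather than in code:
// Sketch: enable service-level authorization so refreshServiceAcls()
// is invoked during startServer().
Configuration conf = new Configuration();
conf.setBoolean(
    CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);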
Example 4: checkHaStateChange
import org.apache.hadoop.ipc.RPC.Server; // import the required package/class
/**
 * Check that a request to change this node's HA state is valid.
 * In particular, verifies that, if auto failover is enabled, non-forced
 * requests from the HAAdmin CLI are rejected, and vice versa.
 *
 * @param req the request to check
 * @throws AccessControlException if the request is disallowed
 */
private void checkHaStateChange(StateChangeRequestInfo req)
    throws AccessControlException {
  switch (req.getSource()) {
    case REQUEST_BY_USER:
      if (autoFailoverEnabled) {
        throw new AccessControlException(
            "Manual failover for this ResourceManager is disallowed, " +
            "because automatic failover is enabled.");
      }
      break;
    case REQUEST_BY_USER_FORCED:
      if (autoFailoverEnabled) {
        LOG.warn("Allowing manual failover from " +
            org.apache.hadoop.ipc.Server.getRemoteAddress() +
            " even though automatic failover is enabled, because the user " +
            "specified the force flag");
      }
      break;
    case REQUEST_BY_ZKFC:
      if (!autoFailoverEnabled) {
        throw new AccessControlException(
            "Request from ZK failover controller at " +
            org.apache.hadoop.ipc.Server.getRemoteAddress() + " denied " +
            "since automatic failover is not enabled");
      }
      break;
  }
}
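A hedged sketch of how an RPC entry point such as transitionToActive() would drive this check; StateChangeRequestInfo and RequestSource come from org.apache.hadoop.ha.HAServiceProtocol:
// Sketch: a forced request passes the check even with auto failover on
// (it only logs a warning), while a plain REQUEST_BY_USER would be rejected.
StateChangeRequestInfo req =
    new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED);
checkHaStateChange(req);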
Example 5: main
import org.apache.hadoop.ipc.RPC.Server; // import the required package/class
public static void main(String[] args) {
  try {
    Configuration configuration = new Configuration();
    Server server = new RPC.Builder(configuration)
        .setProtocol(PublicInterface.class)
        .setInstance(new RPCServer())
        .setBindAddress("127.0.0.1")
        .setPort(6537)
        .build();
    server.start();
  } catch (IOException e) {
    e.printStackTrace();
  }
}
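The client side is not shown; a minimal sketch, assuming PublicInterface follows the Hadoop convention of declaring a public static final long versionID field (that field is not visible in the example above):
// Sketch: client counterpart for the server above.
public static void callServer() throws IOException {
  PublicInterface proxy = RPC.getProxy(
      PublicInterface.class,
      PublicInterface.versionID,                // assumed version constant
      new InetSocketAddress("127.0.0.1", 6537),
      new Configuration());
  try {
    // ... invoke proxy methods here ...
  } finally {
    RPC.stopProxy(proxy);                       // release the connection
  }
}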
Example 6: getClientMachine
import org.apache.hadoop.ipc.RPC.Server; // import the required package/class
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { // not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { // not an RPC client
    clientMachine = "";
  }
  return clientMachine;
}
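Server.getRemoteAddress() reads thread-local state for the RPC call currently being served, which is why the null checks above are needed: outside an RPC handler thread it always returns null. A small illustrative sketch (the method name is hypothetical):
// Sketch: getRemoteAddress() is only non-null inside an RPC handler.
public String echoCallerAddress() {
  String caller = Server.getRemoteAddress();
  return caller == null ? "" : caller;  // "" when not called over RPC
}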
Example 7: startServer
import org.apache.hadoop.ipc.RPC.Server; // import the required package/class
protected void startServer() throws Exception {
  Configuration conf = getConfig();
  YarnRPC rpc = YarnRPC.create(conf);
  this.server = (Server) rpc.getServer(
      ResourceManagerAdministrationProtocol.class, this, masterServiceAddress,
      conf, null,
      conf.getInt(YarnConfiguration.RM_ADMIN_CLIENT_THREAD_COUNT,
          YarnConfiguration.DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT));

  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      false)) {
    refreshServiceAcls(
        getConfiguration(conf,
            YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE),
        RMPolicyProvider.getInstance());
  }

  if (rmContext.isHAEnabled()) {
    RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
        ProtobufRpcEngine.class);
    HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
        new HAServiceProtocolServerSideTranslatorPB(this);
    BlockingService haPbService =
        HAServiceProtocolProtos.HAServiceProtocolService
            .newReflectiveBlockingService(haServiceProtocolXlator);
    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
        HAServiceProtocol.class, haPbService);
  }

  this.server.start();
  conf.updateConnectAddr(YarnConfiguration.RM_ADMIN_ADDRESS,
      server.getListenerAddress());
}
Example 8: createServer
import org.apache.hadoop.ipc.RPC.Server; // import the required package/class
private Server createServer(Class<?> pbProtocol, InetSocketAddress addr,
    Configuration conf, int numHandlers, BlockingService blockingService,
    String portRangeConfig) throws IOException {
  RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class);
  RPC.Server server = new RPC.Builder(conf).setProtocol(pbProtocol)
      .setInstance(blockingService).setBindAddress(addr.getHostName())
      .setPort(addr.getPort()).setNumHandlers(numHandlers).setVerbose(false)
      .setPortRangeConfig(portRangeConfig).setSecretManager(secretManager)
      .build();
  server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, pbProtocol, blockingService);
  return server;
}
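The Builder already binds pbProtocol, so the explicit addProtocol call above appears to re-register the same binding; the same method is how additional protocols can share one port, as Example 3 does for HAServiceProtocol. A hedged sketch where MyProtocolPB, myService, OtherProtocolPB, and otherService are illustrative placeholders:
// Sketch: serving a second Protobuf protocol on the same server/port.
Server server = createServer(MyProtocolPB.class, addr, conf, numHandlers,
    myService, null);
server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
    OtherProtocolPB.class, otherService);
server.start();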
Example 9: getServer
import org.apache.hadoop.ipc.RPC.Server; // import the required package/class
@VisibleForTesting
public Server getServer() {
  return this.server;
}
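A typical test-side use of this getter is to discover the address that was actually bound after the service starts (the variable names below are illustrative):
// Sketch: tests can read the live bind address via the exposed server.
InetSocketAddress bound = adminService.getServer().getListenerAddress();
int port = bound.getPort();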