This article collects typical usage examples of the Java method org.apache.hadoop.net.NetUtils.getDefaultSocketFactory. If you are unsure what NetUtils.getDefaultSocketFactory does, how to call it, or how it is used in practice, the curated code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.net.NetUtils.
Five code examples of NetUtils.getDefaultSocketFactory are shown below, ordered by popularity by default.
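Before diving into the examples, here is a minimal, self-contained sketch (the class name DefaultSocketFactoryDemo is invented for illustration) of what the method does: NetUtils.getDefaultSocketFactory(conf) reads the hadoop.rpc.socket.factory.class.default setting from the given Configuration and instantiates the corresponding javax.net.SocketFactory; in a stock configuration this resolves to org.apache.hadoop.net.StandardSocketFactory.

import java.io.IOException;
import java.net.Socket;
import javax.net.SocketFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

public class DefaultSocketFactoryDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Resolves hadoop.rpc.socket.factory.class.default; in a stock
    // configuration this is org.apache.hadoop.net.StandardSocketFactory.
    SocketFactory factory = NetUtils.getDefaultSocketFactory(conf);
    System.out.println("Using socket factory: " + factory.getClass().getName());

    // The factory hands out plain, unconnected sockets that callers
    // (typically the Hadoop IPC layer) connect themselves.
    Socket socket = factory.createSocket();
    socket.close();
  }
}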
Example 1: getProxyForAddress
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private HAServiceProtocol getProxyForAddress(Configuration conf,
    int timeoutMs, InetSocketAddress addr) throws IOException {
  Configuration confCopy = new Configuration(conf);
  // Lower the timeout so we quickly fail to connect
  confCopy.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
  SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
  return new HAServiceProtocolClientSideTranslatorPB(
      addr,
      confCopy, factory, timeoutMs);
}
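The helper above is private to its enclosing class, so the following only sketches the call pattern from a hypothetical method in that same class; the host name, port, and timeout are invented for illustration.

// Hypothetical caller in the same enclosing class; host, port and timeout
// below are illustrative only.
void printHAState(Configuration conf) throws IOException {
  InetSocketAddress addr = new InetSocketAddress("nn1.example.com", 8020);
  HAServiceProtocol proxy = getProxyForAddress(conf, 5000 /* timeoutMs */, addr);
  // Because ipc.client.connect.max.retries was lowered to 1 above, an
  // unreachable address fails fast instead of retrying for a long time.
  HAServiceStatus status = proxy.getServiceStatus();
  System.out.println("HA state: " + status.getState());
}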
Example 2: getZKFCProxy
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * @return a proxy to the ZKFC which is associated with this HA service.
 */
public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs)
    throws IOException {
  Configuration confCopy = new Configuration(conf);
  // Lower the timeout so we quickly fail to connect
  confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
  SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
  return new ZKFCProtocolClientSideTranslatorPB(
      getZKFCAddress(),
      confCopy, factory, timeoutMs);
}
Example 3: testSocketFactoryAsKeyInMap
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test
public void testSocketFactoryAsKeyInMap() {
  Map<SocketFactory, Integer> dummyCache = new HashMap<SocketFactory, Integer>();
  int toBeCached1 = 1;
  int toBeCached2 = 2;
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.ipc.TestSocketFactory$DummySocketFactory");
  final SocketFactory dummySocketFactory = NetUtils
      .getDefaultSocketFactory(conf);
  dummyCache.put(dummySocketFactory, toBeCached1);
  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.net.StandardSocketFactory");
  final SocketFactory defaultSocketFactory = NetUtils
      .getDefaultSocketFactory(conf);
  dummyCache.put(defaultSocketFactory, toBeCached2);
  Assert
      .assertEquals("The cache contains two elements", 2, dummyCache.size());
  Assert.assertEquals("Equals of both socket factory shouldn't be same",
      defaultSocketFactory.equals(dummySocketFactory), false);
  assertSame(toBeCached2, dummyCache.remove(defaultSocketFactory));
  dummyCache.put(defaultSocketFactory, toBeCached2);
  assertSame(toBeCached1, dummyCache.remove(dummySocketFactory));
}
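The DummySocketFactory referenced above is a nested class in org.apache.hadoop.ipc.TestSocketFactory whose body is not shown here. A minimal stand-in that would satisfy this test could look like the following; this is an illustrative guess, not the actual class.

// Illustrative stand-in for TestSocketFactory$DummySocketFactory; the real
// class may differ. All the test needs is a second, distinct SocketFactory
// implementation, so the two factories compare unequal and occupy separate
// entries in the map.
static class DummySocketFactory extends StandardSocketFactory {
  @Override
  public Socket createSocket() throws IOException {
    return super.createSocket();
  }
}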
Example 4: RpcClientImpl
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Used in test only. Construct an IPC client for the cluster {@code clusterId} with
 * the default SocketFactory
 */
@VisibleForTesting
RpcClientImpl(Configuration conf, String clusterId) {
  this(conf, clusterId, NetUtils.getDefaultSocketFactory(conf), null, null);
}
Example 5: Client
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Construct an IPC client with the default SocketFactory
 * @param valueClass
 * @param conf
 */
public Client(Class<? extends Writable> valueClass, Configuration conf) {
  this(valueClass, conf, NetUtils.getDefaultSocketFactory(conf));
}
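A hedged usage sketch of this constructor follows; the class name IpcClientDemo and the choice of LongWritable as the value class are invented for illustration, and most applications obtain an IPC client indirectly through org.apache.hadoop.ipc.RPC rather than constructing one directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.ipc.Client;

public class IpcClientDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Internally uses the SocketFactory returned by
    // NetUtils.getDefaultSocketFactory(conf).
    Client client = new Client(LongWritable.class, conf);
    try {
      // ... issue calls through the Hadoop IPC layer ...
    } finally {
      client.stop();  // release cached connections and client threads
    }
  }
}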