This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient. If you are wondering what SaslDataTransferClient is for or how to use it, the curated code examples below may help.
SaslDataTransferClient belongs to the org.apache.hadoop.hdfs.protocol.datatransfer.sasl package. Thirteen code examples of the class are shown below, sorted by popularity by default.
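For orientation, here is a minimal construction sketch following the pattern used in Examples 3 and 6 below: the SASL properties resolver and trusted-channel resolver are read from the Configuration, and the fallbackToSimpleAuth flag is assumed to be supplied by the caller (in the examples it comes from a NameNodeConnector).

import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;

// Minimal construction sketch: resolvers come from the Configuration;
// fallbackToSimpleAuth is an assumed caller-provided flag that records whether
// the cluster allowed falling back to simple authentication.
Configuration conf = new Configuration();
AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
SaslDataTransferClient saslClient = new SaslDataTransferClient(conf,
    DataTransferSaslUtil.getSaslPropertiesResolver(conf),
    TrustedChannelResolver.getInstance(conf), fallbackToSimpleAuth);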
Example 1: connectToDN
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                        Configuration conf,
                                        SaslDataTransferClient saslClient,
                                        SocketFactory socketFactory,
                                        boolean connectToDnViaHostname,
                                        DataEncryptionKeyFactory dekFactory,
                                        Token<BlockTokenIdentifier> blockToken)
    throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);
    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);
    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );
    success = true;
    return result;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
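A rough call-site sketch for the helper above; the surrounding variables (conf, saslClient, dekFactory, blockToken and the target DatanodeInfo) are assumed to be available from an already-initialized client context rather than constructed here.

// Hypothetical usage of connectToDN: a 60-second timeout, the default socket
// factory, and IP-based (not hostname-based) connections to the DataNode.
IOStreamPair streams = connectToDN(datanode, 60000, conf, saslClient,
    SocketFactory.getDefault(), false /* connectToDnViaHostname */,
    dekFactory, blockToken);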
Example 2: peerFromSocketAndKey
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
public static Peer peerFromSocketAndKey(
    SaslDataTransferClient saslClient, Socket s,
    DataEncryptionKeyFactory keyFactory,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtilsClient.cleanup(null, peer);
    }
  }
}
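A minimal usage sketch, assuming the socket is first connected to the DataNode's transfer address and that keyFactory, blockToken, datanodeId and socketTimeout come from the calling client:

// Sketch only: connect a plain socket, then let the helper above wrap it into
// a (possibly SASL/encryption-protected) Peer.
Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
NetUtils.connect(s, NetUtils.createSocketAddr(datanodeId.getXferAddr()), socketTimeout);
Peer peer = peerFromSocketAndKey(saslClient, s, keyFactory, blockToken, datanodeId);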
Example 3: Dispatcher
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);
  this.cluster = NetworkTopology.getInstance(conf);
  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0 ? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;
  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
Example 4: peerFromSocketAndKey
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
public static Peer peerFromSocketAndKey(
    SaslDataTransferClient saslClient, Socket s,
    DataEncryptionKeyFactory keyFactory,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
Example 5: peerFromSocketAndKey
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
public static Peer peerFromSocketAndKey(
    SaslDataTransferClient saslClient, Socket s,
    DataEncryptionKeyFactory keyFactory,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtilsClient.cleanup(null, peer);
    }
  }
}
Example 6: Dispatcher
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode,
    long getBlocksSize, long getBlocksMinBlockSize, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);
  this.cluster = NetworkTopology.getInstance(conf);
  this.dispatchExecutor = dispatcherThreads == 0 ? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.moverThreadAllocator = new Allocator(moverThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;
  this.getBlocksSize = getBlocksSize;
  this.getBlocksMinBlockSize = getBlocksMinBlockSize;
  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
  this.connectToDnViaHostname = conf.getBoolean(
      HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME,
      HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
  placementPolicies = new BlockPlacementPolicies(conf, null, cluster, null);
}
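Compared with the constructor in Example 3, this variant additionally wires up a per-node mover thread Allocator, the getBlocks size limits, the client I/O buffer size, hostname-based DataNode connections (dfs.client.use.datanode.hostname) and the block placement policies; the SaslDataTransferClient itself is still built the same way, from the Configuration's SASL properties resolver and trusted channel resolver plus the connector's fallbackToSimpleAuth flag.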
Example 7: getSaslDataTransferClient
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
/**
 * Gets the {@link SaslDataTransferClient} from the {@link DataNode} attached
 * to the servlet context.
 *
 * @return SaslDataTransferClient from DataNode
 */
private static SaslDataTransferClient getSaslDataTransferClient(
    HttpServletRequest req) {
  DataNode dataNode = (DataNode)req.getSession().getServletContext()
      .getAttribute("datanode");
  return dataNode.saslClient;
}
Example 8: getSaslClient
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
public SaslDataTransferClient getSaslClient() {
  return saslClient;
}
Example 9: trySaslNegotiate
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
    Promise<Void> saslPromise) throws IOException {
  SaslDataTransferClient saslClient = client.getSaslDataTransferClient();
  SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient);
  TrustedChannelResolver trustedChannelResolver =
      SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
  AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient);
  InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
  if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
    saslPromise.trySuccess(null);
    return;
  }
  DataEncryptionKey encryptionKey = client.newDataEncryptionKey();
  if (encryptionKey != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "SASL client doing encrypted handshake for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey),
        encryptionKeyToPassword(encryptionKey.encryptionKey),
        createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise);
  } else if (!UserGroupInformation.isSecurityEnabled()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr
          + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (dnInfo.getXferPort() < 1024) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with "
          + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with "
          + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (saslPropsResolver != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "SASL client doing general handshake for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken),
        buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise);
  } else {
    // It's a secured cluster using non-privileged ports, but no SASL. The only way this can
    // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare
    // edge case.
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
          + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  }
}
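To summarize the branches above: the handshake is skipped (the promise succeeds immediately) when the channel is trusted, security is disabled, the DataNode listens on a privileged port (below 1024), or the cluster has already fallen back to simple authentication; an encrypted handshake is performed when a DataEncryptionKey is available; a general SASL handshake is performed when a SaslPropertiesResolver is configured; and the final branch covers the rare ignore.secure.ports.for.testing edge case.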
Example 10: getSaslDataTransferClient
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
/**
 * Returns the SaslDataTransferClient configured for this DFSClient.
 *
 * @return SaslDataTransferClient configured for this DFSClient
 */
public SaslDataTransferClient getSaslDataTransferClient() {
  return saslClient;
}
Example 11: getTrustedChannelResolver
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
TrustedChannelResolver getTrustedChannelResolver(SaslDataTransferClient saslClient);
Example 12: getSaslPropsResolver
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
SaslPropertiesResolver getSaslPropsResolver(SaslDataTransferClient saslClient);
Example 13: getFallbackToSimpleAuth
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; // import the required package/class
AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient);
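Examples 11-13 are single method declarations. A plausible shape for the adaptor interface they belong to is sketched below; only the three signatures are taken from the examples above, while the interface name SaslAdaptor and its visibility are assumptions. Such an adaptor would be implemented by the calling code (for instance via reflection), since these SaslDataTransferClient internals are not exposed through public getters.

// Assumed grouping of the three accessor methods shown in Examples 11-13.
private interface SaslAdaptor {
  TrustedChannelResolver getTrustedChannelResolver(SaslDataTransferClient saslClient);
  SaslPropertiesResolver getSaslPropsResolver(SaslDataTransferClient saslClient);
  AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient);
}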