This article compiles typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol. If you are wondering what ClientDatanodeProtocol is for, how to use it, or where to find working examples, the hand-picked code samples below should help.
The ClientDatanodeProtocol class belongs to the org.apache.hadoop.hdfs.protocol package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
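Before the individual examples, here is a minimal, self-contained sketch of the typical proxy lifecycle: build a ClientDatanodeProtocol proxy for one datanode, make an RPC, and release the underlying connection. It only reuses calls that appear in the examples below (DFSUtil.createClientDatanodeProtocolProxy, refreshNamenodes and RPC.stopProxy); the DatanodeInfo, Configuration, timeout and hostname flag are placeholders you would supply, and on newer Hadoop lines the factory method lives in DFSUtilClient instead of DFSUtil (compare Example 1 with Example 5).
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.ipc.RPC;

public class ClientDatanodeProtocolSketch {
  /** Asks one datanode to reload its namenode list, then releases the proxy. */
  static void refreshNamenodesOn(DatanodeInfo datanode, Configuration conf,
      int socketTimeout, boolean connectToDnViaHostname) throws IOException {
    ClientDatanodeProtocol proxy = null;
    try {
      // Same factory method used by Examples 1, 2 and 9 below.
      proxy = DFSUtil.createClientDatanodeProtocolProxy(datanode, conf,
          socketTimeout, connectToDnViaHostname);
      proxy.refreshNamenodes(); // the RPC exercised in Example 15
    } finally {
      if (proxy != null) {
        RPC.stopProxy(proxy); // always free the underlying RPC connection
      }
    }
  }
}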
Example 1: getDatanodeProxy
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
Example 2: call
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  TraceScope scope =
      Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    scope.close();
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
Example 3: getDataNodeProxy
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();
  // For datanode proxy the server principal should be DN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
  // Create the client
  ClientDatanodeProtocol dnProtocol =
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Example 4: shutdownDatanode
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
private int shutdownDatanode(String[] argv, int i) throws IOException {
  final String dn = argv[i];
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
  boolean upgrade = false;
  if (argv.length - 1 == i + 1) {
    if ("upgrade".equalsIgnoreCase(argv[i + 1])) {
      upgrade = true;
    } else {
      printUsage("-shutdownDatanode");
      return -1;
    }
  }
  dnProxy.shutdownDatanode(upgrade);
  System.out.println("Submitted a shutdown request to datanode " + dn);
  return 0;
}
Example 5: getDatanodeProxy
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtilClient.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
Example 6: getReconfigurableProperties
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
int getReconfigurableProperties(String nodeType, String address,
    PrintStream out, PrintStream err) throws IOException {
  if ("datanode".equals(nodeType)) {
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(address);
    try {
      List<String> properties =
          dnProxy.listReconfigurableProperties();
      out.println(
          "Configuration properties that are allowed to be reconfigured:");
      for (String name : properties) {
        out.println(name);
      }
    } catch (IOException e) {
      err.println("DataNode reconfiguration: " + e + ".");
      return 1;
    }
  } else {
    err.println("Node type " + nodeType +
        " does not support reconfiguration.");
    return 1;
  }
  return 0;
}
Example 7: getDataNodeProxy
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();
  // For datanode proxy the server principal should be DN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
  // Create the client
  ClientDatanodeProtocol dnProtocol =
      DFSUtilClient.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Example 8: runGetBalancerBandwidthCmd
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
private void runGetBalancerBandwidthCmd(DFSAdmin admin, String[] args,
    ClientDatanodeProtocol proxy, long expectedBandwidth) throws Exception {
  PrintStream initialStdOut = System.out;
  outContent.reset();
  try {
    System.setOut(outStream);
    int exitCode = admin.run(args);
    assertEquals("DFSAdmin should return 0", 0, exitCode);
    String bandwidthOutMsg = "Balancer bandwidth is " + expectedBandwidth
        + " bytes per second.";
    String strOut = new String(outContent.toByteArray(), UTF8);
    assertTrue("Wrong balancer bandwidth!", strOut.contains(bandwidthOutMsg));
  } finally {
    System.setOut(initialStdOut);
  }
}
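The test helper above only inspects the text printed by the DFSAdmin command; the bandwidth value itself travels from the datanode over this same protocol. As a hedged sketch, assuming your Hadoop release exposes a getBalancerBandwidth() RPC on ClientDatanodeProtocol (newer releases do; older ones may not), a direct programmatic query looks like this:
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;

class BalancerBandwidthQuery {
  /** Reads the datanode's current balancer bandwidth (bytes per second) over RPC. */
  static long queryBalancerBandwidth(ClientDatanodeProtocol proxy)
      throws IOException {
    // Assumption: getBalancerBandwidth() is available on this proxy;
    // the test above verifies the same value through dfsadmin output instead.
    return proxy.getBalancerBandwidth();
  }
}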
Example 9: call
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
Example 10: getOrCreate
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
/**
 * Creates an RPC object if necessary.
 */
private synchronized ClientDatanodeProtocol getOrCreate(String name)
    throws IOException {
  ClientDatanodeProtocol obj = hadoopHash.get(name);
  if (obj != null) {
    return obj;
  }
  // connection does not exist, create a new one.
  DatanodeID dn = new DatanodeID(name, "", -1, getPort(name));
  ClientDatanodeProtocol instance =
      DFSClient.createClientDatanodeProtocolProxy(dn, conf, timeout);
  // cache connection
  hadoopHash.put(name, instance);
  return instance;
}
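Design note: this helper keeps one proxy per datanode name in hadoopHash, so the thrift handlers in Examples 11 and 12 can reuse an existing RPC connection instead of re-establishing one for every getBlockInfo or getBlockPathInfo call.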
Example 11: getBlockInfo
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
public ThdfsBlock getBlockInfo(TDatanodeID datanode,
    ThdfsNamespaceId namespaceid,
    ThdfsBlock block)
    throws ThriftIOException, TException {
  Block blk = new Block(block.blockId, block.numBytes,
      block.generationStamp);
  // make RPC to datanode
  try {
    ClientDatanodeProtocol remote = getOrCreate(datanode.name);
    Block nblk = remote.getBlockInfo(namespaceid.id, blk);
    return new ThdfsBlock(nblk.getBlockId(), nblk.getNumBytes(),
        nblk.getGenerationStamp());
  } catch (IOException e) {
    String msg = "Error getBlockInfo datanode " + datanode.name +
        " namespaceid " + namespaceid.id +
        " block " + blk;
    LOG.warn(msg);
    throw new ThriftIOException(msg);
  }
}
Example 12: getBlockPathInfo
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
public ThdfsBlockPath getBlockPathInfo(TDatanodeID datanode,
    ThdfsNamespaceId namespaceId,
    ThdfsBlock block)
    throws ThriftIOException, TException {
  Block blk = new Block(block.blockId, block.numBytes,
      block.generationStamp);
  // make RPC to datanode to find local pathnames of blocks
  try {
    ClientDatanodeProtocol remote = getOrCreate(datanode.name);
    BlockPathInfo pathinfo = remote.getBlockPathInfo(namespaceId.id, blk);
    return new ThdfsBlockPath(pathinfo.getBlockPath(),
        pathinfo.getMetaPath());
  } catch (IOException e) {
    String msg = "Error getBlockPathInfo datanode " + datanode.name +
        " namespaceid " + namespaceId.id +
        " block " + blk;
    LOG.warn(msg);
    throw new ThriftIOException(msg);
  }
}
Example 13: createClientDNProtocolProxy
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
static ProtocolProxy<ClientDatanodeProtocol> createClientDNProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout)
    throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
      datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    ClientDatanodeProtocol.LOG.info("ClientDatanodeProtocol addr=" + addr);
  }
  UserGroupInformation ugi;
  try {
    ugi = UserGroupInformation.login(conf);
  } catch (LoginException le) {
    throw new RuntimeException("Couldn't login!");
  }
  return RPC.getProtocolProxy(ClientDatanodeProtocol.class,
      ClientDatanodeProtocol.versionID, addr, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
Example 14: createClientDatanodeProtocolProxy
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
/**
 * Set up a session with the specified datanode.
 */
static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
    DatanodeInfo datanodeid, Configuration conf) throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
      datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    ClientDatanodeProtocol.LOG.info("ClientDatanodeProtocol addr=" + addr);
  }
  try {
    return (ClientDatanodeProtocol) RPC.getProxy(ClientDatanodeProtocol.class,
        ClientDatanodeProtocol.versionID, addr, conf);
  } catch (RPC.VersionMismatch e) {
    long clientVersion = e.getClientVersion();
    long datanodeVersion = e.getServerVersion();
    if (clientVersion > datanodeVersion &&
        !ProtocolCompatible.isCompatibleClientDatanodeProtocol(
            clientVersion, datanodeVersion)) {
      throw new RPC.VersionIncompatible(
          ClientDatanodeProtocol.class.getName(), clientVersion, datanodeVersion);
    }
    return (ClientDatanodeProtocol) e.getProxy();
  }
}
Example 15: refreshNamenodes
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the dependent package/class
/**
 * Refresh the namenodes served by the {@link DataNode}.
 * Usage: java DFSAdmin -refreshNamenodes datanodehost:port
 * @param argv List of command line parameters.
 * @param i The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 * @return exitcode 0 on success, non-zero on failure
 */
public int refreshNamenodes(String[] argv, int i) throws IOException {
  ClientDatanodeProtocol datanode = null;
  String dnAddr = (argv.length == 2) ? argv[i] : null;
  try {
    datanode = getClientDatanodeProtocol(dnAddr);
    if (datanode != null) {
      datanode.refreshNamenodes();
      return 0;
    } else {
      return -1;
    }
  } finally {
    if (datanode != null && Proxy.isProxyClass(datanode.getClass())) {
      RPC.stopProxy(datanode);
    }
  }
}