

Java ClientDatanodeProtocol Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol. If you are wondering what the ClientDatanodeProtocol class does, or how and where to use it, the curated code examples below should help.


The ClientDatanodeProtocol class belongs to the org.apache.hadoop.hdfs.protocol package. Fifteen code examples of the class are presented below, sorted by popularity.
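
Before the individual examples, here is a minimal sketch of the pattern most of them share: create a client-to-datanode RPC proxy, invoke a protocol method on it, and stop the proxy when done. The class and method names in this sketch are hypothetical; the calls it relies on (DFSUtil.createClientDatanodeProtocolProxy, refreshNamenodes, RPC.stopProxy) are taken from Examples 1, 2, and 15 below and assume a Hadoop 2.x classpath.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.ipc.RPC;

public class ClientDatanodeProtocolSketch {
  /**
   * Hypothetical helper: opens a proxy to the given datanode, asks it to
   * refresh its list of namenodes, and then releases the proxy.
   */
  public static void refreshDatanode(DatanodeInfo datanode, Configuration conf,
      int socketTimeout, boolean connectToDnViaHostname) throws IOException {
    ClientDatanodeProtocol proxy = null;
    try {
      // Create the client-to-datanode RPC proxy (as in Examples 1 and 2).
      proxy = DFSUtil.createClientDatanodeProtocolProxy(datanode, conf,
          socketTimeout, connectToDnViaHostname);
      // Invoke a protocol method; refreshNamenodes() also appears in Example 15.
      proxy.refreshNamenodes();
    } finally {
      // Always stop the proxy so the underlying RPC connection is closed.
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
    }
  }
}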

Example 1: getDatanodeProxy

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: BlockReaderLocalLegacy.java

Example 2: call

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  TraceScope scope =
      Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    scope.close();
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: BlockStorageLocationUtil.java

Example 3: getDataNodeProxy

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For datanode proxy the server principal should be DN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: DFSAdmin.java

Example 4: shutdownDatanode

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
private int shutdownDatanode(String[] argv, int i) throws IOException {
  final String dn = argv[i];
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
  boolean upgrade = false;
  if (argv.length-1 == i+1) {
    if ("upgrade".equalsIgnoreCase(argv[i+1])) {
      upgrade = true;
    } else {
      printUsage("-shutdownDatanode");
      return -1;
    }
  }
  dnProxy.shutdownDatanode(upgrade);
  System.out.println("Submitted a shutdown request to datanode " + dn);
  return 0;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: DFSAdmin.java

Example 5: getDatanodeProxy

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtilClient.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 20, Source: BlockReaderLocalLegacy.java

Example 6: getReconfigurableProperties

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
int getReconfigurableProperties(String nodeType, String address,
    PrintStream out, PrintStream err) throws IOException {
  if ("datanode".equals(nodeType)) {
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(address);
    try {
      List<String> properties =
          dnProxy.listReconfigurableProperties();
      out.println(
          "Configuration properties that are allowed to be reconfigured:");
      for (String name : properties) {
        out.println(name);
      }
    } catch (IOException e) {
      err.println("DataNode reconfiguration: " + e + ".");
      return 1;
    }
  } else {
    err.println("Node type " + nodeType +
        " does not support reconfiguration.");
    return 1;
  }
  return 0;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: DFSAdmin.java

Example 7: getDataNodeProxy

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For datanode proxy the server principal should be DN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtilClient.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 17, Source: DFSAdmin.java

Example 8: runGetBalancerBandwidthCmd

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
private void runGetBalancerBandwidthCmd(DFSAdmin admin, String[] args,
    ClientDatanodeProtocol proxy, long expectedBandwidth) throws Exception {
  PrintStream initialStdOut = System.out;
  outContent.reset();
  try {
    System.setOut(outStream);
    int exitCode = admin.run(args);
    assertEquals("DFSAdmin should return 0", 0, exitCode);
    String bandwidthOutMsg = "Balancer bandwidth is " + expectedBandwidth
        + " bytes per second.";
    String strOut = new String(outContent.toByteArray(), UTF8);
    assertTrue("Wrong balancer bandwidth!", strOut.contains(bandwidthOutMsg));
  } finally {
    System.setOut(initialStdOut);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 17, Source: TestBalancerBandwidth.java

Example 9: call

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 20, Source: BlockStorageLocationUtil.java

Example 10: getOrCreate

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
/**
 * Creates one rpc object if necessary
 */
private synchronized ClientDatanodeProtocol getOrCreate(String name)
  throws IOException {
  ClientDatanodeProtocol obj =  hadoopHash.get(name);
  if (obj != null) {
    return obj;
  }
  // connection does not exist, create a new one.
  DatanodeID dn = new DatanodeID(name, "", -1, getPort(name));
  ClientDatanodeProtocol instance =
    DFSClient.createClientDatanodeProtocolProxy(dn, conf, timeout); 

  // cache connection
  hadoopHash.put(name, instance);
  return instance;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 19, Source: HadoopThriftDatanodeServer.java

Example 11: getBlockInfo

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
public ThdfsBlock getBlockInfo(TDatanodeID datanode,
                      ThdfsNamespaceId namespaceid,
                      ThdfsBlock block)
                      throws ThriftIOException, TException { 
  Block blk = new Block(block.blockId, block.numBytes, 
                         block.generationStamp);
  // make RPC to datanode
  try {
    ClientDatanodeProtocol remote = getOrCreate(datanode.name);
    Block nblk = remote.getBlockInfo(namespaceid.id, blk);
    return new ThdfsBlock(nblk.getBlockId(), nblk.getNumBytes(),
                          nblk.getGenerationStamp());
  } catch (IOException e) {
    String msg = "Error getBlockInfo datanode " + datanode.name +
                 " namespaceid " + namespaceid.id +
                 " block " + blk;
    LOG.warn(msg);
    throw new ThriftIOException(msg);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 21, Source: HadoopThriftDatanodeServer.java

Example 12: getBlockPathInfo

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
public ThdfsBlockPath getBlockPathInfo(TDatanodeID datanode,
                              ThdfsNamespaceId namespaceId,
                              ThdfsBlock block)
                              throws ThriftIOException, TException {
  Block blk = new Block(block.blockId, block.numBytes, 
                        block.generationStamp);

  // make RPC to datanode to find local pathnames of blocks
  try {
    ClientDatanodeProtocol remote = getOrCreate(datanode.name);
    BlockPathInfo pathinfo = remote.getBlockPathInfo(namespaceId.id, blk);
    return new ThdfsBlockPath(pathinfo.getBlockPath(), 
                              pathinfo.getMetaPath());
  } catch (IOException e) {
    String msg = "Error getBlockPathInfo datanode " + datanode.name +
                 " namespaceid " + namespaceId.id +
                 " block " + blk;
    LOG.warn(msg);
    throw new ThriftIOException(msg);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 22, Source: HadoopThriftDatanodeServer.java

Example 13: createClientDNProtocolProxy

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
static ProtocolProxy<ClientDatanodeProtocol> createClientDNProtocolProxy (
    DatanodeID datanodeid, Configuration conf, int socketTimeout)
    throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
    datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    ClientDatanodeProtocol.LOG.info("ClientDatanodeProtocol addr=" + addr);
  }
  UserGroupInformation ugi;
  try {
    ugi = UserGroupInformation.login(conf);
  } catch (LoginException le) {
    throw new RuntimeException("Couldn't login!");
  }

  return RPC.getProtocolProxy(ClientDatanodeProtocol.class,
      ClientDatanodeProtocol.versionID, addr, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 20, Source: DFSClient.java

Example 14: createClientDatanodeProtocolProxy

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
/**
 * Setup a session with the specified datanode
 */
static ClientDatanodeProtocol createClientDatanodeProtocolProxy (
    DatanodeInfo datanodeid, Configuration conf) throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
    datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    ClientDatanodeProtocol.LOG.info("ClientDatanodeProtocol addr=" + addr);
  }
  try {
    return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class,
      ClientDatanodeProtocol.versionID, addr, conf);
  } catch (RPC.VersionMismatch e) {
    long clientVersion = e.getClientVersion();
    long datanodeVersion = e.getServerVersion();
    if (clientVersion > datanodeVersion &&
        !ProtocolCompatible.isCompatibleClientDatanodeProtocol(
            clientVersion, datanodeVersion)) {
      throw new RPC.VersionIncompatible(
          ClientDatanodeProtocol.class.getName(), clientVersion, datanodeVersion);
    }
    return (ClientDatanodeProtocol)e.getProxy();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 26, Source: FileFixer.java

Example 15: refreshNamenodes

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the required package/class
/**
 * Refresh the namenodes served by the {@link DataNode}.
 * Usage: java DFSAdmin -refreshNamenodes datanodehost:port
 * @param argv List of command line parameters.
 * @param i The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 * @return exitcode 0 on success, non-zero on failure
 */
public int refreshNamenodes(String[] argv, int i) throws IOException {
  ClientDatanodeProtocol datanode = null;
  String dnAddr = (argv.length == 2) ? argv[i] : null;
  try {
    datanode = getClientDatanodeProtocol(dnAddr);
    if (datanode != null) {
      datanode.refreshNamenodes();
      return 0;
    } else {
      return -1;
    }
  } finally {
    if (datanode != null && Proxy.isProxyClass(datanode.getClass())) {
      RPC.stopProxy(datanode);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 27, Source: DFSAdmin.java


Note: The org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. When distributing or using the code, please follow the license of the corresponding project. Do not repost without permission.