

Java DataNode.createDataNode Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNode.createDataNode. If you are unsure how DataNode.createDataNode is used in practice, the curated examples below should help. You can also explore the broader usage of org.apache.hadoop.hdfs.server.datanode.DataNode, the class that declares the method.


Six code examples of the DataNode.createDataNode method are shown below, sorted by popularity.
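Before the examples, here is a minimal, hypothetical sketch of the call itself. The standalone main wrapper and the data directory path are illustrative only, and a reachable NameNode (configured via fs.defaultFS) is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class CreateDataNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Illustrative local storage directory; "file:" is the only supported scheme.
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "file:///tmp/dn-data");
    // Instantiate the datanode, bind its ports, and register with the NameNode.
    DataNode dn = DataNode.createDataNode(new String[]{}, conf);
    try {
      dn.join(); // block until the datanode exits
    } finally {
      dn.shutdown();
    }
  }
}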

Example 1: restartDataNode

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class required by the method
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException if starting the new datanode fails
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, 
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
        addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
  }
  DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
  dataNodes.add(new DataNodeProperties(
      newDn, newconf, args, secureResources, newDn.getIpcPort()));
  numDataNodes++;
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: MiniDFSCluster.java
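As a usage note, this restart path is typically driven from a test roughly as follows; a hedged sketch using the surrounding MiniDFSCluster API (cluster is assumed to be a running MiniDFSCluster):

// Stop the first datanode, restart it on the same ports, and wait
// for the cluster to settle.
MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
assertTrue(cluster.restartDataNode(dnprop, true /* keepPort */));
cluster.waitActive();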

Example 2: testDataDirectories

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class required by the method
/**
 * Test that a data-node does not start if configuration specifies
 * incorrect URI scheme in data directory.
 * Test that a data-node starts if data directory is specified as
 * URI = "file:///path" or as a non URI path.
 */
@Test
public void testDataDirectories() throws IOException {
  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  // 1. Test unsupported scheme. Only "file:" is supported.
  String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
    fail();
  } catch(Exception e) {
    // expecting exception here
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
  }
  assertNull("Data-node startup should have failed.", dn);

  // 2. Test "file:" schema and no schema (path-only). Both should work.
  String dnDir1 = fileAsURI(dataDir).toString() + "1";
  String dnDir2 = makeURI("file", "localhost",
                  fileAsURI(dataDir).getPath() + "2");
  String dnDir3 = dataDir.getAbsolutePath() + "3";
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
              dnDir1 + "," + dnDir2 + "," + dnDir3);
  try {
    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    assertTrue("Data-node should startup.", cluster.isDataNodeUp());
  } finally {
    if (cluster != null) {
      cluster.shutdownDataNodes();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: TestDatanodeConfig.java
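The makeURI helper is defined elsewhere in TestDatanodeConfig and not shown on this page; a plausible minimal equivalent (a sketch, not the project's exact code) looks like:

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

// Hypothetical stand-in for the makeURI(scheme, host, path) helper used above.
private static String makeURI(String scheme, String host, String path)
    throws IOException {
  try {
    return new URI(scheme, host, path, null).toString();
  } catch (URISyntaxException e) {
    throw new IOException("Cannot construct URI with scheme " + scheme, e);
  }
}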

Example 3: startDataNode

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class required by the method
/**
 * Start the datanode.
 */
public DataNode startDataNode(int index, Configuration config) 
throws IOException {
  File dataNodeDir = new File(TEST_DATA_DIR, "data-" + index);
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());

  String[] args = new String[] {};
  // The datanode will modify the config with the ports it bound to
  return DataNode.createDataNode(args, config);
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestHDFSServerPorts.java
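Since the configuration may request ephemeral ports, a caller usually reads the actual bound ports back from the returned instance. A hedged follow-up fragment, reusing the getIpcPort and getXferAddress accessors seen in Example 1 (the surrounding names are illustrative):

// Discover the ports the datanode actually bound after startup.
DataNode dn = startDataNode(0, new HdfsConfiguration());
try {
  int ipcPort = dn.getIpcPort();                    // bound IPC (RPC) port
  InetSocketAddress xferAddr = dn.getXferAddress(); // bound data-transfer address
  System.out.println("ipc=" + ipcPort + " xfer=" + xferAddr);
} finally {
  dn.shutdown();
}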

Example 4: canStartDataNode

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class required by the method
/**
 * Check whether the datanode can be started.
 */
private boolean canStartDataNode(Configuration conf) throws IOException {
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException) {
      return false; // the requested port is already in use
    }
    throw e;
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
  }
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestHDFSServerPorts.java
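A hedged sketch of the negative case: point the datanode at an address that is already bound and expect false (nameNodePort is a placeholder for a port known to be in use, for example one held by the test's NameNode):

// Hypothetical bind-conflict check.
Configuration conflicting = new HdfsConfiguration(conf);
conflicting.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
    "127.0.0.1:" + nameNodePort); // port already held by another process
assertFalse(canStartDataNode(conflicting));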

Example 5: testDataDirectories

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class required by the method
/**
 * Test that a data-node does not start if configuration specifies
 * incorrect URI scheme in data directory.
 * Test that a data-node starts if data directory is specified as
 * URI = "file:///path" or as a non URI path.
 */
@Test
public void testDataDirectories() throws IOException {
  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  // 1. Test unsupported scheme. Only "file:" is supported.
  String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
    fail();
  } catch(Exception e) {
    // expecting exception here
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
  }
  assertNull("Data-node startup should have failed.", dn);

  // 2. Test "file:" ecPolicy and no ecPolicy (path-only). Both should work.
  String dnDir1 = fileAsURI(dataDir).toString() + "1";
  String dnDir2 = makeURI("file", "localhost",
                  fileAsURI(dataDir).getPath() + "2");
  String dnDir3 = dataDir.getAbsolutePath() + "3";
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
              dnDir1 + "," + dnDir2 + "," + dnDir3);
  try {
    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    assertTrue("Data-node should startup.", cluster.isDataNodeUp());
  } finally {
    if (cluster != null) {
      cluster.shutdownDataNodes();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 43, Source: TestDatanodeConfig.java
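This example repeats Example 2 from a different fork. The other helper both versions rely on, fileAsURI, converts a File into a file: URI; in mainline Hadoop it lives in org.apache.hadoop.hdfs.server.common.Util, and a minimal sketch of its behavior (not the project's exact code) is:

// Hypothetical stand-in: canonicalize a File and expose it as a file: URI.
private static URI fileAsURI(File f) throws IOException {
  return f.getCanonicalFile().toURI();
}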

Example 6: testMemlockLimit

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class required by the method
@Test(timeout=60000)
public void testMemlockLimit() throws Exception {
  assumeTrue(NativeIO.isAvailable());
  final long memlockLimit =
      NativeIO.POSIX.getCacheManipulator().getMemlockLimit();

  // Can't increase the memlock limit past the maximum.
  assumeTrue(memlockLimit != Long.MAX_VALUE);

  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
    makeURI("file", null, fileAsURI(dataDir).getPath()));
  long prevLimit = conf.
      getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
  DataNode dn = null;
  try {
    // Try starting the DN with limit configured to the ulimit
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        memlockLimit);
    dn = DataNode.createDataNode(new String[]{}, conf);
    dn.shutdown();
    dn = null;
    // Try starting the DN with a limit > ulimit
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        memlockLimit+1);
    try {
      dn = DataNode.createDataNode(new String[]{}, conf);
    } catch (RuntimeException e) {
      GenericTestUtils.assertExceptionContains(
          "more than the datanode's available RLIMIT_MEMLOCK", e);
    }
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        prevLimit);
  }
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: TestDatanodeConfig.java
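Related HDFS caching tests sidestep the RLIMIT_MEMLOCK restriction by installing a no-op cache manipulator through the same NativeIO hook this test queries; a hedged setup sketch, assuming the mainline NativeIO API (note that this particular test skips itself when the reported limit is unlimited, which is what the no-op manipulator reports):

// Swap in a manipulator that skips actual mlock syscalls, so the
// memlock-limit code path can be exercised without elevated privileges.
NativeIO.POSIX.setCacheManipulator(
    new NativeIO.POSIX.NoMlockCacheManipulator());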


Note: The org.apache.hadoop.hdfs.server.datanode.DataNode.createDataNode examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community open-source projects, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Please do not reproduce this article without permission.