

Java DataNode.shutdown Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown. If you are wondering what DataNode.shutdown does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.DataNode.


The following presents 15 code examples of the DataNode.shutdown method, sorted by popularity by default.
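Before the individual examples, here is a minimal sketch of the pattern most of them share: start a MiniDFSCluster in a test environment, pick one DataNode from the cluster, and call its shutdown() method. The class name, the number of DataNodes, and the plain main-method wrapper are illustrative assumptions rather than code from any of the listed projects; running it also requires the Hadoop HDFS test artifact that provides MiniDFSCluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DataNodeShutdownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: two DataNodes are enough to illustrate the call.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .build();
    try {
      cluster.waitActive();
      // Pick the first DataNode and shut it down, as many of the examples below do.
      DataNode dn = cluster.getDataNodes().get(0);
      dn.shutdown();
    } finally {
      cluster.shutdown(); // always release the mini cluster's resources
    }
  }
}

In the JUnit-based tests below, the same pattern usually lives inside a @Test method, with the cluster lifecycle handled in setup/teardown methods or a finally block.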

Example 1: stopDataNodes

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
private void stopDataNodes(BlockLocation[] locs, int[] datanodes)
    throws IOException {
  if (locs != null && locs.length > 0) {
    for (int failedDNIdx : datanodes) {
      String name = (locs[0].getNames())[failedDNIdx];
      for (DataNode dn : cluster.getDataNodes()) {
        int port = dn.getXferPort();
        if (name.contains(Integer.toString(port))) {
          dn.shutdown();
          cluster.setDataNodeDead(dn.getDatanodeId());
          LOG.info("stop datanode " + failedDNIdx);
          break;
        }
      }
    }
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 18, Source: TestReadStripedFileWithMissingBlocks.java

Example 2: shutdownDataNodes

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
/**
 * Shutdown all DataNodes started by this class.  The NameNode
 * is left running so that new DataNodes may be started.
 */
public void shutdownDataNodes() {
  for (int i = dataNodes.size()-1; i >= 0; i--) {
    LOG.info("Shutting down DataNode " + i);
    DataNode dn = dataNodes.remove(i).datanode;
    dn.shutdown();
    numDataNodes--;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source: MiniDFSCluster.java

Example 3: stopDataNode

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
public synchronized DataNodeProperties stopDataNode(int i) {
  if (i < 0 || i >= dataNodes.size()) {
    return null;
  }
  DataNodeProperties dnprop = dataNodes.remove(i);
  DataNode dn = dnprop.datanode;
  LOG.info("MiniDFSCluster Stopping DataNode " +
                     dn.getDisplayName() +
                     " from a total of " + (dataNodes.size() + 1) + 
                     " datanodes.");
  dn.shutdown();
  numDataNodes--;
  return dnprop;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source: MiniDFSCluster.java

Example 4: testEncryptedAppendRequiringBlockTransfer

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
@Test
public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    setEncryptionConfigKeys(conf);
    
    // start up 4 DNs
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    
    FileSystem fs = getFileSystem(conf);
    
    // Create a file with replication 3, so its block is on 3 / 4 DNs.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    
    // Shut down one of the DNs holding a block replica.
    FSDataInputStream in = fs.open(TEST_PATH);
    List<LocatedBlock> locatedBlocks = DFSTestUtil.getAllBlocks(in);
    in.close();
    assertEquals(1, locatedBlocks.size());
    assertEquals(3, locatedBlocks.get(0).getLocations().length);
    DataNode dn = cluster.getDataNode(locatedBlocks.get(0).getLocations()[0].getIpcPort());
    dn.shutdown();
    
    // Reopen the file for append, which will need to add another DN to the
    // pipeline and in doing so trigger a block transfer.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    
    fs.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 38, Source: TestEncryptedTransfer.java

Example 5: testDataDirectories

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
/**
 * Test that a data-node does not start if configuration specifies
 * incorrect URI scheme in data directory.
 * Test that a data-node starts if data directory is specified as
 * URI = "file:///path" or as a non URI path.
 */
@Test
public void testDataDirectories() throws IOException {
  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  // 1. Test unsupported schema. Only "file:" is supported.
  String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
    fail();
  } catch(Exception e) {
    // expecting exception here
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
  }
  assertNull("Data-node startup should have failed.", dn);

  // 2. Test "file:" schema and no schema (path-only). Both should work.
  String dnDir1 = fileAsURI(dataDir).toString() + "1";
  String dnDir2 = makeURI("file", "localhost",
                  fileAsURI(dataDir).getPath() + "2");
  String dnDir3 = dataDir.getAbsolutePath() + "3";
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
              dnDir1 + "," + dnDir2 + "," + dnDir3);
  try {
    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    assertTrue("Data-node should startup.", cluster.isDataNodeUp());
  } finally {
    if (cluster != null) {
      cluster.shutdownDataNodes();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source: TestDatanodeConfig.java

Example 6: canStartDataNode

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
/**
 * Check whether the datanode can be started.
 */
private boolean canStartDataNode(Configuration conf) throws IOException {
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
  } catch(IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  } finally {
    if(dn != null) dn.shutdown();
  }
  return true;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: TestHDFSServerPorts.java

Example 7: stopDataNode

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
private void stopDataNode(Path path, int failedDNIdx)
    throws IOException {
  BlockLocation[] locs = fs.getFileBlockLocations(path, 0, cellSize);
  if (locs != null && locs.length > 0) {
    String name = (locs[0].getNames())[failedDNIdx];
    for (DataNode dn : cluster.getDataNodes()) {
      int port = dn.getXferPort();
      if (name.contains(Integer.toString(port))) {
        dn.shutdown();
        break;
      }
    }
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 15, Source: TestWriteReadStripedFile.java

Example 8: testDeadNodeAsBlockTarget

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
@Test
public void testDeadNodeAsBlockTarget() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();

  String poolId = cluster.getNamesystem().getBlockPoolId();
  // wait for datanode to be marked live
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(cluster
      .getDataNodes().get(0), poolId);
  // Get the updated datanode descriptor
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  DatanodeManager dm = bm.getDatanodeManager();
  Node clientNode = dm.getDatanode(reg);

  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true,
      20000);

  // Shutdown and wait for datanode to be marked dead
  dn.shutdown();
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false,
      20000);
  // Get the updated datanode descriptor available in DNM
  // choose the targets, but local node should not get selected as this is not
  // part of the cluster anymore
  DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
      clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7,
      false);
  for (DatanodeStorageInfo datanodeStorageInfo : results) {
    assertFalse("Dead node should not be choosen", datanodeStorageInfo
        .getDatanodeDescriptor().equals(clientNode));
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 37, Source: TestDeadDatanode.java

Example 9: testDataDirectories

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
/**
 * Test that a data-node does not start if configuration specifies
 * incorrect URI scheme in data directory.
 * Test that a data-node starts if data directory is specified as
 * URI = "file:///path" or as a non URI path.
 */
@Test
public void testDataDirectories() throws IOException {
  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  // 1. Test unsupported schema. Only "file:" is supported.
  String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
    fail();
  } catch(Exception e) {
    // expecting exception here
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
  }
  assertNull("Data-node startup should have failed.", dn);

  // 2. Test "file:" ecPolicy and no ecPolicy (path-only). Both should work.
  String dnDir1 = fileAsURI(dataDir).toString() + "1";
  String dnDir2 = makeURI("file", "localhost",
                  fileAsURI(dataDir).getPath() + "2");
  String dnDir3 = dataDir.getAbsolutePath() + "3";
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
              dnDir1 + "," + dnDir2 + "," + dnDir3);
  try {
    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    assertTrue("Data-node should startup.", cluster.isDataNodeUp());
  } finally {
    if (cluster != null) {
      cluster.shutdownDataNodes();
    }
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 43, Source: TestDatanodeConfig.java

Example 10: testReadWithDNFailure

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
private void testReadWithDNFailure(int fileLength, int dnFailureNum)
    throws Exception {
  String fileType = fileLength < (blockSize * dataBlocks) ?
      "smallFile" : "largeFile";
  String src = "/dnFailure_" + dnFailureNum + "_" + fileType;
  LOG.info("testReadWithDNFailure: file = " + src
      + ", fileSize = " + fileLength
      + ", dnFailureNum = " + dnFailureNum);

  Path testPath = new Path(src);
  final byte[] bytes = StripedFileTestUtil.generateBytes(fileLength);
  DFSTestUtil.writeFile(fs, testPath, bytes);
  StripedFileTestUtil.waitBlockGroupsReported(fs, src);

  // shut down the DataNodes that hold internal data blocks
  BlockLocation[] locs = fs.getFileBlockLocations(testPath, cellSize * 5,
      cellSize);
  for (int failedDnIdx = 0; failedDnIdx < dnFailureNum; failedDnIdx++) {
    String name = (locs[0].getNames())[failedDnIdx];
    for (DataNode dn : cluster.getDataNodes()) {
      int port = dn.getXferPort();
      if (name.contains(Integer.toString(port))) {
        dn.shutdown();
      }
    }
  }

  // check file length, pread, stateful read and seek
  verifyRead(testPath, fileLength, bytes);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 31, Source: TestReadStripedFileWithDecoding.java

Example 11: testAppendInsufficientLocations

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();

  // lower heartbeat interval for fast recognition of DN
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
      .build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Check for replications
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = fileSystem.dfs.getNamenode().
        getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().
        getLocations();
    for( DataNode dn : dnsOfCluster) {
      for(DatanodeInfo loc: dnsWithLocations) {
        if(dn.getDatanodeId().equals(loc)){
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }

    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

    // Append to the file; at this point the remaining live DNs do not
    // have the block.
    try{
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    } catch (IOException e){
      LOG.info("Expected exception: ", e);
    }
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.
        valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 66, Source: TestFileAppend4.java

Example 12: testDatanodeReport

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
/**
 * This test exercises the different types of datanode report.
 */
@Test
public void testDatanodeReport() throws Exception {
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  MiniDFSCluster cluster = 
    new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
  try {
    //wait until the cluster is up
    cluster.waitActive();
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final List<DataNode> datanodes = cluster.getDataNodes();
    final DFSClient client = cluster.getFileSystem().dfs;

    assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
    assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
    assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);

    // bring down one datanode
    final DataNode last = datanodes.get(datanodes.size() - 1);
    LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
    last.shutdown();

    DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    while (nodeInfo.length != 1) {
      try {
        Thread.sleep(500);
      } catch (Exception e) {
      }
      nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    }

    assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
    assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
    assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);

    Thread.sleep(5000);
    assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 46, Source: TestDatanodeReport.java

Example 13: testMemlockLimit

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
@Test(timeout=60000)
public void testMemlockLimit() throws Exception {
  assumeTrue(NativeIO.isAvailable());
  final long memlockLimit =
      NativeIO.POSIX.getCacheManipulator().getMemlockLimit();

  // Can't increase the memlock limit past the maximum.
  assumeTrue(memlockLimit != Long.MAX_VALUE);

  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
    makeURI("file", null, fileAsURI(dataDir).getPath()));
  long prevLimit = conf.
      getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
  DataNode dn = null;
  try {
    // Try starting the DN with limit configured to the ulimit
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        memlockLimit);
    dn = DataNode.createDataNode(new String[]{},  conf);
    dn.shutdown();
    dn = null;
    // Try starting the DN with a limit > ulimit
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        memlockLimit+1);
    try {
      dn = DataNode.createDataNode(new String[]{}, conf);
    } catch (RuntimeException e) {
      GenericTestUtils.assertExceptionContains(
          "more than the datanode's available RLIMIT_MEMLOCK", e);
    }
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        prevLimit);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 42, Source: TestDatanodeConfig.java

Example 14: stopDataNode

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
/**
 * Stop the datanode.
 */
public void stopDataNode(DataNode dn) {
  if (dn != null) {
    dn.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source: TestHDFSServerPorts.java

Example 15: testAppendInsufficientLocations

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class the method depends on
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();

  // lower heartbeat interval for fast recognition of DN
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
      .build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Check for replications
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = fileSystem.dfs.getNamenode().
        getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().
        getLocations();
    for( DataNode dn : dnsOfCluster) {
      for(DatanodeInfo loc: dnsWithLocations) {
        if(dn.getDatanodeId().equals(loc)){
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }

    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

    // Append to the file; at this point the remaining live DNs do not
    // have the block.
    try{
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    } catch (IOException e){
      LOG.info("Expected exception: ", e);
    }
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.
        valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 66, Source: TestFileAppend4.java


Note: The org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.