

Java NameNodeAdapter.getDatanode Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getDatanode. If you are wondering what NameNodeAdapter.getDatanode does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the broader usage of org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter itself.


The sections below present 5 code examples of the NameNodeAdapter.getDatanode method, drawn from open-source projects and ordered by popularity.
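Before the examples, here is a minimal, hypothetical sketch of the call pattern they all share: start a MiniDFSCluster in a test, then resolve a DataNode's NameNode-side DatanodeDescriptor. This assumes the HDFS test artifacts are on the classpath; the class name GetDatanodeSketch and the printed admin-state line are illustrative assumptions, not taken from any example below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

public class GetDatanodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      FSNamesystem fsn = cluster.getNamesystem();
      DataNode dn = cluster.getDataNodes().get(0);
      // Resolve the NameNode's view (DatanodeDescriptor) of this DataNode.
      DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
      System.out.println("Admin state: " + dnd.getAdminState());
    } finally {
      cluster.shutdown();
    }
  }
}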

Example 1: setDataNodeDead

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the class that declares the method
/**
 * Expire a DataNode heartbeat on the NameNode.
 * @param dnId ID of the DataNode whose heartbeat should be expired
 * @throws IOException
 */
public void setDataNodeDead(DatanodeID dnId) throws IOException {
  DatanodeDescriptor dnd =
      NameNodeAdapter.getDatanode(getNamesystem(), dnId);
  DFSTestUtil.setDatanodeDead(dnd);
  BlockManagerTestUtil.checkHeartbeat(getNamesystem().getBlockManager());
}
 
Developer: naver | Project: hadoop | Lines: 12 | Source: MiniDFSCluster.java
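A hypothetical usage of this helper from a test (assuming a running single-DataNode MiniDFSCluster named cluster and JUnit's assertEquals on the classpath) might look like:

// Expire the only DataNode's heartbeat, then verify the NameNode's
// live-node count drops to zero.
DataNode dn = cluster.getDataNodes().get(0);
cluster.setDataNodeDead(dn.getDatanodeId());
assertEquals(0, cluster.getNamesystem().getNumLiveDataNodes());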

Example 2: testClusterStats

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the class that declares the method
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);
  
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    
    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    
    DatanodeInfo decomInfo = decommissionNode(i, null, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    DataNode decomNode = getDataNode(decomInfo);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, decomInfo, decomNode, true);
    
    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
    DataNode retNode = getDataNode(decomInfo);
    waitNodeState(retInfo, AdminStates.NORMAL);
    verifyStats(namenode, fsn, retInfo, retNode, false);
  }
}
 
Developer: naver | Project: hadoop | Lines: 30 | Source: TestDecommission.java
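A note on the getDatanode call in this test: after refreshNodes clears the exclusion, the test re-fetches the NameNode-side descriptor for the node (retInfo) so that waitNodeState can poll its admin state until it returns to NORMAL.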

Example 3: testInvalidateBlock

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the class that declares the method
@Test
public void testInvalidateBlock() throws IOException {
  final Path file = new Path("/invalidate");
  final int length = 10;
  final byte[] bytes = StripedFileTestUtil.generateBytes(length);
  DFSTestUtil.writeFile(fs, file, bytes);

  int dnIndex = findFirstDataNode(file, cellSize * dataBlocks);
  Assert.assertNotEquals(-1, dnIndex);
  LocatedStripedBlock slb = (LocatedStripedBlock)fs.getClient()
      .getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
  final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(slb,
      cellSize, dataBlocks, parityBlocks);
  final Block b = blks[0].getBlock().getLocalBlock();

  DataNode dn = cluster.getDataNodes().get(dnIndex);
  // Disable heartbeats from the DN so that the invalidated-block record is
  // kept on the NameNode until the heartbeat expires and the NN marks the DN as dead.
  DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);

  try {
    // delete the file
    fs.delete(file, true);
    // check the block is added to invalidateBlocks
    final FSNamesystem fsn = cluster.getNamesystem();
    final BlockManager bm = fsn.getBlockManager();
    DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
    Assert.assertTrue(bm.containsInvalidateBlock(
        blks[0].getLocations()[0], b) || dnd.containsInvalidateBlock(b));
  } finally {
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
  }
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 34 | Source: TestReadStripedFileWithDecoding.java
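The getDatanode call is what makes the final assertion possible: the returned DatanodeDescriptor is the NameNode's live record for the DataNode, so the test can check NameNode-side invalidation state (dnd.containsInvalidateBlock) directly. Disabling heartbeats first matters because a heartbeat would let the NameNode dispatch the invalidation command and clear that record, making the check racy.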

Example 4: decommissionNode

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the class that declares the method
private DatanodeInfo decommissionNode(int nnIndex,
                                String datanodeUuid,
                                ArrayList<DatanodeInfo> decommissionedNodes,
                                AdminStates waitForState)
  throws IOException {
  DFSClient client = getDfsClient(cluster.getNameNode(nnIndex), conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

  //
  // pick one datanode randomly unless the caller specifies one.
  //
  int index = 0;
  if (datanodeUuid == null) {
    boolean found = false;
    while (!found) {
      index = myrand.nextInt(info.length);
      if (!info[index].isDecommissioned()) {
        found = true;
      }
    }
  } else {
    // The caller specifies a DN
    for (; index < info.length; index++) {
      if (info[index].getDatanodeUuid().equals(datanodeUuid)) {
        break;
      }
    }
    if (index == info.length) {
      throw new IOException("invalid datanodeUuid " + datanodeUuid);
    }
  }
  String nodename = info[index].getXferAddr();
  LOG.info("Decommissioning node: " + nodename);

  // write nodename into the exclude file.
  ArrayList<String> nodes = new ArrayList<String>();
  if (decommissionedNodes != null) {
    for (DatanodeInfo dn : decommissionedNodes) {
      nodes.add(dn.getName());
    }
  }
  nodes.add(nodename);
  writeConfigFile(excludeFile, nodes);
  refreshNodes(cluster.getNamesystem(nnIndex), conf);
  DatanodeInfo ret = NameNodeAdapter.getDatanode(
      cluster.getNamesystem(nnIndex), info[index]);
  waitNodeState(ret, waitForState);
  return ret;
}
 
Developer: naver | Project: hadoop | Lines: 50 | Source: TestDecommission.java
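Here getDatanode converts a client-side DatanodeInfo (one entry of the datanodeReport) into the NameNode's own DatanodeDescriptor. Because that descriptor is updated in place as decommissioning progresses, waitNodeState can simply poll the returned object until it reaches the requested AdminStates value.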

Example 5: testStorageWithRemainingCapacity

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the class that declares the method
/**
 * Tests that a namenode doesn't choose a datanode with full disks to 
 * store blocks.
 * @throws Exception
 */
@Test
public void testStorageWithRemainingCapacity() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = FileSystem.get(conf);
  Path file1 = null;
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg = DataNodeTestUtils
        .getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem,
        nodeReg);
    // By default, MiniDFSCluster creates 1 datanode with 2 storages.
    // Assign 64k of remaining capacity to each storage, then try to
    // create a 100k file.
    for (DatanodeStorageInfo storage : dd.getStorageInfos()) {
      storage.setUtilizationForTesting(65536, 0, 65536, 0);
    }
    // Sum of the remaining capacity of both storages.
    dd.setRemaining(131072);
    file1 = new Path("testRemainingStorage.dat");
    try {
      DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400, (short) 1,
          0x1BAD5EED);
    } catch (RemoteException re) {
      GenericTestUtils.assertExceptionContains("nodes instead of "
          + "minReplication", re);
    }
  } finally {
    // Clean up
    assertTrue(fs.exists(file1));
    fs.delete(file1, true);
    assertTrue(!fs.exists(file1));
    cluster.shutdown();
  }
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 47 | Source: TestBlockManager.java
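The arithmetic behind this test: each of the two storages reports 65536 bytes (64k) remaining, so the node's total remaining space is 131072 bytes (128k), yet the 102400-byte (100k) block cannot fit on any single storage. The NameNode therefore declines to place the block, and the create fails with a RemoteException containing "nodes instead of minReplication", which the test asserts.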


Note: the org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getDatanode examples in this article were compiled from open-source projects hosted on platforms such as GitHub. The code snippets remain the copyright of their original authors; consult each project's license before reusing or redistributing them.