

Java DatanodeDescriptor Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor. If you are unsure what the DatanodeDescriptor class does or how to use it, the curated examples below should help.


The DatanodeDescriptor class belongs to the org.apache.hadoop.hdfs.server.blockmanagement package. Fifteen code examples of the class are shown below, ordered by popularity by default.

Example 1: checkPendingCachedEmpty

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
/**
 * Check that the NameNode is not attempting to cache anything.
 */
private void checkPendingCachedEmpty(MiniDFSCluster cluster)
    throws Exception {
  cluster.getNamesystem().readLock();
  try {
    final DatanodeManager datanodeManager =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    for (DataNode dn : cluster.getDataNodes()) {
      DatanodeDescriptor descriptor =
          datanodeManager.getDatanode(dn.getDatanodeId());
      Assert.assertTrue("Pending cached list of " + descriptor +
              " is not empty, "
              + Arrays.toString(descriptor.getPendingCached().toArray()), 
          descriptor.getPendingCached().isEmpty());
    }
  } finally {
    cluster.getNamesystem().readUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestCacheDirectives.java
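
A minimal harness around this helper might look like the following sketch; the configuration, node count, and cleanup are our own choices and not part of the original TestCacheDirectives.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
  cluster.waitActive();              // all datanodes registered with the NameNode
  checkPendingCachedEmpty(cluster);  // nothing cached yet, so nothing pending
} finally {
  cluster.shutdown();
}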

Example 2: datanodeReport

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
DatanodeInfo[] datanodeReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();      
    final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);

    DatanodeInfo[] arr = new DatanodeInfo[results.size()];
    for (int i=0; i<arr.length; i++) {
      arr[i] = new DatanodeInfo(results.get(i));
    }
    return arr;
  } finally {
    readUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: FSNamesystem.java
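
On the client side this report surfaces through DistributedFileSystem#getDataNodeStats, which reaches the method above over ClientProtocol. A hedged sketch, assuming conf points at the target cluster:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
for (DatanodeInfo node : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
  System.out.println(node.getHostName() + " capacity=" + node.getCapacity());
}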

Example 3: getDatanodeStorageReport

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();      
    final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);

    DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
    for (int i = 0; i < reports.length; i++) {
      final DatanodeDescriptor d = datanodes.get(i);
      reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
          d.getStorageReports());
    }
    return reports;
  } finally {
    readUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: FSNamesystem.java
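
The storage-level report is reachable from tests through the DFSClient; a sketch under the assumption that dfs is a DistributedFileSystem handle on the cluster:

import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

DatanodeStorageReport[] reports =
    dfs.getClient().getDatanodeStorageReport(DatanodeReportType.LIVE);
for (DatanodeStorageReport r : reports) {
  System.out.println(r.getDatanodeInfo().getHostName());
  for (StorageReport s : r.getStorageReports()) {
    System.out.println("  " + s.getStorage().getStorageID()
        + " remaining=" + s.getRemaining());
  }
}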

Example 4: getDeadNodes

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
/**
 * The returned information is a JSON representation of a map with host names
 * as keys; each value is a map from dead-node attribute names to their values.
 */
@Override // NameNodeMXBean
public String getDeadNodes() {
  final Map<String, Map<String, Object>> info = 
    new HashMap<String, Map<String, Object>>();
  final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
  blockManager.getDatanodeManager().fetchDatanodes(null, dead, true);
  for (DatanodeDescriptor node : dead) {
    Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
        .put("lastContact", getLastContact(node))
        .put("decommissioned", node.isDecommissioned())
        .put("xferaddr", node.getXferAddr())
        .build();
    info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo);
  }
  return JSON.toString(info);
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: FSNamesystem.java

Example 5: getDecomNodes

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
/**
 * The returned information is a JSON representation of a map with host names
 * as keys; each value is a map from decommissioning-node attribute names to
 * their values.
 */
@Override // NameNodeMXBean
public String getDecomNodes() {
  final Map<String, Map<String, Object>> info = 
    new HashMap<String, Map<String, Object>>();
  final List<DatanodeDescriptor> decomNodeList = blockManager.getDatanodeManager(
      ).getDecommissioningNodes();
  for (DatanodeDescriptor node : decomNodeList) {
    Map<String, Object> innerinfo = ImmutableMap
        .<String, Object> builder()
        .put("xferaddr", node.getXferAddr())
        .put("underReplicatedBlocks",
            node.decommissioningStatus.getUnderReplicatedBlocks())
        .put("decommissionOnlyReplicas",
            node.decommissioningStatus.getDecommissionOnlyReplicas())
        .put("underReplicateInOpenFiles",
            node.decommissioningStatus.getUnderReplicatedInOpenFiles())
        .build();
    info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo);
  }
  return JSON.toString(info);
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: FSNamesystem.java
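
Since getDeadNodes (Example 4) and getDecomNodes are both NameNodeMXBean attributes, the same JSON strings can be read over JMX. A sketch, assuming the code runs inside the NameNode's JVM (a remote NameNode would need a JMXConnector first) and that the bean is registered under its usual name:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName nnInfo = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
String deadNodes  = (String) mbs.getAttribute(nnInfo, "DeadNodes");
String decomNodes = (String) mbs.getAttribute(nnInfo, "DecomNodes");
System.out.println("dead: " + deadNodes);
System.out.println("decommissioning: " + decomNodes);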

Example 6: testCreateInvalidTopology

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
@Test
public void testCreateInvalidTopology() throws Exception {
  NetworkTopology invalCluster = new NetworkTopology();
  DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
  };
  invalCluster.add(invalDataNodes[0]);
  invalCluster.add(invalDataNodes[1]);
  try {
    invalCluster.add(invalDataNodes[2]);
    fail("expected InvalidTopologyException");
  } catch (NetworkTopology.InvalidTopologyException e) {
    assertTrue(e.getMessage().startsWith("Failed to add "));
    assertTrue(e.getMessage().contains(
        "You cannot have a rack and a non-rack node at the same " +
        "level of the network topology."));
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestNetworkTopology.java
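
For contrast, a topology in which every DatanodeDescriptor sits at rack depth is accepted. This counterpart sketch (the rack names are our own) should pass:

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.net.NetworkTopology;
import static org.junit.Assert.assertEquals;

NetworkTopology topo = new NetworkTopology();
topo.add(DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"));
topo.add(DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r2"));
// all leaves are under racks, so no InvalidTopologyException is thrown
assertEquals(2, topo.getNumOfRacks());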

Example 7: getExpectedPrimaryNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoContiguousUnderConstruction);
  BlockInfoContiguousUnderConstruction ucBlock =
    (BlockInfoContiguousUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: DFSTestUtil.java
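
A typical way to exercise this helper is against a file that is still open for write, since only an under-construction block has expected storage locations. A sketch, assuming fs and nn come from a running MiniDFSCluster and the path is a hypothetical test path:

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

Path path = new Path("/test-recovery");
FSDataOutputStream out = fs.create(path);
out.write(new byte[4096]);
out.hflush();  // data reaches the pipeline; the block stays under construction
ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, path);
DatanodeDescriptor primary = DFSTestUtil.getExpectedPrimaryNode(nn, blk);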

Example 8: checkFailuresAtNameNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
/**
 * Checks NameNode tracking of a particular DataNode for correct reporting of
 * failed volumes.
 *
 * @param dm DatanodeManager to check
 * @param dn DataNode to check
 * @param expectCapacityKnown if true, then expect that the capacities of the
 *     volumes were known before the failures, and therefore the lost capacity
 *     can be reported
 * @param expectedFailedVolumes expected locations of failed volumes
 * @throws Exception if there is any failure
 */
private void checkFailuresAtNameNode(DatanodeManager dm, DataNode dn,
    boolean expectCapacityKnown, String... expectedFailedVolumes)
    throws Exception {
  DatanodeDescriptor dd = cluster.getNamesystem().getBlockManager()
      .getDatanodeManager().getDatanode(dn.getDatanodeId());
  assertEquals(expectedFailedVolumes.length, dd.getVolumeFailures());
  VolumeFailureSummary volumeFailureSummary = dd.getVolumeFailureSummary();
  if (expectedFailedVolumes.length > 0) {
    assertArrayEquals(expectedFailedVolumes, volumeFailureSummary
        .getFailedStorageLocations());
    assertTrue(volumeFailureSummary.getLastVolumeFailureDate() > 0);
    long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown,
        expectedFailedVolumes.length);
    assertEquals(expectedCapacityLost,
        volumeFailureSummary.getEstimatedCapacityLostTotal());
  } else {
    assertNull(volumeFailureSummary);
  }
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestDataNodeVolumeFailureReporting.java
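
A call site might look like the following sketch; the failure injection itself is elided, and the "data1" directory name is an assumption about the MiniDFSCluster layout.

import java.io.File;

// after making one volume of datanode 0 unwritable and letting a heartbeat
// carry the failure report to the NameNode:
DatanodeManager dm =
    cluster.getNamesystem().getBlockManager().getDatanodeManager();
DataNode dn = cluster.getDataNodes().get(0);
String failedDir = new File(cluster.getDataDirectory(), "data1").getAbsolutePath();
checkFailuresAtNameNode(dm, dn, true, failedDir);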

Example 9: getDeadNodes

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
/**
 * The returned information is a JSON representation of a map with host names
 * as keys; each value is a map from dead-node attribute names to their values.
 */
@Override // NameNodeMXBean
public String getDeadNodes() {
  final Map<String, Map<String, Object>> info = 
    new HashMap<String, Map<String, Object>>();
  final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
  blockManager.getDatanodeManager().fetchDatanodes(null, dead, false);
  for (DatanodeDescriptor node : dead) {
    Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
        .put("lastContact", getLastContact(node))
        .put("decommissioned", node.isDecommissioned())
        .put("xferaddr", node.getXferAddr())
        .build();
    info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo);
  }
  return JSON.toString(info);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 21, Source: FSNamesystem.java

Example 10: getExpectedPrimaryNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock, !storedBlock.isComplete());
  // We expect that the replica with the most recent heartbeat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = storedBlock
      .getUnderConstructionFeature().getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: DFSTestUtil.java

Example 11: waitFirstBRCompleted

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
/** Wait until the given namenode has received a first block report from every datanode. */
public void waitFirstBRCompleted(int nnIndex, int timeout) throws
    IOException, TimeoutException, InterruptedException {
  if (namenodes.size() == 0 || getNN(nnIndex) == null || getNN(nnIndex).nameNode == null) {
    return;
  }

  final FSNamesystem ns = getNamesystem(nnIndex);
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      List<DatanodeDescriptor> nodes = dm.getDatanodeListForReport
          (DatanodeReportType.LIVE);
      for (DatanodeDescriptor node : nodes) {
        if (!node.checkBlockReportReceived()) {
          return false;
        }
      }
      return true;
    }
  }, 100, timeout);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: MiniDFSCluster.java
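
Callers use this method as a startup barrier; a minimal sketch (the node count and timeout are our own choices):

MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
// block until namenode 0 has a first full block report from every live
// datanode, or fail after 60 seconds (the timeout is in milliseconds)
cluster.waitFirstBRCompleted(0, 60000);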

Example 12: testPlacementWithLocalRackNodesDecommissioned

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
/**
 * Verify that decommissioned nodes are not selected.
 */
@Test
public void testPlacementWithLocalRackNodesDecommissioned() throws Exception {
  String clientMachine = "client.foo.com";
  // Map client to RACK3
  String clientRack = "/RACK3";
  StaticMapping.addNodeToRack(clientMachine, clientRack);
  final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
  DatanodeDescriptor dnd3 = dnm.getDatanode(
      cluster.getDataNodes().get(3).getDatanodeId());
  assertEquals(dnd3.getNetworkLocation(), clientRack);
  dnm.getDecomManager().startDecommission(dnd3);
  try {
    testPlacement(clientMachine, clientRack, false);
  } finally {
    dnm.getDecomManager().stopDecommission(dnd3);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 21, Source: TestDefaultBlockPlacementPolicy.java

Example 13: waitForBlockReport

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
private boolean waitForBlockReport(final DataNode dn,
    final DatanodeDescriptor dnd) throws Exception {
  final DatanodeStorageInfo storage = dnd.getStorageInfos()[0];
  final long lastCount = storage.getBlockReportCount();
  dn.triggerBlockReport(
      new BlockReportOptions.Factory().setIncremental(false).build());
  try {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return lastCount != storage.getBlockReportCount();
      }
    }, 10, 100);
  } catch (TimeoutException te) {
    return false;
  }
  return true;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source: TestDatanodeRegistration.java
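
A caller pairs the DataNode with its NameNode-side descriptor and asserts on the result; a sketch assuming cluster is the test's MiniDFSCluster:

import static org.junit.Assert.assertTrue;

DataNode dn = cluster.getDataNodes().get(0);
DatanodeDescriptor dnd = cluster.getNamesystem().getBlockManager()
    .getDatanodeManager().getDatanode(dn.getDatanodeId());
assertTrue("expected a new full block report", waitForBlockReport(dn, dnd));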

Example 14: getNumDecomLiveDataNodes

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
@Override // FSNamesystemMBean
public int getNumDecomLiveDataNodes() {
  final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
  int liveDecommissioned = 0;
  for (DatanodeDescriptor node : live) {
    liveDecommissioned += node.isDecommissioned() ? 1 : 0;
  }
  return liveDecommissioned;
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: FSNamesystem.java

Example 15: getNumDecomDeadDataNodes

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; // import the required package/class
@Override // FSNamesystemMBean
public int getNumDecomDeadDataNodes() {
  final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
  getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true);
  int deadDecommissioned = 0;
  for (DatanodeDescriptor node : dead) {
    deadDecommissioned += node.isDecommissioned() ? 1 : 0;
  }
  return deadDecommissioned;
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: FSNamesystem.java
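
Both counters from Examples 14 and 15 are also directly callable on FSNamesystem in tests; a sketch against a freshly started cluster, where nothing has been decommissioned yet:

import static org.junit.Assert.assertEquals;

FSNamesystem fsn = cluster.getNamesystem();
assertEquals(0, fsn.getNumDecomLiveDataNodes());
assertEquals(0, fsn.getNumDecomDeadDataNodes());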


Note: the org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.