

Java DatanodeStorageInfo.getDatanodeDescriptor Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.getDatanodeDescriptor. If you have been wondering what DatanodeStorageInfo.getDatanodeDescriptor does, how to use it, or where to find examples of it, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.


Five code examples of DatanodeStorageInfo.getDatanodeDescriptor are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
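Before the full examples, here is a minimal sketch (not taken from any of the projects below; the class name PrimaryNodePicker is hypothetical) of the call pattern that Examples 1-4 share: walk an array of DatanodeStorageInfo, call getDatanodeDescriptor() on each, and keep the datanode whose descriptor reports the most recent heartbeat.

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

public class PrimaryNodePicker {
  /**
   * Return the descriptor of the datanode with the most recent
   * monotonic heartbeat among the given storage locations.
   * Assumes storages is non-empty.
   */
  static DatanodeDescriptor pickMostRecentlyHeartbeated(
      DatanodeStorageInfo[] storages) {
    DatanodeDescriptor best = storages[0].getDatanodeDescriptor();
    for (int i = 1; i < storages.length; i++) {
      DatanodeDescriptor candidate = storages[i].getDatanodeDescriptor();
      if (candidate.getLastUpdateMonotonic() > best.getLastUpdateMonotonic()) {
        best = candidate;
      }
    }
    return best;
  }
}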

Example 1: getExpectedPrimaryNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the package/class the method depends on
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoContiguousUnderConstruction);
  BlockInfoContiguousUnderConstruction ucBlock =
    (BlockInfoContiguousUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: DFSTestUtil.java
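A note on the design: as the in-code comment says, the NameNode expects the replica whose datanode has heartbeated most recently to coordinate block synchronization/recovery, since that node is the most likely to still be alive and reachable.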

Example 2: getExpectedPrimaryNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the package/class the method depends on
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock, !storedBlock.isComplete());
  // We expect that the replica with the most recent heartbeat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = storedBlock
      .getUnderConstructionFeature().getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: DFSTestUtil.java
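Note the API difference from Example 1: here the under-construction state is not a distinct BlockInfoContiguousUnderConstruction subclass. Instead, the code checks !storedBlock.isComplete() and reaches the expected storage locations through storedBlock.getUnderConstructionFeature(), reflecting a newer branch in which the under-construction metadata was moved into a feature object attached to BlockInfo.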

Example 3: getExpectedPrimaryNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the package/class the method depends on
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 29, Source: DFSTestUtil.java
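Unlike Examples 1 and 2, this older branch (and Example 4 below) compares wall-clock getLastUpdate() timestamps. Later Hadoop versions switched to getLastUpdateMonotonic(), which reads a monotonic clock, so an adjustment of the system clock cannot reorder which replica appears most recently heartbeated.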

Example 4: getExpectedPrimaryNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the package/class the method depends on
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
private DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 29, Source: TestPipelinesFailover.java

Example 5: getReplicaInfo

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the package/class the method depends on
/**
 * Display info of each replica for replication block.
 * For striped block group, display info of each internal block.
 */
private String getReplicaInfo(BlockInfo storedBlock) {
  if (!(showLocations || showRacks || showReplicaDetails)) {
    return "";
  }
  final boolean isComplete = storedBlock.isComplete();
  DatanodeStorageInfo[] storages = isComplete ?
      blockManager.getStorages(storedBlock) :
      storedBlock.getUnderConstructionFeature().getExpectedStorageLocations();
  StringBuilder sb = new StringBuilder(" [");

  for (int i = 0; i < storages.length; i++) {
    DatanodeStorageInfo storage = storages[i];
    DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor();
    if (showRacks) {
      sb.append(NodeBase.getPath(dnDesc));
    } else {
      sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(),
          storage.getStorageType()));
    }
    if (showReplicaDetails) {
      LightWeightHashSet<BlockInfo> blocksExcess =
          blockManager.excessReplicateMap.get(dnDesc.getDatanodeUuid());
      Collection<DatanodeDescriptor> corruptReplicas =
          blockManager.getCorruptReplicas(storedBlock);
      sb.append("(");
      if (dnDesc.isDecommissioned()) {
        sb.append("DECOMMISSIONED)");
      } else if (dnDesc.isDecommissionInProgress()) {
        sb.append("DECOMMISSIONING)");
      } else if (corruptReplicas != null
          && corruptReplicas.contains(dnDesc)) {
        sb.append("CORRUPT)");
      } else if (blocksExcess != null
          && blocksExcess.contains(storedBlock)) {
        sb.append("EXCESS)");
      } else if (dnDesc.isStale(this.staleInterval)) {
        sb.append("STALE_NODE)");
      } else if (storage.areBlockContentsStale()) {
        sb.append("STALE_BLOCK_CONTENT)");
      } else {
        sb.append("LIVE)");
      }
    }
    if (i < storages.length - 1) {
      sb.append(", ");
    }
  }
  sb.append(']');
  return sb.toString();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 55, Source: NamenodeFsck.java
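For reference, the string this method builds is a bracketed, comma-separated list with one entry per storage; when showReplicaDetails is set, each entry is suffixed with a replica state chosen by the if/else chain — DECOMMISSIONED, DECOMMISSIONING, CORRUPT, EXCESS, STALE_NODE, STALE_BLOCK_CONTENT, or LIVE — with earlier checks taking precedence, so for example a corrupt replica on a decommissioned node reports DECOMMISSIONED.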


Note: the org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.getDatanodeDescriptor method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, refer to each project's license. Do not republish without permission.