

Java DatanodeStorageInfo Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo. If you have been wondering what exactly DatanodeStorageInfo does, how to use it, or where to find working examples, the curated code samples below should help.


The DatanodeStorageInfo class belongs to the org.apache.hadoop.hdfs.server.blockmanagement package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
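Before diving into the examples, here is a minimal sketch of how a DatanodeStorageInfo is usually reached from a running NameNode. It mirrors the lookup chain used verbatim in Examples 6 and 11 below; the helper name is illustrative, and the call returns null if the NameNode has not yet learned of the storage.

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

// Illustrative helper: resolve a storage on a given datanode as seen by the
// NameNode, following the same chain the tests below use.
static DatanodeStorageInfo lookupStorage(NameNode nn, DatanodeID dnId,
    String storageUuid) {
  return nn.getNamesystem()
           .getBlockManager()
           .getDatanodeManager()
           .getDatanode(dnId)
           .getStorageInfo(storageUuid);
}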

Example 1: setLastBlock

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
@Override // BlockCollection, the file should be under construction
public BlockInfoContiguousUnderConstruction setLastBlock(
    BlockInfoContiguous lastBlock, DatanodeStorageInfo[] locations)
    throws IOException {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");

  if (numBlocks() == 0) {
    throw new IOException("Failed to set last block: File is empty.");
  }
  BlockInfoContiguousUnderConstruction ucBlock =
    lastBlock.convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, locations);
  setBlock(numBlocks() - 1, ucBlock);
  return ucBlock;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: INodeFile.java
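Note the in-place swap: the completed last block is converted to an under-construction variant that records the expected replica storage locations, and the file's block list is updated at index numBlocks() - 1 to point at it.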

Example 2: BlockCommand

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist    blocks to be transferred 
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()]; 
  targets = new DatanodeInfo[blocks.length][];
  targetStorageTypes = new StorageType[blocks.length][];
  targetStorageIDs = new String[blocks.length][];

  for(int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: BlockCommand.java
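The constructor flattens the list of (block, targets) pairs into parallel arrays indexed by block: blocks[i], targets[i], targetStorageTypes[i], and targetStorageIDs[i] all describe the same transfer. A self-contained sketch of that pattern with hypothetical stand-in types (Pair is not a Hadoop class):

import java.util.List;

// Hypothetical stand-in for BlockTargetPair, used only to show the pattern.
class Pair {
  final String block;
  final String[] targets;
  Pair(String block, String[] targets) { this.block = block; this.targets = targets; }
}

// Flatten a list of pairs into parallel arrays: after the loop, blocks[i]
// and targets[i] describe the same transfer, as in BlockCommand above.
class ParallelArrays {
  final String[] blocks;
  final String[][] targets;

  ParallelArrays(List<Pair> pairs) {
    blocks = new String[pairs.size()];
    targets = new String[pairs.size()][];
    for (int i = 0; i < blocks.length; i++) {
      blocks[i] = pairs.get(i).block;
      targets[i] = pairs.get(i).targets;
    }
  }
}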

Example 3: createDatanodeStorageInfos

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
    int n, String[] racks, String[] hostnames, StorageType[] types) {
  DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
  // storageID and ip use the 1-based value of i; after the decrement,
  // rack/hostname/type index their arrays 0-based.
  for (int i = storages.length; i > 0; ) {
    final String storageID = "s" + i;
    final String ip = i + "." + i + "." + i + "." + i;
    i--;
    final String rack = (racks != null && i < racks.length) ? racks[i] : "defaultRack";
    final String hostname = (hostnames != null && i < hostnames.length) ? hostnames[i] : "host";
    final StorageType type = (types != null && i < types.length) ? types[i]
        : StorageType.DEFAULT;
    storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
        type);
  }
  return storages;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: DFSTestUtil.java

Example 4: getExpectedPrimaryNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoContiguousUnderConstruction);
  BlockInfoContiguousUnderConstruction ucBlock =
    (BlockInfoContiguousUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: DFSTestUtil.java
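The selection loop above is a plain argmax over the expected storage locations, keyed by each owning datanode's last heartbeat time. The same scan in isolation, with plain longs standing in for the storages:

// Self-contained sketch of the argmax scan above: return the index of the
// most recent heartbeat timestamp. Plain longs stand in for
// storages[i].getDatanodeDescriptor().getLastUpdateMonotonic().
static int mostRecentlyHeartbeated(long[] lastUpdates) {
  int best = 0;
  for (int i = 1; i < lastUpdates.length; i++) {
    if (lastUpdates[i] > lastUpdates[best]) {
      best = i;
    }
  }
  return best;
}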

Example 5: chooseTarget

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    // Deliberate delay to widen the race window under test.
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    // Ignored: best-effort delay in a test stub.
  }
  return results;
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestDeleteRace.java
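This placement policy exists only to widen a race window: it delegates to the default chooseTarget and then sleeps for three seconds before returning the chosen storages, giving a concurrent delete a chance to interleave with block allocation; hence the enclosing TestDeleteRace.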

Example 6: testNnLearnsNewStorages

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test (timeout=60000)
public void testNnLearnsNewStorages()
    throws IOException, InterruptedException {

  // Generate a report for a fake block on a fake storage.
  final String newStorageUuid = UUID.randomUUID().toString();
  final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
  StorageReceivedDeletedBlocks[] reports = makeReportForReceivedBlock(
      getDummyBlock(), newStorage);

  // Send the report to the NN.
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);

  // Make sure that the NN has learned of the new storage.
  DatanodeStorageInfo storageInfo = cluster.getNameNode()
                                           .getNamesystem()
                                           .getBlockManager()
                                           .getDatanodeManager()
                                           .getDatanode(dn0.getDatanodeId())
                                           .getStorageInfo(newStorageUuid);
  assertNotNull(storageInfo);
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestIncrementalBrVariations.java
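Note that this version sends the report and immediately queries the DatanodeManager. In later Hadoop versions incremental block reports are processed asynchronously, which is why the newer variant of this test in Example 11 also calls flushBlockOps() on the BlockManager before asserting.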

Example 7: chooseTargetForNewBlock

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
static DatanodeStorageInfo[] chooseTargetForNewBlock(
    BlockManager bm, String src, DatanodeInfo[] excludedNodes, String[]
    favoredNodes, ValidateAddBlockResult r) throws IOException {
  Node clientNode = bm.getDatanodeManager()
      .getDatanodeByHost(r.clientMachine);
  if (clientNode == null) {
    clientNode = getClientNode(bm, r.clientMachine);
  }

  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<>(excludedNodes.length);
    Collections.addAll(excludedNodesSet, excludedNodes);
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);

  // choose targets for the new block to be allocated.
  return bm.chooseTarget4NewBlock(src, r.numTargets, clientNode,
                                  excludedNodesSet, r.blockSize,
                                  favoredNodesList, r.storagePolicyID,
                                  r.isStriped);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: FSDirWriteFileOp.java

Example 8: createDatanodeStorageInfos

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
    int n, String[] racks, String[] hostnames, StorageType[] types) {
  DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
  // storageID and ip use the 1-based value of i; after the decrement,
  // rack/hostname/type index their arrays 0-based.
  for (int i = storages.length; i > 0; ) {
    final String storageID = "s" + i;
    final String ip = i + "." + i + "." + i + "." + i;
    i--;
    final String rack = (racks != null && i < racks.length) ? racks[i] : "defaultRack";
    final String hostname = (hostnames != null && i < hostnames.length) ? hostnames[i] : "host";
    final StorageType type = (types != null && i < types.length) ? types[i]
        : StorageType.DEFAULT;
    storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
        type, null);
  }
  return storages;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 17, Source: DFSTestUtil.java

Example 9: getExpectedPrimaryNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock, !storedBlock.isComplete());
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = storedBlock
      .getUnderConstructionFeature().getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: DFSTestUtil.java
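Compare this with Example 4: in this newer codebase the under-construction state is no longer a separate BlockInfoContiguousUnderConstruction subclass. The test instead checks !storedBlock.isComplete() and reads the expected locations through the block's UnderConstructionFeature.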

Example 10: checkStripedBlockUC

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
private void checkStripedBlockUC(BlockInfoStriped block,
    boolean checkReplica) {
  assertEquals(0, block.numNodes());
  Assert.assertFalse(block.isComplete());
  Assert.assertEquals(StripedFileTestUtil.NUM_DATA_BLOCKS, block.getDataBlockNum());
  Assert.assertEquals(StripedFileTestUtil.NUM_PARITY_BLOCKS,
      block.getParityBlockNum());
  Assert.assertEquals(0,
      block.getBlockId() & HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);

  Assert.assertEquals(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
      block.getBlockUCState());
  if (checkReplica) {
    Assert.assertEquals(GROUP_SIZE,
        block.getUnderConstructionFeature().getNumExpectedLocations());
    DatanodeStorageInfo[] storages = block.getUnderConstructionFeature()
        .getExpectedStorageLocations();
    for (DataNode dn : cluster.getDataNodes()) {
      Assert.assertTrue(includeDataNode(dn.getDatanodeId(), storages));
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: TestAddStripedBlocks.java
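The masking assertion checks that the stored ID is the block group's base ID: in the striped layout a block group reserves a contiguous range of IDs, and the low bits index the internal blocks. A minimal sketch of that arithmetic; the mask value below is an assumption for illustration, the real constant lives in HdfsServerConstants:

// Illustrative block-group ID arithmetic; the mask value is assumed for the
// sketch, HDFS defines the real BLOCK_GROUP_INDEX_MASK in HdfsServerConstants.
static final long GROUP_INDEX_MASK = 0xF; // hypothetical: low 4 bits index the group

static long internalBlockId(long groupBaseId, int indexInGroup) {
  return groupBaseId | indexInGroup;        // the base ID has zero low bits
}

static boolean isGroupBaseId(long blockId) {
  return (blockId & GROUP_INDEX_MASK) == 0; // the check asserted in the test above
}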

Example 11: testNnLearnsNewStorages

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test (timeout=60000)
public void testNnLearnsNewStorages()
    throws IOException, InterruptedException {

  // Generate a report for a fake block on a fake storage.
  final String newStorageUuid = UUID.randomUUID().toString();
  final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
  StorageReceivedDeletedBlocks[] reports = DFSTestUtil.
      makeReportForReceivedBlock(getDummyBlock(), BlockStatus.RECEIVED_BLOCK,
          newStorage);

  // Send the report to the NN.
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
  // IBRs are async, make sure the NN processes all of them.
  cluster.getNamesystem().getBlockManager().flushBlockOps();
  // Make sure that the NN has learned of the new storage.
  DatanodeStorageInfo storageInfo = cluster.getNameNode()
                                           .getNamesystem()
                                           .getBlockManager()
                                           .getDatanodeManager()
                                           .getDatanode(dn0.getDatanodeId())
                                           .getStorageInfo(newStorageUuid);
  assertNotNull(storageInfo);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 33, Source: TestIncrementalBrVariations.java

Example 12: testProcessErasureCodingTasksSubmitionShouldSucceed

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
@Test
public void testProcessErasureCodingTasksSubmitionShouldSucceed()
    throws Exception {
  DataNode dataNode = cluster.dataNodes.get(0).datanode;

  // Pack invalid (dummy) parameters into ecTasks. Regardless of the parameters,
  // submitting each task to the thread pool should succeed, so that an exception
  // from one task does not prevent the remaining tasks in the list from being
  // processed.
  int size = cluster.dataNodes.size();
  byte[] liveIndices = new byte[size];
  DatanodeInfo[] dataDNs = new DatanodeInfo[size + 1];
  DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil
      .newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
          new DatanodeStorage("s01"));
  DatanodeStorageInfo[] dnStorageInfo = new DatanodeStorageInfo[] {
      targetDnInfos_1 };

  BlockECRecoveryInfo invalidECInfo = new BlockECRecoveryInfo(
      new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices,
      ErasureCodingPolicyManager.getSystemDefaultPolicy());
  List<BlockECRecoveryInfo> ecTasks = new ArrayList<BlockECRecoveryInfo>();
  ecTasks.add(invalidECInfo);
  dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 25, Source: TestRecoverStripedFile.java
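The property under test, that one bad task must not block the rest, is the usual per-task isolation pattern: submit each task separately and handle failures individually. A generic sketch of the pattern, independent of the erasure-coding types (TaskSubmitter is a made-up class, not Hadoop API):

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Generic per-task isolation: a failure while submitting one task is caught
// and logged so the remaining tasks still get processed.
class TaskSubmitter {
  private final ExecutorService pool = Executors.newFixedThreadPool(4);

  void processAll(List<Runnable> tasks) {
    for (Runnable task : tasks) {
      try {
        pool.submit(task);
      } catch (RuntimeException e) {
        // Log and continue; one invalid task must not starve the others.
        System.err.println("skipping task: " + e);
      }
    }
  }
}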

Example 13: waitForBlockReport

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
private boolean waitForBlockReport(final DataNode dn,
    final DatanodeDescriptor dnd) throws Exception {
  final DatanodeStorageInfo storage = dnd.getStorageInfos()[0];
  final long lastCount = storage.getBlockReportCount();
  dn.triggerBlockReport(
      new BlockReportOptions.Factory().setIncremental(false).build());
  try {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return lastCount != storage.getBlockReportCount();
      }
    }, 10, 100);
  } catch (TimeoutException te) {
    return false;
  }
  return true;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source: TestDatanodeRegistration.java
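GenericTestUtils.waitFor takes the condition, a polling interval, and a total timeout, both in milliseconds. The helper therefore forces a full (non-incremental) block report and then polls the storage's block-report counter every 10 ms for up to 100 ms, mapping a TimeoutException to false instead of failing the test outright.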

Example 14: setLastBlock

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
@Override // BlockCollection, the file should be under construction
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
    DatanodeStorageInfo[] locations) throws IOException {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");

  if (numBlocks() == 0) {
    throw new IOException("Failed to set last block: File is empty.");
  }
  BlockInfoUnderConstruction ucBlock =
    lastBlock.convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, locations);
  ucBlock.setBlockCollection(this);
  setBlock(numBlocks() - 1, ucBlock);
  return ucBlock;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 17, Source: INodeFile.java
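This CDH 5.4.3 variant of the setLastBlock method in Example 1 predates the BlockInfoContiguous renaming and additionally calls ucBlock.setBlockCollection(this) to back-link the under-construction block to its file; the newer code in Example 1 no longer makes that explicit call.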

Example 15: getExpectedPrimaryNode

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; // import the required package/class
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 29, Source: DFSTestUtil.java


Note: The org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Refer to each project's license before distributing or reusing the code. Do not republish without permission.