

Java LocatedBlocks.get Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.LocatedBlocks.get. If you are wondering what LocatedBlocks.get does, how to call it, or where to find examples of it, the curated code samples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.hdfs.protocol.LocatedBlocks.


The following shows 15 code examples of the LocatedBlocks.get method, sorted by popularity by default.
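Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: obtain a LocatedBlocks object for a file (here via the DFS client) and call get(index) to inspect one LocatedBlock at a time. This is only an illustration; the path /tmp/example.txt is hypothetical and the sketch assumes an HDFS cluster reachable through the default Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlocksGetExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a running HDFS cluster and that the
    // (hypothetical) file /tmp/example.txt already exists.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    String src = new Path("/tmp/example.txt").toUri().getPath();

    // Ask the NameNode for the block locations of the whole file.
    LocatedBlocks blocks = dfs.getClient().getLocatedBlocks(src, 0, Long.MAX_VALUE);

    // LocatedBlocks.get(i) returns the i-th LocatedBlock of the file.
    for (int i = 0; i < blocks.locatedBlockCount(); i++) {
      LocatedBlock lb = blocks.get(i);
      System.out.println("block " + lb.getBlock().getBlockName()
          + " at offset " + lb.getStartOffset());
      for (DatanodeInfo dn : lb.getLocations()) {
        System.out.println("  replica on " + dn.getHostName());
      }
    }
  }
}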

Example 1: createAFileWithCorruptedBlockReplicas

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // corrupt some/all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);

  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source file: TestClientReportBadBlock.java

Example 2: getBlockLocations

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
/**
 * Get block location info about file
 * 
 * getBlockLocations() returns a list of hostnames that store 
 * data for a specific file region.  It returns a set of hostnames
 * for every block within the indicated region.
 *
 * This function is very useful when writing code that considers
 * data-placement when performing operations.  For example, the
 * MapReduce system tries to schedule tasks on the same machines
 * as the data-block the task processes. 
 */
public BlockLocation[] getBlockLocations(String src, long start, 
      long length) throws IOException, UnresolvedLinkException {
  TraceScope scope = getPathTraceScope("getBlockLocations", src);
  try {
    LocatedBlocks blocks = getLocatedBlocks(src, start, length);
    BlockLocation[] locations =  DFSUtil.locatedBlocks2Locations(blocks);
    HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
    for (int i = 0; i < locations.length; i++) {
      hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
    }
    return hdfsLocations;
  } finally {
    scope.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source file: DFSClient.java
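Example 2 is the DFSClient-internal implementation. Application code normally reaches the same information through the public FileSystem API, which for an HDFS path ultimately delegates to the method above. A minimal sketch, again assuming a hypothetical file /tmp/example.txt on a reachable cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/example.txt"); // hypothetical path

    FileStatus status = fs.getFileStatus(file);
    // For an HDFS path this call ends up in DFSClient.getBlockLocations,
    // i.e. the method shown in Example 2.
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation loc : locations) {
      System.out.println("offset=" + loc.getOffset()
          + " length=" + loc.getLength()
          + " hosts=" + String.join(",", loc.getHosts()));
    }
  }
}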

Example 3: makeBadBlockList

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
    goodLocatedBlock.getBlock(),
    new DatanodeInfo[] {
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
    },
    goodLocatedBlock.getStartOffset(),
    false);


  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
                           badBlocks, null, true,
                           null);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source file: TestDFSClientRetries.java

Example 4: createAFileWithCorruptedBlockReplicas

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // corrupt some/all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    cluster.corruptReplica(dn, block);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);

  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 27, Source file: TestClientReportBadBlock.java

Example 5: makeBadBlockList

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
    goodLocatedBlock.getBlock(),
    new DatanodeInfo[] {
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
    });
  badLocatedBlock.setStartOffset(goodLocatedBlock.getStartOffset());


  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
                           badBlocks, null, true,
                           null, null);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 17, Source file: TestDFSClientRetries.java

Example 6: verifyFirstBlockCorrupted

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
/**
 * Verify the first block of the file is corrupted (for all its replica).
 */
private void verifyFirstBlockCorrupted(Path filePath, boolean isCorrupted)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
  final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
  Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
}
 
Developer: naver, Project: hadoop, Lines: 12, Source file: TestClientReportBadBlock.java

Example 7: verifyCorruptedBlockCount

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
/**
 * Verify the number of corrupted block replicas by fetching the block
 * location from name node.
 */
private void verifyCorruptedBlockCount(Path filePath, int expectedReplicas)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  final LocatedBlocks lBlocks = dfs.dfs.getNamenode().getBlockLocations(
      filePath.toUri().getPath(), 0, Long.MAX_VALUE);
  // we expect only the first block of the file is used for this test
  LocatedBlock firstLocatedBlock = lBlocks.get(0);
  Assert.assertEquals(expectedReplicas,
      firstLocatedBlock.getLocations().length);
}
 
Developer: naver, Project: hadoop, Lines: 15, Source file: TestClientReportBadBlock.java

Example 8: waitForBlockReplication

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
private void waitForBlockReplication(String filename, 
                                     ClientProtocol namenode,
                                     int expected, long maxWaitSec) 
                                     throws IOException {
  long start = Time.monotonicNow();
  
  // wait for all the blocks to be replicated
  LOG.info("Checking for block replication for " + filename);
  
  LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
  assertEquals(numBlocks, blocks.locatedBlockCount());
  
  for (int i = 0; i < numBlocks; ++i) {
    LOG.info("Checking for block:" + (i+1));
    while (true) { // Loop to check for block i (usually once block 0 is done, all will be done)
      blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
      assertEquals(numBlocks, blocks.locatedBlockCount());
      LocatedBlock block = blocks.get(i);
      int actual = block.getLocations().length;
      if ( actual == expected ) {
        LOG.info("Got enough replicas for " + (i+1) + "th block " + block.getBlock() +
            ", got " + actual + ".");
        break;
      }
      LOG.info("Not enough replicas for " + (i+1) + "th block " + block.getBlock() +
                             " yet. Expecting " + expected + ", got " + 
                             actual + ".");
    
      if (maxWaitSec > 0 && 
          (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timed out while waiting for all blocks to " +
                              "be replicated for " + filename);
      }
    
      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {}
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 41, Source file: TestInjectionForSimulatedStorage.java

Example 9: waitForAllReplicas

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
private void waitForAllReplicas(int expectedReplicaNum, Path file,
    DistributedFileSystem dfs) throws Exception {
  for (int i = 0; i < 5; i++) {
    LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
        BLOCK_SIZE);
    LocatedBlock lb = lbs.get(0);
    if (lb.getLocations().length >= expectedReplicaNum) {
      return;
    } else {
      Thread.sleep(1000);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 14, Source file: TestStorageMover.java

Example 10: testGetLocatedStripedBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
@Test
public void testGetLocatedStripedBlocks() throws Exception {
  final Path file = new Path("/file1");
  // create an empty file
  FSDataOutputStream out = null;
  try {
    out = dfs.create(file, (short) 1);
    writeAndFlushStripedOutputStream(
        (DFSStripedOutputStream) out.getWrappedStream(),
        DFS_BYTES_PER_CHECKSUM_DEFAULT);

    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    BlockInfoStriped lastBlk = (BlockInfoStriped) fileNode.getLastBlock();
    DatanodeInfo[] expectedDNs = DatanodeStorageInfo.toDatanodeInfos(
        lastBlk.getUnderConstructionFeature().getExpectedStorageLocations());
    byte[] indices = lastBlk.getUnderConstructionFeature().getBlockIndices();

    LocatedBlocks blks = dfs.getClient().getLocatedBlocks(file.toString(), 0L);
    Assert.assertEquals(1, blks.locatedBlockCount());
    LocatedBlock lblk = blks.get(0);

    Assert.assertTrue(lblk instanceof LocatedStripedBlock);
    DatanodeInfo[] datanodes = lblk.getLocations();
    byte[] blockIndices = ((LocatedStripedBlock) lblk).getBlockIndices();
    Assert.assertEquals(GROUP_SIZE, datanodes.length);
    Assert.assertEquals(GROUP_SIZE, blockIndices.length);
    Assert.assertArrayEquals(indices, blockIndices);
    Assert.assertArrayEquals(expectedDNs, datanodes);
  } finally {
    IOUtils.cleanup(null, out);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 34, Source file: TestAddStripedBlocks.java

Example 11: testRetryAddBlockWhileInChooseTarget

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
/**
 * Retry addBlock() while another thread is in chooseTarget().
 * See HDFS-4452.
 */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
  final String src = "/testRetryAddBlockWhileInChooseTarget";

  final FSNamesystem ns = cluster.getNamesystem();
  final NamenodeProtocols nn = cluster.getNameNodeRpc();

  // create file
  nn.create(src, FsPermission.getFileDefault(),
      "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short)3, 1024, null);

  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  DatanodeStorageInfo targets[] = ns.getNewBlockTargets(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName",
      null, null, null, onRetryBlock);
  assertNotNull("Targets must be generated", targets);

  // run second addBlock()
  LOG.info("Starting second addBlock for " + src);
  nn.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Penultimate block must be complete",
      checkFileProgress(src, false));
  LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb2 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);

  // continue first addBlock()
  LocatedBlock newBlock = ns.storeAllocatedBlock(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName", null, targets);
  assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());

  // check locations
  lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb1 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
 
Developer: naver, Project: hadoop, Lines: 49, Source file: TestAddBlockRetry.java

Example 12: testReplicationError

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
/**
 * Test that when there is a failure replicating a block the temporary
 * and meta files are cleaned up and subsequent replication succeeds.
 */
@Test
public void testReplicationError() throws Exception {
  // create a file of replication factor of 1
  final Path fileName = new Path("/test.txt");
  final int fileLen = 1;
  DFSTestUtil.createFile(fs, fileName, 1, (short)1, 1L);
  DFSTestUtil.waitReplication(fs, fileName, (short)1);

  // get the block belonging to the created file
  LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), fileName.toString(), 0, (long)fileLen);
  assertEquals("Should only find 1 block", blocks.locatedBlockCount(), 1);
  LocatedBlock block = blocks.get(0);

  // bring up a second datanode
  cluster.startDataNodes(conf, 1, true, null, null);
  cluster.waitActive();
  final int sndNode = 1;
  DataNode datanode = cluster.getDataNodes().get(sndNode);
  
  // replicate the block to the second datanode
  InetSocketAddress target = datanode.getXferAddress();
  Socket s = new Socket(target.getAddress(), target.getPort());
  // write the header.
  DataOutputStream out = new DataOutputStream(s.getOutputStream());

  DataChecksum checksum = DataChecksum.newDataChecksum(
      DataChecksum.Type.CRC32, 512);
  new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
      BlockTokenSecretManager.DUMMY_TOKEN, "",
      new DatanodeInfo[0], new StorageType[0], null,
      BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
      checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
  out.flush();

  // close the connection before sending the content of the block
  out.close();

  // the temporary block & meta files should be deleted
  String bpid = cluster.getNamesystem().getBlockPoolId();
  File storageDir = cluster.getInstanceStorageDir(sndNode, 0);
  File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  storageDir = cluster.getInstanceStorageDir(sndNode, 1);
  File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
    Thread.sleep(100);
  }

  // then increase the file's replication factor
  fs.setReplication(fileName, (short)2);
  // replication should succeed
  DFSTestUtil.waitReplication(fs, fileName, (short)1);

  // clean up the file
  fs.delete(fileName, false);
}
 
Developer: naver, Project: hadoop, Lines: 61, Source file: TestDiskError.java

Example 13: testPread

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
@Test
public void testPread() throws Exception {
  final int numBlocks = 2;
  DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
      NUM_STRIPE_PER_BLOCK, false);
  LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
      filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
  int fileLen = BLOCK_GROUP_SIZE * numBlocks;

  byte[] expected = new byte[fileLen];
  assertEquals(numBlocks, lbs.getLocatedBlocks().size());
  for (int bgIdx = 0; bgIdx < numBlocks; bgIdx++) {
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(bgIdx));
    for (int i = 0; i < DATA_BLK_NUM; i++) {
      Block blk = new Block(bg.getBlock().getBlockId() + i,
          NUM_STRIPE_PER_BLOCK * CELLSIZE,
          bg.getBlock().getGenerationStamp());
      blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
      cluster.injectBlocks(i, Arrays.asList(blk),
          bg.getBlock().getBlockPoolId());
    }

    /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
    for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
      for (int j = 0; j < DATA_BLK_NUM; j++) {
        for (int k = 0; k < CELLSIZE; k++) {
          int posInBlk = i * CELLSIZE + k;
          int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
          expected[bgIdx*BLOCK_GROUP_SIZE + posInFile] =
              SimulatedFSDataset.simulatedByte(
                  new Block(bg.getBlock().getBlockId() + j), posInBlk);
        }
      }
    }
  }
  DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
      filePath.toString(), false, ecPolicy, null);

  int[] startOffsets = {0, 1, CELLSIZE - 102, CELLSIZE, CELLSIZE + 102,
      CELLSIZE*DATA_BLK_NUM, CELLSIZE*DATA_BLK_NUM + 102,
      BLOCK_GROUP_SIZE - 102, BLOCK_GROUP_SIZE, BLOCK_GROUP_SIZE + 102,
      fileLen - 1};
  for (int startOffset : startOffsets) {
    startOffset = Math.max(0, Math.min(startOffset, fileLen - 1));
    int remaining = fileLen - startOffset;
    byte[] buf = new byte[fileLen];
    int ret = in.read(startOffset, buf, 0, fileLen);
    assertEquals(remaining, ret);
    for (int i = 0; i < remaining; i++) {
      Assert.assertEquals("Byte at " + (startOffset + i) + " should be the " +
              "same",
          expected[startOffset + i], buf[i]);
    }
  }
  in.close();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 57, Source file: TestDFSStripedInputStream.java

Example 14: testProcessOverReplicatedStripedBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
@Test
public void testProcessOverReplicatedStripedBlock() throws Exception {
  // create a file which has exactly one block group on the first GROUP_SIZE DNs
  long fileLen = DATA_BLK_NUM * BLOCK_SIZE;
  DFSTestUtil.createStripedFile(cluster, filePath, null, 1,
      NUM_STRIPE_PER_BLOCK, false);
  LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
      filePath.toString(), 0, fileLen);
  LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
  long gs = bg.getBlock().getGenerationStamp();
  String bpid = bg.getBlock().getBlockPoolId();
  long groupId = bg.getBlock().getBlockId();
  Block blk = new Block(groupId, BLOCK_SIZE, gs);
  for (int i = 0; i < GROUP_SIZE; i++) {
    blk.setBlockId(groupId + i);
    cluster.injectBlocks(i, Arrays.asList(blk), bpid);
  }
  cluster.triggerBlockReports();

  // let an internal block be over-replicated with 2 redundant blocks.
  blk.setBlockId(groupId + 2);
  cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
  cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
  // let an internal block be over-replicated with 1 redundant block.
  blk.setBlockId(groupId + 6);
  cluster.injectBlocks(numDNs - 1, Arrays.asList(blk), bpid);

  // update blocksMap
  cluster.triggerBlockReports();
  // add to invalidates
  cluster.triggerHeartbeats();
  // datanode delete block
  cluster.triggerHeartbeats();
  // update blocksMap
  cluster.triggerBlockReports();

  // verify that all internal blocks exist
  lbs = cluster.getNameNodeRpc().getBlockLocations(
      filePath.toString(), 0, fileLen);
  StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, GROUP_SIZE);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 42, Source file: TestAddOverReplicatedStripedBlocks.java

Example 15: testProcessOverReplicatedSBSmallerThanFullBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class that the method depends on
@Test
public void testProcessOverReplicatedSBSmallerThanFullBlocks()
    throws Exception {
  // Create an EC file whose internal blocks are not completely filled.
  int fileLen = CELLSIZE * (DATA_BLK_NUM - 1);
  byte[] content = new byte[fileLen];
  DFSTestUtil.writeFile(fs, filePath, new String(content));
  LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
      filePath.toString(), 0, fileLen);
  LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
  long gs = bg.getBlock().getGenerationStamp();
  String bpid = bg.getBlock().getBlockPoolId();
  long groupId = bg.getBlock().getBlockId();
  Block blk = new Block(groupId, BLOCK_SIZE, gs);
  cluster.triggerBlockReports();
  List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());

  // let an internal block be over-replicated with 2 redundant blocks.
  // Therefore the number of internal blocks exceeds GROUP_SIZE. (5 data blocks +
  // 3 parity blocks + 2 redundant blocks > GROUP_SIZE)
  blk.setBlockId(groupId + 2);
  List<DataNode> dataNodeList = cluster.getDataNodes();
  for (int i = 0; i < numDNs; i++) {
    if (!infos.contains(dataNodeList.get(i).getDatanodeId())) {
      cluster.injectBlocks(i, Arrays.asList(blk), bpid);
      System.out.println("XXX: inject block into datanode " + i);
    }
  }

  // update blocksMap
  cluster.triggerBlockReports();
  // add to invalidates
  cluster.triggerHeartbeats();
  // datanode delete block
  cluster.triggerHeartbeats();
  // update blocksMap
  cluster.triggerBlockReports();

  // verify that all internal blocks exist
  lbs = cluster.getNameNodeRpc().getBlockLocations(
      filePath.toString(), 0, fileLen);
  StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, GROUP_SIZE - 1);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 44, Source file: TestAddOverReplicatedStripedBlocks.java


Note: The org.apache.hadoop.hdfs.protocol.LocatedBlocks.get method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.