

Java LocatedBlocks.getLocatedBlocks Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.LocatedBlocks.getLocatedBlocks. If you are wondering what LocatedBlocks.getLocatedBlocks does, how to call it, or want concrete examples, the curated snippets below should help. You can also browse further usage examples of the containing class, org.apache.hadoop.hdfs.protocol.LocatedBlocks.


Fifteen code examples of the LocatedBlocks.getLocatedBlocks method are shown below, sorted by popularity by default.
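Before diving into the examples, here is a minimal sketch of the typical call pattern: fetch a LocatedBlocks for a file through a DFSClient, then iterate the List<LocatedBlock> returned by getLocatedBlocks(). This is only an illustrative sketch, not taken from any of the projects below; the file path is hypothetical, and a default Configuration pointing at your cluster is assumed (the same client.getLocatedBlocks(...) call appears in Examples 5 and 8).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ListBlockLocations {
  public static void main(String[] args) throws Exception {
    final String src = "/user/demo/file.dat"; // hypothetical HDFS path
    final DFSClient client = new DFSClient(new Configuration());
    try {
      // Ask the NameNode for the block list covering the whole file.
      long fileLength = client.getFileInfo(src).getLen();
      LocatedBlocks locatedBlocks = client.getLocatedBlocks(src, 0, fileLength);
      // getLocatedBlocks() returns a List<LocatedBlock>, one entry per block.
      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
        System.out.println(lb.getBlock() + " @ offset " + lb.getStartOffset());
      }
    } finally {
      client.close();
    }
  }
}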

Example 1: verifyBlockDeletedFromDir

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {

  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File targetDir =
      DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
    File metaFile = new File(targetDir,
      DatanodeUtil.getMetaName(lb.getBlock().getBlockName(),
        lb.getBlock().getGenerationStamp()));
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
  }
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: LazyPersistTestCase.java

Example 2: testRefreshBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Test {@link DFSStripedInputStream#getBlockAt(long)}
 */
@Test
public void testRefreshBlock() throws Exception {
  final int numBlocks = 4;
  DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
      NUM_STRIPE_PER_BLOCK, false);
  LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
      filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
  final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
      filePath.toString(), false, ecPolicy, null);

  List<LocatedBlock> lbList = lbs.getLocatedBlocks();
  for (LocatedBlock aLbList : lbList) {
    LocatedStripedBlock lsb = (LocatedStripedBlock) aLbList;
    LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(lsb,
        CELLSIZE, DATA_BLK_NUM, PARITY_BLK_NUM);
    for (int j = 0; j < DATA_BLK_NUM; j++) {
      LocatedBlock refreshed = in.refreshLocatedBlock(blks[j]);
      assertEquals(blks[j].getBlock(), refreshed.getBlock());
      assertEquals(blks[j].getStartOffset(), refreshed.getStartOffset());
      assertArrayEquals(blks[j].getLocations(), refreshed.getLocations());
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 27, Source: TestDFSStripedInputStream.java

Example 3: checkEquals

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
private static void checkEquals(LocatedBlocks l1, LocatedBlocks l2) {
  List<LocatedBlock> list1 = l1.getLocatedBlocks();
  List<LocatedBlock> list2 = l2.getLocatedBlocks();
  assertEquals(list1.size(), list2.size());
  
  for (int i = 0; i < list1.size(); i++) {
    LocatedBlock b1 = list1.get(i);
    LocatedBlock b2 = list2.get(i);
    assertEquals(b1.getBlock(), b2.getBlock());
    assertEquals(b1.getBlockSize(), b2.getBlockSize());
  }
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestINodeFile.java

Example 4: testGetBlockLocations

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestBlockUnderConstruction.java

Example 5: ensureFileReplicasOnStorageType

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
protected final LocatedBlocks ensureFileReplicasOnStorageType(
    Path path, StorageType storageType) throws IOException {
  // Ensure that the block locations returned are correct!
  LOG.info("Ensure path: " + path + " is on StorageType: " + storageType);
  assertThat(fs.exists(path), is(true));
  long fileLength = client.getFileInfo(path.toString()).getLen();
  LocatedBlocks locatedBlocks =
      client.getLocatedBlocks(path.toString(), 0, fileLength);
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    assertThat(locatedBlock.getStorageTypes()[0], is(storageType));
  }
  return locatedBlocks;
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: LazyPersistTestCase.java

Example 6: ensureLazyPersistBlocksAreSaved

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Make sure at least one non-transient volume has a saved copy of the replica.
 * An infinite loop is used to ensure the async lazy persist tasks are completely
 * done before verification. The caller of ensureLazyPersistBlocksAreSaved expects
 * either a successful pass or a timeout failure.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
    cluster.getDataNodes().get(0).getFSDataset().getVolumes();
  final Set<Long> persistedBlockIds = new HashSet<Long>();

  while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
    // Take 1 second sleep before each verification iteration
    Thread.sleep(1000);

    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (FsVolumeSpi v : volumes) {
        if (v.isTransientStorage()) {
          continue;
        }

        FsVolumeImpl volume = (FsVolumeImpl) v;
        File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();

        long blockId = lb.getBlock().getBlockId();
        File targetDir =
          DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
        File blockFile = new File(targetDir, lb.getBlock().getBlockName());
        if (blockFile.exists()) {
          // Found a persisted copy of this block; add it to the set.
          persistedBlockIds.add(blockId);
        }
      }
    }
  }

  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: LazyPersistTestCase.java

Example 7: getLastLocatedBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
public static LocatedBlock getLastLocatedBlock(
    ClientProtocol namenode, String src) throws IOException {
  //get block info for the last block
  LocatedBlocks locations = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
  List<LocatedBlock> blocks = locations.getLocatedBlocks();
  DataNode.LOG.info("blocks.size()=" + blocks.size());
  assertTrue(blocks.size() > 0);

  return blocks.get(blocks.size() - 1);
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: TestInterDatanodeProtocol.java

Example 8: testFallbackToDiskPartial

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * The file partially fits in RamDisk after eviction.
 * RamDisk can fit 2 blocks. Write a file with 5 blocks.
 * Expect 2 or fewer blocks on RamDisk and 3 or more on disk.
 * @throws IOException
 */
@Test
public void testFallbackToDiskPartial()
  throws IOException, InterruptedException {
  startUpCluster(true, 2);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE * 5, true);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  triggerBlockReport();

  int numBlocksOnRamDisk = 0;
  int numBlocksOnDisk = 0;

  long fileLength = client.getFileInfo(path.toString()).getLen();
  LocatedBlocks locatedBlocks =
    client.getLocatedBlocks(path.toString(), 0, fileLength);
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    if (locatedBlock.getStorageTypes()[0] == RAM_DISK) {
      numBlocksOnRamDisk++;
    } else if (locatedBlock.getStorageTypes()[0] == DEFAULT) {
      numBlocksOnDisk++;
    }
  }

  // Since eviction is asynchronous, depending on the timing of eviction
  // relative to writes, we may get 2 or fewer blocks on RAM disk.
  assert(numBlocksOnRamDisk <= 2);
  assert(numBlocksOnDisk >= 3);
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestLazyPersistFiles.java

Example 9: assertBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
private void assertBlocks(BlockManager bm, LocatedBlocks lbs, 
    boolean exist) {
  for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
    if (exist) {
      assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
          getLocalBlock()) != null);
    } else {
      assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
          getLocalBlock()) == null);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestFileCreation.java

Example 10: reorderBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
    throws IOException {

  ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, src);
  if (sn == null) {
    // It's not a WAL
    return;
  }

  // OK, so it's a WAL
  String hostName = sn.getHostname();
  if (LOG.isTraceEnabled()) {
    LOG.trace(src +
        " is a WAL file, so reordering blocks; last hostname will be: " + hostName);
  }

  // Just check for all blocks
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    DatanodeInfo[] dnis = lb.getLocations();
    if (dnis != null && dnis.length > 1) {
      boolean found = false;
      for (int i = 0; i < dnis.length - 1 && !found; i++) {
        if (hostName.equals(dnis[i].getHostName())) {
          // advance the other locations by one and put this one at the last place.
          DatanodeInfo toLast = dnis[i];
          System.arraycopy(dnis, i + 1, dnis, i, dnis.length - i - 1);
          dnis[dnis.length - 1] = toLast;
          found = true;
        }
      }
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: HFileSystem.java

Example 11: testGetBlockLocations

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertFalse(blockManager.getStoredBlock(b).isComplete());

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 33, Source: TestBlockUnderConstruction.java

Example 12: ensureLazyPersistBlocksAreSaved

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Make sure at least one non-transient volume has a saved copy of the replica.
 * An infinite loop is used to ensure the async lazy persist tasks are completely
 * done before verification. The caller of ensureLazyPersistBlocksAreSaved expects
 * either a successful pass or a timeout failure.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();

  final Set<Long> persistedBlockIds = new HashSet<Long>();

  try (FsDatasetSpi.FsVolumeReferences volumes =
      cluster.getDataNodes().get(0).getFSDataset().getFsVolumeReferences()) {
    while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks()
        .size()) {
      // Take 1 second sleep before each verification iteration
      Thread.sleep(1000);

      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
        for (FsVolumeSpi v : volumes) {
          if (v.isTransientStorage()) {
            continue;
          }

          FsVolumeImpl volume = (FsVolumeImpl) v;
          File lazyPersistDir =
              volume.getBlockPoolSlice(bpid).getLazypersistDir();

          long blockId = lb.getBlock().getBlockId();
          File targetDir =
              DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
          File blockFile = new File(targetDir, lb.getBlock().getBlockName());
          if (blockFile.exists()) {
            // Found a persisted copy of this block; add it to the set.
            persistedBlockIds.add(blockId);
          }
        }
      }
    }
  }

  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 46, Source: LazyPersistTestCase.java

Example 13: testFallbackToDiskPartial

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * The file partially fits in RamDisk after eviction.
 * RamDisk can fit 2 blocks. Write a file with 5 blocks.
 * Expect 2 blocks on RamDisk and the rest on disk.
 * @throws IOException
 */
@Test
public void testFallbackToDiskPartial()
    throws IOException, InterruptedException {
  getClusterBuilder().setMaxLockedMemory(2 * BLOCK_SIZE).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE * 5, true);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  triggerBlockReport();

  int numBlocksOnRamDisk = 0;
  int numBlocksOnDisk = 0;

  long fileLength = client.getFileInfo(path.toString()).getLen();
  LocatedBlocks locatedBlocks =
      client.getLocatedBlocks(path.toString(), 0, fileLength);
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    if (locatedBlock.getStorageTypes()[0] == RAM_DISK) {
      numBlocksOnRamDisk++;
    } else if (locatedBlock.getStorageTypes()[0] == DEFAULT) {
      numBlocksOnDisk++;
    }
  }

  // Eviction is asynchronous, but maxLockedMemory is capped at 2 blocks,
  // so this test expects exactly 2 blocks on RAM disk and the remaining 3 on disk.
  assertThat(numBlocksOnRamDisk, is(2));
  assertThat(numBlocksOnDisk, is(3));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 40, Source: TestLazyPersistReplicaPlacement.java

Example 14: waitBlockGroupsReported

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Wait for all the internalBlocks of the blockGroups of the given file to be
 * reported.
 */
public static void waitBlockGroupsReported(DistributedFileSystem fs,
    String src, int numDeadDNs) throws Exception {
  boolean success;
  final int ATTEMPTS = 40;
  int count = 0;

  do {
    success = true;
    count++;
    LocatedBlocks lbs = fs.getClient().getLocatedBlocks(src, 0);
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      short expected = (short) (getRealTotalBlockNum((int) lb.getBlockSize())
          - numDeadDNs);
      int reported = lb.getLocations().length;
      if (reported < expected){
        success = false;
        LOG.info("blockGroup " + lb.getBlock() + " of file " + src
            + " has reported internalBlocks " + reported
            + " (desired " + expected + "); locations "
            + Joiner.on(' ').join(lb.getLocations()));
        Thread.sleep(1000);
        break;
      }
    }
    if (success) {
      LOG.info("All blockGroups of file " + src
          + " verified to have all internalBlocks.");
    }
  } while (!success && count < ATTEMPTS);

  if (count == ATTEMPTS) {
    throw new TimeoutException("Timed out waiting for " + src +
        " to have all the internalBlocks");
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 40, Source: StripedFileTestUtil.java

Example 15: getBlockLocationsInt

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
private GetBlockLocationsResult getBlockLocationsInt(
    FSPermissionChecker pc, final String srcArg, long offset, long length,
    boolean needBlockToken)
    throws IOException {
  String src = srcArg;
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = dir.resolvePath(pc, srcArg, pathComponents);
  final INodesInPath iip = dir.getINodesInPath(src, true);
  final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
  if (isPermissionEnabled) {
    dir.checkPathAccess(pc, iip, FsAction.READ);
    checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
  }

  final long fileSize = iip.isSnapshot()
      ? inode.computeFileSize(iip.getPathSnapshotId())
      : inode.computeFileSizeNotIncludingLastUcBlock();
  boolean isUc = inode.isUnderConstruction();
  if (iip.isSnapshot()) {
    // if src indicates a snapshot file, we need to make sure the returned
    // blocks do not exceed the size of the snapshot file.
    length = Math.min(length, fileSize - offset);
    isUc = false;
  }

  final FileEncryptionInfo feInfo =
      FSDirectory.isReservedRawName(srcArg) ? null
          : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId(), iip);

  final LocatedBlocks blocks = blockManager.createLocatedBlocks(
      inode.getBlocks(iip.getPathSnapshotId()), fileSize,
      isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo);

  // Set caching information for the located blocks.
  for (LocatedBlock lb : blocks.getLocatedBlocks()) {
    cacheManager.setCachedLocations(lb);
  }

  final long now = now();
  boolean updateAccessTime = isAccessTimeSupported() && !isInSafeMode()
      && !iip.isSnapshot()
      && now > inode.getAccessTime() + getAccessTimePrecision();
  return new GetBlockLocationsResult(updateAccessTime, blocks);
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: FSNamesystem.java


Note: The org.apache.hadoop.hdfs.protocol.LocatedBlocks.getLocatedBlocks examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution or use must follow each project's License. Do not republish without permission.