Java LocatedBlocks Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.LocatedBlocks. If you are looking for concrete examples of how the LocatedBlocks class is used in real code, the curated examples below should help.


The LocatedBlocks class belongs to the org.apache.hadoop.hdfs.protocol package. The sections below present 15 code examples of the class, ordered by popularity.
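Before the individual examples, here is a minimal sketch (not taken from any of the projects below) of the typical flow: a DistributedFileSystem hands out its DFSClient, the client asks the NameNode for a LocatedBlocks describing a file, and the caller iterates the contained LocatedBlock entries. The helper name printBlockLocations and the variable names are illustrative only.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Illustrative sketch: list the blocks of a file (up to the client's prefetch
// size) and the DataNodes holding their replicas.
static void printBlockLocations(DistributedFileSystem dfs, Path path)
    throws IOException {
  LocatedBlocks blocks = dfs.getClient().getLocatedBlocks(path.toString(), 0L);
  for (LocatedBlock lb : blocks.getLocatedBlocks()) {
    System.out.println("Block " + lb.getBlock().getBlockName()
        + " at offset " + lb.getStartOffset());
    for (DatanodeInfo dn : lb.getLocations()) {
      System.out.println("  replica on " + dn.getHostName());
    }
  }
}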

Example 1: createAFileWithCorruptedBlockReplicas

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // Corrupt some or all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);

  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestClientReportBadBlock.java

Example 2: getBlockLocations

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Get block location info about file
 * 
 * getBlockLocations() returns a list of hostnames that store 
 * data for a specific file region.  It returns a set of hostnames
 * for every block within the indicated region.
 *
 * This function is very useful when writing code that considers
 * data-placement when performing operations.  For example, the
 * MapReduce system tries to schedule tasks on the same machines
 * as the data-block the task processes. 
 */
public BlockLocation[] getBlockLocations(String src, long start, 
      long length) throws IOException, UnresolvedLinkException {
  TraceScope scope = getPathTraceScope("getBlockLocations", src);
  try {
    LocatedBlocks blocks = getLocatedBlocks(src, start, length);
    BlockLocation[] locations =  DFSUtil.locatedBlocks2Locations(blocks);
    HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
    for (int i = 0; i < locations.length; i++) {
      hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
    }
    return hdfsLocations;
  } finally {
    scope.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: DFSClient.java
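As a complementary, hedged usage sketch (not part of DFSClient itself): application code usually reaches this method indirectly through the public FileSystem API, for example FileSystem#getFileBlockLocations, and then reads the host list of each returned BlockLocation. The path, the helper name printHosts, and the variable names below are illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch: on HDFS, getFileBlockLocations ends up in the
// DFSClient#getBlockLocations method shown above.
static void printHosts(Configuration conf, Path file) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  FileStatus status = fs.getFileStatus(file);
  BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
  for (BlockLocation loc : locations) {
    System.out.println("offset=" + loc.getOffset()
        + " hosts=" + String.join(",", loc.getHosts()));
  }
}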

Example 3: convert

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
public static LocatedBlocksProto convert(LocatedBlocks lb) {
  if (lb == null) {
    return null;
  }
  LocatedBlocksProto.Builder builder = 
      LocatedBlocksProto.newBuilder();
  if (lb.getLastLocatedBlock() != null) {
    builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
  }
  if (lb.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
  }
  return builder.setFileLength(lb.getFileLength())
      .setUnderConstruction(lb.isUnderConstruction())
      .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
      .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: PBHelper.java

Example 4: getBlockLocations

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
@Override
public GetBlockLocationsResponseProto getBlockLocations(
    RpcController controller, GetBlockLocationsRequestProto req)
    throws ServiceException {
  try {
    LocatedBlocks b = server.getBlockLocations(req.getSrc(), req.getOffset(),
        req.getLength());
    Builder builder = GetBlockLocationsResponseProto
        .newBuilder();
    if (b != null) {
      builder.setLocations(PBHelper.convert(b)).build();
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: ClientNamenodeProtocolServerSideTranslatorPB.java

Example 5: getBlockLocations

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
@Override
public LocatedBlocks getBlockLocations(String src, long offset, long length)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  GetBlockLocationsRequestProto req = GetBlockLocationsRequestProto
      .newBuilder()
      .setSrc(src)
      .setOffset(offset)
      .setLength(length)
      .build();
  try {
    GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
        req);
    return resp.hasLocations() ? 
      PBHelper.convert(resp.getLocations()) : null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: ClientNamenodeProtocolTranslatorPB.java

Example 6: checkBlockRecovery

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
public static void checkBlockRecovery(Path p, DistributedFileSystem dfs,
    int attempts, long sleepMs) throws IOException {
  boolean success = false;
  for(int i = 0; i < attempts; i++) {
    LocatedBlocks blocks = getLocatedBlocks(p, dfs);
    boolean noLastBlock = blocks.getLastLocatedBlock() == null;
    if(!blocks.isUnderConstruction() &&
        (noLastBlock || blocks.isLastBlockComplete())) {
      success = true;
      break;
    }
    try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
  }
  assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
      success, is(true));
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestFileTruncate.java
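A hedged usage sketch for this helper: truncating a file to a length that falls inside a block returns false and leaves the last block in recovery, so a test waits with checkBlockRecovery before reading the truncated file. The attempt count and sleep interval below are illustrative values, not the defaults used by TestFileTruncate, and the wrapper method truncateAndWait is hypothetical.

// Illustrative sketch: truncate to a mid-block length, then wait for the
// resulting block recovery to finish. 300 attempts / 100 ms are example values.
static void truncateAndWait(DistributedFileSystem dfs, Path path, long newLength)
    throws IOException {
  // truncate() returns true when it completes immediately (cut on a block
  // boundary); false means the last block must first go through recovery.
  boolean doneImmediately = dfs.truncate(path, newLength);
  if (!doneImmediately) {
    checkBlockRecovery(path, dfs, 300, 100L);
  }
}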

Example 7: prepare

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
@Override
void prepare() throws Exception {
  final Path filePath = new Path(file);
  DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0);
  // append to the file and leave the last block under construction
  out = this.client.append(file, BlockSize, EnumSet.of(CreateFlag.APPEND),
      null, null);
  byte[] appendContent = new byte[100];
  new Random().nextBytes(appendContent);
  out.write(appendContent);
  ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  LocatedBlocks blks = dfs.getClient()
      .getLocatedBlocks(file, BlockSize + 1);
  assertEquals(1, blks.getLocatedBlocks().size());
  nodes = blks.get(0).getLocations();
  oldBlock = blks.get(0).getBlock();
  
  LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline(
      oldBlock, client.getClientName());
  newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
      oldBlock.getBlockId(), oldBlock.getNumBytes(), 
      newLbk.getBlock().getGenerationStamp());
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestRetryCacheWithHA.java

Example 8: verifyBlockDeletedFromDir

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {

  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File targetDir =
      DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
    File metaFile = new File(targetDir,
      DatanodeUtil.getMetaName(lb.getBlock().getBlockName(),
        lb.getBlock().getGenerationStamp()));
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
  }
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: LazyPersistTestCase.java

Example 9: testLazyPersistBlocksAreSaved

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
@Test
public void testLazyPersistBlocksAreSaved()
    throws IOException, InterruptedException {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  // Create a test file
  makeTestFile(path, BLOCK_SIZE * 10, true);
  LocatedBlocks locatedBlocks = ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);
  
  LOG.info("Verifying copy was saved to lazyPersist/");

  // Make sure that there is a saved copy of the replica on persistent
  // storage.
  ensureLazyPersistBlocksAreSaved(locatedBlocks);
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestLazyPersistFiles.java
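For orientation, a hedged sketch of how a lazy-persist file is created from the client side (the makeTestFile helper above presumably does something along these lines): the file is opened with CreateFlag.LAZY_PERSIST, so its replica is placed on RAM_DISK first and written back to persistent storage asynchronously by the lazy writer. The helper name createLazyPersistFile and the buffer size are illustrative.

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

// Illustrative sketch: request LAZY_PERSIST placement; the feature targets
// replication factor 1, so a single in-memory replica is created.
static FSDataOutputStream createLazyPersistFile(FileSystem fs, Path path,
    long blockSize) throws IOException {
  return fs.create(path, FsPermission.getFileDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST),
      4096, (short) 1, blockSize, null);
}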

Example 10: testDeleteBeforePersist

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Delete lazy-persist file that has not been persisted to disk.
 * Memory is freed up and file is gone.
 * @throws IOException
 */
@Test
public void testDeleteBeforePersist()
  throws Exception {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

  Path path = new Path("/" + METHOD_NAME + ".dat");
  makeTestFile(path, BLOCK_SIZE, true);
  LocatedBlocks locatedBlocks =
    ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Delete before persist
  client.delete(path.toString(), false);
  Assert.assertFalse(fs.exists(path));

  assertThat(verifyDeletedBlocks(locatedBlocks), is(true));

  verifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 1);
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestLazyPersistFiles.java

Example 11: testDeleteAfterPersist

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/**
 * Delete lazy-persist file that has been persisted to disk
 * Both memory blocks and disk blocks are deleted.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testDeleteAfterPersist()
  throws Exception {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE, true);
  LocatedBlocks locatedBlocks = ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Delete after persist
  client.delete(path.toString(), false);
  Assert.assertFalse(fs.exists(path));

  assertThat(verifyDeletedBlocks(locatedBlocks), is(true));

  verifyRamDiskJMXMetric("RamDiskBlocksLazyPersisted", 1);
  verifyRamDiskJMXMetric("RamDiskBytesLazyPersisted", BLOCK_SIZE);
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestLazyPersistFiles.java

Example 12: makeBadBlockList

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
    goodLocatedBlock.getBlock(),
    new DatanodeInfo[] {
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
    },
    goodLocatedBlock.getStartOffset(),
    false);


  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
                           badBlocks, null, true,
                           null);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestDFSClientRetries.java

Example 13: testFromDFS

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
private void testFromDFS(DistributedFileSystem dfs, String src, int repCount, String localhost)
    throws Exception {
  // Multiple times as the order is random
  for (int i = 0; i < 10; i++) {
    LocatedBlocks l;
    // The NN gets the block list asynchronously, so we may need multiple tries to get the list
    final long max = System.currentTimeMillis() + 10000;
    boolean done;
    do {
      Assert.assertTrue("Can't get enouth replica.", System.currentTimeMillis() < max);
      l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1);
      Assert.assertNotNull("Can't get block locations for " + src, l);
      Assert.assertNotNull(l.getLocatedBlocks());
      Assert.assertTrue(l.getLocatedBlocks().size() > 0);

      done = true;
      for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
        done = (l.get(y).getLocations().length == repCount);
      }
    } while (!done);

    for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
      Assert.assertEquals(localhost, l.get(y).getLocations()[repCount - 1].getHostName());
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestBlockReorder.java

Example 14: toLocatedBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
/** Convert a Json map to LocatedBlocks. */
static LocatedBlocks toLocatedBlocks(
    final Map<?, ?> json) throws IOException {
  if (json == null) {
    return null;
  }

  final Map<?, ?> m = (Map<?, ?>)json.get(
      LocatedBlocks.class.getSimpleName());
  final long fileLength = ((Number) m.get("fileLength")).longValue();
  final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
  final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
      getList(m, "locatedBlocks"));
  final LocatedBlock lastLocatedBlock = toLocatedBlock(
      (Map<?, ?>) m.get("lastLocatedBlock"));
  final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
  return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
      lastLocatedBlock, isLastBlockComplete, null, null);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 20, Source: JsonUtilClient.java

Example 15: DFSStripedInputStream

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the required package/class
DFSStripedInputStream(DFSClient dfsClient, String src,
    boolean verifyChecksum, ErasureCodingPolicy ecPolicy,
    LocatedBlocks locatedBlocks) throws IOException {
  super(dfsClient, src, verifyChecksum, locatedBlocks);

  assert ecPolicy != null;
  this.ecPolicy = ecPolicy;
  this.cellSize = ecPolicy.getCellSize();
  dataBlkNum = (short) ecPolicy.getNumDataUnits();
  parityBlkNum = (short) ecPolicy.getNumParityUnits();
  groupSize = dataBlkNum + parityBlkNum;
  blockReaders = new BlockReaderInfo[groupSize];
  curStripeRange = new StripeRange(0, 0);
  readingService =
      new ExecutorCompletionService<>(dfsClient.getStripedReadsThreadPool());
  decoder = CodecUtil.createRSRawDecoder(dfsClient.getConfiguration(),
      dataBlkNum, parityBlkNum);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Creating an striped input stream for file " + src);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 22, Source: DFSStripedInputStream.java


Note: The org.apache.hadoop.hdfs.protocol.LocatedBlocks examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects, and the copyright of the source code belongs to the original authors; redistribution or use should follow the license of the corresponding project. Do not reproduce this article without permission.