

Java Code Examples: LocatedBlocks.getLastLocatedBlock

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.LocatedBlocks.getLastLocatedBlock, gathered from open-source projects. If you are wondering what LocatedBlocks.getLastLocatedBlock does or how to use it, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks itself.


Seven code examples of the LocatedBlocks.getLastLocatedBlock method are shown below, sorted by popularity by default.
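Before diving into the examples, here is a minimal sketch (not from the source) of how a LocatedBlocks instance is typically obtained and its last block inspected. The file path is hypothetical, and an already-constructed DFSClient is assumed.

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LastBlockDemo {
  // Fetch a file's block locations via the NameNode RPC and inspect the last block.
  static void printLastBlock(DFSClient client) throws IOException {
    LocatedBlocks blocks = client.getNamenode()
        .getBlockLocations("/user/demo/data.bin", 0, Long.MAX_VALUE);
    LocatedBlock last = blocks.getLastLocatedBlock();
    if (last != null) { // may be null, e.g. for an empty file
      System.out.println("Last block: " + last.getBlock()
          + ", replicas: " + last.getLocations().length);
    }
  }
}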

Example 1: convert

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
public static LocatedBlocksProto convert(LocatedBlocks lb) {
  if (lb == null) {
    return null;
  }
  LocatedBlocksProto.Builder builder = 
      LocatedBlocksProto.newBuilder();
  if (lb.getLastLocatedBlock() != null) {
    builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
  }
  if (lb.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
  }
  return builder.setFileLength(lb.getFileLength())
      .setUnderConstruction(lb.isUnderConstruction())
      .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
      .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
}
 
Developer: naver | Project: hadoop | Lines: 18 | Source: PBHelper.java
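A hedged usage sketch for Example 1: round-tripping a LocatedBlocks instance through its protobuf form. It assumes the matching reverse overload PBHelper.convert(LocatedBlocksProto) exists in the same branch.

// Round-trip sketch; assumes the reverse PBHelper.convert(LocatedBlocksProto) overload.
LocatedBlocksProto proto = PBHelper.convert(locatedBlocks); // returns null for null input
LocatedBlocks roundTripped = PBHelper.convert(proto);
assert roundTripped.getFileLength() == locatedBlocks.getFileLength();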

Example 2: checkBlockRecovery

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
public static void checkBlockRecovery(Path p, DistributedFileSystem dfs,
    int attempts, long sleepMs) throws IOException {
  boolean success = false;
  for(int i = 0; i < attempts; i++) {
    LocatedBlocks blocks = getLocatedBlocks(p, dfs);
    boolean noLastBlock = blocks.getLastLocatedBlock() == null;
    if(!blocks.isUnderConstruction() &&
        (noLastBlock || blocks.isLastBlockComplete())) {
      success = true;
      break;
    }
    try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
  }
  assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
      success, is(true));
}
 
Developer: naver | Project: hadoop | Lines: 17 | Source: TestFileTruncate.java
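For context, a sketch (not from the source) of a typical call site: a truncate that does not fall on a block boundary leaves the last block under recovery, and the helper above polls until that recovery completes. The attempt count and sleep interval here are illustrative.

// Illustrative call site: truncate() returns false when the last block
// still needs recovery before the file can be completed.
boolean isReady = dfs.truncate(p, newLength);
if (!isReady) {
  checkBlockRecovery(p, dfs, 300, 100L); // poll up to 300 times, 100 ms apart
}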

Example 3: convert

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
public static LocatedBlocksProto convert(LocatedBlocks lb) {
  if (lb == null) {
    return null;
  }
  LocatedBlocksProto.Builder builder =
      LocatedBlocksProto.newBuilder();
  if (lb.getLastLocatedBlock() != null) {
    builder.setLastBlock(
        convertLocatedBlock(lb.getLastLocatedBlock()));
  }
  if (lb.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
  }
  if (lb.getErasureCodingPolicy() != null) {
    builder.setEcPolicy(convertErasureCodingPolicy(
        lb.getErasureCodingPolicy()));
  }
  return builder.setFileLength(lb.getFileLength())
      .setUnderConstruction(lb.isUnderConstruction())
      .addAllBlocks(convertLocatedBlocks2(lb.getLocatedBlocks()))
      .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 23 | Source: PBHelperClient.java
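This is the newer PBHelperClient variant of the converter shown in Example 1: besides the last block and the file-encryption info, it also serializes the file's erasure-coding policy when one is set.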

Example 4: testBlockTokenInLastLocatedBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
/**
 * This test writes a file and gets the block locations without closing the
 * file, then checks the block token in the last block. The token is verified
 * by ensuring it is of the correct kind.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testBlockTokenInLastLocatedBlock() throws IOException,
    InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  try {
    FileSystem fs = cluster.getFileSystem();
    String fileName = "/testBlockTokenInLastLocatedBlock";
    Path filePath = new Path(fileName);
    FSDataOutputStream out = fs.create(filePath, (short) 1);
    out.write(new byte[1000]);
    // ensure that the first block is written out (see FSOutputSummer#flush)
    out.flush();
    LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(
        fileName, 0, 1000);
    while (locatedBlocks.getLastLocatedBlock() == null) {
      Thread.sleep(100);
      locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0,
          1000);
    }
    Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock()
        .getBlockToken();
    Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
    out.close();
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 42 | Source: TestBlockToken.java

Example 5: testAbandonBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
@Test
/** Abandon a block while creating a file */
public void testAbandonBlock() throws IOException {
  String src = FILE_NAME_PREFIX + "foo";

  // Start writing a file but do not close it
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
  for (int i = 0; i < 1024; i++) {
    fout.write(123);
  }
  fout.hflush();
  long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();

  // Now abandon the last block
  DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
  LocatedBlocks blocks =
    dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  int originalNumBlocks = blocks.locatedBlockCount();
  LocatedBlock b = blocks.getLastLocatedBlock();
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);
  
  // call abandonBlock again to make sure the operation is idempotent
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);

  // And close the file
  fout.close();

  // Close cluster and check the block has been abandoned after restart
  cluster.restartNameNode();
  blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
      Integer.MAX_VALUE);
  Assert.assertEquals("Blocks " + b + " has not been abandoned.",
      orginalNumBlocks, blocks.locatedBlockCount() + 1);
}
 
Developer: naver | Project: hadoop | Lines: 37 | Source: TestAbandonBlock.java

Example 6: waitForRecoveryFinished

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
private LocatedBlocks waitForRecoveryFinished(Path file, int groupSize) 
    throws Exception {
  final int ATTEMPTS = 60;
  for (int i = 0; i < ATTEMPTS; i++) {
    LocatedBlocks locatedBlocks = getLocatedBlocks(file);
    LocatedStripedBlock lastBlock = 
        (LocatedStripedBlock)locatedBlocks.getLastLocatedBlock();
    DatanodeInfo[] storageInfos = lastBlock.getLocations();
    if (storageInfos.length >= groupSize) {
      return locatedBlocks;
    }
    Thread.sleep(1000);
  }
  throw new IOException("Timed out waiting for EC block recovery.");
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 16 | Source: TestRecoverStripedFile.java

Example 7: corruptBlocks

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
private void corruptBlocks(Path srcPath, int dataBlkDelNum,
    int parityBlkDelNum, boolean deleteBlockFile) throws IOException {
  int recoverBlkNum = dataBlkDelNum + parityBlkDelNum;

  LocatedBlocks locatedBlocks = getLocatedBlocks(srcPath);
  LocatedStripedBlock lastBlock =
      (LocatedStripedBlock)locatedBlocks.getLastLocatedBlock();

  int[] delDataBlkIndices = StripedFileTestUtil.randomArray(0, dataBlocks,
      dataBlkDelNum);
  Assert.assertNotNull(delDataBlkIndices);
  int[] delParityBlkIndices = StripedFileTestUtil.randomArray(dataBlocks,
      dataBlocks + parityBlocks, parityBlkDelNum);
  Assert.assertNotNull(delParityBlkIndices);

  int[] delBlkIndices = new int[recoverBlkNum];
  System.arraycopy(delDataBlkIndices, 0,
      delBlkIndices, 0, delDataBlkIndices.length);
  System.arraycopy(delParityBlkIndices, 0,
      delBlkIndices, delDataBlkIndices.length, delParityBlkIndices.length);

  ExtendedBlock[] delBlocks = new ExtendedBlock[recoverBlkNum];
  for (int i = 0; i < recoverBlkNum; i++) {
    delBlocks[i] = StripedBlockUtil
        .constructInternalBlock(lastBlock.getBlock(),
            cellSize, dataBlocks, delBlkIndices[i]);
    if (deleteBlockFile) {
      // delete the block file
      cluster.corruptBlockOnDataNodesByDeletingBlockFile(delBlocks[i]);
    } else {
      // corrupt the block file
      cluster.corruptBlockOnDataNodes(delBlocks[i]);
    }
  }
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 36 | Source: TestReadStripedFileWithDecoding.java


Note: The org.apache.hadoop.hdfs.protocol.LocatedBlocks.getLastLocatedBlock examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright remains with the original authors, and distribution and use should follow each project's License. Do not republish without permission.