

Java INodeFile.getBlocks Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.INodeFile.getBlocks, gathered from open-source projects. If you are wondering what INodeFile.getBlocks does, how to call it, or what real-world usages look like, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.INodeFile.


The following presents 15 code examples of INodeFile.getBlocks, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
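Before working through the examples, here is a minimal sketch of the typical call pattern: resolve a path to its INodeFile through the NameNode's FSDirectory, then iterate over the array returned by getBlocks(). Note that the element type of that array varies across Hadoop versions (BlockInfo in most branches, BlockInfoContiguous around 2.7), which is why both spellings appear in the examples below. The totalBytes helper and the fsdir handle are illustrative assumptions, not HDFS APIs; in a test they would come from something like a MiniDFSCluster, as the examples show.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

// Hypothetical helper: sum the bytes of all blocks referenced by a file.
// fsdir is assumed to come from a running NameNode, e.g.
// cluster.getNamesystem().getFSDirectory() in a MiniDFSCluster test.
static long totalBytes(FSDirectory fsdir, String path) throws IOException {
  INodeFile file = INodeFile.valueOf(fsdir.getINode(path), path);
  BlockInfo[] blocks = file.getBlocks();
  if (blocks == null) {
    // getBlocks() may return null; see collectBlocksBeyondMax in
    // Example 15, which guards against this case as well.
    return 0;
  }
  long total = 0;
  for (BlockInfo b : blocks) {
    total += b.getNumBytes();
  }
  return total;
}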

Example 1: checkRaidProgress

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Count the number of live replicas of each parity block in the raided file.
 * If any stripe does not have enough parity block replicas, add the stripe
 * to raidEncodingTasks to schedule encoding.
 * If forceAdd is true, always add the stripe to raidEncodingTasks without
 * checking.
 * @param sourceINode the raided source file
 * @param raidEncodingTasks the task set that stripes needing encoding are added to
 * @param fs the namesystem used to count live replicas
 * @param forceAdd if true, add every stripe without checking replica counts
 * @return true if all parity blocks of the file have enough replicas
 * @throws IOException
 */
public boolean checkRaidProgress(INodeFile sourceINode, 
    LightWeightLinkedSet<RaidBlockInfo> raidEncodingTasks, FSNamesystem fs,
    boolean forceAdd) throws IOException {
  boolean result = true;
  BlockInfo[] blocks = sourceINode.getBlocks();
  for (int i = 0; i < blocks.length; i += numStripeBlocks) {
    boolean hasParity = true;
    if (!forceAdd) {
      for (int j = 0; j < numParityBlocks; j++) {
        if (fs.countLiveNodes(blocks[i + j]) < this.parityReplication) {
          hasParity = false;
          break;
        }
      }
    }
    if (!hasParity || forceAdd) {
      raidEncodingTasks.add(new RaidBlockInfo(blocks[i], parityReplication, i));
      result = false; 
    }
  }
  return result;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 37, Source: RaidCodec.java

Example 2: checkNamenodeBeforeReturn

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory()
      .getINode4Write(file).asFile();
  BlockInfoContiguousUnderConstruction blkUC =
      (BlockInfoContiguousUnderConstruction) (fileNode.getBlocks())[1];
  int datanodeNum = blkUC.getExpectedStorageLocations().length;
  for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) {
    Thread.sleep(1000);
    datanodeNum = blkUC.getExpectedStorageLocations().length;
  }
  return datanodeNum == 2;
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: TestRetryCacheWithHA.java

Example 3: assertBlockCollection

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for(BlockInfoContiguous b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: TestSnapshotBlocksMap.java

Example 4: testDeletionWithZeroSizeBlock

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Make sure we delete the 0-sized block when deleting an INodeFileUCWithSnapshot
 */
@Test
public void testDeletionWithZeroSizeBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(bar, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
      bar.getName());
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: TestSnapshotBlocksMap.java

Example 5: testDeletionWithZeroSizeBlock2

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Make sure we delete the 0-sized block when deleting an under-construction file
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(subDir, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestSnapshotBlocksMap.java

Example 6: testDeletionWithZeroSizeBlock3

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * 1. Rename an under-construction file with a 0-sized block after taking
 *    a snapshot.
 * 2. Delete the renamed directory.
 * Make sure we delete the 0-sized block. See HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  // rename bar
  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);
  
  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // delete subDir
  hdfs.delete(subDir, true);
  
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: TestSnapshotBlocksMap.java

Example 7: testDeletionOfLaterBlocksWithZeroSizeFirstBlock

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Make sure that deleting a non-zero-length file works when an earlier
 * snapshot holds a zero-length copy of it.
 */
@Test
public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final byte[] testData = "foo bar baz".getBytes();
  
  // Create a zero-length file.
  DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
  assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);

  // Create a snapshot that includes that file.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  
  // Extend that file.
  FSDataOutputStream out = hdfs.append(bar);
  out.write(testData);
  out.close();
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(testData.length, blks[0].getNumBytes());
  
  // Delete the file.
  hdfs.delete(bar, true);
  
  // Now make sure that the NN can still save an fsimage successfully.
  cluster.getNameNode().getRpcServer().setSafeMode(
      SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNode().getRpcServer().saveNamespace();
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestSnapshotBlocksMap.java

Example 8: checkNamenodeBeforeReturn

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory()
      .getINode4Write(file).asFile();
  BlockInfo blkUC = (fileNode.getBlocks())[1];
  int datanodeNum = blkUC.getUnderConstructionFeature()
      .getExpectedStorageLocations().length;
  for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) {
    Thread.sleep(1000);
    datanodeNum = blkUC.getUnderConstructionFeature()
        .getExpectedStorageLocations().length;
  }
  return datanodeNum == 2;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 15, Source: TestRetryCacheWithHA.java

Example 9: assertBlockCollection

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for(BlockInfo b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 10, Source: TestSnapshotBlocksMap.java

Example 10: testDeletionWithZeroSizeBlock

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Make sure we delete the 0-sized block when deleting an INodeFileUCWithSnapshot
 */
@Test
public void testDeletionWithZeroSizeBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfo[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(bar, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
      bar.getName());
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 38, Source: TestSnapshotBlocksMap.java

Example 11: testDeletionWithZeroSizeBlock2

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Make sure we delete the 0-sized block when deleting an under-construction file
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfo[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(subDir, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 36, Source: TestSnapshotBlocksMap.java

Example 12: testDeletionWithZeroSizeBlock3

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * 1. Rename an under-construction file with a 0-sized block after taking
 *    a snapshot.
 * 2. Delete the renamed directory.
 * Make sure we delete the 0-sized block. See HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfo[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  // rename bar
  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);
  
  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // delete subDir
  hdfs.delete(subDir, true);
  
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 45, Source: TestSnapshotBlocksMap.java

Example 13: testDeletionOfLaterBlocksWithZeroSizeFirstBlock

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Make sure that deleting a non-zero-length file works when an earlier
 * snapshot holds a zero-length copy of it.
 */
@Test
public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final byte[] testData = "foo bar baz".getBytes();
  
  // Create a zero-length file.
  DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
  assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);

  // Create a snapshot that includes that file.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  
  // Extend that file.
  FSDataOutputStream out = hdfs.append(bar);
  out.write(testData);
  out.close();
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfo[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(testData.length, blks[0].getNumBytes());
  
  // Delete the file.
  hdfs.delete(bar, true);
  
  // Now make sure that the NN can still save an fsimage successfully.
  cluster.getNameNode().getRpcServer().setSafeMode(
      SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNode().getRpcServer().saveNamespace(0, 0);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 35, Source: TestSnapshotBlocksMap.java

Example 14: testReplicationAfterDeletion

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Test replication for a file with snapshots, also including the scenario
 * where the original file is deleted
 */
@Test (timeout=60000)
public void testReplicationAfterDeletion() throws Exception {
  // Create file1, set its replication to 3
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  Map<Path, Short> snapshotRepMap = new HashMap<Path, Short>();
  // Take 3 snapshots of sub1
  for (int i = 1; i <= 3; i++) {
    Path root = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
    Path ssFile = new Path(root, file1.getName());
    snapshotRepMap.put(ssFile, REPLICATION);
  }
  // Check replication
  checkFileReplication(file1, REPLICATION, REPLICATION);
  checkSnapshotFileReplication(file1, snapshotRepMap, REPLICATION);
  
  // Delete file1
  hdfs.delete(file1, true);
  // Check replication of snapshots
  for (Path ss : snapshotRepMap.keySet()) {
    final INodeFile ssInode = getINodeFile(ss);
    // The replication number derived from
    // INodeFileWithLink#getPreferredBlockReplication should
    // always equal REPLICATION
    for (BlockInfo b : ssInode.getBlocks()) {
      assertEquals(REPLICATION, b.getReplication());
    }

    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication());
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 37, Source: TestSnapshotReplication.java

Example 15: collectBlocksBeyondMax

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
private void collectBlocksBeyondMax(final INodeFile file, final long max,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] oldBlocks = file.getBlocks();
  if (oldBlocks != null) {
    //find the minimum n such that the size of the first n blocks > max
    int n = 0;
    for(long size = 0; n < oldBlocks.length && max > size; n++) {
      size += oldBlocks[n].getNumBytes();
    }
    
    // starting from block n, the data is beyond max.
    if (n < oldBlocks.length) {
      // resize the array.  
      final BlockInfo[] newBlocks;
      if (n == 0) {
        newBlocks = BlockInfo.EMPTY_ARRAY;
      } else {
        newBlocks = new BlockInfo[n];
        System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
      }
      
      // set new blocks
      file.setBlocks(newBlocks);

      // collect the blocks beyond max.  
      if (collectedBlocks != null) {
        for(; n < oldBlocks.length; n++) {
          collectedBlocks.addDeleteBlock(oldBlocks[n]);
        }
      }
    }
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 34, Source: FileWithSnapshotFeature.java


Note: The org.apache.hadoop.hdfs.server.namenode.INodeFile.getBlocks examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For redistribution and use, please follow the license of the corresponding project; do not republish without permission.