当前位置: 首页>>代码示例>>Java>>正文


Java TestInterDatanodeProtocol类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol的典型用法代码示例。如果您正苦于以下问题:Java TestInterDatanodeProtocol类的具体用法?Java TestInterDatanodeProtocol怎么用?Java TestInterDatanodeProtocol使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


TestInterDatanodeProtocol类属于org.apache.hadoop.hdfs.server.datanode.fsdataset.impl包,在下文中一共展示了TestInterDatanodeProtocol类的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: checkMetaInfo

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol; //导入依赖的package包/类
/**
 * Verifies the stored meta information of the given block on the given
 * datanode by delegating to {@link TestInterDatanodeProtocol#checkMetaInfo}.
 *
 * @param block the block whose on-datanode meta info is checked
 * @param datanode the datanode expected to hold a replica of the block
 * @throws IOException if the meta info cannot be read or does not match
 */
static void checkMetaInfo(ExtendedBlock block, DataNode datanode)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(block, datanode);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:5,代码来源:TestLeaseRecovery.java

示例2: testBlockSynchronization

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol; //导入依赖的package包/类
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored block.
 *
 * <p>After recovery, the test asserts that every replica of the last block
 * agrees on block id, length and generation stamp, and that lease recovery
 * does not release the lease while the namenode is in safemode.
 */
@Test
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000; 
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // 5 datanodes so the file can be replicated REPLICATION_NUM times.
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  cluster.waitActive();

  // Create a multi-block file and wait until it is fully replicated.
  DistributedFileSystem dfs = cluster.getFileSystem();
  String filestr = "/foo";
  Path filepath = new Path(filestr);
  DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
  assertTrue(dfs.exists(filepath));
  DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

  // Get the location info of the file's last block from the namenode.
  LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr);
  DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
  assertEquals(REPLICATION_NUM, datanodeinfos.length);

  // Resolve each replica location to its in-process DataNode instance.
  DataNode[] datanodes = new DataNode[REPLICATION_NUM];
  for(int i = 0; i < REPLICATION_NUM; i++) {
    datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
    assertTrue(datanodes[i] != null);
  }

  // Verify the stored meta info of the last block on every datanode.
  ExtendedBlock lastblock = locatedblock.getBlock();
  DataNode.LOG.info("newblocks=" + lastblock);
  for(int i = 0; i < REPLICATION_NUM; i++) {
    checkMetaInfo(lastblock, datanodes[i]);
  }

  // Re-open the file for append so this client holds the lease on it.
  DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
  cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName,
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));

  // expire lease to trigger block recovery.
  waitLeaseRecovery(cluster);

  // After recovery, every replica must report the same block id, the
  // pre-recovery length, and the current (post-recovery) generation stamp.
  Block[] updatedmetainfo = new Block[REPLICATION_NUM];
  long oldSize = lastblock.getNumBytes();
  lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr).getBlock();
  long currentGS = lastblock.getGenerationStamp();
  for(int i = 0; i < REPLICATION_NUM; i++) {
    updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
        lastblock.getBlockPoolId(), lastblock.getBlockId());
    assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
    assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
    assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
  }

  // verify that lease recovery does not occur when namenode is in safemode
  System.out.println("Testing that lease recovery cannot happen during safemode.");
  filestr = "/foo.safemode";
  filepath = new Path(filestr);
  dfs.create(filepath, (short)1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
  assertTrue(dfs.dfs.exists(filestr));
  DFSTestUtil.waitReplication(dfs, filepath, (short)1);
  waitLeaseRecovery(cluster);
  // verify that we still cannot recover the lease
  // (the open file's lease must still be tracked by the lease manager)
  LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
  assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:78,代码来源:TestLeaseRecovery.java

示例3: testBlockSynchronization

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol; //导入依赖的package包/类
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored block.
 *
 * <p>After recovery, the test asserts that every replica of the last block
 * agrees on block id, length and generation stamp, and that lease recovery
 * does not release the lease while the namenode is in safemode.
 */
@Test
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000; 
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // 5 datanodes so the file can be replicated REPLICATION_NUM times.
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  cluster.waitActive();

  // Create a multi-block file and wait until it is fully replicated.
  DistributedFileSystem dfs = cluster.getFileSystem();
  String filestr = "/foo";
  Path filepath = new Path(filestr);
  DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
  assertTrue(dfs.exists(filepath));
  DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

  // Get the location info of the file's last block from the namenode.
  LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr);
  DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
  assertEquals(REPLICATION_NUM, datanodeinfos.length);

  // Resolve each replica location to its in-process DataNode instance.
  DataNode[] datanodes = new DataNode[REPLICATION_NUM];
  for(int i = 0; i < REPLICATION_NUM; i++) {
    datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
    assertTrue(datanodes[i] != null);
  }

  // Verify the stored meta info of the last block on every datanode.
  ExtendedBlock lastblock = locatedblock.getBlock();
  DataNode.LOG.info("newblocks=" + lastblock);
  for(int i = 0; i < REPLICATION_NUM; i++) {
    checkMetaInfo(lastblock, datanodes[i]);
  }

  // Re-open the file for append so this client holds the lease on it.
  DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
  cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);

  // expire lease to trigger block recovery.
  waitLeaseRecovery(cluster);

  // After recovery, every replica must report the same block id, the
  // pre-recovery length, and the current (post-recovery) generation stamp.
  Block[] updatedmetainfo = new Block[REPLICATION_NUM];
  long oldSize = lastblock.getNumBytes();
  lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr).getBlock();
  long currentGS = lastblock.getGenerationStamp();
  for(int i = 0; i < REPLICATION_NUM; i++) {
    updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
        lastblock.getBlockPoolId(), lastblock.getBlockId());
    assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
    assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
    assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
  }

  // verify that lease recovery does not occur when namenode is in safemode
  System.out.println("Testing that lease recovery cannot happen during safemode.");
  filestr = "/foo.safemode";
  filepath = new Path(filestr);
  dfs.create(filepath, (short)1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
  assertTrue(dfs.dfs.exists(filestr));
  DFSTestUtil.waitReplication(dfs, filepath, (short)1);
  waitLeaseRecovery(cluster);
  // verify that we still cannot recover the lease
  // (the open file's lease must still be tracked by the lease manager)
  LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
  assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:77,代码来源:TestLeaseRecovery.java

示例4: checkMetaInfo

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol; //导入依赖的package包/类
/**
 * Checks the meta information stored for {@code extBlock} on {@code node},
 * delegating the actual verification to
 * {@link TestInterDatanodeProtocol#checkMetaInfo}.
 *
 * @param extBlock the block whose stored meta info is verified
 * @param node the datanode expected to hold a replica of the block
 * @throws IOException if the meta info cannot be read or does not match
 */
static void checkMetaInfo(ExtendedBlock extBlock, DataNode node)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(extBlock, node);
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:4,代码来源:TestLeaseRecovery.java


注:本文中的org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。