

Java DataNodeTestUtils.triggerBlockReport Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.triggerBlockReport. If you are wondering what DataNodeTestUtils.triggerBlockReport does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.


Below are 11 code examples of the DataNodeTestUtils.triggerBlockReport method, sorted by popularity by default.
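Before the numbered examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical pattern: start a MiniDFSCluster, write a small file, and call DataNodeTestUtils.triggerBlockReport so each DataNode reports its blocks to the NameNode immediately instead of waiting for the next scheduled report. The class name TriggerBlockReportSketch and the file path are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

public class TriggerBlockReportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Create a small file so the DataNode has at least one replica to report.
      Path path = new Path("/trigger-block-report.dat");
      DFSTestUtil.createFile(fs, path, 1024, (short) 1, 0xBEEFL);

      // Ask every DataNode to send a block report to the NameNode right away,
      // instead of waiting for the configured block report interval.
      for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.triggerBlockReport(dn);
      }
    } finally {
      cluster.shutdown();
    }
  }
}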

Example 1: testReleaseOnFileDeletion

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
@Test
public void testReleaseOnFileDeletion()
    throws IOException, TimeoutException, InterruptedException {
  getClusterBuilder().setNumDatanodes(1)
                     .setMaxLockedMemory(BLOCK_SIZE).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();

  Path path = new Path("/" + METHOD_NAME + ".dat");
  makeTestFile(path, BLOCK_SIZE, true);
  ensureFileReplicasOnStorageType(path, RAM_DISK);
  assertThat(fsd.getCacheUsed(), is((long) BLOCK_SIZE));

  // Delete the file and ensure that the locked memory is released.
  fs.delete(path, false);
  DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
  waitForLockedBytesUsed(fsd, 0);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source: TestLazyPersistLockedMemory.java

Example 2: verify

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * Verify block locations after running the migration tool.
 */
void verify(boolean verifyAll) throws Exception {
  for (DataNode dn : cluster.getDataNodes()) {
    DataNodeTestUtils.triggerBlockReport(dn);
  }
  if (verifyAll) {
    verifyNamespace();
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: TestStorageMover.java

Example 3: testWritePipelineFailure

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * Verify that locked bytes are correctly updated when the client goes
 * away unexpectedly during a write.
 */
@Test
public void testWritePipelineFailure()
  throws IOException, TimeoutException, InterruptedException {
  getClusterBuilder().setNumDatanodes(1).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();

  Path path = new Path("/" + METHOD_NAME + ".dat");

  EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE, LAZY_PERSIST);
  // Write 1 byte to the file and kill the writer.
  final FSDataOutputStream fos =
      fs.create(path,
                FsPermission.getFileDefault(),
                createFlags,
                BUFFER_LENGTH,
                REPL_FACTOR,
                BLOCK_SIZE,
                null);

  fos.write(new byte[1]);
  fos.hsync();
  DFSTestUtil.abortStream((DFSOutputStream) fos.getWrappedStream());
  waitForLockedBytesUsed(fsd, osPageSize);

  // Delete the file and ensure locked RAM goes to zero.
  fs.delete(path, false);
  DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
  waitForLockedBytesUsed(fsd, 0);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 35, Source: TestLazyPersistLockedMemory.java

Example 4: triggerBlockReports

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
public void triggerBlockReports()
    throws IOException {
  for (DataNode dn : getDataNodes()) {
    DataNodeTestUtils.triggerBlockReport(dn);
  }
}
 
Developer: naver, Project: hadoop, Lines: 7, Source: MiniDFSCluster.java

Example 5: testDecommissionStatus

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * Tests Decommissioning Status in DFS.
 */
@Test
public void testDecommissionStatus() throws Exception {
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster
      .getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", 2, info.length);
  DistributedFileSystem fileSys = cluster.getFileSystem();
  DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));

  short replicas = numDatanodes;
  //
  // Decommission one node. Verify the decommission status
  // 
  Path file1 = new Path("decommission.dat");
  writeFile(fileSys, file1, replicas);

  Path file2 = new Path("decommission1.dat");
  FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
  for (DataNode d: cluster.getDataNodes()) {
    DataNodeTestUtils.triggerBlockReport(d);
  }

  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  for (int iteration = 0; iteration < numDatanodes; iteration++) {
    String downnode = decommissionNode(fsn, client, localFileSys, iteration);
    dm.refreshNodes(conf);
    decommissionedNodes.add(downnode);
    BlockManagerTestUtil.recheckDecommissionState(dm);
    final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
    if (iteration == 0) {
      assertEquals(decommissioningNodes.size(), 1);
      DatanodeDescriptor decommNode = decommissioningNodes.get(0);
      checkDecommissionStatus(decommNode, 3, 0, 1);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1),
          fileSys, admin);
    } else {
      assertEquals(decommissioningNodes.size(), 2);
      DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
      DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
      // This one is still 3,3,1 since it passed over the UC block 
      // earlier, before node 2 was decommed
      checkDecommissionStatus(decommNode1, 3, 3, 1);
      // This one is 4,4,2 since it has the full state
      checkDecommissionStatus(decommNode2, 4, 4, 2);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2),
          fileSys, admin);
    }
  }
  // Call refreshNodes on FSNamesystem with empty exclude file.
  // This will remove the datanodes from decommissioning list and
  // make them available again.
  writeConfigFile(localFileSys, excludeFile, null);
  dm.refreshNodes(conf);
  st1.close();
  cleanupFile(fileSys, file1);
  cleanupFile(fileSys, file2);
}
 
Developer: naver, Project: hadoop, Lines: 63, Source: TestDecommissioningStatus.java

Example 6: runTest

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
private static void runTest(final String testCaseName,
                            final boolean createFiles,
                            final int numInitialStorages,
                            final int expectedStoragesAfterTest) throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster
        .Builder(conf)
        .numDataNodes(1)
        .storagesPerDatanode(numInitialStorages)
        .build();
    cluster.waitActive();

    final DataNode dn0 = cluster.getDataNodes().get(0);

    // Ensure NN knows about the storage.
    final DatanodeID dnId = dn0.getDatanodeId();
    final DatanodeDescriptor dnDescriptor =
        cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dnId);
    assertThat(dnDescriptor.getStorageInfos().length, is(numInitialStorages));

    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final DatanodeRegistration dnReg = dn0.getDNRegistrationForBP(bpid);
    DataNodeTestUtils.triggerBlockReport(dn0);

    if (createFiles) {
      final Path path = new Path("/", testCaseName);
      DFSTestUtil.createFile(
          cluster.getFileSystem(), path, 1024, (short) 1, 0x1BAD5EED);
      DataNodeTestUtils.triggerBlockReport(dn0);
    }

    // Generate a fake StorageReport that is missing one storage.
    final StorageReport reports[] =
        dn0.getFSDataset().getStorageReports(bpid);
    final StorageReport prunedReports[] = new StorageReport[numInitialStorages - 1];
    System.arraycopy(reports, 0, prunedReports, 0, prunedReports.length);

    // Stop the DataNode and send fake heartbeat with missing storage.
    cluster.stopDataNode(0);
    cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0,
        0, null);

    // Check that the missing storage was pruned.
    assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 54, Source: TestNameNodePrunesMissingStorages.java

Example 7: testPendingDeleteUnknownBlocks

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * Test whether we can delay the deletion of unknown blocks in DataNode's
 * first several block reports.
 */
@Test
public void testPendingDeleteUnknownBlocks() throws Exception {
  final int fileNum = 5; // 5 files
  final Path[] files = new Path[fileNum];
  final DataNodeProperties[] dnprops = new DataNodeProperties[REPLICATION];
  // create a group of files, each file contains 1 block
  for (int i = 0; i < fileNum; i++) {
    files[i] = new Path("/file" + i);
    DFSTestUtil.createFile(dfs, files[i], BLOCKSIZE, REPLICATION, i);
  }
  // wait until all DataNodes have replicas
  waitForReplication();
  for (int i = REPLICATION - 1; i >= 0; i--) {
    dnprops[i] = cluster.stopDataNode(i);
  }
  Thread.sleep(2000);
  // delete 2 files, we still have 3 files remaining so that we can cover
  // every DN storage
  for (int i = 0; i < 2; i++) {
    dfs.delete(files[i], true);
  }

  // restart NameNode
  cluster.restartNameNode(false);
  InvalidateBlocks invalidateBlocks = (InvalidateBlocks) Whitebox
      .getInternalState(cluster.getNamesystem().getBlockManager(),
          "invalidateBlocks");
  InvalidateBlocks mockIb = Mockito.spy(invalidateBlocks);
  Mockito.doReturn(1L).when(mockIb).getInvalidationDelay();
  Whitebox.setInternalState(cluster.getNamesystem().getBlockManager(),
      "invalidateBlocks", mockIb);

  Assert.assertEquals(0L, cluster.getNamesystem().getPendingDeletionBlocks());
  // restart DataNodes
  for (int i = 0; i < REPLICATION; i++) {
    cluster.restartDataNode(dnprops[i], true);
  }
  cluster.waitActive();

  for (int i = 0; i < REPLICATION; i++) {
    DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(i));
  }
  Thread.sleep(2000);
  // make sure we have received block reports by checking the total block #
  Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
  Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());

  cluster.restartNameNode(true);
  Thread.sleep(6000);
  Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
  Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
}
 
Developer: naver, Project: hadoop, Lines: 57, Source: TestPendingInvalidateBlock.java

Example 8: triggerBlockReport

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
protected final void triggerBlockReport()
    throws IOException, InterruptedException {
  // Trigger block report to NN
  DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
  Thread.sleep(10 * 1000);
}
 
Developer: naver, Project: hadoop, Lines: 7, Source: LazyPersistTestCase.java
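Example 8 pairs the triggered block report with a fixed Thread.sleep(10 * 1000) to give the NameNode time to process it. As a sketch only (not part of the original LazyPersistTestCase, and assuming the same cluster field is in scope), the fixed sleep can be replaced by a polling wait via GenericTestUtils.waitFor; the expected block count parameter is illustrative.

import java.io.IOException;
import java.util.concurrent.TimeoutException;

import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.test.GenericTestUtils;

// Hypothetical variant: trigger the report, then poll the NameNode state
// instead of sleeping for a fixed ten seconds.
protected final void triggerBlockReportAndWait(final long expectedTotalBlocks)
    throws IOException, TimeoutException, InterruptedException {
  DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      // Done once the NameNode's total block count matches what we expect.
      return cluster.getNamesystem().getBlocksTotal() == expectedTotalBlocks;
    }
  }, 100, 10 * 1000);
}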

Example 9: testDecommissionStatus

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * Tests Decommissioning Status in DFS.
 */
@Test
public void testDecommissionStatus() throws Exception {
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster
      .getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", 2, info.length);
  DistributedFileSystem fileSys = cluster.getFileSystem();
  DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));

  short replicas = numDatanodes;
  //
  // Decommission one node. Verify the decommission status
  // 
  Path file1 = new Path("decommission.dat");
  DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
      replicas, seed);

  Path file2 = new Path("decommission1.dat");
  FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
  for (DataNode d: cluster.getDataNodes()) {
    DataNodeTestUtils.triggerBlockReport(d);
  }

  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  for (int iteration = 0; iteration < numDatanodes; iteration++) {
    String downnode = decommissionNode(fsn, client, localFileSys, iteration);
    dm.refreshNodes(conf);
    decommissionedNodes.add(downnode);
    BlockManagerTestUtil.recheckDecommissionState(dm);
    final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
    if (iteration == 0) {
      assertEquals(decommissioningNodes.size(), 1);
      DatanodeDescriptor decommNode = decommissioningNodes.get(0);
      checkDecommissionStatus(decommNode, 3, 0, 1);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1),
          fileSys, admin);
    } else {
      assertEquals(decommissioningNodes.size(), 2);
      DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
      DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
      // This one is still 3,3,1 since it passed over the UC block 
      // earlier, before node 2 was decommed
      checkDecommissionStatus(decommNode1, 3, 3, 1);
      // This one is 4,4,2 since it has the full state
      checkDecommissionStatus(decommNode2, 4, 4, 2);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2),
          fileSys, admin);
    }
  }
  // Call refreshNodes on FSNamesystem with empty exclude file.
  // This will remove the datanodes from decommissioning list and
  // make them available again.
  writeConfigFile(localFileSys, excludeFile, null);
  dm.refreshNodes(conf);
  st1.close();
  cleanupFile(fileSys, file1);
  cleanupFile(fileSys, file2);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 64, Source: TestDecommissioningStatus.java

Example 10: runTest

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
private static void runTest(final String testCaseName,
                            final boolean createFiles,
                            final int numInitialStorages,
                            final int expectedStoragesAfterTest) throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster
        .Builder(conf)
        .numDataNodes(1)
        .storagesPerDatanode(numInitialStorages)
        .build();
    cluster.waitActive();

    final DataNode dn0 = cluster.getDataNodes().get(0);

    // Ensure NN knows about the storage.
    final DatanodeID dnId = dn0.getDatanodeId();
    final DatanodeDescriptor dnDescriptor =
        cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dnId);
    assertThat(dnDescriptor.getStorageInfos().length, is(numInitialStorages));

    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final DatanodeRegistration dnReg = dn0.getDNRegistrationForBP(bpid);
    DataNodeTestUtils.triggerBlockReport(dn0);

    if (createFiles) {
      final Path path = new Path("/", testCaseName);
      DFSTestUtil.createFile(
          cluster.getFileSystem(), path, 1024, (short) 1, 0x1BAD5EED);
      DataNodeTestUtils.triggerBlockReport(dn0);
    }

    // Generate a fake StorageReport that is missing one storage.
    final StorageReport reports[] =
        dn0.getFSDataset().getStorageReports(bpid);
    final StorageReport prunedReports[] = new StorageReport[numInitialStorages - 1];
    System.arraycopy(reports, 0, prunedReports, 0, prunedReports.length);

    // Stop the DataNode and send fake heartbeat with missing storage.
    cluster.stopDataNode(0);
    cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0,
        0, null, true);

    // Check that the missing storage was pruned.
    assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 54, Source: TestNameNodePrunesMissingStorages.java

Example 11: triggerBlockReports

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
public void triggerBlockReports() throws IOException {
  for (DataNode dn : getDataNodes()) {
    DataNodeTestUtils.triggerBlockReport(dn);
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 6, Source: MiniDFSCluster.java


Note: The org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.triggerBlockReport examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; when distributing or reusing the code, please follow the license of the corresponding project. Do not reproduce this article without permission.