

Java DFSTestUtil.readFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.readFile. If you are wondering what DFSTestUtil.readFile does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


The sections below present 15 code examples of the DFSTestUtil.readFile method, sorted by popularity by default.
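
Before diving in, here is a minimal sketch of how the method is typically called. Judging from the examples collected below, DFSTestUtil.readFile has at least two overloads: one reads an HDFS file through a FileSystem and a Path, the other reads a local java.io.File; both return the full contents as a String. The MiniDFSCluster scaffolding and file names here are illustrative assumptions, not taken from any single example.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Illustrative sketch; the cluster setup and paths are assumptions.
public class ReadFileSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      Path path = new Path("/demo.txt");
      // Create a 100-byte file with replication 1 and a fixed seed.
      DFSTestUtil.createFile(fs, path, 100, (short) 1, 0L);

      // Overload 1: read an HDFS file via a FileSystem; returns its contents.
      String contents = DFSTestUtil.readFile(fs, path);
      System.out.println("read " + contents.length() + " characters");

      // Overload 2: read a local file directly (see Examples 4 and 11);
      // the file must already exist on the local filesystem.
      File local = new File("/tmp/local.txt");
      if (local.exists()) {
        System.out.println(DFSTestUtil.readFile(local));
      }
    } finally {
      cluster.shutdown();
    }
  }
}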

Example 1: testReadSnapshotFileWithCheckpoint

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  assertTrue(hdfs.delete(bar, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);

  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
 
Author: naver | Project: hadoop | Lines: 22 | Source: TestSnapshotBlocksMap.java
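
The enterSafeMode / saveNamespace / leaveSafeMode / restartNameNode sequence above reappears in most of the snapshot examples on this page. Below is a minimal sketch of that idiom factored into a helper; the helper class and method name are hypothetical, while the individual calls are taken verbatim from the test above.

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

// Hypothetical helper; not part of the Hadoop test code.
class CheckpointUtil {
  // Force a checkpoint, then restart the NameNode so that later reads are
  // served from state loaded out of the freshly written fsimage.
  static void checkpointAndRestart(MiniDFSCluster cluster) throws Exception {
    NameNode nameNode = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nameNode, false); // block namespace edits
    NameNodeAdapter.saveNamespace(nameNode);        // write a new fsimage
    NameNodeAdapter.leaveSafeMode(nameNode);
    cluster.restartNameNode(true);                  // reload from fsimage
  }
}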

Example 2: testWithCheckpoint

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test
public void testWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  
  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
 
Author: naver | Project: hadoop | Lines: 20 | Source: TestOpenFilesWithSnapshot.java

Example 3: testFilesDeletionWithCheckpoint

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test
public void testFilesDeletionWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test/test2"), true);
  fs.delete(new Path("/test/test/test3"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  
  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
 
Author: naver | Project: hadoop | Lines: 21 | Source: TestOpenFilesWithSnapshot.java

Example 4: testOverwriteFile

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * Test case where the destination file already exists: the old contents
 * remain visible until close(), then are atomically replaced.
 */
@Test
public void testOverwriteFile() throws IOException {
  assertTrue("Creating empty dst file", DST_FILE.createNewFile());
  
  OutputStream fos = new AtomicFileOutputStream(DST_FILE);
  
  assertTrue("Empty file still exists", DST_FILE.exists());
  fos.write(TEST_STRING.getBytes());
  fos.flush();
  
  // Original contents still in place
  assertEquals("", DFSTestUtil.readFile(DST_FILE));

  fos.close();

  // New contents replace original file
  String readBackData = DFSTestUtil.readFile(DST_FILE);
  assertEquals(TEST_STRING, readBackData);
}
 
Author: naver | Project: hadoop | Lines: 23 | Source: TestAtomicFileOutputStream.java
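
Because AtomicFileOutputStream is a java.io.OutputStream, the overwrite above can also be expressed with try-with-resources; the test keeps the explicit close() so it can assert on the intermediate state. A hedged sketch of the production-style variant follows (assuming the org.apache.hadoop.hdfs.util package location):

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;

// Hypothetical sketch; not part of the Hadoop test code.
class AtomicWriteSketch {
  // Replace dst atomically: readers opening dst before close() completes
  // still observe the old contents, as Example 4 demonstrates.
  static void atomicReplace(File dst, String contents) throws IOException {
    try (OutputStream fos = new AtomicFileOutputStream(dst)) {
      fos.write(contents.getBytes(StandardCharsets.UTF_8));
    } // close() commits the new contents
  }
}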

Example 5: testBlockReportsWhileFileBeingWritten

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * Regression test for HDFS-2742. The issue in this bug was:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - Standby queues the RBW state in PendingDatanodeMessages
 * - Standby processes edit logs during failover. Before fixing
 *   this bug, it was mistakenly applying the RBW reported state
 *   after the block had been completed, causing the block to get
 *   marked corrupt. Instead, we should now be applying the RBW
 *   message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
 */
@Test
public void testBlockReportsWhileFileBeingWritten() throws Exception {
  FSDataOutputStream out = fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();
    
    // Block report will include the RBW replica, but will be
    // queued on the StandbyNode.
    cluster.triggerBlockReports();
    
  } finally {
    IOUtils.closeStream(out);
  }

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  
  // Verify that no replicas are marked corrupt, and that the
  // file is readable from the failed-over standby.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
  
  DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
 
Author: naver | Project: hadoop | Lines: 39 | Source: TestDNFencing.java
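
Example 14 below ends with exactly the same post-failover verification as this test. A sketch of that check factored into a helper; the class and method name are hypothetical, while the calls are copied from the tests:

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

// Hypothetical helper; not part of the Hadoop test code.
class FencingChecks {
  // Assert neither NameNode sees corrupt replicas, then read the file.
  static void assertHealthyAndReadable(NameNode nn1, NameNode nn2,
      FileSystem fs, Path path) throws IOException {
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
    assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
    assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
    DFSTestUtil.readFile(fs, path); // throws if any block is unreadable
  }
}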

Example 6: testReadRenamedSnapshotFileWithCheckpoint

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test(timeout = 30000)
public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception {
  final Path foo = new Path("/foo");
  final Path foo2 = new Path("/foo2");
  hdfs.mkdirs(foo);
  hdfs.mkdirs(foo2);

  hdfs.allowSnapshot(foo);
  hdfs.allowSnapshot(foo2);
  final Path bar = new Path(foo, "bar");
  final Path bar2 = new Path(foo2, "bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  // rename to another snapshottable directory and take snapshot
  assertTrue(hdfs.rename(bar, bar2));
  hdfs.createSnapshot(foo2, "s2");
  // delete the original renamed file to make sure blocks are not updated by
  // the original file
  assertTrue(hdfs.delete(bar2, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  // file in first snapshot
  String barSnapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(barSnapshotPath));
  // file in second snapshot after rename+delete
  String bar2SnapshotPath = Snapshot.getSnapshotPath(foo2.toString(),
      "s2/bar");
  DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
}
 
Author: naver | Project: hadoop | Lines: 36 | Source: TestSnapshotBlocksMap.java

Example 7: testDatanodeRollingUpgradeWithRollback

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test (timeout=600000)
public void testDatanodeRollingUpgradeWithRollback() throws Exception {
  try {
    startCluster();

    // Create files in DFS.
    Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
    DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
    String fileContents1 = DFSTestUtil.readFile(fs, testFile1);

    startRollingUpgrade();

    File blockFile = getBlockForFile(testFile1, true);
    File trashFile = getTrashFileForBlock(blockFile, false);
    deleteAndEnsureInTrash(testFile1, blockFile, trashFile);

    // Now perform a rollback to restore DFS to the pre-rollback state.
    rollbackRollingUpgrade();

    // Ensure that block was restored from trash
    ensureTrashRestored(blockFile, trashFile);

    // Ensure that files exist and restored file contents are the same.
    assertTrue(fs.exists(testFile1));
    String fileContents2 = DFSTestUtil.readFile(fs, testFile1);
    assertThat(fileContents1, is(fileContents2));
  } finally {
    shutdownCluster();
  }
}
 
Author: naver | Project: hadoop | Lines: 31 | Source: TestDataNodeRollingUpgrade.java

Example 8: testSendDataPacketMetrics

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test
public void testSendDataPacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Create and read a 1 byte file
    Path tmpfile = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, tmpfile,
        (long)1, (short)1, 1L);
    DFSTestUtil.readFile(fs, tmpfile);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    // Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
    // signaling the end of the block
    assertCounter("SendDataPacketTransferNanosNumOps", (long)2, rb);
    assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long)2, rb);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check that the sendPacket percentiles rolled to non-zero values
    String sec = interval + "s";
    assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
    assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Author: naver | Project: hadoop | Lines: 32 | Source: TestDataNodeMetrics.java

Example 9: testDataNodeTimeSpend

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * Ensures that writing advances the TotalWriteTime metric and that
 * reading advances the TotalReadTime metric.
 * @throws Exception
 */
@Test
public void testDataNodeTimeSpend() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    final long LONG_FILE_LEN = 1024 * 1024 * 10;

    long startWriteValue = getLongCounter("TotalWriteTime", rb);
    long startReadValue = getLongCounter("TotalReadTime", rb);

    for (int x = 0; x < 50; x++) {
      DFSTestUtil.createFile(fs, new Path("/time.txt." + x),
              LONG_FILE_LEN, (short) 1, Time.monotonicNow());
    }

    for (int x = 0; x < 50; x++) {
      DFSTestUtil.readFile(fs, new Path("/time.txt." + x));
    }

    MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
    long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
    long endReadValue = getLongCounter("TotalReadTime", rbNew);

    assertTrue(endReadValue > startReadValue);
    assertTrue(endWriteValue > startWriteValue);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Author: naver | Project: hadoop | Lines: 43 | Source: TestDataNodeMetrics.java
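
Example 9 above follows a snapshot-act-compare pattern: capture the counters, perform I/O, and assert they advanced. Here is a hedged sketch of that idiom as a reusable helper; the helper itself is hypothetical, while getMetrics and getLongCounter are Hadoop's MetricsAsserts test utilities used above.

import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Hypothetical helper; not part of the Hadoop test code.
class MetricsDelta {
  // Snapshot a DataNode counter, run an action, assert the counter grew.
  static void assertCounterIncreases(DataNode dn, String counter,
      Runnable action) {
    long before = getLongCounter(counter, getMetrics(dn.getMetrics().name()));
    action.run();
    long after = getLongCounter(counter, getMetrics(dn.getMetrics().name()));
    assertTrue(counter + " should increase", after > before);
  }
}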

Example 10: testRemoveOneVolume

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test(timeout=60000)
public void testRemoveOneVolume()
    throws ReconfigurationException, InterruptedException, TimeoutException,
    IOException {
  startDFSCluster(1, 1);
  final short replFactor = 1;
  Path testFile = new Path("/test");
  createFile(testFile, 10, replFactor);

  DataNode dn = cluster.getDataNodes().get(0);
  Collection<String> oldDirs = getDataDirs(dn);
  String newDirs = oldDirs.iterator().next();  // Keep the first volume.
  dn.reconfigurePropertyImpl(
      DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
  assertFileLocksReleased(
    new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
  dn.scheduleAllBlockReport(0);

  try {
    DFSTestUtil.readFile(cluster.getFileSystem(), testFile);
    fail("Expect to throw BlockMissingException.");
  } catch (BlockMissingException e) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", e);
  }

  Path newFile = new Path("/newFile");
  createFile(newFile, 6);

  String bpid = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals((int)replFactor, blockReports.size());

  BlockListAsLongs blocksForVolume1 =
      blockReports.get(0).values().iterator().next();
  // The first volume has half of the testFile and full of newFile.
  assertEquals(10 / 2 + 6, blocksForVolume1.getNumberOfBlocks());
}
 
Author: naver | Project: hadoop | Lines: 39 | Source: TestDataNodeHotSwapVolumes.java

Example 11: testWriteNewFile

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * Test case where there is no existing file
 */
@Test
public void testWriteNewFile() throws IOException {
  OutputStream fos = new AtomicFileOutputStream(DST_FILE);
  assertFalse(DST_FILE.exists());
  fos.write(TEST_STRING.getBytes());
  fos.flush();
  assertFalse(DST_FILE.exists());
  fos.close();
  assertTrue(DST_FILE.exists());
  
  String readBackData = DFSTestUtil.readFile(DST_FILE);
  assertEquals(TEST_STRING, readBackData);
}
 
Author: naver | Project: hadoop | Lines: 17 | Source: TestAtomicFileOutputStream.java

Example 12: testCanReadData

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * Verify that a file can be read both from NameNode and BackupNode.
 */
@Test
public void testCanReadData() throws IOException {
  Path file1 = new Path("/fileToRead.dat");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  BackupNode backup = null;
  try {
    // Start NameNode and BackupNode
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(0).format(true).build();
    fileSys = cluster.getFileSystem();
    long txid = cluster.getNameNodeRpc().getTransactionID();
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);
    waitCheckpointDone(cluster, txid);

    // Setup dual NameNode configuration for DataNodes
    String rpcAddrKeyPrefix =
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster";
    String nnAddr = cluster.getNameNode().getNameNodeAddressHostPortString();
    String bnAddr = backup.getNameNodeAddressHostPortString();
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "bnCluster");
    conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "bnCluster");
    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster",
        "nnActive, nnBackup");
    conf.set(rpcAddrKeyPrefix + ".nnActive", nnAddr);
    conf.set(rpcAddrKeyPrefix + ".nnBackup", bnAddr);
    cluster.startDataNodes(conf, 3, true, StartupOption.REGULAR, null);

    DFSTestUtil.createFile(
        fileSys, file1, 8192, (short)3, 0);

    // Read the same file from file systems pointing to NN and BN
    FileSystem bnFS = FileSystem.get(
        new Path("hdfs://" + bnAddr).toUri(), conf);
    String nnData = DFSTestUtil.readFile(fileSys, file1);
    String bnData = DFSTestUtil.readFile(bnFS, file1);
    assertEquals("Data read from BackupNode and NameNode is not the same.",
        nnData, bnData);
  } catch(IOException e) {
    LOG.error("Error in TestBackupNode: ", e);
    fail(e.getLocalizedMessage());
  } finally {
    if(fileSys != null) fileSys.close();
    if(backup != null) backup.stop();
    if(cluster != null) cluster.shutdown();
  }
}
 
Author: naver | Project: hadoop | Lines: 53 | Source: TestBackupNode.java

Example 13: testDnFencing

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test
public void testDnFencing() throws Exception {
  // Create a file with replication level 3.
  DFSTestUtil.createFile(fs, TEST_FILE_PATH, 30*SMALL_BLOCK, (short)3, 1L);
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_FILE_PATH);
  
  // Drop its replication count to 1, so it becomes over-replicated.
  // Then compute the invalidation of the extra blocks and trigger
  // heartbeats so the invalidations are flushed to the DNs.
  nn1.getRpcServer().setReplication(TEST_FILE, (short) 1);
  BlockManagerTestUtil.computeInvalidationWork(
      nn1.getNamesystem().getBlockManager());
  cluster.triggerHeartbeats();
  
  // Transition nn2 to active even though nn1 still thinks it's active.
  banner("Failing to NN2 but let NN1 continue to think it's active");
  NameNodeAdapter.abortEditLogs(nn1);
  NameNodeAdapter.enterSafeMode(nn1, false);
  cluster.transitionToActive(1);
  
  // Check that the standby picked up the replication change.
  assertEquals(1,
      nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());

  // Dump some info for debugging purposes.
  banner("NN2 Metadata immediately after failover");
  doMetasave(nn2);
  
  banner("Triggering heartbeats and block reports so that fencing is completed");
  cluster.triggerHeartbeats();
  cluster.triggerBlockReports();
  
  banner("Metadata after nodes have all block-reported");
  doMetasave(nn2);

  // Force a rescan of postponedMisreplicatedBlocks.
  BlockManager nn2BM = nn2.getNamesystem().getBlockManager();
  BlockManagerTestUtil.checkHeartbeat(nn2BM);
  BlockManagerTestUtil.rescanPostponedMisreplicatedBlocks(nn2BM);

  // The blocks should no longer be postponed.
  assertEquals(0, nn2.getNamesystem().getPostponedMisreplicatedBlocks());
  
  // Wait for NN2 to enact its deletions (replication monitor has to run, etc)
  BlockManagerTestUtil.computeInvalidationWork(
      nn2.getNamesystem().getBlockManager());
  cluster.triggerHeartbeats();
  HATestUtil.waitForDNDeletions(cluster);
  cluster.triggerDeletionReports();
  assertEquals(0, nn2.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0, nn2.getNamesystem().getPendingReplicationBlocks());
  
  banner("Making sure the file is still readable");
  FileSystem fs2 = cluster.getFileSystem(1);
  DFSTestUtil.readFile(fs2, TEST_FILE_PATH);

  banner("Waiting for the actual block files to get deleted from DNs.");
  waitForTrueReplication(cluster, block, 1);
}
 
Author: naver | Project: hadoop | Lines: 60 | Source: TestDNFencing.java

Example 14: testRBWReportArrivesAfterEdits

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * Another regression test for HDFS-2742. This tests the following sequence:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - The block report is delayed in reaching the standby.
 * - The file is closed.
 * - The standby processes the OP_ADD and OP_CLOSE operations before
 *   the RBW block report arrives.
 * - The standby should not mark the block as corrupt.
 */
@Test
public void testRBWReportArrivesAfterEdits() throws Exception {
  final CountDownLatch brFinished = new CountDownLatch(1);
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {
    @Override
    protected Object passThrough(InvocationOnMock invocation)
        throws Throwable {
      try {
        return super.passThrough(invocation);
      } finally {
        // inform the test that our block report went through.
        brFinished.countDown();
      }
    }
  };

  FSDataOutputStream out = fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();

    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
      DataNodeTestUtils.spyOnBposToNN(dn, nn2);
    
    Mockito.doAnswer(delayer)
      .when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject(),
        Mockito.<BlockReportContext>anyObject());
    dn.scheduleAllBlockReport(0);
    delayer.waitForCall();
    
  } finally {
    IOUtils.closeStream(out);
  }

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  
  delayer.proceed();
  brFinished.await();
  
  // Verify that no replicas are marked corrupt, and that the
  // file is readable from the failed-over standby.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
  
  DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
 
Author: naver | Project: hadoop | Lines: 64 | Source: TestDNFencing.java

Example 15: testCorruptBlockRereplicatedAcrossRacks

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test
public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  int fileLen = 512;
  final Path filePath = new Path("/testFile");
  // Datanodes are spread across two racks
  String[] racks = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    
    DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
    final String fileContent = DFSTestUtil.readFile(fs, filePath);

    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Corrupt a replica of the block
    int dnToCorrupt = DFSTestUtil.firstDnWithBlock(cluster, b);
    assertTrue(cluster.corruptReplica(dnToCorrupt, b));

    // Restart the datanode so blocks are re-scanned, and the corrupt
    // block is detected.
    cluster.restartDataNode(dnToCorrupt);

    // Wait for the namenode to notice the corrupt replica
    DFSTestUtil.waitCorruptReplicas(fs, ns, filePath, b, 1);

    // The rack policy is still respected
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Ensure all replicas are valid (the corrupt replica may not
    // have been cleaned up yet).
    for (int i = 0; i < racks.length; i++) {
      String blockContent = cluster.readBlockOnDataNode(i, b);
      if (blockContent != null && i != dnToCorrupt) {
        assertEquals("Corrupt replica", fileContent, blockContent);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
 
Author: naver | Project: hadoop | Lines: 49 | Source: TestBlocksWithNotEnoughRacks.java


Note: the org.apache.hadoop.hdfs.DFSTestUtil.readFile examples in this article were collected by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar platforms; the snippets come from community-contributed code, and copyright remains with the original authors. Consult each project's license before using or redistributing the code, and do not republish without permission.