

Java DFSTestUtil.readFileBuffer Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.readFileBuffer. If you are unsure what DFSTestUtil.readFileBuffer does, how to call it, or where to find working examples, the curated snippets below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


Eleven code examples of DFSTestUtil.readFileBuffer are shown below, sorted by popularity by default.
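
For orientation before the examples: readFileBuffer reads an entire HDFS file into a byte[], which makes it convenient for round-trip and corruption tests. Below is a minimal sketch of the write-then-read pattern the examples build on; it uses only APIs that appear in the snippets, and the class name, path, and seed are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ReadFileBufferExample {
  public static void main(String[] args) throws Exception {
    // Spin up a single-DataNode test cluster (test-only API).
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path path = new Path("/example.dat");

      // Write 4 KB of pseudo-random data derived from a fixed seed...
      DFSTestUtil.createFile(fs, path, 4096, (short) 1, 0xBEEF);

      // ...then read the whole file back into memory in one call.
      byte[] contents = DFSTestUtil.readFileBuffer(fs, path);
      System.out.println("Read " + contents.length + " bytes");
    } finally {
      cluster.shutdown();
    }
  }
}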

Example 1: doShortCircuitReadBlockFileCorruptionTest

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
public void doShortCircuitReadBlockFileCorruptionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Corrupt the lazy-persisted block file, and verify that checksum
  // verification catches it.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  cluster.corruptReplica(0, DFSTestUtil.getFirstBlock(fs, path1));
  exception.expect(ChecksumException.class);
  DFSTestUtil.readFileBuffer(fs, path1);
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestScrLazyPersistFiles.java
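
Note that the exception field used on the second-to-last line is declared elsewhere in the test class; it is presumably JUnit 4's ExpectedException rule, along these lines (a sketch, not shown in the snippet):

import org.junit.Rule;
import org.junit.rules.ExpectedException;

// Assumed field in the enclosing test class: arms the test to expect
// a ChecksumException from the subsequent readFileBuffer call.
@Rule
public ExpectedException exception = ExpectedException.none();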

Example 2: doShortCircuitReadMetaFileCorruptionTest

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
public void doShortCircuitReadMetaFileCorruptionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Corrupt the lazy-persisted checksum file, and verify that checksum
  // verification catches it.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  MiniDFSCluster.corruptBlock(metaFile);
  exception.expect(ChecksumException.class);
  DFSTestUtil.readFileBuffer(fs, path1);
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestScrLazyPersistFiles.java

Example 3: verifyCopy

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
private void verifyCopy(FileStatus s, FileStatus t, boolean compareName)
    throws Exception {
  Assert.assertEquals(s.isDirectory(), t.isDirectory());
  if (compareName) {
    Assert.assertEquals(s.getPath().getName(), t.getPath().getName());
  }
  if (!s.isDirectory()) {
    // verify the file content is the same
    byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath());
    byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath());
    Assert.assertArrayEquals(sbytes, tbytes);
  } else {
    FileStatus[] slist = dfs.listStatus(s.getPath());
    FileStatus[] tlist = dfs.listStatus(t.getPath());
    Assert.assertEquals(slist.length, tlist.length);
    for (int i = 0; i < slist.length; i++) {
      verifyCopy(slist[i], tlist[i], true);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestDistCpSync.java
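
A caller would typically kick off the recursion by comparing the two tree roots with compareName set to false, since the root directories may legitimately have different names. A hypothetical driver inside the same test class:

// Hypothetical driver: recursively verify that dst is a faithful copy of src.
private void verifyTreesMatch(Path src, Path dst) throws Exception {
  FileStatus s = dfs.getFileStatus(src);
  FileStatus t = dfs.getFileStatus(dst);
  verifyCopy(s, t, false); // root names may differ, so skip the name check
}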

Example 4: testDataXceiverCleansUpSlotsOnFailure

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverCleansUpSlotsOnFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  final Path TEST_PATH2 = new Path("/test_file2");
  final int TEST_FILE_LEN = 4096;
  final int SEED = 0xFADE1;
  DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
      (short)1, SEED);
  DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
      (short)1, SEED);

  // The first read should allocate one shared memory segment and slot.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // The second read should fail, and we should only have 1 segment and 1 slot
  // left.
  fs.getClient().getConf().brfFailureInjector =
      new TestCleanupFailureInjector();
  try {
    DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());
  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestShortCircuitCache.java

Example 5: testPreReceiptVerificationDfsClientCanDoScr

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testPreReceiptVerificationDfsClientCanDoScr", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.getClient().getConf().brfFailureInjector =
      new TestPreReceiptVerificationFailureInjector();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
  final Path TEST_PATH2 = new Path("/test_file2");
  DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  ShortCircuitRegistry registry =
      cluster.getDataNodes().get(0).getShortCircuitRegistry();
  registry.visit(new ShortCircuitRegistry.Visitor() {
    @Override
    public void accept(HashMap<ShmId, RegisteredShm> segments,
                       HashMultimap<ExtendedBlockId, Slot> slots) {
      Assert.assertEquals(1, segments.size());
      Assert.assertEquals(2, slots.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestShortCircuitCache.java

Example 6: assertFilePermissionDenied

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
/**
 * Asserts that permission is denied to the given fs/user for the given file.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path file to check
 * @throws Exception if there is an unexpected error
 */
public static void assertFilePermissionDenied(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    DFSTestUtil.readFileBuffer(fs, pathToCheck);
    fail("expected AccessControlException for user " + user + ", path = " +
      pathToCheck);
  } catch (AccessControlException e) {
    // expected
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: AclTestHelpers.java

Example 7: assertFilePermissionGranted

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
/**
 * Asserts that permission is granted to the given fs/user for the given file.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path file to check
 * @throws Exception if there is an unexpected error
 */
public static void assertFilePermissionGranted(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    DFSTestUtil.readFileBuffer(fs, pathToCheck);
  } catch (AccessControlException e) {
    fail("expected permission granted for user " + user + ", path = " +
      pathToCheck);
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: AclTestHelpers.java
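
For both helpers, the FileSystem handle passed in is expected to already be bound to the user under test, so the helper only has to attempt the read. A hedged sketch of how the pair might be driven; the user name, groups, and path are hypothetical, and DFSTestUtil.getFileSystemAs is assumed to be available, as in Hadoop's test utilities:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class AclHelperUsage {
  // Hypothetical check: user "bruce" must be denied, the current user granted.
  static void checkAcls(Configuration conf, FileSystem superFs, Path p)
      throws Exception {
    UserGroupInformation bruce =
        UserGroupInformation.createUserForTesting("bruce", new String[] {"users"});
    // Obtain a FileSystem handle that performs all operations as "bruce".
    FileSystem fsAsBruce = DFSTestUtil.getFileSystemAs(bruce, conf);
    AclTestHelpers.assertFilePermissionDenied(fsAsBruce, bruce, p);
    AclTestHelpers.assertFilePermissionGranted(
        superFs, UserGroupInformation.getCurrentUser(), p);
  }
}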

Example 8: verifyReadRandomFile

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
protected final boolean verifyReadRandomFile(
    Path path, int fileLength, int seed) throws IOException {
  byte[] contents = DFSTestUtil.readFileBuffer(fs, path);
  byte[] expected =
      DFSTestUtil.calculateFileContentsFromSeed(seed, fileLength);
  return Arrays.equals(contents, expected);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: LazyPersistTestCase.java
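
Paired with createFile, this gives a deterministic round-trip check: calculateFileContentsFromSeed regenerates exactly the bytes that createFile wrote for the same seed and length. A hypothetical test using the helper inside the same test class, assuming the usual static JUnit imports:

@Test
public void testRoundTrip() throws IOException {
  // Hypothetical test: data written with a seed must read back byte-identical.
  Path p = new Path("/roundtrip.dat");
  DFSTestUtil.createFile(fs, p, BLOCK_SIZE, (short) 1, 0xFADED);
  assertTrue(verifyReadRandomFile(p, BLOCK_SIZE, 0xFADED));
}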

Example 9: testDataXceiverHandlesRequestShortCircuitShmFailure

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test(timeout=60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure()
    throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096,
      (short)1, 0xFADE1);
  LOG.info("Setting failure injector and performing a read which " +
      "should fail...");
  DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      throw new IOException("injected error into sendShmResponse");
    }
  }).when(failureInjector).sendShortCircuitShmResponse();
  DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
  DataNodeFaultInjector.instance = failureInjector;

  try {
    // The first read will try to allocate a shared memory segment and slot.
    // The shared memory segment allocation will fail because of the failure
    // injector.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    Assert.fail("expected readFileBuffer to fail, but it succeeded.");
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }

  checkNumberOfSegmentsAndSlots(0, 0,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  LOG.info("Clearing failure injector and performing another read...");
  DataNodeFaultInjector.instance = prevInjector;

  fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();

  // The second read should succeed.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // We should have added a new short-circuit shared memory segment and slot.
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 58, Source: TestShortCircuitCache.java

Example 10: assertFileLength

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
static void assertFileLength(Path file, long length) throws IOException {
  byte[] data = DFSTestUtil.readFileBuffer(fs, file);
  assertEquals("Wrong data size in snapshot.", length, data.length);
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: TestFileTruncate.java

Example 11: testWithLayoutChangeAndRollback

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
/**
 * Support for layout version change with rolling upgrade was
 * added by HDFS-6800 and HDFS-6981.
 */
@Test(timeout=300000)
public void testWithLayoutChangeAndRollback() throws Exception {
  final long seed = 0x600DF00D;
  try {
    startCluster();

    Path[] paths = new Path[3];
    File[] blockFiles = new File[3];

    // Create two files in DFS.
    for (int i = 0; i < 2; ++i) {
      paths[i] = new Path("/" + GenericTestUtils.getMethodName() + "." + i + ".dat");
      DFSTestUtil.createFile(fs, paths[i], BLOCK_SIZE, (short) 1, seed);
    }

    startRollingUpgrade();

    // Delete the first file. The DN will save its block files in trash.
    blockFiles[0] = getBlockForFile(paths[0], true);
    File trashFile0 = getTrashFileForBlock(blockFiles[0], false);
    deleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0);

    // Restart the DN with a new layout version to trigger layout upgrade.
    LOG.info("Shutting down the Datanode");
    MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
    DFSTestUtil.addDataNodeLayoutVersion(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
        "Test Layout for TestDataNodeRollingUpgrade");
    LOG.info("Restarting the DataNode");
    cluster.restartDataNode(dnprop, true);
    cluster.waitActive();

    dn0 = cluster.getDataNodes().get(0);
    LOG.info("The DN has been restarted");
    assertFalse(trashFile0.exists());
    assertFalse(dn0.getStorage().getBPStorage(blockPoolId).isTrashAllowed(blockFiles[0]));

    // Ensure that the block file for the first file was moved from 'trash' to 'previous'.
    assertTrue(isBlockFileInPrevious(blockFiles[0]));
    assertFalse(isTrashRootPresent());

    // Delete the second file. Ensure that its block file is in previous.
    blockFiles[1] = getBlockForFile(paths[1], true);
    fs.delete(paths[1], false);
    assertTrue(isBlockFileInPrevious(blockFiles[1]));
    assertFalse(isTrashRootPresent());

    // Create and delete a third file. Its block file should not be
    // in either trash or previous after deletion.
    paths[2] = new Path("/" + GenericTestUtils.getMethodName() + ".2.dat");
    DFSTestUtil.createFile(fs, paths[2], BLOCK_SIZE, (short) 1, seed);
    blockFiles[2] = getBlockForFile(paths[2], true);
    fs.delete(paths[2], false);
    assertFalse(isBlockFileInPrevious(blockFiles[2]));
    assertFalse(isTrashRootPresent());

    // Rollback and ensure that the first two file contents were restored.
    rollbackRollingUpgrade();
    for (int i = 0; i < 2; ++i) {
      byte[] actual = DFSTestUtil.readFileBuffer(fs, paths[i]);
      byte[] calculated = DFSTestUtil.calculateFileContentsFromSeed(seed, BLOCK_SIZE);
      assertArrayEquals(actual, calculated);
    }

    // And none of the block files must be in previous or trash.
    assertFalse(isTrashRootPresent());
    for (int i = 0; i < 3; ++i) {
      assertFalse(isBlockFileInPrevious(blockFiles[i]));
    }
  } finally {
    shutdownCluster();
  }
}
 
Developer: naver, Project: hadoop, Lines: 78, Source: TestDataNodeRollingUpgrade.java


Note: The org.apache.hadoop.hdfs.DFSTestUtil.readFileBuffer method examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects; copyright in the source code remains with the original authors, and use or redistribution is subject to each project's license. Do not republish without permission.