

Java DFSTestUtil.verifyExpectedCacheUsage Method Code Examples

This article collects typical code examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.verifyExpectedCacheUsage. If you have been wondering what DFSTestUtil.verifyExpectedCacheUsage does, how to use it, and where to find usage examples, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


The sections below show 5 code examples of the DFSTestUtil.verifyExpectedCacheUsage method, sorted by popularity by default.
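Before diving in, it helps to know the method's contract as used in these examples: the call polls the DataNode's FsDataset until it reports exactly the expected number of cached bytes and cached blocks, failing the test if a timeout elapses first, and returns the verified byte count. Below is a minimal sketch of an equivalent helper, assuming only the FSDatasetMBean accessors getCacheUsed() and getNumBlocksCached(); the real implementation lives in org.apache.hadoop.hdfs.DFSTestUtil and may differ in its polling details.

import com.google.common.base.Supplier;

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.test.GenericTestUtils;

public class CacheUsageCheck {
  /**
   * Poll until the dataset reports the expected cached byte and block
   * counts; GenericTestUtils.waitFor throws if the timeout is reached.
   */
  public static long waitForExpectedCacheUsage(final long expectedBytes,
      final long expectedBlocks, final FsDatasetSpi<?> fsd) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // Both counters must match at the same time.
        return fsd.getCacheUsed() == expectedBytes
            && fsd.getNumBlocksCached() == expectedBlocks;
      }
    }, 100, 60000);
    return expectedBytes;
  }
}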

Example 1: testPageRounder

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Test(timeout=60000)
public void testPageRounder() throws Exception {
  // Write a small file
  Path fileName = new Path("/testPageRounder");
  final int smallBlocks = 512; // This should be smaller than the page size
  assertTrue("Page size should be greater than smallBlocks!",
      PAGE_SIZE > smallBlocks);
  final int numBlocks = 5;
  final int fileLen = smallBlocks * numBlocks;
  FSDataOutputStream out =
      fs.create(fileName, false, 4096, (short)1, smallBlocks);
  out.write(new byte[fileLen]);
  out.close();
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);
  // Cache the file and check the sizes match the page size
  setHeartbeatResponse(cacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(PAGE_SIZE * numBlocks, numBlocks, fsd);
  // Uncache and check that it decrements by the page size too
  setHeartbeatResponse(uncacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: TestFsDatasetCache.java
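Note how the verification expects PAGE_SIZE * numBlocks rather than fileLen: mlock operates on whole OS pages, so each 512-byte block is accounted for as one full page. A one-line sketch of that round-up, assuming PAGE_SIZE holds the operating-system page size used elsewhere in this test class:

// Round a block's length up to a whole OS page; 512 bytes -> one page.
long roundedUp = ((smallBlocks + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;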

Example 2: tearDown

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@After
public void tearDown() throws Exception {
  // Verify that each test uncached whatever it cached.  This cleanup is
  // required so that file descriptors are not leaked across tests.
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  if (fs != null) {
    fs.close();
  }
  if (cluster != null) {
    cluster.shutdown();
  }
  // Restore the original CacheManipulator
  NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source file: TestFsDatasetCache.java

Example 3: testFilesExceedMaxLockedMemory

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAL);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // nth file should hit a capacity exception
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.round(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}
 
Developer ID: naver, Project: hadoop, Lines of code: 63, Source file: TestFsDatasetCache.java
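The hard-coded counts in this example follow from the test constants: fileSize is CACHE_CAPACITY / 4 and each file spans 4 blocks, which is why the loop expects 4 * (i + 1) cached blocks per iteration and why curCachedBlocks starts at 16 (4 files x 4 blocks each) once the first four files are cached.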

Example 4: testPinning

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
/**
 * Test that when a client has a replica mmapped, we will not un-mlock that
 * replica for a reasonable amount of time, even if an uncache request
 * occurs.
 */
@Test(timeout=120000)
public void testPinning() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  Configuration conf = getDefaultConf();
  // Set a really long revocation timeout, so that we won't reach it during
  // this test.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS,
      1800000L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
      setPool("pool").setPath(new Path(TEST_FILE)).
        setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should still be cached.
  dfs.removeCacheDirective(cacheDirectiveId);
  Thread.sleep(500);
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Un-mmap the file.  The file should be uncached after this.
  in.releaseBuffer(buf);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.close();
  cluster.shutdown();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 51, Source file: TestFsDatasetCacheRevocation.java
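A note on the pinning mechanism: in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class)) is HDFS's zero-copy read API. Since no fallback ByteBufferPool is supplied, the read can only be served by mmapping the cached replica, and the returned ByteBuffer keeps that mmap pinned until releaseBuffer(buf) is called; this is what prevents the uncache request from taking effect while the buffer is outstanding.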

Example 5: testRevocation

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
/**
 * Test that when we have an uncache request, and the client refuses to release
 * the replica for a long time, we will un-mlock it.
 */
@Test(timeout=120000)
public void testRevocation() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  Configuration conf = getDefaultConf();
  // Set a really short revocation timeout.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file2";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should get uncached.
  LOG.info("removing cache directive {}", cacheDirectiveId);
  dfs.removeCacheDirective(cacheDirectiveId);
  LOG.info("finished removing cache directive {}", cacheDirectiveId);
  Thread.sleep(1000);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.releaseBuffer(buf);
  in.close();
  cluster.shutdown();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 49, Source file: TestFsDatasetCacheRevocation.java
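The contrast with testPinning is in the configuration: here DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS is only 250 ms, so the 1000 ms sleep gives the DataNode time to stop waiting for the client to release its mmap and to un-mlock the replica anyway; verifyExpectedCacheUsage(0, 0, fsd) then confirms the forced revocation.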


Note: the org.apache.hadoop.hdfs.DFSTestUtil.verifyExpectedCacheUsage method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as Github and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not republish without permission.