

Java CacheDirectiveIterator Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator. If you are unsure what CacheDirectiveIterator does or how to use it, the curated class examples below should help.


The CacheDirectiveIterator class belongs to the org.apache.hadoop.hdfs.protocol package. Six code examples of the class are presented below, sorted by popularity by default.
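
In application code the iterator is normally obtained through the public FileSystem API rather than constructed directly. A minimal usage sketch, assuming an HDFS cluster with centralized caching enabled (a null filter matches every directive):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;

public class ListDirectivesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(conf);
    // A null filter lists every cache directive; under the hood this is
    // served by a CacheDirectiveIterator that fetches results from the
    // NameNode in batches.
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo());
    }
  }
}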

Example 1: listCacheDirectives

import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator; // import the required package/class
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  return new CacheDirectiveIterator(namenode, filter, traceSampler);
}
 
Developer: naver; Project: hadoop; Lines: 5; Source: DFSClient.java
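
CacheDirectiveIterator is a batched RemoteIterator: rather than pulling all directives in one RPC, it asks the NameNode for the next batch keyed by the last directive ID it has seen. The self-contained sketch below is a simplified stand-in for that batching pattern, not the actual Hadoop class; fetchBatch plays the role of the NameNode RPC:

import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Function;

class BatchedIterator<E> implements Iterator<E> {
  private final Function<Long, List<E>> fetchBatch; // prevId -> next batch
  private final Function<E, Long> idOf;             // element -> its ID
  private Iterator<E> batch;
  private long prevId = 0;
  private boolean done = false;

  BatchedIterator(Function<Long, List<E>> fetchBatch, Function<E, Long> idOf) {
    this.fetchBatch = fetchBatch;
    this.idOf = idOf;
  }

  @Override
  public boolean hasNext() {
    if (done) {
      return false;
    }
    if (batch == null || !batch.hasNext()) {
      // One "RPC" per batch, resuming after the last ID we handed out.
      List<E> next = fetchBatch.apply(prevId);
      if (next.isEmpty()) {
        done = true;
        return false;
      }
      batch = next.iterator();
    }
    return true;
  }

  @Override
  public E next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    E e = batch.next();
    prevId = idOf.apply(e); // remember where to resume next time
    return e;
  }
}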

Example 2: testWaitForCachedReplicas

import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator; // import the required package/class
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
            (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);

  // Send a cache report referring to a bogus block.  It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo(pool));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i=0; i<numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile,
        (int)BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence
  int expected = 0;
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveInfo directive =
        new CacheDirectiveInfo.Builder().
          setPath(new Path(paths.get(i))).
          setPool(pool).
          build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }

  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  assertEquals(expected*BLOCK_SIZE, totalUsed);

  // Uncache and check each path in sequence
  RemoteIterator<CacheDirectiveEntry> entries =
    new CacheDirectiveIterator(nnRpc, null, Sampler.NEVER);
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}
 
Developer: naver; Project: hadoop; Lines: 80; Source: TestCacheDirectives.java
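
Outside of tests, the same add/list/remove cycle is normally driven through the public DistributedFileSystem API rather than the NameNode RPC interface used above. A minimal sketch, assuming caching is enabled; the pool name and path below are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CacheLifecycleExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // Create a pool, then a directive that caches one path in it.
    dfs.addCachePool(new CachePoolInfo("examplePool"));
    long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/example/data"))
        .setPool("examplePool")
        .build());
    // ... the NameNode now instructs DataNodes to cache the file's blocks ...
    // Clean up: remove the directive, then the pool.
    dfs.removeCacheDirective(id);
    dfs.removeCachePool("examplePool");
  }
}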

Example 3: listCacheDirectives

import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator; // import the required package/class
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  checkOpen();
  return new CacheDirectiveIterator(namenode, filter, tracer);
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 6; Source: DFSClient.java

Example 4: testWaitForCachedReplicas

import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator; // import the required package/class
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
            (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);

  // Send a cache report referring to a bogus block.  It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo(pool));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i=0; i<numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile,
        (int)BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence
  int expected = 0;
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveInfo directive =
        new CacheDirectiveInfo.Builder().
          setPath(new Path(paths.get(i))).
          setPool(pool).
          build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }

  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  assertEquals(expected*BLOCK_SIZE, totalUsed);

  // Uncache and check each path in sequence
  RemoteIterator<CacheDirectiveEntry> entries =
    new CacheDirectiveIterator(nnRpc, null, FsTracer.get(conf));
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 80; Source: TestCacheDirectives.java

Example 5: listCacheDirectives

import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator; // import the required package/class
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  return new CacheDirectiveIterator(namenode, filter);
}
 
Developer: Nextzero; Project: hadoop-2.6.0-cdh5.4.3; Lines: 5; Source: DFSClient.java

Example 6: testWaitForCachedReplicas

import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator; // import the required package/class
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
            (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);

  // Send a cache report referring to a bogus block.  It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo(pool));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i=0; i<numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile,
        (int)BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence
  int expected = 0;
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveInfo directive =
        new CacheDirectiveInfo.Builder().
          setPath(new Path(paths.get(i))).
          setPool(pool).
          build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }

  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  assertEquals(expected*BLOCK_SIZE, totalUsed);

  // Uncache and check each path in sequence
  RemoteIterator<CacheDirectiveEntry> entries =
    new CacheDirectiveIterator(nnRpc, null);
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}
 
Developer: Nextzero; Project: hadoop-2.6.0-cdh5.4.3; Lines: 80; Source: TestCacheDirectives.java


Note: The org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution must comply with the corresponding project's license. Please do not reproduce without permission.