

Java FSImageTestUtil.countEditLogOpTypes Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.countEditLogOpTypes. If you are unsure how FSImageTestUtil.countEditLogOpTypes is used in practice, the curated examples below should help; you can also explore further usage of the enclosing class, org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.


A total of 3 code examples of the FSImageTestUtil.countEditLogOpTypes method are shown below, sorted by popularity by default.
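Before the full test methods, here is a minimal, self-contained sketch of the call pattern all three examples share: start a MiniDFSCluster, locate the NameNode's in-progress edit-log segment, and ask FSImageTestUtil.countEditLogOpTypes for a per-opcode tally. This is an illustrative sketch rather than code taken from the examples; it assumes the HDFS test artifacts (which provide FSImageTestUtil and MiniDFSCluster) are on the classpath, and the class name CountEditLogOpTypesSketch is made up for this page.

import java.io.File;
import java.util.EnumMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.util.Holder;

public class CountEditLogOpTypesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      // A fresh cluster writes edits into segment 1, which is still
      // "in progress"; build its path under the NameNode's current/ dir.
      File editLog =
          new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
              NNStorage.getInProgressEditsFileName(1));

      // Parse the segment and tally how often each opcode occurs.
      EnumMap<FSEditLogOpCodes, Holder<Integer>> counts =
          FSImageTestUtil.countEditLogOpTypes(editLog);

      // Holder.held carries the count; get() returns null for opcodes
      // that never appear in the log.
      Holder<Integer> mkdirs = counts.get(FSEditLogOpCodes.OP_MKDIR);
      System.out.println("OP_MKDIR ops so far: "
          + (mkdirs == null ? 0 : mkdirs.held));
    } finally {
      cluster.shutdown();
    }
  }
}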

Example 1: testAppendRestart

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the class that provides the method
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 */
@Test
public void testAppendRestart() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  MiniDFSCluster cluster = null;

  FSDataOutputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    File editLog =
      new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
          NNStorage.getInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
    
    Path p1 = new Path("/block-boundaries");
    writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);

    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to reopen file
    // OP_ADD_BLOCK for second block
    // OP_CLOSE to close file
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    Path p2 = new Path("/not-block-boundaries");
    writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
    // OP_ADD_BLOCK at the start of the second block
    // OP_CLOSE to close file
    // Total, in addition to the ones above: 1 OP_ADD, 1 OP_APPEND,
    //       1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
    
    cluster.restartNameNode();
    
    AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
    AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
  } finally {
    IOUtils.closeStream(stream);
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 65, Source: TestFileAppendRestart.java
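All three examples also rely on a writeAndAppend helper and a BLOCK_SIZE constant that are defined elsewhere in TestFileAppendRestart and not reproduced on this page. For context, the sketch below shows what such a helper plausibly looks like: create the file, write lengthForCreate bytes, close it, reopen it with append(), and write lengthForAppend more bytes. It is a reconstruction, assuming the test class already imports FileSystem, Path, FSDataOutputStream, AppendTestUtil, IOUtils, IOException and JUnit's assertEquals; the real helper may differ in details such as buffer size and replication.

  // Illustrative reconstruction, not the verbatim helper from the project.
  private static final int BLOCK_SIZE = 4096;

  private void writeAndAppend(FileSystem fs, Path p,
      int lengthForCreate, int lengthForAppend) throws IOException {
    // Create the file with the test's small block size so that a few KB of
    // data already spans several blocks.
    FSDataOutputStream stream =
        fs.create(p, true, 4096, (short) 1, BLOCK_SIZE);
    try {
      AppendTestUtil.write(stream, 0, lengthForCreate);
      stream.close();

      // Reopening for append is what produces the OP_APPEND / OP_ADD
      // edit-log entries counted in the examples.
      stream = fs.append(p);
      AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
      stream.close();
    } finally {
      IOUtils.closeStream(stream);
    }
    // The file should now contain both payloads back to back.
    assertEquals(lengthForCreate + lengthForAppend,
        fs.getFileStatus(p).getLen());
  }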

Example 2: testAppendRestart

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the class that provides the method
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 */
@Test
public void testAppendRestart() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  MiniDFSCluster cluster = null;

  FSDataOutputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    File editLog =
      new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
          NNStorage.getInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
    
    Path p1 = new Path("/block-boundaries");
    writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);

    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_ADD to reopen file
    // OP_ADD_BLOCK for second block
    // OP_CLOSE to close file
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    Path p2 = new Path("/not-block-boundaries");
    writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_ADD to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
    // OP_ADD_BLOCK at the start of the second block
    // OP_CLOSE to close file
    // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
    //       in addition to the ones above
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
    
    cluster.restartNameNode();
    
    AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
    AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
  } finally {
    IOUtils.closeStream(stream);
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 63, Source: TestFileAppendRestart.java

Example 3: testAppendRestart

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the class that provides the method
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 */
@Test
public void testAppendRestart() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  MiniDFSCluster cluster = null;

  FSDataOutputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    File editLog =
      new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
          NNStorage.getInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
    
    Path p1 = new Path("/block-boundaries");
    writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);

    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_UPDATE_BLOCKS for first block
    // OP_CLOSE to close file
    // OP_ADD to reopen file
    // OP_UPDATE_BLOCKS for second block
    // OP_CLOSE to close file
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    Path p2 = new Path("/not-block-boundaries");
    writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_UPDATE_BLOCKS for first block
    // OP_CLOSE to close file
    // OP_ADD to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
    // OP_UPDATE_BLOCKS at the start of the second block
    // OP_CLOSE to close file
    // Total: 2 OP_ADDs, 3 OP_UPDATE_BLOCKS, and 2 OP_CLOSEs in addition
    //        to the ones above
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2+3, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
    
    cluster.restartNameNode();
    
    AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
    AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
  } finally {
    IOUtils.closeStream(stream);
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 62, Source: TestFileAppendRestart.java
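One pitfall worth noting when writing assertions like the ones above: countEditLogOpTypes returns an EnumMap that, at least in the versions shown here, only has entries for opcodes that actually occur in the log, so counts.get(...).held throws a NullPointerException for an opcode with zero occurrences. A small hypothetical helper (not part of FSImageTestUtil) makes such lookups null-safe:

  // Hypothetical convenience helper for tests; returns 0 when the opcode
  // never appears in the edit log instead of dereferencing a null Holder.
  private static int countOf(EnumMap<FSEditLogOpCodes, Holder<Integer>> counts,
      FSEditLogOpCodes op) {
    Holder<Integer> h = counts.get(op);
    return h == null ? 0 : h.held;
  }

  // Usage, mirroring the assertions above:
  // assertEquals(2, countOf(counts, FSEditLogOpCodes.OP_CLOSE));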


Note: The org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.countEditLogOpTypes examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from community open-source projects, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Do not republish without permission.