当前位置: 首页>>代码示例>>Java>>正文


Java FSImageTestUtil.assertReasonableNameCurrentDir方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertReasonableNameCurrentDir方法的典型用法代码示例。如果您正苦于以下问题:Java FSImageTestUtil.assertReasonableNameCurrentDir方法的具体用法?Java FSImageTestUtil.assertReasonableNameCurrentDir怎么用?Java FSImageTestUtil.assertReasonableNameCurrentDir使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil的用法示例。


在下文中一共展示了FSImageTestUtil.assertReasonableNameCurrentDir方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: checkResult

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; //导入方法依赖的package包/类
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File current = new File(baseDir, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      // Name node: sanity-check the layout of the recovered "current" dir.
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Data node: directory contents must match the master checksum.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every configured storage directory must hold an identical copy.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have consumed the "previous" directories.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:29,代码来源:TestDFSRollback.java

示例2: checkResult

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; //导入方法依赖的package包/类
/**
 * Verify that the current directory exists and that the previous directory
 * does not.  Verify that "current" has not been modified by comparing the
 * checksum of all its contained files with their original checksums.
 * Note that we do not check that "previous" is removed on the DataNode,
 * because its removal is asynchronous and there is therefore no reliable
 * way to know when it will happen.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
  List<File> nnCurrentDirs = Lists.newArrayList();
  for (String nnDir : nameNodeDirs) {
    File current = new File(nnDir, "current");
    nnCurrentDirs.add(current);
    FSImageTestUtil.assertReasonableNameCurrentDir(current);
  }

  // All name-node storage directories must be identical copies.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      nnCurrentDirs, Collections.<String>emptySet());

  // Data-node "current" contents must be untouched by finalize.
  for (String dnDir : dataNodeDirs) {
    assertEquals(
        UpgradeUtilities.checksumContents(DATA_NODE, new File(dnDir, "current")),
        UpgradeUtilities.checksumMasterDataNodeContents());
  }

  // Finalize must have removed "previous" on the name node.
  for (String nnDir : nameNodeDirs) {
    assertFalse(new File(nnDir, "previous").isDirectory());
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:30,代码来源:TestDFSFinalize.java

示例3: checkResult

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; //导入方法依赖的package包/类
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File current = new File(baseDir, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      // Name node: sanity-check the layout of the recovered "current" dir.
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Data node: directory contents must match the master checksum.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every configured storage directory must hold an identical copy.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have consumed the "previous" directories.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:29,代码来源:TestDFSRollback.java

示例4: checkResult

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; //导入方法依赖的package包/类
/**
 * Verify that the current directory exists and that the previous directory
 * does not.  Verify that "current" has not been modified by comparing the
 * checksum of all its contained files with their original checksums.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs,
  String bpid) throws Exception {
  List<File> nnCurrentDirs = Lists.newArrayList();
  for (String nnDir : nameNodeDirs) {
    File current = new File(nnDir, "current");
    nnCurrentDirs.add(current);
    FSImageTestUtil.assertReasonableNameCurrentDir(current);
  }

  // All name-node storage directories must be identical copies.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      nnCurrentDirs, Collections.<String>emptySet());

  // Data-node "current" contents must be untouched by finalize.
  // Keep the File handles: the block-pool checks below reuse them.
  File[] dnCurrentDirs = new File[dataNodeDirs.length];
  for (int i = 0; i < dataNodeDirs.length; i++) {
    dnCurrentDirs[i] = new File(dataNodeDirs[i], "current");
    assertEquals(
        UpgradeUtilities.checksumContents(DATA_NODE, dnCurrentDirs[i], false),
        UpgradeUtilities.checksumMasterDataNodeContents());
  }

  // Finalize must have removed "previous" on the name node.
  for (String nnDir : nameNodeDirs) {
    assertFalse(new File(nnDir, "previous").isDirectory());
  }

  if (bpid == null) {
    // Pre-federation layout: "previous" sits directly under the data dir.
    for (String dnDir : dataNodeDirs) {
      assertFalse(new File(dnDir, "previous").isDirectory());
    }
  } else {
    // Federated layout: "previous" sits under the block-pool root.
    for (File dnCurrent : dnCurrentDirs) {
      File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurrent);
      assertFalse(new File(bpRoot, "previous").isDirectory());

      // The finalized block directory must still match the master checksum.
      File bpFinalized =
          new File(bpRoot, "current/" + DataStorage.STORAGE_DIR_FINALIZED);
      assertEquals(
          UpgradeUtilities.checksumContents(DATA_NODE, bpFinalized, true),
          UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:44,代码来源:TestDFSFinalize.java

示例5: checkResult

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; //导入方法依赖的package包/类
/**
 * Verify that the current directory exists and that the previous directory
 * does not.  Verify that "current" has not been modified by comparing the
 * checksum of all its contained files with their original checksums.
 * Note that we do not check that "previous" is removed on the DataNode,
 * because its removal is asynchronous and there is therefore no reliable
 * way to know when it will happen.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
  List<File> nnCurrentDirs = Lists.newArrayList();
  for (String nnDir : nameNodeDirs) {
    File current = new File(nnDir, "current");
    nnCurrentDirs.add(current);
    FSImageTestUtil.assertReasonableNameCurrentDir(current);
  }

  // All name-node storage directories must be identical copies.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      nnCurrentDirs, Collections.<String>emptySet());

  // Data-node "current" contents must be untouched by finalize.
  for (String dnDir : dataNodeDirs) {
    File dnCurrent = new File(dnDir, "current");
    assertEquals(
        UpgradeUtilities.checksumContents(DATA_NODE, dnCurrent),
        UpgradeUtilities.checksumMasterContents(DATA_NODE));
    // The per-namespace slice under the data dir must also be untouched.
    File nsRoot = NameSpaceSliceStorage.getNsRoot(
        UpgradeUtilities.getCurrentNamespaceID(cluster), dnCurrent);
    assertEquals(
        UpgradeUtilities.checksumContents(
            DATA_NODE, new File(nsRoot, MiniDFSCluster.FINALIZED_DIR_NAME)),
        UpgradeUtilities.checksumDatanodeNSStorageContents());
  }

  // Finalize must have removed "previous" on the name node.
  for (String nnDir : nameNodeDirs) {
    assertFalse(new File(nnDir, "previous").isDirectory());
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:35,代码来源:TestDFSFinalize.java

示例6: checkResult

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; //导入方法依赖的package包/类
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    switch (nodeType) {
    case NAME_NODE:
      // Name node: sanity-check the layout of the recovered "current" dir.
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
      break;
    case DATA_NODE:
      // Check only this directory: the outer loop already visits each base
      // dir once.  (The previous version re-ran a nested loop over ALL
      // baseDirs here, re-checksumming every directory once per outer
      // iteration -- O(n^2) duplicated assertions with no extra coverage.)
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir),
          UpgradeUtilities.checksumMasterContents(nodeType));
      // The per-namespace slice under the data dir must also match.
      File nsBaseDir = NameSpaceSliceStorage.getNsRoot(
          UpgradeUtilities.getCurrentNamespaceID(cluster), curDir);
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType,
              new File(nsBaseDir, MiniDFSCluster.FINALIZED_DIR_NAME)),
          UpgradeUtilities.checksumDatanodeNSStorageContents());
      break;
    }
  }

  // Every configured storage directory must hold an identical copy.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  // Rollback must have consumed the "previous" directories.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:37,代码来源:TestDFSRollback.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertReasonableNameCurrentDir方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。