This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertReasonableNameCurrentDir. If you are wondering what FSImageTestUtil.assertReasonableNameCurrentDir does and how to use it, the curated examples below may help. For more context, you can also look at the enclosing class, org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.
Six code examples of FSImageTestUtil.assertReasonableNameCurrentDir are shown below, ordered by popularity.
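Before the full examples, here is a minimal sketch of calling the assertion directly. The storage path is an assumption for illustration only; in the examples below the directory always comes from test configuration rather than a hard-coded path.

import java.io.File;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;

// Minimal sketch (call from a test method declared 'throws Exception').
// The path is hypothetical; point it at a real NameNode storage directory.
File curDir = new File("/tmp/hadoop/dfs/name", "current");
// Asserts that the NameNode "current" directory looks sane, i.e. it exists
// and contains the expected image/edits metadata files.
FSImageTestUtil.assertReasonableNameCurrentDir(curDir);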
Example 1: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
* Verify that the new current directory is the old previous.
* It is assumed that the server has recovered and rolled back.
*/
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
      break;
    case DATA_NODE:
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i], "previous").isDirectory());
  }
}
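For context, a rollback test typically invokes this helper after the cluster has been restarted with the rollback startup option. The call below is a hypothetical sketch, not code taken from this page; the config key is the standard NameNode storage key, and NAME_NODE refers to HdfsServerConstants.NodeType.NAME_NODE.

// Hypothetical call site, assumed for illustration:
Configuration conf = new HdfsConfiguration();
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
// ... restart the cluster with StartupOption.ROLLBACK and wait for recovery, then:
checkResult(NodeType.NAME_NODE, nameNodeDirs);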
Example 2: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist. Verify that current has not been modified by comparing
 * the checksum of all the files it contains with their original checksums.
 * Note that we do not check that previous is removed on the DataNode
 * because its removal is asynchronous, so we have no reliable
 * way to know when it will happen.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  for (int i = 0; i < dataNodeDirs.length; i++) {
    assertEquals(
        UpgradeUtilities.checksumContents(
            DATA_NODE, new File(dataNodeDirs[i], "current")),
        UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i], "previous").isDirectory());
  }
}
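This two-argument overload checks NameNode and DataNode storage in one pass. A hypothetical call site, mirroring an upgrade test (names assumed, not from this page), might look like:

// Hypothetical call site, assumed for illustration:
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
// ... after the upgraded cluster has come back up:
checkResult(nameNodeDirs, dataNodeDirs);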
Example 3: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
* Verify that the new current directory is the old previous.
* It is assumed that the server has recovered and rolled back.
*/
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
      break;
    case DATA_NODE:
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i], "previous").isDirectory());
  }
}
Example 4: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist. Verify that current has not been modified by comparing
 * the checksum of all the files it contains with their original checksums.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs,
    String bpid) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  File[] dnCurDirs = new File[dataNodeDirs.length];
  for (int i = 0; i < dataNodeDirs.length; i++) {
    dnCurDirs[i] = new File(dataNodeDirs[i], "current");
    assertEquals(
        UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i], false),
        UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i], "previous").isDirectory());
  }
  if (bpid == null) {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      assertFalse(new File(dataNodeDirs[i], "previous").isDirectory());
    }
  } else {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurDirs[i]);
      assertFalse(new File(bpRoot, "previous").isDirectory());
      File bpCurFinalizeDir = new File(bpRoot,
          "current/" + DataStorage.STORAGE_DIR_FINALIZED);
      assertEquals(
          UpgradeUtilities.checksumContents(DATA_NODE, bpCurFinalizeDir, true),
          UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
    }
  }
}
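Example 4 adds the federated block-pool layout: each DataNode storage directory holds a per-block-pool subtree under current. The sketch below spells out the paths the example walks; the concrete data directory and block-pool id are assumptions for illustration.

// Hypothetical layout walk (dnCurDir path and bpid value assumed):
//   <dataDir>/current                           -> dnCurDirs[i]
//   <dataDir>/current/<bpid>                    -> BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir)
//   <dataDir>/current/<bpid>/current/finalized  -> bpCurFinalizeDir
File dnCurDir = new File("/data/dn1", "current");
File bpRoot = BlockPoolSliceStorage.getBpRoot("BP-1-127.0.0.1-1", dnCurDir);
File bpFinalized = new File(bpRoot, "current/" + DataStorage.STORAGE_DIR_FINALIZED);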
Example 5: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist. Verify that current has not been modified by comparing
 * the checksum of all the files it contains with their original checksums.
 * Note that we do not check that previous is removed on the DataNode
 * because its removal is asynchronous, so we have no reliable
 * way to know when it will happen.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  for (int i = 0; i < dataNodeDirs.length; i++) {
    assertEquals(
        UpgradeUtilities.checksumContents(
            DATA_NODE, new File(dataNodeDirs[i], "current")),
        UpgradeUtilities.checksumMasterContents(DATA_NODE));
    File nsBaseDir = NameSpaceSliceStorage.getNsRoot(
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        new File(dataNodeDirs[i], "current"));
    assertEquals(
        UpgradeUtilities.checksumContents(DATA_NODE,
            new File(nsBaseDir, MiniDFSCluster.FINALIZED_DIR_NAME)),
        UpgradeUtilities.checksumDatanodeNSStorageContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i], "previous").isDirectory());
  }
}
Example 6: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
* Verify that the new current directory is the old previous.
* It is assumed that the server has recovered and rolled back.
*/
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
      break;
    case DATA_NODE:
      for (int i = 0; i < baseDirs.length; i++) {
        assertEquals(
            UpgradeUtilities.checksumContents(
                nodeType, new File(baseDirs[i], "current")),
            UpgradeUtilities.checksumMasterContents(nodeType));
        File nsBaseDir = NameSpaceSliceStorage.getNsRoot(
            UpgradeUtilities.getCurrentNamespaceID(cluster),
            new File(baseDirs[i], "current"));
        assertEquals(
            UpgradeUtilities.checksumContents(nodeType,
                new File(nsBaseDir, MiniDFSCluster.FINALIZED_DIR_NAME)),
            UpgradeUtilities.checksumDatanodeNSStorageContents());
      }
      break;
    }
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i], "previous").isDirectory());
  }
}