This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertParallelFilesAreIdentical. If you are wondering what FSImageTestUtil.assertParallelFilesAreIdentical does, how to call it, or what real-world uses look like, the curated samples below may help. You can also read further about the enclosing class, org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.
The following shows 8 code examples of FSImageTestUtil.assertParallelFilesAreIdentical, ordered roughly by popularity.
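Before the examples, here is a minimal sketch of the call pattern the samples below share: collect the storage directories to compare into a List<File>, then pass them to assertParallelFilesAreIdentical together with a Set of file names to skip. The wrapper class, the method name assertStorageDirsMatch, and the ignored seen_txid entry are illustrative assumptions for this sketch, not taken from any single example.

import java.io.File;
import java.util.List;
import java.util.Set;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;

// Illustrative sketch: compare two storage directories file by file,
// skipping entries (such as seen_txid) that are expected to differ.
class ParallelFilesSketch {
  static void assertStorageDirsMatch(File dir1, File dir2) throws Exception {
    List<File> dirs = Lists.newArrayList(dir1, dir2);
    Set<String> ignoredFiles = ImmutableSet.of("seen_txid");
    // Fails the test if any file present in one directory is missing from
    // the other or has different contents.
    FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ignoredFiles);
  }
}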
Example 1: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
      break;
    case DATA_NODE:
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i], "previous").isDirectory());
  }
}
Example 2: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist. Verify that current has not been modified by comparing
 * the checksum of all the files it contains with their original checksums.
 * Note that we do not check that previous is removed on the DataNode
 * because its removal is asynchronous, so we have no reliable way to know
 * when it will happen.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  for (int i = 0; i < dataNodeDirs.length; i++) {
    assertEquals(
        UpgradeUtilities.checksumContents(
            DATA_NODE, new File(dataNodeDirs[i], "current")),
        UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i], "previous").isDirectory());
  }
}
Example 3: testBothNodesInStandbyState
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Test for the case when both of the NNs in the cluster are
 * in the standby state, and thus are both creating checkpoints
 * and uploading them to each other.
 * In this circumstance, they should receive the error from the
 * other node indicating that the other node already has a
 * checkpoint for the given txid, but this should not cause
 * an abort, etc.
 */
@Test
public void testBothNodesInStandbyState() throws Exception {
  doEdits(0, 10);
  cluster.transitionToStandby(0);
  // Transitioning to standby closed the edit log on the active,
  // so the standby will catch up. Then, both will be in standby mode
  // with enough uncheckpointed txns to cause a checkpoint, and they
  // will each try to take a checkpoint and upload to each other.
  HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
  HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
  assertEquals(12, nn0.getNamesystem().getFSImage()
      .getMostRecentCheckpointTxId());
  assertEquals(12, nn1.getNamesystem().getFSImage()
      .getMostRecentCheckpointTxId());
  List<File> dirs = Lists.newArrayList();
  dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0));
  dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1));
  FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.<String>of());
}
Example 4: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
      break;
    case DATA_NODE:
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i], "previous").isDirectory());
  }
}
Example 5: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist. Verify that current has not been modified by comparing
 * the checksum of all the files it contains with their original checksums.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs,
    String bpid) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  File[] dnCurDirs = new File[dataNodeDirs.length];
  for (int i = 0; i < dataNodeDirs.length; i++) {
    dnCurDirs[i] = new File(dataNodeDirs[i], "current");
    assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
        false), UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i], "previous").isDirectory());
  }
  if (bpid == null) {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      assertFalse(new File(dataNodeDirs[i], "previous").isDirectory());
    }
  } else {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurDirs[i]);
      assertFalse(new File(bpRoot, "previous").isDirectory());
      File bpCurFinalizeDir = new File(bpRoot,
          "current/" + DataStorage.STORAGE_DIR_FINALIZED);
      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
          bpCurFinalizeDir, true),
          UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
    }
  }
}
Example 6: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist. Verify that current has not been modified by comparing
 * the checksum of all the files it contains with their original checksums.
 * Note that we do not check that previous is removed on the DataNode
 * because its removal is asynchronous, so we have no reliable way to know
 * when it will happen.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  for (int i = 0; i < dataNodeDirs.length; i++) {
    assertEquals(
        UpgradeUtilities.checksumContents(
            DATA_NODE, new File(dataNodeDirs[i], "current")),
        UpgradeUtilities.checksumMasterContents(DATA_NODE));
    File nsBaseDir = NameSpaceSliceStorage.getNsRoot(
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        new File(dataNodeDirs[i], "current"));
    assertEquals(
        UpgradeUtilities.checksumContents(DATA_NODE, new File(nsBaseDir,
            MiniDFSCluster.FINALIZED_DIR_NAME)),
        UpgradeUtilities.checksumDatanodeNSStorageContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i], "previous").isDirectory());
  }
}
Example 7: checkResult
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
      break;
    case DATA_NODE:
      for (int i = 0; i < baseDirs.length; i++) {
        assertEquals(
            UpgradeUtilities.checksumContents(
                nodeType, new File(baseDirs[i], "current")),
            UpgradeUtilities.checksumMasterContents(nodeType));
        File nsBaseDir = NameSpaceSliceStorage.getNsRoot(
            UpgradeUtilities.getCurrentNamespaceID(cluster),
            new File(baseDirs[i], "current"));
        assertEquals(
            UpgradeUtilities.checksumContents(nodeType, new File(nsBaseDir,
                MiniDFSCluster.FINALIZED_DIR_NAME)),
            UpgradeUtilities.checksumDatanodeNSStorageContents());
      }
      break;
    }
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i], "previous").isDirectory());
  }
}
Example 8: assertNNFilesMatch
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class the method depends on
private void assertNNFilesMatch() throws Exception {
  List<File> curDirs = Lists.newArrayList();
  curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0));
  curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1));
  // Ignore the seen_txid file, since the newly bootstrapped standby
  // will have a higher seen_txid than the one it bootstrapped from.
  Set<String> ignoredFiles = ImmutableSet.of("seen_txid");
  FSImageTestUtil.assertParallelFilesAreIdentical(curDirs,
      ignoredFiles);
}