This page collects typical usage examples of org.apache.hadoop.hdfs.RaidDFSUtil.codes in Java. If you are unsure what RaidDFSUtil.codes is or how to use it, the examples selected below may help; you can also look further into the enclosing class org.apache.hadoop.hdfs.RaidDFSUtil.
Six code examples using RaidDFSUtil.codes are shown below, sorted by popularity by default.
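Before the examples, here is a minimal sketch of the pattern they all share: iterate over RaidDFSUtil.codes and resolve each codec name with Codec.getCodec. This sketch is not part of the original page; it assumes the Hadoop RAID class org.apache.hadoop.raid.Codec (with getCodec and the parityDirectory field, as used in the call sites below) is on the classpath, and that the codecs have already been initialized for the current configuration, as the examples' mySetup() presumably does.
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.raid.Codec;

public class RaidCodesSketch {
  public static void main(String[] args) {
    // RaidDFSUtil.codes lists the codec names the tests iterate over
    // (e.g. "xor", as seen in mySetup("xor", 1) below); the exact list
    // depends on the RAID build. Codec.getCodec(name) is assumed to work
    // because the codecs were initialized earlier for this configuration.
    for (String code : RaidDFSUtil.codes) {
      Codec codec = Codec.getCodec(code);
      System.out.println("code=" + code
          + " parityDirectory=" + codec.parityDirectory);
    }
  }
}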
Example 1: testSmallFileDirectory
import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testSmallFileDirectory() throws Exception {
  mySetup();
  int stripeLength = 4;
  long blockSize = 8192L;
  try {
    for (String code: RaidDFSUtil.codes) {
      LOG.info("testSmallFileDirectory: Test code " + code);
      Path sourceDir = new Path("/user/raid");
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[]{1000L, 4000L, 1000L}, blockSize, 4096L);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[]{2000L, 3000L, 2000L, 3000L}, blockSize, 3072L);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[]{3000L, 3000L, 3000L, 3000L}, blockSize, 3072L);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[]{511L, 3584L, 3000L, 1234L, 512L, 1234L, 3000L,
              3234L, 511L}, blockSize, 3584L);
    }
  } finally {
    myTearDown();
  }
}
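Examples 1 to 3 all drive the same validateMultipleFiles helper, which is not shown on this page. Judging from the call sites, it is presumably defined in the same test class and, for the given codec, creates files of the listed lengths under sourceDir with the given block size(s), raids the directory, and validates the result; the final long argument appears to be an expected size used in that validation.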
Example 2: testDifferentBlockSizeFileDirectory
import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testDifferentBlockSizeFileDirectory() throws Exception {
  mySetup();
  int stripeLength = 3;
  long blockSize = 8192L;
  try {
    for (String code: RaidDFSUtil.codes) {
      LOG.info("testDifferentBlockSizeFileDirectory: Test code " + code);
      Path sourceDir = new Path("/user/raid");
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {1000, blockSize, 2*blockSize, 2*blockSize + 1},
          new long[] {blockSize, blockSize, 2*blockSize, blockSize},
          2*blockSize);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize, 2*blockSize, 3*blockSize, 4*blockSize},
          new long[] {blockSize, 2*blockSize, 3*blockSize, blockSize},
          3*blockSize);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize+1, 9*blockSize+1, 2*blockSize+1, blockSize+1},
          new long[] {blockSize, 2*blockSize, 3*blockSize, blockSize},
          2*blockSize+512);
    }
  } finally {
    myTearDown();
  }
}
Example 3: testIdenticalBlockSizeFileDirectory
import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testIdenticalBlockSizeFileDirectory() throws Exception {
  mySetup();
  int stripeLength = 4;
  long blockSize = 8192L;
  try {
    for (String code: RaidDFSUtil.codes) {
      LOG.info("testIdenticalBlockSizeFileDirectory: Test code " + code);
      Path sourceDir = new Path("/user/raid");
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {1000L, blockSize, 2*blockSize, 4000L}, blockSize,
          blockSize);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize, 2*blockSize, 3*blockSize, 4*blockSize},
          blockSize, blockSize);
      int halfBlock = (int)blockSize/2;
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize + halfBlock, 2*blockSize + halfBlock,
              3*blockSize + halfBlock, 4*blockSize + halfBlock},
          blockSize, blockSize);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize+1, 9*blockSize+1, 2*blockSize+1,
              3*blockSize+1}, blockSize, blockSize);
    }
  } finally {
    myTearDown();
  }
}
Example 4: testOneFileDirectory
import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testOneFileDirectory() throws Exception {
  mySetup();
  int stripeLength = 4;
  try {
    for (String code: RaidDFSUtil.codes) {
      LOG.info("testOneFileDirectory: Test code " + code);
      Codec codec = loadTestCodecs(code, stripeLength, true);
      Path sourceDir = new Path("/user/raid", code);
      assertTrue(fileSys.mkdirs(sourceDir));
      Path twoBlockFile = new Path(sourceDir, "twoBlockFile");
      LOG.info("Test one file with 2 blocks");
      TestRaidDfs.createTestFile(fileSys, twoBlockFile, 2, 2, 8192L);
      assertTrue(fileSys.exists(twoBlockFile));
      assertFalse("Not enough blocks in the directory",
          RaidNode.doRaid(conf, fileSys.getFileStatus(sourceDir),
              new Path(codec.parityDirectory), codec,
              new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
              false, 1, 1));
      fileSys.delete(twoBlockFile, true);
      LOG.info("Test one file with blocks less than one stripe");
      validateSingleFile(code, fileSys, sourceDir, stripeLength, 3, false);
      validateSingleFile(code, fileSys, sourceDir, stripeLength, 3, true);
      LOG.info("Test one file with one stripe blocks");
      validateSingleFile(code, fileSys, sourceDir, stripeLength,
          stripeLength, false);
      validateSingleFile(code, fileSys, sourceDir, stripeLength,
          stripeLength, true);
      LOG.info("Test one file with more than one stripe blocks");
      validateSingleFile(code, fileSys, sourceDir, stripeLength,
          stripeLength + 2, false);
      validateSingleFile(code, fileSys, sourceDir, stripeLength,
          stripeLength + 2, true);
    }
  } finally {
    myTearDown();
  }
}
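Example 4 first exercises the degenerate case: a directory whose only file has just two blocks does not provide enough blocks for a stripe of length 4, so RaidNode.doRaid is expected to return false (hence the assertFalse with the message "Not enough blocks in the directory"). It then uses validateSingleFile to cover a single file with fewer blocks than a stripe, exactly one stripe, and more than one stripe, running each case with the trailing boolean flag set both ways.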
Example 5: testTooManyErrorsDecode
import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testTooManyErrorsDecode() throws Exception {
  LOG.info("testTooManyErrorsDecode start");
  long blockSize = 8192L;
  stripeLength = 3;
  mySetup("xor", 1);
  long[][] fsizes = {{2000L, 3000L, 2000L},
                     {blockSize + 1, blockSize + 1},
                     {2*blockSize, blockSize + blockSize/2}};
  long[][] bsizes = {{blockSize, blockSize, blockSize},
                     {blockSize, blockSize},
                     {2*blockSize, blockSize}};
  Integer[][] corrupts = {{0, 1}, {0, 2}, {1, 2}};
  try {
    for (String code: RaidDFSUtil.codes) {
      Codec curCodec = Codec.getCodec(code);
      Path srcDir = new Path("/user/dhruba/" + code);
      for (int i = 0; i < corrupts.length; i++) {
        for (int j = 0; j < fsizes.length; j++) {
          long[] crcs = new long[fsizes[j].length];
          int[] seeds = new int[fsizes[j].length];
          Path parityDir = new Path(curCodec.parityDirectory);
          RaidDFSUtil.cleanUp(fileSys, srcDir);
          RaidDFSUtil.cleanUp(fileSys, parityDir);
          TestRaidDfs.createTestFiles(srcDir, fsizes[j],
              bsizes[j], crcs, seeds, fileSys, (short)1);
          assertTrue(RaidNode.doRaid(conf,
              fileSys.getFileStatus(srcDir),
              new Path(curCodec.parityDirectory), curCodec,
              new RaidNode.Statistics(),
              RaidUtils.NULL_PROGRESSABLE,
              false, 1, 1));
          boolean expectedExceptionThrown = false;
          try {
            corruptBlocksInDirectory(conf, srcDir,
                crcs, corrupts[i], fileSys, dfs, true, false);
            // Should not reach here; the call above is expected to throw.
          } catch (IOException e) {
            LOG.info("Expected exception caught: " + e);
            expectedExceptionThrown = true;
          }
          assertTrue(expectedExceptionThrown);
        }
      }
    }
    LOG.info("testTooManyErrorsDecode complete");
  } finally {
    myTearDown();
  }
}
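Example 5 corrupts two blocks per run (the index pairs in corrupts) for each file-size/block-size combination. With the "xor" configuration set up by mySetup("xor", 1), two lost blocks exceed what the code can reconstruct, so the corruption-and-recovery step is expected to fail: the test treats the IOException from corruptBlocksInDirectory as the expected outcome and asserts that it was thrown.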
Example 6: testTooManyErrorsEncode
import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testTooManyErrorsEncode() throws Exception {
  LOG.info("testTooManyErrorsEncode start");
  stripeLength = 3;
  mySetup("xor", 1);
  // Encoding should fail when even one block is corrupt.
  Random rand = new Random();
  try {
    for (String code: RaidDFSUtil.codes) {
      Codec curCodec = Codec.getCodec(code);
      Path srcDir = new Path("/user/dhruba/" + code);
      for (int j = 0; j < fileSizes.length; j++) {
        long[] crcs = new long[fileSizes[j].length];
        int[] seeds = new int[fileSizes[j].length];
        Path parityDir = new Path(curCodec.parityDirectory);
        RaidDFSUtil.cleanUp(fileSys, srcDir);
        RaidDFSUtil.cleanUp(fileSys, parityDir);
        TestRaidDfs.createTestFiles(srcDir, fileSizes[j],
            blockSizes[j], crcs, seeds, fileSys, (short)1);
        corruptBlocksInDirectory(conf, srcDir,
            crcs, new Integer[]{rand.nextInt() % 3},
            fileSys, dfs, false, false);
        boolean expectedExceptionThrown = false;
        try {
          RaidNode.doRaid(conf, fileSys.getFileStatus(srcDir),
              new Path(curCodec.parityDirectory), curCodec,
              new RaidNode.Statistics(),
              RaidUtils.NULL_PROGRESSABLE,
              false, 1, 1);
          // Should not reach here; doRaid is expected to throw.
        } catch (IOException e) {
          LOG.info("Expected exception caught: " + e);
          expectedExceptionThrown = true;
        }
        assertTrue(expectedExceptionThrown);
      }
    }
    LOG.info("testTooManyErrorsEncode complete");
  } finally {
    myTearDown();
  }
}
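Example 6 checks the opposite direction: as the comment at the top of the method states, encoding should fail when even one block is corrupt. A single source block is corrupted via corruptBlocksInDirectory before raiding, so the subsequent RaidNode.doRaid call is expected to throw an IOException, which the test records and asserts on.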