本文整理汇总了Java中org.apache.hadoop.fs.ChecksumFileSystem类的典型用法代码示例。如果您正苦于以下问题:Java ChecksumFileSystem类的具体用法?Java ChecksumFileSystem怎么用?Java ChecksumFileSystem使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ChecksumFileSystem类属于org.apache.hadoop.fs包,在下文中一共展示了ChecksumFileSystem类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testChecker
import org.apache.hadoop.fs.ChecksumFileSystem; //导入依赖的package包/类
/**
* Tests read/seek/getPos/skipped opeation for input stream.
*/
/**
 * Exercises read/getPos/seek/skip behavior of an input stream opened on
 * {@code fileSys}, and verifies that mark/reset is not supported.
 *
 * @param fileSys checksum file system under test
 * @param readCS  when true the test file is written through the checksum
 *                layer; when false it is written to the raw file system,
 *                so reads go through without checksum verification
 */
private void testChecker(ChecksumFileSystem fileSys, boolean readCS)
throws Exception {
  Path file = new Path("try.dat");
  // Write through the checksum layer or the raw layer, per the flag.
  writeFile(readCS ? fileSys : fileSys.getRawFileSystem(), file);
  stm = fileSys.open(file);
  checkReadAndGetPos();
  checkSeek();
  checkSkip();
  // mark/reset must not be advertised by this stream
  assertFalse(stm.markSupported());
  stm.close();
  cleanupFile(fileSys, file);
}
示例2: smallReadSeek
import org.apache.hadoop.fs.ChecksumFileSystem; //导入依赖的package包/类
/**
 * Reads from the raw (non-checksummed) stream with a tiny buffer and a
 * series of small forward seeks, verifying the bytes after each read
 * (regression coverage for HADOOP-922).
 *
 * @param fileSys file system holding the test file; unwrapped to its raw
 *                layer when it is a {@link ChecksumFileSystem}
 * @param name    path of the pre-written 1&nbsp;MB test file
 * @throws IOException on seek/read failure or premature EOF
 */
private void smallReadSeek(FileSystem fileSys, Path name) throws IOException {
  if (fileSys instanceof ChecksumFileSystem) {
    fileSys = ((ChecksumFileSystem)fileSys).getRawFileSystem();
  }
  // Make the buffer size small to trigger code for HADOOP-922
  FSDataInputStream stmRaw = fileSys.open(name, 1);
  try {
    byte[] expected = new byte[ONEMB];
    Random rand = new Random(seed);
    rand.nextBytes(expected);
    // Issue a simple read first.
    byte[] actual = new byte[128];
    stmRaw.seek(100000);
    // Use readFully: a plain read() may legally return fewer bytes than
    // requested, which would make the verification below compare
    // uninitialized buffer contents.
    stmRaw.readFully(actual, 0, actual.length);
    checkAndEraseData(actual, 100000, expected, "First Small Read Test");
    // now do a small seek of 4 bytes, within the same block.
    int newpos1 = 100000 + 128 + 4;
    stmRaw.seek(newpos1);
    stmRaw.readFully(actual, 0, actual.length);
    checkAndEraseData(actual, newpos1, expected, "Small Seek Bug 1");
    // seek another 256 bytes this time
    int newpos2 = newpos1 + 256;
    stmRaw.seek(newpos2);
    stmRaw.readFully(actual, 0, actual.length);
    checkAndEraseData(actual, newpos2, expected, "Small Seek Bug 2");
  } finally {
    // close even when a verification above throws
    stmRaw.close();
  }
}
示例3: testFSInputChecker
import org.apache.hadoop.fs.ChecksumFileSystem; //导入依赖的package包/类
/**
 * Runs the input-checker test suite twice: once against a checksummed DFS
 * backed by a single-node mini cluster, and once against the local
 * (checksummed) file system.
 */
public void testFSInputChecker() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", BLOCK_SIZE);
  conf.setInt("io.bytes.per.checksum", BYTES_PER_SUM);
  conf.set("fs.hdfs.impl",
      "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
  rand.nextBytes(expected);

  // Pass 1: checksummed DFS.
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  ChecksumFileSystem dfs = (ChecksumFileSystem) cluster.getFileSystem();
  try {
    runCheckerSuite(dfs);
  } finally {
    dfs.close();
    cluster.shutdown();
  }

  // Pass 2: local FS, which additionally supports corruption injection.
  ChecksumFileSystem localFs = FileSystem.getLocal(conf);
  try {
    testChecker(localFs, true);
    testChecker(localFs, false);
    testFileCorruption((LocalFileSystem) localFs);
    testSeekAndRead(localFs);
  } finally {
    localFs.close();
  }
}

/** Shared checks run against a checksummed file system. */
private void runCheckerSuite(ChecksumFileSystem fs) throws Exception {
  testChecker(fs, true);
  testChecker(fs, false);
  testSeekAndRead(fs);
}
示例4: testSeekAndRead
import org.apache.hadoop.fs.ChecksumFileSystem; //导入依赖的package包/类
/**
 * Writes a test file, reopens it with the configured I/O buffer size, and
 * verifies seek-then-read behavior.
 *
 * @param fileSys checksum file system under test
 */
private void testSeekAndRead(ChecksumFileSystem fileSys)
throws IOException {
  Path path = new Path("try.dat");
  writeFile(fileSys, path);
  // honor the configured buffer size, defaulting to 4 KB
  int bufferSize = fileSys.getConf().getInt("io.file.buffer.size", 4096);
  stm = fileSys.open(path, bufferSize);
  checkSeekAndRead();
  stm.close();
  cleanupFile(fileSys, path);
}
示例5: smallReadSeek
import org.apache.hadoop.fs.ChecksumFileSystem; //导入依赖的package包/类
/**
 * Reads from the raw (non-checksummed) stream with a tiny buffer and a
 * series of small forward seeks, verifying the bytes after each read
 * (regression coverage for HADOOP-922).
 *
 * @param fileSys file system holding the test file; unwrapped to its raw
 *                layer when it is a {@link ChecksumFileSystem}
 * @param name    path of the pre-written 1&nbsp;MB test file
 * @throws IOException on seek/read failure or premature EOF
 */
private void smallReadSeek(FileSystem fileSys, Path name) throws IOException {
  if (fileSys instanceof ChecksumFileSystem) {
    fileSys = ((ChecksumFileSystem) fileSys).getRawFileSystem();
  }
  // Make the buffer size small to trigger code for HADOOP-922
  FSDataInputStream stmRaw = fileSys.open(name, 1);
  try {
    byte[] expected = new byte[ONEMB];
    Random rand = new Random(seed);
    rand.nextBytes(expected);
    // Issue a simple read first.
    byte[] actual = new byte[128];
    stmRaw.seek(100000);
    // Use readFully: a plain read() may legally return fewer bytes than
    // requested, which would make the verification below compare
    // uninitialized buffer contents.
    stmRaw.readFully(actual, 0, actual.length);
    checkAndEraseData(actual, 100000, expected, "First Small Read Test");
    // now do a small seek of 4 bytes, within the same block.
    int newpos1 = 100000 + 128 + 4;
    stmRaw.seek(newpos1);
    stmRaw.readFully(actual, 0, actual.length);
    checkAndEraseData(actual, newpos1, expected, "Small Seek Bug 1");
    // seek another 256 bytes this time
    int newpos2 = newpos1 + 256;
    stmRaw.seek(newpos2);
    stmRaw.readFully(actual, 0, actual.length);
    checkAndEraseData(actual, newpos2, expected, "Small Seek Bug 2");
  } finally {
    // close even when a verification above throws
    stmRaw.close();
  }
}
示例6: create
import org.apache.hadoop.fs.ChecksumFileSystem; //导入依赖的package包/类
/**
 * Creates (overwriting) a file for the given name, optionally bypassing
 * the checksum layer of whichever file system the path resolves to.
 *
 * @param filename   name to qualify against the default file system
 * @param noChecksum when true and the resolved FS is checksummed, write
 *                   through its raw file system instead
 * @return an output stream for the newly created file
 * @throws IOException if the file system cannot be resolved or the file
 *                     cannot be created
 */
private FSDataOutputStream create(String filename, boolean noChecksum)
    throws IOException {
  Path filePath = qualifiedPath(filename);
  // even though it was qualified using the default FS, it may not be in it
  FileSystem targetFs = filePath.getFileSystem(getConf());
  if (noChecksum && targetFs instanceof ChecksumFileSystem) {
    targetFs = ((ChecksumFileSystem) targetFs).getRawFileSystem();
  }
  return targetFs.create(filePath, true /* overwrite */);
}