This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.setVerifyChecksum. If you are wondering what FileSystem.setVerifyChecksum does, how to call it, or where to find examples of it, the curated code samples below should help. You can also explore the enclosing class, org.apache.hadoop.fs.FileSystem, for further usage examples.
Four code examples of the FileSystem.setVerifyChecksum method are shown below, sorted by popularity by default.
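Before looking at the numbered examples, here is a minimal sketch of the typical call pattern: disable client-side checksum verification on a FileSystem, read from it, and restore the default afterwards. The class name and the path /tmp/example.dat are hypothetical placeholders chosen for illustration; this sketch is not taken from any of the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class VerifyChecksumSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example.dat"); // hypothetical path, for illustration only
    try {
      // Skip client-side checksum verification for subsequent reads on this FileSystem.
      fs.setVerifyChecksum(false);
      try (FSDataInputStream in = fs.open(file)) {
        IOUtils.copyBytes(in, System.out, 4096, false);
      }
    } finally {
      // Restore the default so later reads are verified again.
      fs.setVerifyChecksum(true);
      fs.close();
    }
  }
}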
Example 1: testChecker
import org.apache.hadoop.fs.FileSystem; // import the class the method depends on
import org.apache.hadoop.fs.Path;

/**
 * Tests read/seek/getPos/skip operations for the input stream.
 */
private void testChecker(FileSystem fileSys, boolean readCS)
    throws Exception {
  Path file = new Path("try.dat");
  writeFile(fileSys, file);
  try {
    if (!readCS) {
      fileSys.setVerifyChecksum(false);
    }
    stm = fileSys.open(file);
    checkReadAndGetPos();
    checkSeek();
    checkSkip();
    // checkMark
    assertFalse(stm.markSupported());
    stm.close();
  } finally {
    if (!readCS) {
      // Restore checksum verification before cleaning up.
      fileSys.setVerifyChecksum(true);
    }
    cleanupFile(fileSys, file);
  }
}
Example 2: dfsPreadTest
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; // import the class the method depends on
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
Example 3: run
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; // import the class the method depends on
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

/**
 * The main driver for <code>DumpTypedBytes</code>.
 */
public int run(String[] args) throws Exception {
  if (args.length == 0) {
    System.err.println("Too few arguments!");
    printUsage();
    return 1;
  }
  Path pattern = new Path(args[0]);
  FileSystem fs = pattern.getFileSystem(getConf());
  fs.setVerifyChecksum(true);
  for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
    List<FileStatus> inputFiles = new ArrayList<FileStatus>();
    FileStatus status = fs.getFileStatus(p);
    if (status.isDirectory()) {
      FileStatus[] files = fs.listStatus(p);
      Collections.addAll(inputFiles, files);
    } else {
      inputFiles.add(status);
    }
    // Dumps the first matching path and returns immediately.
    return dumpTypedBytes(inputFiles);
  }
  return -1;
}
Example 4: testBlockCompressSequenceFileWriterSync
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;

import org.apache.flume.EventDeliveryException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; // import the class the method depends on
import org.junit.Test;

/**
 * This test simulates what happens when a batch of events is written to a compressed sequence
 * file (and thus hsync'd to HDFS) but the file is not yet closed.
 *
 * When this happens, the data that we wrote should still be readable.
 */
@Test
public void testBlockCompressSequenceFileWriterSync() throws IOException, EventDeliveryException {
  String hdfsPath = testPath + "/sequenceFileWriterSync";
  FileSystem fs = FileSystem.get(new Configuration());
  // Since we are reading a partial file we don't want to use checksums
  fs.setVerifyChecksum(false);
  fs.setWriteChecksum(false);
  // Compression codecs that don't require native hadoop libraries
  String[] codecs = {"BZip2Codec", "DeflateCodec"};
  for (String codec : codecs) {
    sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Collections.singletonList(
        "single-event"
    ));
    sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Arrays.asList(
        "multiple-events-1",
        "multiple-events-2",
        "multiple-events-3",
        "multiple-events-4",
        "multiple-events-5"
    ));
  }
  fs.close();
}