

Java FileSystem.setVerifyChecksum Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.setVerifyChecksum. If you have been wondering how to use FileSystem.setVerifyChecksum, what its concrete usage looks like, or where to find real-world examples of it, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following shows 4 code examples of the FileSystem.setVerifyChecksum method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
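Before looking at the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: turn client-side checksum verification off, perform the read, then restore the default. This sketch is illustrative only and is not taken from any of the projects below; the file path and the default Configuration are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetVerifyChecksumSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example.dat"); // hypothetical path
    // Disable client-side checksum verification for subsequent reads.
    fs.setVerifyChecksum(false);
    try (FSDataInputStream in = fs.open(file)) {
      byte[] buf = new byte[4096];
      int n = in.read(buf); // bytes come back without checksum verification
      System.out.println("Read " + n + " bytes with verification disabled");
    } finally {
      // Restore the default so later reads are verified again.
      fs.setVerifyChecksum(true);
    }
  }
}

The disable-then-restore pattern matters because setVerifyChecksum changes state on the shared FileSystem instance, so leaving verification off would silently affect every later read through the same object. Example 1 below applies exactly this pattern in a test.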

Example 1: testChecker

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Tests read/seek/getPos/skip operations on an input stream.
 */
private void testChecker(FileSystem fileSys, boolean readCS)
    throws Exception {
  Path file = new Path("try.dat");
  writeFile(fileSys, file);

  try {
    if (!readCS) {
      fileSys.setVerifyChecksum(false);
    }

    stm = fileSys.open(file); // stm is a stream field declared in the enclosing test class
    checkReadAndGetPos();
    checkSeek();
    checkSkip();
    // checkMark: mark/reset is not supported by checksum input streams
    assertFalse(stm.markSupported());
    stm.close();
  } finally {
    if (!readCS) {
      fileSys.setVerifyChecksum(true);
    }
    cleanupFile(fileSys, file);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: TestFSInputChecker.java

Example 2: dfsPreadTest

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source file: TestPread.java

Example 3: run

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * The main driver for <code>DumpTypedBytes</code>.
 */
public int run(String[] args) throws Exception {
  if (args.length == 0) {
    System.err.println("Too few arguments!");
    printUsage();
    return 1;
  }
  Path pattern = new Path(args[0]);
  FileSystem fs = pattern.getFileSystem(getConf());
  fs.setVerifyChecksum(true);
  for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
    List<FileStatus> inputFiles = new ArrayList<FileStatus>();
    FileStatus status = fs.getFileStatus(p);
    if (status.isDirectory()) {
      FileStatus[] files = fs.listStatus(p);
      Collections.addAll(inputFiles, files);
    } else {
      inputFiles.add(status);
    }
    return dumpTypedBytes(inputFiles); // note: returns after the first path matched by the glob
  }
  return -1;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: DumpTypedBytes.java

Example 4: testBlockCompressSequenceFileWriterSync

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * This test simulates what happens when a batch of events is written to a compressed sequence
 * file (and thus hsync'd to hdfs) but the file is not yet closed.
 *
 * When this happens, the data that we wrote should still be readable.
 */
@Test
public void testBlockCompressSequenceFileWriterSync() throws IOException, EventDeliveryException {
  String hdfsPath = testPath + "/sequenceFileWriterSync";
  FileSystem fs = FileSystem.get(new Configuration());
  // Since we are reading a partial file we don't want to use checksums
  fs.setVerifyChecksum(false);
  fs.setWriteChecksum(false);

  // Compression codecs that don't require native hadoop libraries
  String [] codecs = {"BZip2Codec", "DeflateCodec"};

  for (String codec : codecs) {
    sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Collections.singletonList(
        "single-event"
    ));

    sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Arrays.asList(
        "multiple-events-1",
        "multiple-events-2",
        "multiple-events-3",
        "multiple-events-4",
        "multiple-events-5"
    ));
  }

  fs.close();
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 34, Source file: TestHDFSEventSink.java


Note: The org.apache.hadoop.fs.FileSystem.setVerifyChecksum method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.