

Java FileUtil.unTar Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileUtil.unTar. If you are wondering what FileUtil.unTar does, how to call it, or what real-world uses of it look like, the hand-picked code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileUtil.


Five code examples of the FileUtil.unTar method are shown below, sorted by popularity by default.
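Before diving into the examples, here is a minimal, hypothetical sketch of a typical FileUtil.unTar call; the archive and target paths are placeholders, not taken from the examples below. unTar extracts a tar archive (optionally gzip-compressed) into the given directory:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on

public class UnTarSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical paths: point these at a real .tgz archive and a target directory.
    File srcTarFile = new File("/tmp/example-data.tgz");
    File untarDir = new File("/tmp/example-untarred");

    if (!srcTarFile.exists()) {
      throw new IOException(srcTarFile + " does not exist");
    }
    // Extracts the (optionally gzipped) tar archive into untarDir,
    // creating the directory if it does not exist.
    FileUtil.unTar(srcTarFile, untarDir);
    System.out.println("Untarred " + srcTarFile + " into " + untarDir);
  }
}

Note that on non-Windows platforms FileUtil.unTar typically shells out to the system tar command, so the tar binary must be available on the PATH; on Windows a pure-Java extraction is used instead.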

Example 1: untar

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
static File untar(final File testdir) throws IOException {
  // Find the src data under src/test/data
  final String datafile = "TestNamespaceUpgrade";
  File srcTarFile = new File(
    System.getProperty("project.build.testSourceDirectory", "src/test") +
    File.separator + "data" + File.separator + datafile + ".tgz");
  File homedir = new File(testdir.toString());
  File tgtUntarDir = new File(homedir, "hbase");
  if (tgtUntarDir.exists()) {
    if (!FileUtil.fullyDelete(tgtUntarDir)) {
      throw new IOException("Failed delete of " + tgtUntarDir.toString());
    }
  }
  if (!srcTarFile.exists()) {
    throw new IOException(srcTarFile+" does not exist");
  }
  LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
  FileUtil.unTar(srcTarFile, homedir);
  Assert.assertTrue(tgtUntarDir.exists());
  return tgtUntarDir;
}
 
Author: fengchen8086, Project: ditb, Lines: 22, Source: TestNamespaceUpgrade.java

Example 2: untar

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
private static File untar(final File testdir) throws IOException {
  // Find the src data under src/test/data
  final String datafile = "TestMetaMigrationConvertToPB";
  String srcTarFile =
    System.getProperty("project.build.testSourceDirectory", "src/test") +
    File.separator + "data" + File.separator + datafile + ".tgz";
  File homedir = new File(testdir.toString());
  File tgtUntarDir = new File(homedir, datafile);
  if (tgtUntarDir.exists()) {
    if (!FileUtil.fullyDelete(tgtUntarDir)) {
      throw new IOException("Failed delete of " + tgtUntarDir.toString());
    }
  }
  LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
  FileUtil.unTar(new File(srcTarFile), homedir);
  Assert.assertTrue(tgtUntarDir.exists());
  return tgtUntarDir;
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: TestMetaMigrationConvertingToPB.java

Example 3: testZeroBlockSize

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * In this test case we load an image containing a file whose
 * preferredBlockSize is 0. Such files were allowed before the
 * 2.1.0-beta release, but a namenode at version 2.6 or later cannot
 * read this particular file without the fix.
 * See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Author: naver, Project: hadoop, Lines: 42, Source: TestFSImage.java

Example 4: testLoadLogsFromBuggyEarlierVersions

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This test ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Author: naver, Project: hadoop, Lines: 43, Source: TestFileAppendRestart.java

Example 5: testEarlierVersionEditLog

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Earlier versions of HDFS didn't persist block allocation to the edit log.
 * This makes sure that we can still load an edit log when the OP_CLOSE
 * is the opcode which adds all of the blocks. This is a regression
 * test for HDFS-2773.
 * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
 * which has a multi-block file. This is similar to the tests in
 * {@link TestDFSUpgradeFromImage} but none of those images include
 * a multi-block file.
 */
@Test
public void testEarlierVersionEditLog() throws Exception {
  final Configuration conf = new HdfsConfiguration();
      
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-1.0");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  File dataDir = new File(dfsDir, "data");
  GenericTestUtils.assertExists(dataDir);
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
  
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(1)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/user/todd/4blocks");
    // Read it without caring about the actual data within - we just need
    // to make sure that the block states and locations are OK.
    DFSTestUtil.readFile(fs, testPath);
    
    // Ensure that we can append to it - if the blocks were in some funny
    // state we'd get some kind of issue here. 
    FSDataOutputStream stm = fs.append(testPath);
    try {
      stm.write(1);
    } finally {
      IOUtils.closeStream(stm);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Author: naver, Project: hadoop, Lines: 58, Source: TestPersistBlocks.java


Note: the org.apache.hadoop.fs.FileUtil.unTar method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use should follow the license of the corresponding project. Please do not reproduce this article without permission.