

Java FileUtil.unTar Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileUtil.unTar. If you are wondering what FileUtil.unTar does, how to use it, or where to find examples of it, the curated method code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileUtil.


The following shows 5 code examples of the FileUtil.unTar method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
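Before the examples, here is a minimal sketch of a typical FileUtil.unTar call. The class name UnTarSketch and the file paths are hypothetical and only for illustration; the sketch assumes a gzip-compressed tarball (.tgz) on the local filesystem.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class UnTarSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical paths for illustration only.
    File srcTarFile = new File("/tmp/data.tgz");  // .tar or gzip-compressed .tgz archive
    File untarDir = new File("/tmp/untarred");    // directory to extract into

    // unTar extracts the archive into the target directory;
    // it throws an IOException if extraction fails.
    FileUtil.unTar(srcTarFile, untarDir);
  }
}

As the examples below show, the typical pattern in tests is to remove any previous extraction target with FileUtil.fullyDelete before calling unTar.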

Example 1: untar

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
static File untar(final File testdir) throws IOException {
  // Find the src data under src/test/data
  final String datafile = "TestNamespaceUpgrade";
  File srcTarFile = new File(
    System.getProperty("project.build.testSourceDirectory", "src/test") +
    File.separator + "data" + File.separator + datafile + ".tgz");
  File homedir = new File(testdir.toString());
  File tgtUntarDir = new File(homedir, "hbase");
  if (tgtUntarDir.exists()) {
    if (!FileUtil.fullyDelete(tgtUntarDir)) {
      throw new IOException("Failed delete of " + tgtUntarDir.toString());
    }
  }
  if (!srcTarFile.exists()) {
    throw new IOException(srcTarFile+" does not exist");
  }
  LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
  FileUtil.unTar(srcTarFile, homedir);
  Assert.assertTrue(tgtUntarDir.exists());
  return tgtUntarDir;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 22, Source: TestNamespaceUpgrade.java

Example 2: untar

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
private static File untar(final File testdir) throws IOException {
  // Find the src data under src/test/data
  final String datafile = "TestMetaMigrationConvertToPB";
  String srcTarFile =
    System.getProperty("project.build.testSourceDirectory", "src/test") +
    File.separator + "data" + File.separator + datafile + ".tgz";
  File homedir = new File(testdir.toString());
  File tgtUntarDir = new File(homedir, datafile);
  if (tgtUntarDir.exists()) {
    if (!FileUtil.fullyDelete(tgtUntarDir)) {
      throw new IOException("Failed delete of " + tgtUntarDir.toString());
    }
  }
  LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
  FileUtil.unTar(new File(srcTarFile), homedir);
  Assert.assertTrue(tgtUntarDir.exists());
  return tgtUntarDir;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: TestMetaMigrationConvertingToPB.java

Example 3: testZeroBlockSize

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * In this test case, an image has been created with a file having
 * preferredBlockSize = 0. We try to read this image, since a file with
 * preferredBlockSize = 0 was allowed prior to the 2.1.0-beta release;
 * a NameNode after version 2.6 will not be able to read this particular
 * file. See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 42, Source: TestFSImage.java

Example 4: testLoadLogsFromBuggyEarlierVersions

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * not to log an OP_ADD. This test ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(0)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source: TestFileAppendRestart.java

Example 5: testEarlierVersionEditLog

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Earlier versions of HDFS didn't persist block allocation to the edit log.
 * This makes sure that we can still load an edit log when the OP_CLOSE
 * is the opcode which adds all of the blocks. This is a regression
 * test for HDFS-2773.
 * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
 * which has a multi-block file. This is similar to the tests in
 * {@link TestDFSUpgradeFromImage} but none of those images include
 * a multi-block file.
 */
@Test
public void testEarlierVersionEditLog() throws Exception {
  final Configuration conf = new HdfsConfiguration();
      
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-1.0");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  File dataDir = new File(dfsDir, "data");
  GenericTestUtils.assertExists(dataDir);
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
  
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(1)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/user/todd/4blocks");
    // Read it without caring about the actual data within - we just need
    // to make sure that the block states and locations are OK.
    DFSTestUtil.readFile(fs, testPath);
    
    // Ensure that we can append to it - if the blocks were in some funny
    // state we'd get some kind of issue here. 
    FSDataOutputStream stm = fs.append(testPath);
    try {
      stm.write(1);
    } finally {
      IOUtils.closeStream(stm);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 58, Source: TestPersistBlocks.java


Note: The org.apache.hadoop.fs.FileUtil.unTar method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not republish without permission.