Java FileUtil.fullyDelete Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.fs.FileUtil.fullyDelete method in Java. If you are wondering what FileUtil.fullyDelete does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.fs.FileUtil.


The following presents 15 code examples of the FileUtil.fullyDelete method, ordered by popularity.
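
Before the numbered examples, here is a minimal, self-contained sketch of the pattern they all share (the scratch-directory name "fully-delete-demo" is hypothetical, chosen only for this illustration): FileUtil.fullyDelete(File) recursively deletes a local directory and everything beneath it, returning true only if the deletion fully succeeded.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical scratch directory used only for this sketch.
    File scratchDir = new File(System.getProperty("java.io.tmpdir"), "fully-delete-demo");
    if (!scratchDir.mkdirs() && !scratchDir.isDirectory()) {
      throw new IOException("Could not create directory '" + scratchDir + "'");
    }
    // Put something inside so the recursive delete has work to do.
    new File(scratchDir, "child.txt").createNewFile();

    // fullyDelete removes the directory and everything under it,
    // returning true only if the deletion fully succeeded.
    if (!FileUtil.fullyDelete(scratchDir)) {
      throw new IOException("Could not delete directory '" + scratchDir + "'");
    }
  }
}

Most of the test snippets below use exactly this call in setup or teardown to guarantee a clean directory state.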

Example 1: testCommandLine

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Test
public void testCommandLine() throws Exception {
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
      // Ignore: the output directory may not exist yet.
    }

    createInput();
    boolean mayExit = false;

    // During tests, the default Configuration will use a local mapred
    // So don't specify -config or -cluster
    job = new StreamJob(genArgs(), mayExit);
    job.go();
    File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    outFile.delete();
    System.err.println("outEx1=" + outputExpect);
    System.err.println("  out1=" + output);
    assertEquals(outputExpect, output);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestStreamAggregate.java

Example 2: testAuxServiceRecoverySetup

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Test
public void testAuxServiceRecoverySetup() throws IOException {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR, TEST_DIR.toString());
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
      new String[] { "Asrv", "Bsrv" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
      RecoverableServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
      RecoverableServiceB.class, Service.class);
  try {
    final AuxServices aux = new AuxServices();
    aux.init(conf);
    Assert.assertEquals(2, aux.getServices().size());
    File auxStorageDir = new File(TEST_DIR,
        AuxServices.STATE_STORE_ROOT_NAME);
    Assert.assertEquals(2, auxStorageDir.listFiles().length);
    aux.close();
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestAuxServices.java

Example 3: startBackupNode

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  
  // Start BackupNode
  String[] args = new String [] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestHDFSServerPorts.java

Example 4: tearDown

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@AfterClass
public static void tearDown() throws Exception {
  if (testMiniKDC != null) {
    testMiniKDC.stop();
  }

  if (testTimelineServer != null) {
    testTimelineServer.stop();
  }

  if (withSsl) {
    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
    File base = new File(BASEDIR);
    FileUtil.fullyDelete(base);
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestTimelineAuthenticationFilter.java

Example 5: main

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Launches all the tasks in order.
 */
public static void main(String[] argv) throws Exception {
  if (argv.length < 2) {
    System.err.println("Usage: TestMapReduce <range> <counts>");
    System.err.println();
    System.err.println("Note: a good test will have a <counts> value" +
        " that is substantially larger than the <range>");
    return;
  }

  int i = 0;
  range = Integer.parseInt(argv[i++]);
  counts = Integer.parseInt(argv[i++]);
  try {
    launch();
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestMapReduce.java

Example 6: setup

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Before
public void setup() throws IOException {
  FileUtil.fullyDelete(TMP_DIR);
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR, TMP_DIR.toString());
  restartStateStore();
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: TestNMLeveldbStateStoreService.java

Example 7: testUnconsumedInput

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Test
public void testUnconsumedInput() throws Exception {
  String outFileName = "part-00000";
  File outFile = null;
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
      // Ignore: the output directory may not exist yet.
    }

    createInput();

    // setup config to ignore unconsumed input
    Configuration conf = new Configuration();
    conf.set("stream.minRecWrittenToEnableSkip_", "0");

    job = new StreamJob();
    job.setConf(conf);
    int exitCode = job.run(genArgs());
    assertEquals("Job failed", 0, exitCode);
    outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    assertEquals("Output was truncated", EXPECTED_OUTPUT_SIZE,
        StringUtils.countMatches(output, "\t"));
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestUnconsumedInput.java

Example 8: cleanup

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@AfterClass
public static void cleanup() throws Exception {
  server.stop();
  FileUtil.fullyDelete(new File(BASEDIR));
  KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
  clientSslFactory.destroy();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 8, Source: TestSSLHttpServer.java

Example 9: formatDataNodeDirs

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
public void formatDataNodeDirs() throws IOException {
  base_dir = new File(determineDfsBaseDir());
  data_dir = new File(base_dir, "data");
  if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
    throw new IOException("Cannot remove data directory: " + data_dir);
  }
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: MiniDFSCluster.java

Example 10: deleteSnapshot

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
String deleteSnapshot(String snapshot) throws IOException {
  final StringBuilder b = new StringBuilder("deleteSnapshot: ")
      .append(snapshot).append(" from ").append(dir);
  FileUtil.fullyDelete(new File(localDir, snapshot));
  dfs.deleteSnapshot(dir, snapshot);
  snapshotPaths.remove(snapshot);
  return b.toString();
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: TestAppendSnapshotTruncate.java

Example 11: setUpNameDirs

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Before
public void setUpNameDirs() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  
  hdfsDir.mkdirs();
  path1 = new File(hdfsDir, "name1");
  path2 = new File(hdfsDir, "name2");
  path3 = new File(hdfsDir, "name3");
  
  path1.mkdir(); path2.mkdir(); path3.mkdir();
  if (!path1.exists() || !path2.exists() || !path3.exists()) {
    throw new IOException("Couldn't create dfs.name dirs in " + hdfsDir.getAbsolutePath());
  }
  
  String dfs_name_dir = path1.getPath() + "," + path2.getPath();
  System.out.println("configuring hdfsdir is " + hdfsDir.getAbsolutePath() +
      "; dfs_name_dir = " + dfs_name_dir + "; dfs_name_edits_dir(only) = " + path3.getPath());
  
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dfs_name_dir);
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, dfs_name_dir + "," + path3.getPath());

  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
 
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
  
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  
  // set the restore feature on
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestStorageRestore.java

Example 12: testGenericKeysForNameNodeFormat

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * HDFS-3013: NameNode format command doesn't pick up
 * dfs.namenode.name.dir.NameServiceId configuration.
 */
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
    throws IOException {
  Configuration conf = new HdfsConfiguration();

  // Set ephemeral ports 
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "127.0.0.1:0");
  
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  
  // Set a nameservice-specific configuration for name dir
  File dir = new File(MiniDFSCluster.getBaseDirectory(),
      "testGenericKeysForNameNodeFormat");
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
      dir.getAbsolutePath());
  
  // Format and verify the right dir is formatted.
  DFSTestUtil.formatNameNode(conf);
  GenericTestUtils.assertExists(dir);

  // Ensure that the same dir is picked up by the running NN
  NameNode nameNode = new NameNode(conf);
  nameNode.stop();
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestValidateConfigurationSettings.java

Example 13: shutdownKdc

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@AfterClass
public static void shutdownKdc() {
  if (kdc != null) {
    kdc.stop();
  }
  FileUtil.fullyDelete(baseDir);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: SaslDataTransferTestCase.java

Example 14: recreateDir

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
private void recreateDir(File dir) throws IOException {
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  try {
    dir.mkdirs();
  } catch (SecurityException e) {
    throw new IOException("creating dir: " + dir, e);
  }
}
 
Developer: dremio, Project: dremio-oss, Lines: 11, Source: MiniZooKeeperCluster.java

Example 15: cleanup

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@After
public void cleanup() throws IOException {
  if (stateStore != null) {
    stateStore.close();
  }
  FileUtil.fullyDelete(TMP_DIR);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestNMLeveldbStateStoreService.java


Note: the org.apache.hadoop.fs.FileUtil.fullyDelete examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code; do not republish without permission.