

Java FileUtil.fullyDelete Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileUtil.fullyDelete, drawn from open-source projects. If you have been asking how FileUtil.fullyDelete works, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileUtil.


The following sections present 15 code examples of the FileUtil.fullyDelete method, sorted by popularity by default.
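
Before diving into the examples, here is a minimal, self-contained sketch of the method's basic contract: FileUtil.fullyDelete(File) recursively deletes a directory and everything under it, returning a boolean success flag. The scratch path below is hypothetical, and the exact return value for an already-missing target varies across Hadoop versions, so treat this as an orientation sketch rather than authoritative behavior.

import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteDemo {
  public static void main(String[] args) {
    // Hypothetical scratch directory; adjust for your environment.
    File dir = new File(System.getProperty("java.io.tmpdir"), "fullyDelete-demo");
    dir.mkdirs();

    // Recursively delete the directory and all of its contents.
    boolean deleted = FileUtil.fullyDelete(dir);
    System.out.println("deleted=" + deleted + ", stillExists=" + dir.exists());
  }
}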

Example 1: testCommandLine

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Test
public void testCommandLine() throws Exception {
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
      // ignore: the output directory may not exist yet
    }

    createInput();
    boolean mayExit = false;

    // During tests, the default Configuration will use a local mapred
    // So don't specify -config or -cluster
    job = new StreamJob(genArgs(), mayExit);      
    job.go();
    File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    outFile.delete();
    System.err.println("outEx1=" + outputExpect);
    System.err.println("  out1=" + output);
    assertEquals(outputExpect, output);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: TestStreamAggregate.java
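
Example 1 references several fixtures (INPUT_FILE, OUTPUT_DIR, createInput, genArgs, outputExpect) that are defined elsewhere in TestStreamAggregate. As an orientation aid, here is a minimal sketch of what such declarations could look like; the paths, record contents, and class name are assumptions, not copied from the source.

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

// Hypothetical fixtures for a streaming test shaped like Example 1.
public abstract class StreamTestFixtures {
  protected final File INPUT_FILE = new File("target/input.txt"); // assumed path
  protected final File OUTPUT_DIR = new File("target/out");       // assumed path

  protected void createInput() throws IOException {
    // Write a few records for the streaming job to consume.
    try (DataOutputStream out = new DataOutputStream(
        new FileOutputStream(INPUT_FILE.getAbsoluteFile()))) {
      out.write("roses are red\nviolets are blue\n".getBytes("UTF-8"));
    }
  }
}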

Example 2: testAuxServiceRecoverySetup

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Test
public void testAuxServiceRecoverySetup() throws IOException {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR, TEST_DIR.toString());
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
      new String[] { "Asrv", "Bsrv" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
      RecoverableServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
      RecoverableServiceB.class, Service.class);
  try {
    final AuxServices aux = new AuxServices();
    aux.init(conf);
    Assert.assertEquals(2, aux.getServices().size());
    File auxStorageDir = new File(TEST_DIR,
        AuxServices.STATE_STORE_ROOT_NAME);
    Assert.assertEquals(2, auxStorageDir.listFiles().length);
    aux.close();
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: TestAuxServices.java

Example 3: startBackupNode

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  
  // Start BackupNode
  String[] args = new String [] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source: TestHDFSServerPorts.java
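
A hedged sketch of how a caller might drive startBackupNode; the surrounding method is hypothetical, and the shutdown call relies on BackupNode extending NameNode, which provides stop().

// Hypothetical caller: start the BackupNode, exercise it, then shut it down.
void exerciseBackupNode() throws IOException {
  Configuration conf = new HdfsConfiguration();
  BackupNode backup = startBackupNode(conf);
  try {
    // ... interact with the running BackupNode here ...
  } finally {
    backup.stop();
  }
}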

Example 4: tearDown

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@AfterClass
public static void tearDown() throws Exception {
  if (testMiniKDC != null) {
    testMiniKDC.stop();
  }

  if (testTimelineServer != null) {
    testTimelineServer.stop();
  }

  if (withSsl) {
    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
    File base = new File(BASEDIR);
    FileUtil.fullyDelete(base);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: TestTimelineAuthenticationFilter.java

Example 5: main

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Launches all the tasks in order.
 */
public static void main(String[] argv) throws Exception {
  if (argv.length < 2) {
    System.err.println("Usage: TestMapReduce <range> <counts>");
    System.err.println();
    System.err.println("Note: a good test will have a <counts> value" +
        " that is substantially larger than the <range>");
    return;
  }

  int i = 0;
  range = Integer.parseInt(argv[i++]);
  counts = Integer.parseInt(argv[i++]);
  try {
    launch();
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source: TestMapReduce.java

Example 6: setup

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Before
public void setup() throws IOException {
  FileUtil.fullyDelete(TMP_DIR);
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR, TMP_DIR.toString());
  restartStateStore();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source: TestNMLeveldbStateStoreService.java
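
Examples 6 and 15 (the last example below) come from the same test class and share fixtures that the snippets do not show. The following sketch of plausible declarations is an assumption; the TMP_DIR path and the body of restartStateStore are reconstructed, not quoted.

// Hypothetical shared fixtures for TestNMLeveldbStateStoreService.
private static final File TMP_DIR = new File(
    System.getProperty("test.build.data", "target/test-data"), "nm-recovery"); // assumed path
private YarnConfiguration conf;
private NMLeveldbStateStoreService stateStore;

private void restartStateStore() throws IOException {
  // Close any previous instance so leveldb releases its lock,
  // then reopen against the same recovery directory.
  if (stateStore != null) {
    stateStore.close();
  }
  stateStore = new NMLeveldbStateStoreService();
  stateStore.init(conf);
  stateStore.start();
}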

Example 7: testUnconsumedInput

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Test
public void testUnconsumedInput() throws Exception
{
  String outFileName = "part-00000";
  File outFile = null;
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
      // ignore: the output directory may not exist yet
    }

    createInput();

    // setup config to ignore unconsumed input
    Configuration conf = new Configuration();
    conf.set("stream.minRecWrittenToEnableSkip_", "0");

    job = new StreamJob();
    job.setConf(conf);
    int exitCode = job.run(genArgs());
    assertEquals("Job failed", 0, exitCode);
    outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    assertEquals("Output was truncated", EXPECTED_OUTPUT_SIZE,
        StringUtils.countMatches(output, "\t"));
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 31, Source: TestUnconsumedInput.java

Example 8: cleanup

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@AfterClass
public static void cleanup() throws Exception {
  server.stop();
  FileUtil.fullyDelete(new File(BASEDIR));
  KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
  clientSslFactory.destroy();
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 8, Source: TestSSLHttpServer.java

Example 9: formatDataNodeDirs

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
public void formatDataNodeDirs() throws IOException {
  base_dir = new File(determineDfsBaseDir());
  data_dir = new File(base_dir, "data");
  if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
    throw new IOException("Cannot remove data directory: " + data_dir);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source: MiniDFSCluster.java
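
Examples 3 and 9 above (and Example 11 below) all repeat the same guard: if the directory exists and fullyDelete returns false, abort with an IOException. The idiom generalizes into a small helper; this is a sketch of the pattern, not an existing Hadoop API.

// Hypothetical helper capturing the delete-or-throw idiom.
static void deleteOrFail(File dir) throws IOException {
  if (dir.exists() && !FileUtil.fullyDelete(dir)) {
    throw new IOException("Could not delete directory '" + dir + "'");
  }
}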

Example 10: deleteSnapshot

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
String deleteSnapshot(String snapshot) throws IOException {
  final StringBuilder b = new StringBuilder("deleteSnapshot: ")
      .append(snapshot).append(" from ").append(dir);
  FileUtil.fullyDelete(new File(localDir, snapshot));
  dfs.deleteSnapshot(dir, snapshot);
  snapshotPaths.remove(snapshot);
  return b.toString();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source: TestAppendSnapshotTruncate.java
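
Example 10's snippet likewise leans on fields defined elsewhere in TestAppendSnapshotTruncate. A sketch of declarations consistent with how they are used (types inferred from the calls, not copied from the source):

// Hypothetical field declarations matching Example 10's usage.
private DistributedFileSystem dfs;   // issues the deleteSnapshot call
private Path dir;                    // HDFS directory being snapshotted
private File localDir;               // local mirror of snapshot contents
private final Map<String, Path> snapshotPaths = new HashMap<>();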

Example 11: setUpNameDirs

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Before
public void setUpNameDirs() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  
  hdfsDir.mkdirs();
  path1 = new File(hdfsDir, "name1");
  path2 = new File(hdfsDir, "name2");
  path3 = new File(hdfsDir, "name3");
  
  path1.mkdir(); path2.mkdir(); path3.mkdir();
  if (!path2.exists() || !path3.exists() || !path1.exists()) {
    throw new IOException("Couldn't create dfs.name dirs in " + hdfsDir.getAbsolutePath());
  }
  
  String dfs_name_dir = path1.getPath() + "," + path2.getPath();
  System.out.println("configuring hdfsdir is " + hdfsDir.getAbsolutePath() +
      "; dfs_name_dir = " + dfs_name_dir + "; dfs_name_edits_dir(only) = " + path3.getPath());
  
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dfs_name_dir);
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, dfs_name_dir + "," + path3.getPath());

  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
 
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
  
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  
  // set the restore feature on
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source: TestStorageRestore.java

Example 12: testGenericKeysForNameNodeFormat

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * HDFS-3013: NameNode format command doesn't pick up
 * dfs.namenode.name.dir.NameServiceId configuration.
 */
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
    throws IOException {
  Configuration conf = new HdfsConfiguration();

  // Set ephemeral ports 
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "127.0.0.1:0");
  
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  
  // Set a nameservice-specific configuration for name dir
  File dir = new File(MiniDFSCluster.getBaseDirectory(),
      "testGenericKeysForNameNodeFormat");
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
      dir.getAbsolutePath());
  
  // Format and verify the right dir is formatted.
  DFSTestUtil.formatNameNode(conf);
  GenericTestUtils.assertExists(dir);

  // Ensure that the same dir is picked up by the running NN
  NameNode nameNode = new NameNode(conf);
  nameNode.stop();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source: TestValidateConfigurationSettings.java

Example 13: shutdownKdc

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@AfterClass
public static void shutdownKdc() {
  if (kdc != null) {
    kdc.stop();
  }
  FileUtil.fullyDelete(baseDir);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source: SaslDataTransferTestCase.java
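
Example 13 shows only the teardown half of the KDC lifecycle. Here is a sketch of a matching @BeforeClass setup, assuming Hadoop's MiniKdc test helper; the baseDir location is an assumption.

// Hypothetical setup counterpart to Example 13's shutdownKdc().
private static File baseDir;
private static MiniKdc kdc;

@BeforeClass
public static void initKdc() throws Exception {
  baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
      "sasl-kdc"); // assumed location
  FileUtil.fullyDelete(baseDir);
  Assert.assertTrue(baseDir.mkdirs());
  kdc = new MiniKdc(MiniKdc.createConf(), baseDir);
  kdc.start();
}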

Example 14: recreateDir

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
private void recreateDir(File dir) throws IOException {
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  try {
    dir.mkdirs();
  } catch (SecurityException e) {
    throw new IOException("creating dir: " + dir, e);
  }
}
 
Developer ID: dremio, Project: dremio-oss, Lines of code: 11, Source: MiniZooKeeperCluster.java
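
One caveat with Example 14: File.mkdirs() reports most failures by returning false rather than throwing, so catching SecurityException alone can miss a failed creation. A hedged variant that also checks the return value:

// Sketch: like recreateDir, but also treat a false return from mkdirs() as failure.
private void recreateDirChecked(File dir) throws IOException {
  if (dir.exists() && !FileUtil.fullyDelete(dir)) {
    throw new IOException("deleting dir: " + dir);
  }
  if (!dir.mkdirs()) {
    throw new IOException("creating dir: " + dir);
  }
}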

Example 15: cleanup

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@After
public void cleanup() throws IOException {
  if (stateStore != null) {
    stateStore.close();
  }
  FileUtil.fullyDelete(TMP_DIR);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source: TestNMLeveldbStateStoreService.java


Note: The org.apache.hadoop.fs.FileUtil.fullyDelete method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors; copyright in the source code remains with those authors, and redistribution and use should follow each project's License. Please do not reproduce without permission.