This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileUtil.fullyDelete. If you are wondering what FileUtil.fullyDelete does, how to use it, or where to find sample code, the curated method examples below may help. You can also read further about the class the method belongs to, org.apache.hadoop.fs.FileUtil.
Fifteen code examples of FileUtil.fullyDelete are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
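Before the examples, here is a minimal, self-contained sketch of the method itself, assuming only a Hadoop client library on the classpath. FileUtil.fullyDelete(File) recursively deletes a local directory (or a single file) and returns true only if everything was removed. The class name FullyDeleteDemo and the scratch-directory path are made up for illustration.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteDemo {
  public static void main(String[] args) throws IOException {
    // Hypothetical scratch directory used only for this sketch.
    File scratch = new File(System.getProperty("java.io.tmpdir"), "fully-delete-demo");
    if (!scratch.mkdirs() && !scratch.isDirectory()) {
      throw new IOException("Could not create " + scratch);
    }
    // fullyDelete removes the directory and all of its contents on the
    // local file system, returning true only on complete success.
    if (!FileUtil.fullyDelete(scratch)) {
      throw new IOException("Could not delete " + scratch);
    }
  }
}

As the examples below illustrate, the common pattern is to call fullyDelete in a finally block or in a JUnit @Before/@After method so that test directories are cleaned up even when a test fails.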
Example 1: testCommandLine
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Test
public void testCommandLine() throws Exception {
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
      // ignore: the output directory may not exist yet
    }
    createInput();
    boolean mayExit = false;

    // During tests, the default Configuration will use a local mapred
    // So don't specify -config or -cluster
    job = new StreamJob(genArgs(), mayExit);
    job.go();

    File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    outFile.delete();
    System.err.println("outEx1=" + outputExpect);
    System.err.println(" out1=" + output);
    assertEquals(outputExpect, output);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
Example 2: testAuxServiceRecoverySetup
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Test
public void testAuxServiceRecoverySetup() throws IOException {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR, TEST_DIR.toString());
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
      new String[] { "Asrv", "Bsrv" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
      RecoverableServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
      RecoverableServiceB.class, Service.class);
  try {
    final AuxServices aux = new AuxServices();
    aux.init(conf);
    Assert.assertEquals(2, aux.getServices().size());
    File auxStorageDir = new File(TEST_DIR,
        AuxServices.STATE_STORE_ROOT_NAME);
    Assert.assertEquals(2, auxStorageDir.listFiles().length);
    aux.close();
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
Example 3: startBackupNode
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");

  // Start BackupNode
  String[] args = new String[] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode) NameNode.createNameNode(args, conf);
  return bu;
}
Example 4: tearDown
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@AfterClass
public static void tearDown() throws Exception {
  if (testMiniKDC != null) {
    testMiniKDC.stop();
  }
  if (testTimelineServer != null) {
    testTimelineServer.stop();
  }
  if (withSsl) {
    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
    File base = new File(BASEDIR);
    FileUtil.fullyDelete(base);
  }
}
Example 5: main
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Launches all the tasks in order.
 */
public static void main(String[] argv) throws Exception {
  if (argv.length < 2) {
    System.err.println("Usage: TestMapReduce <range> <counts>");
    System.err.println();
    System.err.println("Note: a good test will have a <counts> value" +
        " that is substantially larger than the <range>");
    return;
  }

  int i = 0;
  range = Integer.parseInt(argv[i++]);
  counts = Integer.parseInt(argv[i++]);
  try {
    launch();
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
Example 6: setup
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Before
public void setup() throws IOException {
  FileUtil.fullyDelete(TMP_DIR);
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR, TMP_DIR.toString());
  restartStateStore();
}
Example 7: testUnconsumedInput
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Test
public void testUnconsumedInput() throws Exception
{
  String outFileName = "part-00000";
  File outFile = null;
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
      // ignore: the output directory may not exist yet
    }
    createInput();

    // setup config to ignore unconsumed input
    Configuration conf = new Configuration();
    conf.set("stream.minRecWrittenToEnableSkip_", "0");

    job = new StreamJob();
    job.setConf(conf);
    int exitCode = job.run(genArgs());
    assertEquals("Job failed", 0, exitCode);
    outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    assertEquals("Output was truncated", EXPECTED_OUTPUT_SIZE,
        StringUtils.countMatches(output, "\t"));
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
Example 8: cleanup
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@AfterClass
public static void cleanup() throws Exception {
  server.stop();
  FileUtil.fullyDelete(new File(BASEDIR));
  KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
  clientSslFactory.destroy();
}
Example 9: formatDataNodeDirs
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
public void formatDataNodeDirs() throws IOException {
  base_dir = new File(determineDfsBaseDir());
  data_dir = new File(base_dir, "data");
  if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
    throw new IOException("Cannot remove data directory: " + data_dir);
  }
}
Example 10: deleteSnapshot
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
String deleteSnapshot(String snapshot) throws IOException {
  final StringBuilder b = new StringBuilder("deleteSnapshot: ")
      .append(snapshot).append(" from ").append(dir);
  FileUtil.fullyDelete(new File(localDir, snapshot));
  dfs.deleteSnapshot(dir, snapshot);
  snapshotPaths.remove(snapshot);
  return b.toString();
}
Example 11: setUpNameDirs
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Before
public void setUpNameDirs() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  hdfsDir.mkdirs();
  path1 = new File(hdfsDir, "name1");
  path2 = new File(hdfsDir, "name2");
  path3 = new File(hdfsDir, "name3");

  path1.mkdir(); path2.mkdir(); path3.mkdir();
  if (!path2.exists() || !path3.exists() || !path1.exists()) {
    throw new IOException("Couldn't create dfs.name dirs in " + hdfsDir.getAbsolutePath());
  }

  String dfs_name_dir = path1.getPath() + "," + path2.getPath();
  System.out.println("configuring hdfsdir is " + hdfsDir.getAbsolutePath() +
      "; dfs_name_dir = " + dfs_name_dir + "; dfs_name_edits_dir(only)=" + path3.getPath());

  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dfs_name_dir);
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, dfs_name_dir + "," + path3.getPath());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, new File(hdfsDir, "secondary").getPath());

  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");

  // set the restore feature on
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
}
Example 12: testGenericKeysForNameNodeFormat
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * HDFS-3013: NameNode format command doesn't pick up
 * dfs.namenode.name.dir.NameServiceId configuration.
 */
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
    throws IOException {
  Configuration conf = new HdfsConfiguration();

  // Set ephemeral ports
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "127.0.0.1:0");

  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");

  // Set a nameservice-specific configuration for name dir
  File dir = new File(MiniDFSCluster.getBaseDirectory(),
      "testGenericKeysForNameNodeFormat");
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
      dir.getAbsolutePath());

  // Format and verify the right dir is formatted.
  DFSTestUtil.formatNameNode(conf);
  GenericTestUtils.assertExists(dir);

  // Ensure that the same dir is picked up by the running NN
  NameNode nameNode = new NameNode(conf);
  nameNode.stop();
}
Example 13: shutdownKdc
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@AfterClass
public static void shutdownKdc() {
  if (kdc != null) {
    kdc.stop();
  }
  FileUtil.fullyDelete(baseDir);
}
Example 14: recreateDir
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
private void recreateDir(File dir) throws IOException {
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  try {
    dir.mkdirs();
  } catch (SecurityException e) {
    throw new IOException("creating dir: " + dir, e);
  }
}
Example 15: cleanup
import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@After
public void cleanup() throws IOException {
  if (stateStore != null) {
    stateStore.close();
  }
  FileUtil.fullyDelete(TMP_DIR);
}