This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.delete. If you are wondering what FileSystem.delete does, how to call it, or what it looks like in real code, the curated samples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.
The following 15 code examples show FileSystem.delete in use, sorted by popularity by default.
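Before the examples, a minimal sketch of the call itself may be useful: delete(Path, boolean recursive) returns true when the path was removed, and the recursive flag must be true to delete a non-empty directory. The class name, path, and configuration below are illustrative and not taken from any of the examples.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);        // file system backing the default URI
    Path tmp = new Path("/tmp/delete-sketch");   // illustrative path
    // recursive = true is required for non-empty directories;
    // delete returns false if the path could not be removed.
    if (fs.exists(tmp) && !fs.delete(tmp, true)) {
      throw new IOException("Failed to delete " + tmp);
    }
  }
}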
Example 1: initAndStartStore
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void initAndStartStore(final FileSystem fs) throws IOException,
    URISyntaxException {
  Configuration conf = new Configuration();
  fs.initialize(new URI("/"), conf);
  fsWorkingPath =
      new Path("target",
          TestFileSystemApplicationHistoryStore.class.getSimpleName());
  fs.delete(fsWorkingPath, true);
  conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
      fsWorkingPath.toString());
  store = new FileSystemApplicationHistoryStore() {
    @Override
    protected FileSystem getFileSystem(Path path, Configuration conf) {
      return fs;
    }
  };
  store.init(conf);
  store.start();
}
Example 2: testTaskEnv
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Tests whether user-set env variables are reflected in the child
 * processes. Mainly:
 * - x=y (x can be an already existing env variable or a new one)
 * - x=$x:y (replace $x with the current value of x)
 */
@Test
public void testTaskEnv() {
  try {
    JobConf conf = new JobConf(mr.getConfig());
    // initialize input, output directories
    Path inDir = new Path("testing/wc/input1");
    Path outDir = new Path("testing/wc/output1");
    FileSystem outFs = outDir.getFileSystem(conf);
    runTestTaskEnv(conf, inDir, outDir, false);
    outFs.delete(outDir, true);
  } catch (Exception e) {
    e.printStackTrace();
    // clean up before failing: fail() throws, so anything placed after it would never run
    tearDown();
    fail("Exception in testing child env");
  }
}
Example 3: configure
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);
  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);
  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data: two lines in a single file
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
          new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    // make sure we get what we expect as the first line, and also
    // that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
Example 4: cleanUpPartialOutputForTask
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
public void cleanUpPartialOutputForTask(TaskAttemptContext context)
    throws IOException {
  // We double-check that this is never invoked from a non-preemptable subclass.
  // This should never happen, since the invoking code checks it too, but it is
  // safer to double-check: mishandling this would produce inconsistent output.
  if (!this.getClass().isAnnotationPresent(Checkpointable.class)) {
    throw new IllegalStateException("Invoking cleanUpPartialOutputForTask() " +
        "from non @Preemptable class");
  }
  FileSystem fs =
      fsFor(getTaskAttemptPath(context), context.getConfiguration());
  LOG.info("cleanUpPartialOutputForTask: removing everything belonging to " +
      context.getTaskAttemptID().getTaskID() + " in: " +
      getCommittedTaskPath(context).getParent());
  final TaskAttemptID taid = context.getTaskAttemptID();
  final TaskID tid = taid.getTaskID();
  Path pCommit = getCommittedTaskPath(context).getParent();
  // remove any committed output from earlier attempts of the same task
  for (int i = 0; i < taid.getId(); ++i) {
    TaskAttemptID oldId = new TaskAttemptID(tid, i);
    Path pTask = new Path(pCommit, oldId.toString());
    if (fs.exists(pTask) && !fs.delete(pTask, true)) {
      throw new IOException("Failed to delete " + pTask);
    }
  }
}
Example 5: cleanUp
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@AfterClass
public static void cleanUp() {
  System.gc();
  Configuration configuration = new Configuration();
  FileSystem fileSystem = null;
  try {
    fileSystem = FileSystem.get(configuration);
    Path deletingFilePath = new Path("testData/MetaData/");
    if (!fileSystem.exists(deletingFilePath)) {
      throw new PathNotFoundException(deletingFilePath.toString());
    } else {
      boolean isDeleted = fileSystem.delete(deletingFilePath, true);
      if (isDeleted) {
        fileSystem.deleteOnExit(deletingFilePath);
      }
    }
    fileSystem.close();
  } catch (IOException e) {
    e.printStackTrace();
  }
}
Example 6: cleanAndCreateInput
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void cleanAndCreateInput(FileSystem fs) throws IOException {
  fs.delete(INPUT_DIR, true);
  fs.delete(OUTPUT_DIR, true);
  OutputStream os = fs.create(INPUT_FILE);
  Writer wr = new OutputStreamWriter(os);
  wr.write("hello1\n");
  wr.write("hello2\n");
  wr.write("hello3\n");
  wr.write("hello4\n");
  wr.close();
}
Example 7: createFSOutput
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private static FSDataOutputStream createFSOutput(Path name, FileSystem fs)
    throws IOException {
  if (fs.exists(name)) {
    fs.delete(name, true);
  }
  FSDataOutputStream fout = fs.create(name);
  return fout;
}
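As an aside on this pattern: FileSystem.create also has an overload that takes an overwrite flag, so the separate exists()/delete() round trip can often be avoided. A minimal alternative sketch of the same method body:

// Alternative sketch: create(Path, boolean overwrite) with overwrite = true
// replaces an existing file in a single call.
FSDataOutputStream fout = fs.create(name, true);
return fout;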
Example 8: createDirectory
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/** Creates a new, empty directory at dirPath, overwriting anything already there. */
public static void createDirectory(FileSystem fs, Path dirPath) throws IOException {
  fs.delete(dirPath, true);
  boolean created = fs.mkdirs(dirPath);
  if (!created) {
    LOG.warn("Could not create directory " + dirPath + "; this might cause test failures.");
  }
}
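A hypothetical call site for this helper, just to show the intent (the file system and path names below are illustrative, not from the example):

// Hypothetical usage; localFs and scratchDir are illustrative names.
FileSystem localFs = FileSystem.getLocal(new Configuration());
Path scratchDir = new Path("target/scratch");
createDirectory(localFs, scratchDir); // leaves an empty directory, wiping any earlier contents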
Example 9: sortListing
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Sorts a sequence file whose records have Text keys (relative paths) and
 * CopyListingFileStatus values.
 *
 * @param fs File system
 * @param conf Configuration
 * @param sourceListing Source listing file
 * @return Path of the sorted file, i.e. the source file with _sorted appended to the name
 * @throws IOException Any exception during sort.
 */
private static Path sortListing(FileSystem fs, Configuration conf, Path sourceListing) throws IOException {
  SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, Text.class, CopyListingFileStatus.class, conf);
  Path output = new Path(sourceListing.toString() + "_sorted");
  if (fs.exists(output)) {
    fs.delete(output, false);
  }
  sorter.sort(sourceListing, output);
  return output;
}
Example 10: delete
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static void delete(FileSystem fs, String path) {
  try {
    if (fs != null && path != null) {
      fs.delete(new Path(path), true);
    }
  } catch (IOException e) {
    LOG.warn("Exception encountered ", e);
  }
}
Example 11: assertDeleted
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static void assertDeleted(FileSystem fs,
                                 Path file,
                                 boolean recursive) throws IOException {
  assertPathExists(fs, "about to be deleted file", file);
  boolean deleted = fs.delete(file, recursive);
  String dir = ls(fs, file.getParent());
  assertTrue("Delete failed on " + file + ": " + dir, deleted);
  assertPathDoesNotExist(fs, "Deleted file", file);
}
Example 12: testExternalSubdir
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public void testExternalSubdir() throws IOException {
  final byte[] DATA = { 1, 2, 3, 4, 5 };
  final String FILENAME = "_lob/blobdata";
  try {
    doExternalTest(DATA, FILENAME);
  } finally {
    // remove the dir we made.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    String tmpDir = System.getProperty("test.build.data", "/tmp/");
    Path lobDir = new Path(new Path(tmpDir), "_lob");
    fs.delete(lobDir, true);
  }
}
Example 13: testFsckError
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Test if fsck can return -1 in case of failure.
 *
 * @throws Exception
 */
@Test
public void testFsckError() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    // bring up a one-node cluster
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();
    // create a one-block file
    DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short)1);
    // intentionally corrupt NN data structure
    INodeFile node =
        (INodeFile) cluster.getNamesystem().dir.getINode(fileName, true);
    final BlockInfoContiguous[] blocks = node.getBlocks();
    assertEquals(blocks.length, 1);
    blocks[0].setNumBytes(-1L); // set the block length to be negative
    // run fsck and expect a failure with -1 as the error code
    String outStr = runFsck(conf, -1, true, fileName);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
    // clean up file system
    fs.delete(filePath, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 14: deleteMissing
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void deleteMissing(Configuration conf) throws IOException {
  LOG.info("-delete option is enabled. About to remove entries from " +
      "target that are missing in source");
  // Sort the source-file listing alphabetically.
  Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
  FileSystem clusterFS = sourceListing.getFileSystem(conf);
  Path sortedSourceListing = DistCpUtils.sortListing(clusterFS, conf, sourceListing);
  // Similarly, create the listing of target-files. Sort alphabetically.
  Path targetListing = new Path(sourceListing.getParent(), "targetListing.seq");
  CopyListing target = new GlobbedCopyListing(new Configuration(conf), null);
  List<Path> targets = new ArrayList<Path>(1);
  Path targetFinalPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
  targets.add(targetFinalPath);
  DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
  //
  // Set up options to be the same from CopyListing.buildListing's perspective,
  // so as to collect the same kind of listing as the copy did.
  //
  options.setOverwrite(overwrite);
  options.setSyncFolder(syncFolder);
  options.setTargetPathExists(targetPathExists);
  target.buildListing(targetListing, options);
  Path sortedTargetListing = DistCpUtils.sortListing(clusterFS, conf, targetListing);
  long totalLen = clusterFS.getFileStatus(sortedTargetListing).getLen();
  SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
      SequenceFile.Reader.file(sortedSourceListing));
  SequenceFile.Reader targetReader = new SequenceFile.Reader(conf,
      SequenceFile.Reader.file(sortedTargetListing));
  // Walk both the source and target file listings.
  // Delete everything from the target that does not also exist at the source.
  long deletedEntries = 0;
  try {
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();
    CopyListingFileStatus trgtFileStatus = new CopyListingFileStatus();
    Text trgtRelPath = new Text();
    FileSystem targetFS = targetFinalPath.getFileSystem(conf);
    boolean srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
    while (targetReader.next(trgtRelPath, trgtFileStatus)) {
      // Advance the source reader past entries that sort before the current target entry.
      while (srcAvailable && trgtRelPath.compareTo(srcRelPath) > 0) {
        srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
      }
      if (srcAvailable && trgtRelPath.equals(srcRelPath)) continue;
      // Target doesn't exist at source. Delete.
      boolean result = (!targetFS.exists(trgtFileStatus.getPath()) ||
          targetFS.delete(trgtFileStatus.getPath(), true));
      if (result) {
        LOG.info("Deleted " + trgtFileStatus.getPath() + " - Missing at source");
        deletedEntries++;
      } else {
        throw new IOException("Unable to delete " + trgtFileStatus.getPath());
      }
      taskAttemptContext.progress();
      taskAttemptContext.setStatus("Deleting missing files from target. [" +
          targetReader.getPosition() * 100 / totalLen + "%]");
    }
  } finally {
    IOUtils.closeStream(sourceReader);
    IOUtils.closeStream(targetReader);
  }
  LOG.info("Deleted " + deletedEntries + " from target: " + targets.get(0));
}
Example 15: slowAppendTestHelper
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void slowAppendTestHelper(long appendTimeout)
    throws InterruptedException, IOException, LifecycleException, EventDeliveryException {
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  final int numBatches = 2;
  String newPath = testPath + "/singleBucket";
  int totalEvents = 0;
  int i = 1, j = 1;
  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);
  // create HDFS sink with slow writer
  HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
  sink = new HDFSEventSink(badWriterFactory);
  Context context = new Context();
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
  context.put("hdfs.appendTimeout", String.valueOf(appendTimeout));
  Configurables.configure(sink, context);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();
  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();
  // push the event batches into channel
  for (i = 0; i < numBatches; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hour, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      event.getHeaders().put("slow", "1500");
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();
    // execute sink to process the events
    sink.process();
  }
  sink.stop();
  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);
  // check that the roll happened correctly for the given data
  // Note that we'll end up with two files with only a header
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}