

Java FileSystem.delete Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.delete. If you are wondering how FileSystem.delete is used in practice, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


Below are 15 code examples of the FileSystem.delete method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
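Before the examples, here is a minimal, self-contained sketch of the method's basic contract: FileSystem.delete(Path f, boolean recursive) returns true if the path was deleted and false if it did not exist, and deleting a non-empty directory with recursive set to false fails with an IOException. The class name FileSystemDeleteDemo and the path /tmp/delete-demo below are illustrative placeholders, not taken from any of the projects cited in this article.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemDeleteDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Default file system as configured in core-site.xml (local FS if unset).
    FileSystem fs = FileSystem.get(conf);

    Path dir = new Path("/tmp/delete-demo"); // hypothetical test path
    fs.mkdirs(dir);
    fs.create(new Path(dir, "file.txt")).close(); // make the directory non-empty

    // recursive = true removes the directory and everything beneath it;
    // recursive = false on a non-empty directory throws an IOException.
    boolean deleted = fs.delete(dir, true);
    System.out.println("deleted: " + deleted); // true

    // Deleting a path that no longer exists returns false rather than failing.
    System.out.println("deleted again: " + fs.delete(dir, true)); // false

    fs.close();
  }
}

As the examples below show, the common idiom is fs.delete(path, true) to reset a working directory before a test or job run.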

Example 1: initAndStartStore

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private void initAndStartStore(final FileSystem fs) throws IOException,
    URISyntaxException {
  Configuration conf = new Configuration();
  fs.initialize(new URI("/"), conf);
  fsWorkingPath =
      new Path("target",
        TestFileSystemApplicationHistoryStore.class.getSimpleName());
  fs.delete(fsWorkingPath, true);
  conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
    fsWorkingPath.toString());
  store = new FileSystemApplicationHistoryStore() {
    @Override
    protected FileSystem getFileSystem(Path path, Configuration conf) {
      return fs;
    }
  };
  store.init(conf);
  store.start();
}
 
Author: naver | Project: hadoop | Lines: 20 | Source: TestFileSystemApplicationHistoryStore.java

Example 2: testTaskEnv

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Test whether user-set env variables are reflected in the child
 * processes. Mainly:
 *   - x=y (x can be an already existing env variable or a new one)
 *   - x=$x:y (replace $x with the current value of x)
 */
@Test
public void testTaskEnv(){
  try {
    JobConf conf = new JobConf(mr.getConfig());
    // initialize input, output directories
    Path inDir = new Path("testing/wc/input1");
    Path outDir = new Path("testing/wc/output1");
    FileSystem outFs = outDir.getFileSystem(conf);
    runTestTaskEnv(conf, inDir, outDir, false);
    outFs.delete(outDir, true);
  } catch(Exception e) {
    e.printStackTrace();
    // fail() throws AssertionError, so run cleanup before failing the test
    tearDown();
    fail("Exception in testing child env");
  }
}
 
Author: naver | Project: hadoop | Lines: 23 | Source: TestMiniMRChildTask.java

Example 3: configure

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in a single file with two lines
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Author: naver | Project: hadoop | Lines: 67 | Source: TestKeyFieldBasedComparator.java

Example 4: cleanUpPartialOutputForTask

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Override
public void cleanUpPartialOutputForTask(TaskAttemptContext context)
    throws IOException {

  // We double-check that this is never invoked from a non-preemptable
  // subclass. This should never happen, since the invoking code checks it
  // too, but it is safer to double-check. Errors in handling this would
  // produce inconsistent output.

  if (!this.getClass().isAnnotationPresent(Checkpointable.class)) {
    throw new IllegalStateException("Invoking cleanUpPartialOutputForTask() " +
        "from non @Preemptable class");
  }
  FileSystem fs =
    fsFor(getTaskAttemptPath(context), context.getConfiguration());

  LOG.info("cleanUpPartialOutputForTask: removing everything belonging to " +
      context.getTaskAttemptID().getTaskID() + " in: " +
      getCommittedTaskPath(context).getParent());

  final TaskAttemptID taid = context.getTaskAttemptID();
  final TaskID tid = taid.getTaskID();
  Path pCommit = getCommittedTaskPath(context).getParent();
  // remove any committed output
  for (int i = 0; i < taid.getId(); ++i) {
    TaskAttemptID oldId = new TaskAttemptID(tid, i);
    Path pTask = new Path(pCommit, oldId.toString());
    if (fs.exists(pTask) && !fs.delete(pTask, true)) {
      throw new IOException("Failed to delete " + pTask);
    }
  }
}
 
Author: naver | Project: hadoop | Lines: 33 | Source: PartialFileOutputCommitter.java

Example 5: cleanUp

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@AfterClass
public static void cleanUp() {
	System.gc();
	Configuration configuration = new Configuration();
	FileSystem fileSystem = null;

	try {
		fileSystem = FileSystem.get(configuration);
		Path deletingFilePath = new Path("testData/MetaData/");
		if (!fileSystem.exists(deletingFilePath)) {
			throw new PathNotFoundException(deletingFilePath.toString());
		} else {

			boolean isDeleted = fileSystem.delete(deletingFilePath, true);
			if (isDeleted) {
				fileSystem.deleteOnExit(deletingFilePath);
			}
		}
		fileSystem.close();
	} catch (IOException e) {
		e.printStackTrace();
	}
}
 
Author: capitalone | Project: Hydrograph | Lines: 24 | Source: LingualSchemaCreatorTest.java

Example 6: cleanAndCreateInput

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private void cleanAndCreateInput(FileSystem fs) throws IOException {
  fs.delete(INPUT_DIR, true);
  fs.delete(OUTPUT_DIR, true);

  OutputStream os = fs.create(INPUT_FILE);

  Writer wr = new OutputStreamWriter(os);
  wr.write("hello1\n");
  wr.write("hello2\n");
  wr.write("hello3\n");
  wr.write("hello4\n");
  wr.close();
}
 
Author: naver | Project: hadoop | Lines: 14 | Source: TestUserDefinedCounters.java

Example 7: createFSOutput

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private static FSDataOutputStream createFSOutput(Path name, FileSystem fs)
  throws IOException {
  if (fs.exists(name)) {
    fs.delete(name, true);
  }
  FSDataOutputStream fout = fs.create(name);
  return fout;
}
 
Author: fengchen8086 | Project: ditb | Lines: 9 | Source: TestHFileSeek.java

Example 8: createDirectory

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/** Creates a new, empty directory at dirPath and always overwrites */
public static void createDirectory(FileSystem fs, Path dirPath) throws IOException {
  fs.delete(dirPath, true);
  boolean created = fs.mkdirs(dirPath);
  if (!created) {
    LOG.warn("Could not create directory " + dirPath + " this might cause test failures.");
  }
}
 
Author: HotelsDotCom | Project: circus-train | Lines: 9 | Source: S3MapReduceCpTestUtils.java

Example 9: sortListing

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Sort a sequence file containing Text keys and CopyListingFileStatus values.
 *
 * @param fs File system
 * @param conf Configuration
 * @param sourceListing Source listing file
 * @return Path of the sorted file: the source file name with _sorted appended
 * @throws IOException Any exception during sort.
 */
private static Path sortListing(FileSystem fs, Configuration conf, Path sourceListing) throws IOException {
  SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, Text.class, CopyListingFileStatus.class, conf);
  Path output = new Path(sourceListing.toString() + "_sorted");

  if (fs.exists(output)) {
    fs.delete(output, false);
  }

  sorter.sort(sourceListing, output);
  return output;
}
 
Author: HotelsDotCom | Project: circus-train | Lines: 21 | Source: CopyListing.java

Example 10: delete

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public static void delete(FileSystem fs, String path) {
  try {
    if (fs != null) {
      if (path != null) {
        fs.delete(new Path(path), true);
      }
    }
  } catch (IOException e) {
    LOG.warn("Exception encountered ", e);
  }
}
 
Author: naver | Project: hadoop | Lines: 12 | Source: TestDistCpUtils.java

Example 11: assertDeleted

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public static void assertDeleted(FileSystem fs,
                                 Path file,
                                 boolean recursive) throws IOException {
  assertPathExists(fs, "about to be deleted file", file);
  boolean deleted = fs.delete(file, recursive);
  String dir = ls(fs, file.getParent());
  assertTrue("Delete failed on " + file + ": " + dir, deleted);
  assertPathDoesNotExist(fs, "Deleted file", file);
}
 
Author: naver | Project: hadoop | Lines: 10 | Source: SwiftTestUtils.java

Example 12: testExternalSubdir

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public void testExternalSubdir() throws IOException {
  final byte [] DATA = { 1, 2, 3, 4, 5 };
  final String FILENAME = "_lob/blobdata";

  try {
    doExternalTest(DATA, FILENAME);
  } finally {
    // remove dir we made.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    String tmpDir = System.getProperty("test.build.data", "/tmp/");
    Path lobDir = new Path(new Path(tmpDir), "_lob");
    fs.delete(lobDir, true);
  }
}
 
Author: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 16 | Source: TestBlobRef.java

Example 13: testFsckError

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/** Test if fsck can return -1 in case of failure
 * 
 * @throws Exception
 */
@Test
public void testFsckError() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    // bring up a one-node cluster
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();
    
    // create a one-block file
    DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short)1);
    
    // intentionally corrupt NN data structure
    INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
        (fileName, true);
    final BlockInfoContiguous[] blocks = node.getBlocks();
    assertEquals(blocks.length, 1);
    blocks[0].setNumBytes(-1L);  // set the block length to be negative
    
    // run fsck and expect a failure with -1 as the error code
    String outStr = runFsck(conf, -1, true, fileName);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
    
    // clean up file system
    fs.delete(filePath, true);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Author: naver | Project: hadoop | Lines: 38 | Source: TestFsck.java

Example 14: deleteMissing

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private void deleteMissing(Configuration conf) throws IOException {
  LOG.info("-delete option is enabled. About to remove entries from " +
      "target that are missing in source");

  // Sort the source-file listing alphabetically.
  Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
  FileSystem clusterFS = sourceListing.getFileSystem(conf);
  Path sortedSourceListing = DistCpUtils.sortListing(clusterFS, conf, sourceListing);

  // Similarly, create the listing of target-files. Sort alphabetically.
  Path targetListing = new Path(sourceListing.getParent(), "targetListing.seq");
  CopyListing target = new GlobbedCopyListing(new Configuration(conf), null);

  List<Path> targets = new ArrayList<Path>(1);
  Path targetFinalPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
  targets.add(targetFinalPath);
  DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
  //
  // Set up options to match those used by CopyListing.buildListing,
  // so as to collect the same kind of listing as during the copy
  //
  options.setOverwrite(overwrite);
  options.setSyncFolder(syncFolder);
  options.setTargetPathExists(targetPathExists);
  
  target.buildListing(targetListing, options);
  Path sortedTargetListing = DistCpUtils.sortListing(clusterFS, conf, targetListing);
  long totalLen = clusterFS.getFileStatus(sortedTargetListing).getLen();

  SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
                               SequenceFile.Reader.file(sortedSourceListing));
  SequenceFile.Reader targetReader = new SequenceFile.Reader(conf,
                               SequenceFile.Reader.file(sortedTargetListing));

  // Walk both source and target file listings.
  // Delete all from target that doesn't also exist on source.
  long deletedEntries = 0;
  try {
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();
    CopyListingFileStatus trgtFileStatus = new CopyListingFileStatus();
    Text trgtRelPath = new Text();

    FileSystem targetFS = targetFinalPath.getFileSystem(conf);
    boolean srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
    while (targetReader.next(trgtRelPath, trgtFileStatus)) {
      // Skip sources that don't exist on target.
      while (srcAvailable && trgtRelPath.compareTo(srcRelPath) > 0) {
        srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
      }

      if (srcAvailable && trgtRelPath.equals(srcRelPath)) continue;

      // Target doesn't exist at source. Delete.
      boolean result = (!targetFS.exists(trgtFileStatus.getPath()) ||
          targetFS.delete(trgtFileStatus.getPath(), true));
      if (result) {
        LOG.info("Deleted " + trgtFileStatus.getPath() + " - Missing at source");
        deletedEntries++;
      } else {
        throw new IOException("Unable to delete " + trgtFileStatus.getPath());
      }
      taskAttemptContext.progress();
      taskAttemptContext.setStatus("Deleting missing files from target. [" +
          targetReader.getPosition() * 100 / totalLen + "%]");
    }
  } finally {
    IOUtils.closeStream(sourceReader);
    IOUtils.closeStream(targetReader);
  }
  LOG.info("Deleted " + deletedEntries + " from target: " + targets.get(0));
}
 
Author: naver | Project: hadoop | Lines: 73 | Source: CopyCommitter.java

Example 15: slowAppendTestHelper

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private void slowAppendTestHelper(long appendTimeout)
    throws InterruptedException, IOException, LifecycleException,
           EventDeliveryException {
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  final int numBatches = 2;
  String newPath = testPath + "/singleBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  // create HDFS sink with slow writer
  HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
  sink = new HDFSEventSink(badWriterFactory);

  Context context = new Context();
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
  context.put("hdfs.appendTimeout", String.valueOf(appendTimeout));
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();
  // push the event batches into channel
  for (i = 0; i < numBatches; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hour, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      event.getHeaders().put("slow", "1500");
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }

  sink.stop();

  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  // Note that we'll end up with two files with only a header
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
 
Author: moueimei | Project: flume-release-1.7.0 | Lines: 79 | Source: TestHDFSEventSink.java


Note: The org.apache.hadoop.fs.FileSystem.delete examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please consult each project's license. Do not reproduce without permission.