

Java FileSystem.deleteOnExit Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.deleteOnExit. If you are wondering what FileSystem.deleteOnExit does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of org.apache.hadoop.fs.FileSystem.


The following presents 9 code examples of the FileSystem.deleteOnExit method, sorted by popularity by default.
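Before the examples, a quick note on semantics: FileSystem.deleteOnExit(Path) does not delete anything immediately. It registers the path, and the actual deletion happens when the FileSystem instance is closed or the JVM shuts down; FileSystem.cancelDeleteOnExit(Path) can unregister a path. Below is a minimal sketch of the typical scratch-file pattern; the path name and the default Configuration are illustrative assumptions, not taken from any example on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteOnExitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Create a scratch file and register it for deletion.
    Path scratch = new Path("/tmp/scratch-" + System.nanoTime() + ".dat"); // illustrative path
    try (FSDataOutputStream out = fs.create(scratch)) {
      out.writeUTF("temporary data");
    }
    fs.deleteOnExit(scratch);

    // ... use the scratch file ...

    // close() triggers deletion of every registered path.
    fs.close();
  }
}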

Example 1: cleanUp

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@AfterClass
public static void cleanUp() {
	System.gc();
	Configuration configuration = new Configuration();
	FileSystem fileSystem = null;

	try {
		fileSystem = FileSystem.get(configuration);
		Path deletingFilePath = new Path("testData/MetaData/");
		if (!fileSystem.exists(deletingFilePath)) {
			throw new PathNotFoundException(deletingFilePath.toString());
		} else {

			boolean isDeleted = fileSystem.delete(deletingFilePath, true);
			if (isDeleted) {
				// Safeguard: register the path so that anything recreated
				// there before close() is removed as well.
				fileSystem.deleteOnExit(deletingFilePath);
			}
		}
		fileSystem.close();
	} catch (IOException e) {
		e.printStackTrace();
	}
}
 
Developer: capitalone, Project: Hydrograph, Lines: 24, Source: LingualSchemaCreatorTest.java

Example 2: registerApplicationMaster

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
public ApplicationMasterRegisterResponse registerApplicationMaster(
    ApplicationMasterRegisterRequest request) throws IOException {
  String amHost = request.getHost();
  int amRpcPort = request.getPort();
  String trackingUrl = request.getTrackingUrl();
  
  int jobid = appAttemptId.getApplicationId().getId();

  String jobStatusFileName = jobid + "__" + amRpcPort + "__" + amHost + "__"
      + URLEncoder.encode(trackingUrl, HPCConfiguration.CHAR_ENCODING);
  String jobStatusLocation = conf.get(
      YARN_APPLICATION_HPC_PBS_JOB_STATUS_FILES_LOCATION,
      DEFAULT_YARN_APPLICATION_HPC_PBS_JOB_STATUS_FILES_LOCATION);
  FileSystem fileSystem = FileSystem.get(conf);
  Path statusFile = new Path(jobStatusLocation, jobStatusFileName);
  fileSystem.createNewFile(statusFile);
  fileSystem.deleteOnExit(statusFile);

  ApplicationMasterRegisterResponse response = new ApplicationMasterRegisterResponse();
  response.setMaxCapability(getMaxCapability());
  response.setQueue("default");
  return response;
}
 
Developer: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines: 25, Source: PBSApplicationMaster.java

Example 3: configurePartitioner

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
 * <code>splitPoints</code>. Cleans up the partitions file after the job exits.
 */
static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints)
    throws IOException {
  Configuration conf = job.getConfiguration();
  // create the partitions file
  FileSystem fs = FileSystem.get(conf);
  String hbaseTmpFsDir =
      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
        HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
  Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
  // makeQualified returns a new qualified Path; capture the result
  // instead of discarding it
  partitionsPath = fs.makeQualified(partitionsPath);
  writePartitions(conf, partitionsPath, splitPoints);
  fs.deleteOnExit(partitionsPath);

  // configure job to use it
  job.setPartitionerClass(TotalOrderPartitioner.class);
  TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: HFileOutputFormat2.java

Example 4: deleteFromHdfs

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/** Delete a file from HDFS. */
public static void deleteFromHdfs(String fileName) throws IOException {
    String dst = NodeConfig.HDFS_PATH + fileName;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    // deleteOnExit only registers the path; the actual delete happens when
    // the FileSystem is closed below (or when the JVM exits).
    fs.deleteOnExit(new Path(dst));
    fs.close();
}
 
Developer: cuiods, Project: WIFIProbe, Lines: 9, Source: HDFSTool.java
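Note that in Example 4 the deletion is only carried out by the subsequent fs.close() call; deleteOnExit merely schedules it. When the goal is simply to remove a path right away, FileSystem.delete is the direct call. A minimal variant under the same assumptions (NodeConfig.HDFS_PATH is the hypothetical constant from the example above):

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Deletes a path immediately instead of deferring to close(). */
public static boolean deleteFromHdfsNow(String fileName) throws IOException {
    String dst = NodeConfig.HDFS_PATH + fileName;
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(URI.create(dst), conf)) {
        // 'true' requests recursive deletion for directories
        return fs.delete(new Path(dst), true);
    }
}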

Example 5: writeGlobalCleanerPidFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * To ensure there are not multiple instances of the SCM running on a given
 * cluster, a global pid file is used. This file contains the hostname of the
 * machine that owns the pid file.
 *
 * @return true if the pid file was written, false otherwise
 * @throws YarnException
 */
private boolean writeGlobalCleanerPidFile() throws YarnException {
  String root =
      conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
          YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
  Path pidPath = new Path(root, GLOBAL_CLEANER_PID);
  try {
    FileSystem fs = FileSystem.get(this.conf);

    if (fs.exists(pidPath)) {
      return false;
    }

    FSDataOutputStream os = fs.create(pidPath, false);
    // write the hostname and the process id in the global cleaner pid file
    final String ID = ManagementFactory.getRuntimeMXBean().getName();
    os.writeUTF(ID);
    os.close();
    // add it to the delete-on-exit to ensure it gets deleted when the JVM
    // exits
    fs.deleteOnExit(pidPath);
  } catch (IOException e) {
    throw new YarnException(e);
  }
  LOG.info("Created the global cleaner pid file at " + pidPath.toString());
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: CleanerService.java

Example 6: createFileSystem

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public FileSystem createFileSystem() throws IOException {
  FileSystem fs = FileSystemWrapper.get(location, fsConf);
  fs.mkdirs(new Path(location.getPath()), DEFAULT_PERMISSIONS);
  fs.mkdirs(stagingDir, DEFAULT_PERMISSIONS);
  fs.mkdirs(uploadsDir, DEFAULT_PERMISSIONS);
  fs.deleteOnExit(stagingDir);
  return fs;
}
 
Developer: dremio, Project: dremio-oss, Lines: 9, Source: HomeFileConfig.java

Example 7: getTestDir

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public Path getTestDir(String testName, String subdir) throws IOException {
  Path testDir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = FileSystem.get(getConf());
  fs.deleteOnExit(testDir);

  return new Path(new Path(testDir, testName), subdir);
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: IntegrationTestLoadAndVerify.java

Example 8: testDeleteOnExit

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Test deleteOnExit
 */
@Test
public void testDeleteOnExit() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);

  try {

    // Creates files in HDFS and local file system.
    //
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");

    // write to files and close. Purposely, do not write to file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();

    // set delete on exit flag on files.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);

    // close the file system. This should make the above files
    // disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;

    // reopen file system and verify that file does not exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);

    assertTrue(file1 + " still exists despite deleteOnExit being set.",
               !fs.exists(file1));
    assertTrue(file2 + " still exists despite deleteOnExit being set.",
               !fs.exists(file2));
    assertTrue(file3 + " still exists despite deleteOnExit being set.",
               !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");

  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 63, Source: TestFileCreation.java
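Example 8 confirms that registered files disappear once the FileSystem (or the local file system) is closed. If a registered path should be kept after all, FileSystem.cancelDeleteOnExit removes it from the registry before close() runs. A minimal sketch, with an illustrative path name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void keepAfterAll() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path keep = new Path("/tmp/keep-me.dat"); // illustrative path
    fs.createNewFile(keep);
    fs.deleteOnExit(keep);
    // Decision reversed: unregister the path so it survives close().
    boolean wasRegistered = fs.cancelDeleteOnExit(keep);
    fs.close(); // keep-me.dat is not deleted here
}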

Example 9: writeDistCacheFilesList

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Write the list of distributed cache files, in decreasing order of
 * file size, into the sequence file. This file becomes the input to the
 * {@link GenerateDistCacheData} job.
 * Also reports an error if the -generate option was not given while the
 * distributed cache files are missing.
 * @return exit code
 * @throws IOException
 */
private int writeDistCacheFilesList()
    throws IOException {
  // Sort the distributed cache files in the decreasing order of file sizes.
  List dcFiles = new ArrayList(distCacheFiles.entrySet());
  Collections.sort(dcFiles, new Comparator() {
    public int compare(Object dc1, Object dc2) {
      return ((Comparable) ((Map.Entry) (dc2)).getValue())
          .compareTo(((Map.Entry) (dc1)).getValue());
    }
  });

  // write the sorted distributed cache files to the sequence file
  FileSystem fs = FileSystem.get(conf);
  Path distCacheFilesList = new Path(distCachePath, "_distCacheFiles.txt");
  conf.set(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_LIST,
      distCacheFilesList.toString());
  SequenceFile.Writer src_writer = SequenceFile.createWriter(fs, conf,
      distCacheFilesList, LongWritable.class, BytesWritable.class,
      SequenceFile.CompressionType.NONE);

  // Total number of unique distributed cache files
  int fileCount = dcFiles.size();
  long byteCount = 0; // Total size of all distributed cache files
  long bytesSync = 0; // Bytes since the previous sync; used to add sync markers

  for (Iterator it = dcFiles.iterator(); it.hasNext();) {
    Map.Entry entry = (Map.Entry)it.next();
    LongWritable fileSize =
        new LongWritable(Long.parseLong(entry.getValue().toString()));
    BytesWritable filePath =
        new BytesWritable(
        entry.getKey().toString().getBytes(charsetUTF8));

    byteCount += fileSize.get();
    bytesSync += fileSize.get();
    if (bytesSync > AVG_BYTES_PER_MAP) {
      src_writer.sync();
      bytesSync = fileSize.get();
    }
    src_writer.append(fileSize, filePath);
  }
  if (src_writer != null) {
    src_writer.close();
  }
  // Set delete on exit for 'dist cache files list' as it is not needed later.
  fs.deleteOnExit(distCacheFilesList);

  conf.setInt(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_COUNT, fileCount);
  conf.setLong(GenerateDistCacheData.GRIDMIX_DISTCACHE_BYTE_COUNT, byteCount);
  LOG.info("Number of HDFS based distributed cache files to be generated is "
      + fileCount + ". Total size of HDFS based distributed cache files "
      + "to be generated is " + byteCount);

  if (!shouldGenerateDistCacheData() && fileCount > 0) {
    LOG.error("Missing " + fileCount + " distributed cache files under the "
        + " directory\n" + distCachePath + "\nthat are needed for gridmix"
        + " to emulate distributed cache load. Either use -generate\noption"
        + " to generate distributed cache data along with input data OR "
        + "disable\ndistributed cache emulation by configuring '"
        + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
        + "' to false.");
    return Gridmix.MISSING_DIST_CACHE_FILES_ERROR;
  }
  return 0;
}
 
Developer: naver, Project: hadoop, Lines: 75, Source: DistributedCacheEmulator.java


Note: The org.apache.hadoop.fs.FileSystem.deleteOnExit method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not reproduce without permission.