

Java FileSystem.createNewFile Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.createNewFile. If you are wondering what FileSystem.createNewFile does, how to call it, or where to find sample code for it, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following presents 15 code examples of the FileSystem.createNewFile method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
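
Before working through the examples, here is a minimal, self-contained usage sketch. It is an illustration rather than code from any of the projects below: it assumes a default Hadoop Configuration and uses a hypothetical path /tmp/example.txt.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNewFileSketch {
  public static void main(String[] args) throws IOException {
    // Load the default Hadoop configuration (picks up core-site.xml etc. from the classpath).
    Configuration conf = new Configuration();
    // Obtain the FileSystem backing the configured default scheme (e.g. file:// or hdfs://).
    FileSystem fs = FileSystem.get(conf);

    Path path = new Path("/tmp/example.txt"); // hypothetical path, for illustration only
    // createNewFile creates an empty file and returns true only if the path did not already exist.
    boolean created = fs.createNewFile(path);
    System.out.println("created = " + created);
  }
}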

Example 1: configureTestSimple

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static List<Path> configureTestSimple(Configuration conf, FileSystem localFs)
    throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  Path base2 = new Path(TEST_ROOT_DIR, "input2");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
  localFs.mkdirs(base1);
  localFs.mkdirs(base2);

  Path in1File1 = new Path(base1, "file1");
  Path in1File2 = new Path(base1, "file2");
  localFs.createNewFile(in1File1);
  localFs.createNewFile(in1File2);

  Path in2File1 = new Path(base2, "file1");
  Path in2File2 = new Path(base2, "file2");
  localFs.createNewFile(in2File1);
  localFs.createNewFile(in2File2);
  List<Path> expectedPaths = Lists.newArrayList(in1File1, in1File2, in2File1,
      in2File2);
  return expectedPaths;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source: TestFileInputFormat.java

Example 2: configureTestErrorOnNonExistantDir

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static List<Path> configureTestErrorOnNonExistantDir(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  Path base2 = new Path(TEST_ROOT_DIR, "input2");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      true);
  localFs.mkdirs(base1);

  Path inFile1 = new Path(base1, "file1");
  Path inFile2 = new Path(base1, "file2");

  localFs.createNewFile(inFile1);
  localFs.createNewFile(inFile2);

  List<Path> expectedPaths = Lists.newArrayList();
  return expectedPaths;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source: TestFileInputFormat.java

Example 3: testContainerChecksWithSas

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testContainerChecksWithSas() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("",
      EnumSet.of(CreateOptions.UseSas));
  assumeNotNull(testAccount);
  CloudBlobContainer container = testAccount.getRealContainer();
  FileSystem fs = testAccount.getFileSystem();

  // The container shouldn't be there
  assertFalse(container.exists());

  // A write should just fail
  try {
    fs.createNewFile(new Path("/foo"));
    assertFalse("Should've thrown.", true);
  } catch (AzureException ex) {
  }
  assertFalse(container.exists());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: TestContainerChecks.java

Example 4: registerApplicationMaster

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
public ApplicationMasterRegisterResponse registerApplicationMaster(
    ApplicationMasterRegisterRequest request) throws IOException {
  String amHost = request.getHost();
  int amRpcPort = request.getPort();
  String trackingUrl = request.getTrackingUrl();
  
  int jobid = appAttemptId.getApplicationId().getId();

  String jobStatusFileName = jobid + "__" + amRpcPort + "__" + amHost + "__"
      + URLEncoder.encode(trackingUrl, HPCConfiguration.CHAR_ENCODING);
  String jobStatusLocation = conf.get(
      YARN_APPLICATION_HPC_PBS_JOB_STATUS_FILES_LOCATION,
      DEFAULT_YARN_APPLICATION_HPC_PBS_JOB_STATUS_FILES_LOCATION);
  FileSystem fileSystem = FileSystem.get(conf);
  Path statusFile = new Path(jobStatusLocation, jobStatusFileName);
  fileSystem.createNewFile(statusFile);
  fileSystem.deleteOnExit(statusFile);

  ApplicationMasterRegisterResponse response = new ApplicationMasterRegisterResponse();
  response.setMaxCapability(getMaxCapability());
  response.setQueue("default");
  return response;
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 25, Source: PBSApplicationMaster.java

Example 5: testTTLCleaner

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testTTLCleaner() throws IOException, InterruptedException {
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  Path root = UTIL.getDataTestDirOnTestFS();
  Path file = new Path(root, "file");
  fs.createNewFile(file);
  long createTime = System.currentTimeMillis();
  assertTrue("Test file not created!", fs.exists(file));
  TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
  // update the time info for the file, so the cleaner removes it
  fs.setTimes(file, createTime - 100, -1);
  Configuration conf = UTIL.getConfiguration();
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
  cleaner.setConf(conf);
  assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs)
      + " with create time:" + createTime, cleaner.isFileDeletable(fs.getFileStatus(file)));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source: TestHFileCleaner.java

Example 6: stop

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
public void stop(CoprocessorEnvironment env) throws IOException {
  String fileName = null;

  if (env instanceof MasterCoprocessorEnvironment) {
    // if running on HMaster
    fileName = MASTER_FILE;
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    fileName = REGIONSERVER_FILE;
  } else if (env instanceof RegionCoprocessorEnvironment) {
    LOG.error("on RegionCoprocessorEnvironment!!");
  }

  Configuration conf = UTIL.getConfiguration();
  Path resultFile = new Path(UTIL.getDataTestDirOnTestFS(), fileName);
  FileSystem fs = FileSystem.get(conf);

  boolean result = fs.createNewFile(resultFile);
  LOG.info("create file " + resultFile + " return rc " + result);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source: TestCoprocessorStop.java

Example 7: createNewFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Creates a new file at the given path.
 * <p>
 * Whether Alluxio passes the create through to the underlying HDFS depends on its configuration.
 *
 * @param fileSystemInfo
 *            file system information
 * @param path
 *            file path
 * @return whether the new file was created successfully
 */
public static boolean createNewFile(FileSystemInfo fileSystemInfo, String path) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path uri = new Path(path);
	try {
		if (!fs.exists(uri)) {
			return fs.createNewFile(uri);
		}
	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}
	return false;
}
 
Developer ID: zhangjunfang, Project: alluxio, Lines of code: 26, Source: HdfsAndAlluxioUtils_update.java

Example 8: configureTestNestedRecursive

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static List<Path> configureTestNestedRecursive(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1).toString());
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      true);
  localFs.mkdirs(base1);

  Path inDir1 = new Path(base1, "dir1");
  Path inDir2 = new Path(base1, "dir2");
  Path inFile1 = new Path(base1, "file1");

  Path dir1File1 = new Path(inDir1, "file1");
  Path dir1File2 = new Path(inDir1, "file2");

  Path dir2File1 = new Path(inDir2, "file1");
  Path dir2File2 = new Path(inDir2, "file2");

  localFs.mkdirs(inDir1);
  localFs.mkdirs(inDir2);

  localFs.createNewFile(inFile1);
  localFs.createNewFile(dir1File1);
  localFs.createNewFile(dir1File2);
  localFs.createNewFile(dir2File1);
  localFs.createNewFile(dir2File2);

  List<Path> expectedPaths = Lists.newArrayList(inFile1, dir1File1,
      dir1File2, dir2File1, dir2File2);
  return expectedPaths;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 34, Source: TestFileInputFormat.java

Example 9: configureTestNestedNonRecursive

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static List<Path> configureTestNestedNonRecursive(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1).toString());
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      false);
  localFs.mkdirs(base1);

  Path inDir1 = new Path(base1, "dir1");
  Path inDir2 = new Path(base1, "dir2");
  Path inFile1 = new Path(base1, "file1");

  Path dir1File1 = new Path(inDir1, "file1");
  Path dir1File2 = new Path(inDir1, "file2");

  Path dir2File1 = new Path(inDir2, "file1");
  Path dir2File2 = new Path(inDir2, "file2");

  localFs.mkdirs(inDir1);
  localFs.mkdirs(inDir2);

  localFs.createNewFile(inFile1);
  localFs.createNewFile(dir1File1);
  localFs.createNewFile(dir1File2);
  localFs.createNewFile(dir2File1);
  localFs.createNewFile(dir2File2);

  List<Path> expectedPaths = Lists.newArrayList(inFile1, inDir1, inDir2);
  return expectedPaths;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source: TestFileInputFormat.java

Example 10: markCorrupted

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static void markCorrupted(Path rootdir, String logFileName,
    FileSystem fs) {
  Path file = new Path(getSplitLogDir(rootdir, logFileName), "corrupt");
  try {
    fs.createNewFile(file);
  } catch (IOException e) {
    LOG.warn("Could not flag a log file as corrupted. Failed to create " +
        file, e);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 11, Source: ZKSplitLog.java

Example 11: testFindsSnapshotFilesWhenCleaning

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = FSUtils.getRootDir(conf);
  Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  FileSystem fs = FileSystem.get(conf);
  SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
  cleaner.setConf(conf);

  // write an hfile to the snapshot directory
  String snapshotName = "snapshot";
  byte[] snapshot = Bytes.toBytes(snapshotName);
  TableName tableName = TableName.valueOf("table");
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  HRegionInfo mockRegion = new HRegionInfo(tableName);
  Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
  Path familyDir = new Path(regionSnapshotDir, "family");
  // create a reference to a supposedly valid hfile
  String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
  Path refFile = new Path(familyDir, hfile);

  // make sure the reference file exists
  fs.create(refFile);

  // create the hfile in the archive
  fs.mkdirs(archivedHfileDir);
  fs.createNewFile(new Path(archivedHfileDir, hfile));

  // make sure that the file isn't deletable
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 34, Source: TestSnapshotHFileCleaner.java

Example 12: testTempAndCommit

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testTempAndCommit() throws IOException {
  Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testTempAndCommit");
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Configuration conf = TEST_UTIL.getConfiguration();

  // Create a Region
  String familyName = "cf";
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);

  // New region, no store files
  Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
  assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

  // Create a new file in temp (no files in the family)
  Path buildPath = regionFs.createTempName();
  fs.createNewFile(buildPath);
  storeFiles = regionFs.getStoreFiles(familyName);
  assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

  // commit the file
  Path dstPath = regionFs.commitStoreFile(familyName, buildPath);
  storeFiles = regionFs.getStoreFiles(familyName);
  assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
  assertFalse(fs.exists(buildPath));

  fs.delete(rootDir, true);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 30, Source: TestHRegionFileSystem.java

Example 13: setUp

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
public void setUp() throws Exception {
  // setup config values necessary for store
  this.conf = TEST_UTIL.getConfiguration();
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt("hbase.hstore.compaction.min", minFiles);
  this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
  this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
  this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
  this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);

  //Setting up a Store
  final String id = TestDefaultCompactSelection.class.getName();
  Path basedir = new Path(DIR);
  final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(id));
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);

  fs.delete(logdir, true);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, basedir);
  wals = new WALFactory(walConf, null, id);
  region = HRegion.createHRegion(info, basedir, conf, htd);
  HRegion.closeHRegion(region);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf, info, htd,
      null);

  store = new HStore(region, hcd, conf);

  TEST_FILE = region.getRegionFileSystem().createTempName();
  fs.createNewFile(TEST_FILE);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source: TestDefaultCompactSelection.java

Example 14: testLogCleaning

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // set TTL
  long ttl = 10000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationQueues repQueues =
      ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
  repQueues.init(server.getServerName().toString());
  final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  final FileSystem fs = FileSystem.get(conf);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner)
    if (i % (30/3) == 1) {
      repQueues.addLog(fakeMachineName, fileName.getName());
      System.out.println("Replication log file: " + fileName);
    }
  }

  // sleep for some time to get a newer modification time
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return 5 == fs.listStatus(oldLogDir).length;
    }
  });

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 76, Source: TestLogsCleaner.java

Example 15: testHFileLinkCleaning

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);

  final TableName tableName = TableName.valueOf("test-table");
  final TableName tableLinkName = TableName.valueOf("test-link");
  final String hfileName = "1234567890";
  final String familyName = "cf";

  HRegionInfo hri = new HRegionInfo(tableName);
  HRegionInfo hriLink = new HRegionInfo(tableLinkName);

  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableName, hri.getEncodedName(), familyName);
  Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableLinkName, hriLink.getEncodedName(), familyName);

  // Create an hfile in the archive at <archiveDir>/<table>/<region-encoded-name>/<cf>/<hfileName>
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);

  // Create link to hfile
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
                                      hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();

  // Initialize cleaner
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);

  // Link backref cannot be removed
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));

  // Link backref can be removed
  fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
      FSUtils.getTableDir(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));

  // HFile can be removed
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));

  // Remove everything
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
  assertFalse("Link should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 70, Source: TestHFileLinkCleaner.java


Note: The org.apache.hadoop.fs.FileSystem.createNewFile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.