

Java FileSystem.exists Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.exists. If you have been wondering exactly what FileSystem.exists does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following presents 15 code examples of the FileSystem.exists method, collected from open-source projects and ordered by popularity.
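Before turning to the project examples, here is a minimal, self-contained sketch of the method's basic contract: exists(Path) returns true if the path currently refers to a file or directory on the target file system, and false otherwise. The path below is an illustrative assumption, not taken from any of the projects:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ExistsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolves fs.defaultFS; with no cluster config this is the local file system.
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/exists-demo.txt"); // hypothetical path
    if (fs.exists(path)) {
      System.out.println(path + " exists, " + fs.getFileStatus(path).getLen() + " bytes");
    } else {
      fs.create(path).close(); // create it so a second run takes the branch above
      System.out.println("created " + path);
    }
  }
}

Most of the examples below follow this same probe-then-act shape: check exists(), then delete, create, or skip. Note that the two calls are not atomic, so a concurrent writer can still slip in between them; the note after Example 11 shows one way to handle that.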

Example 1: createJob

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public static Job createJob(Configuration conf, Path inDir, Path outDir, 
    int numInputFiles, int numReds, String input) throws IOException {
  Job job = Job.getInstance(conf);
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (fs.exists(inDir)) {
    fs.delete(inDir, true);
  }
  fs.mkdirs(inDir);
  for (int i = 0; i < numInputFiles; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }    

  FileInputFormat.setInputPaths(job, inDir);
  FileOutputFormat.setOutputPath(job, outDir);
  job.setNumReduceTasks(numReds);
  return job;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: MapReduceTestUtil.java

Example 2: checkDirectoryPermissions

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private boolean checkDirectoryPermissions(FileSystem fs, String targetBase,
                                          FsPermission sourcePerm) throws IOException {
  Path base = new Path(targetBase);

  Stack<Path> stack = new Stack<Path>();
  stack.push(base);
  while (!stack.isEmpty()) {
    Path file = stack.pop();
    if (!fs.exists(file)) continue;
    FileStatus[] fStatus = fs.listStatus(file);
    if (fStatus == null || fStatus.length == 0) continue;

    for (FileStatus status : fStatus) {
      if (status.isDirectory()) {
        stack.push(status.getPath());
        Assert.assertEquals(status.getPermission(), sourcePerm);
      }
    }
  }
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestCopyCommitter.java

Example 3: readKeysToSearch

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
static SortedSet<byte []> readKeysToSearch(final Configuration conf)
throws IOException, InterruptedException {
  Path keysInputDir = new Path(conf.get(SEARCHER_INPUTDIR_KEY));
  FileSystem fs = FileSystem.get(conf);
  SortedSet<byte []> result = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
  if (!fs.exists(keysInputDir)) {
    throw new FileNotFoundException(keysInputDir.toString());
  }
  if (!fs.isDirectory(keysInputDir)) {
    throw new UnsupportedOperationException("TODO");
  } else {
    RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(keysInputDir, false);
    while(iterator.hasNext()) {
      LocatedFileStatus keyFileStatus = iterator.next();
      // Skip "_SUCCESS" file.
      if (keyFileStatus.getPath().getName().startsWith("_")) continue;
      result.addAll(readFileToSearch(conf, fs, keyFileStatus));
    }
  }
  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: IntegrationTestBigLinkedList.java

Example 4: deleteRegionFromFileSystem

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Remove the region from the table directory, archiving the region's hfiles.
 *
 * @param conf       the {@link Configuration} to use
 * @param fs         {@link FileSystem} from which to remove the region
 * @param tableDir   {@link Path} to where the table is being stored
 * @param regionInfo {@link HRegionInfo} for region to be deleted
 * @throws IOException if the request cannot be completed
 */
public static void deleteRegionFromFileSystem(final Configuration conf, final FileSystem fs,
    final Path tableDir, final HRegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path regionDir = regionFs.getRegionDir();

  if (!fs.exists(regionDir)) {
    LOG.warn("Trying to delete a region that does not exist on disk: " + regionDir);
    return;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regionDir);
  }

  // Archive region
  Path rootDir = FSUtils.getRootDir(conf);
  HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

  // Delete empty region dir
  if (!fs.delete(regionDir, true)) {
    LOG.warn("Failed delete of " + regionDir);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: HRegionFileSystem.java

Example 5: checkIfFoldersAreInSync

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public static boolean checkIfFoldersAreInSync(FileSystem fs, String targetBase, String sourceBase)
  throws IOException {
  Path base = new Path(targetBase);

  Stack<Path> stack = new Stack<>();
  stack.push(base);
  while (!stack.isEmpty()) {
    Path file = stack.pop();
    if (!fs.exists(file)) {
      continue;
    }
    FileStatus[] fStatus = fs.listStatus(file);
    if (fStatus == null || fStatus.length == 0) {
      continue;
    }

    for (FileStatus status : fStatus) {
      if (status.isDirectory()) {
        stack.push(status.getPath());
      }
      assertTrue(
          fs.exists(new Path(sourceBase + "/" + PathUtil.getRelativePath(new Path(targetBase), status.getPath()))));
    }
  }
  return true;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 27, Source: S3MapReduceCpTestUtils.java

Example 6: createContainerLogInLocalDir

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private static void createContainerLogInLocalDir(Path appLogsDir,
    ContainerId containerId, FileSystem fs) throws Exception {
  Path containerLogsDir = new Path(appLogsDir, containerId.toString());
  if (fs.exists(containerLogsDir)) {
    fs.delete(containerLogsDir, true);
  }
  assertTrue(fs.mkdirs(containerLogsDir));
  Writer writer =
      new FileWriter(new File(containerLogsDir.toString(), "sysout"));
  writer.write("Hello " + containerId + "!");
  writer.close();
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestLogsCLI.java

Example 7: testInitialization

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
static long testInitialization(String id, Configuration conf) 
throws IOException {
  Path testPath = getInitFilePath(id);
  FileSystem fs = FileSystem.getLocal(conf);
  return fs.exists(testPath) 
         ? fs.getFileStatus(testPath).getModificationTime() 
         : 0;
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: TestResourceUsageEmulators.java

Example 8: testReplayWorksThoughLotsOfFlushing

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * HBASE-12782 ITBLL fails for me if generator does anything but 5M per maptask.
 * Create a region. Close it. Then copy into place a file to replay, one that is bigger than
 * configured flush size so we bring on lots of flushes.  Then reopen and confirm all edits
 * made it in.
 * @throws IOException
 */
@Test (timeout=60000)
public void testReplayWorksThoughLotsOfFlushing() throws IOException {
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  // Set it so we flush every 1M or so. That's a lot.
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
  // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
  // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
  final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(testName.getMethodName()));
  final String columnFamily = "meta";
  byte [][] columnFamilyAsByteArray = new byte [][] {Bytes.toBytes(columnFamily)};
  htd.addFamily(new HColumnDescriptor(columnFamily));
  HRegionInfo hri = new HRegionInfo(htd.getTableName()) {
    @Override
    public synchronized String getEncodedName() {
      return encodedRegionName;
    }

    // Cache the name because lots of lookups.
    private byte [] encodedRegionNameAsBytes = null;
    @Override
    public synchronized byte[] getEncodedNameAsBytes() {
      if (encodedRegionNameAsBytes == null) {
        this.encodedRegionNameAsBytes = Bytes.toBytes(getEncodedName());
      }
      return this.encodedRegionNameAsBytes;
    }
  };
  Path hbaseRootDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  Path tableDir = FSUtils.getTableDir(hbaseRootDir, htd.getTableName());
  HRegionFileSystem hrfs =
      new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
  if (fs.exists(hrfs.getRegionDir())) {
    LOG.info("Region directory already exists. Deleting.");
    fs.delete(hrfs.getRegionDir(), true);
  }
  HRegion region = HRegion.createHRegion(hri, hbaseRootDir, conf, htd, null);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  List<String> storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // There should be no store files.
  assertTrue(storeFiles.isEmpty());
  region.close();
  Path regionDir = region.getRegionDir(hbaseRootDir, hri);
  Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
  // This is a little fragile getting this path to a file of 10M of edits.
  Path recoveredEditsFile = new Path(
    System.getProperty("test.build.classes", "target/test-classes"),
      "0000000000000016310");
  // Copy this file under the region's recovered.edits dir so it is replayed on reopen.
  Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName());
  fs.copyToLocalFile(recoveredEditsFile, destination);
  assertTrue(fs.exists(destination));
  // Now the file 0000000000000016310 is under recovered.edits, reopen the region to replay.
  region = HRegion.openHRegion(region, null);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // Our 0000000000000016310 file is 10MB, and most of its edits target this region. Flushing
  // at 1MB, the replay should leave well over ten flushed store files behind, which is what
  // the assertion below checks.
  assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
  // Now verify all edits made it into the region.
  int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region);
  LOG.info("Checked " + count + " edits made it in");
}
 
Developer: fengchen8086, Project: ditb, Lines: 73, Source: TestRecoveredEdits.java

Example 9: createInput

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private Path createInput() throws IOException {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf);
  Path inputPath = getInputPath();

  // Clear the input directory if it exists, first.
  if (fs.exists(inputPath)) {
    fs.delete(inputPath, true);
  }

  // Create an input file
  createInputFile(inputPath, 0, 10);

  return inputPath;
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: TestMapperReducerCleanup.java

Example 10: addDependencyJars

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Add the jars containing the given classes to the job's configuration
 * such that JobClient will ship them to the cluster and add them to
 * the DistributedCache.
 */
public static void addDependencyJars(Configuration conf,
    Class<?>... classes) throws IOException {

  FileSystem localFs = FileSystem.getLocal(conf);
  Set<String> jars = new HashSet<String>();
  // Add jars that are already in the tmpjars variable
  jars.addAll(conf.getStringCollection("tmpjars"));

  // As we find jars, record their contents against the jar name so that we can
  // avoid creating new jars for classes that have already been packaged.
  Map<String, String> packagedClasses = new HashMap<String, String>();

  // Add jars containing the specified classes
  for (Class<?> clazz : classes) {
    if (clazz == null) continue;

    Path path = findOrCreateJar(clazz, localFs, packagedClasses);
    if (path == null) {
      LOG.warn("Could not find jar for class " + clazz +
               " in order to ship it to the cluster.");
      continue;
    }
    if (!localFs.exists(path)) {
      LOG.warn("Could not validate jar file " + path + " for class "
               + clazz);
      continue;
    }
    jars.add(path.toString());
  }
  if (jars.isEmpty()) return;

  conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[jars.size()])));
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TableMapReduceUtil.java
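For context, a typical call site for addDependencyJars passes the job's configuration together with the classes its tasks need shipped to the cluster. A hedged sketch, where MyMapper and MyReducer are hypothetical stand-ins rather than classes from the snippet above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

public class DependencyJarsDemo {
  // Hypothetical task classes standing in for a job's real mapper and reducer.
  static class MyMapper extends Mapper<Object, Object, Object, Object> {}
  static class MyReducer extends Reducer<Object, Object, Object, Object> {}

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "demo-job");
    // Resolves the jar backing each class and appends it to the job's "tmpjars",
    // so the JobClient ships it to the cluster via the DistributedCache.
    TableMapReduceUtil.addDependencyJars(job.getConfiguration(), MyMapper.class, MyReducer.class);
  }
}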

Example 11: writeGlobalCleanerPidFile

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * To ensure there are not multiple instances of the SCM running on a given
 * cluster, a global pid file is used. This file contains the hostname of the
 * machine that owns the pid file.
 *
 * @return true if the pid file was written, false otherwise
 * @throws YarnException
 */
private boolean writeGlobalCleanerPidFile() throws YarnException {
  String root =
      conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
          YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
  Path pidPath = new Path(root, GLOBAL_CLEANER_PID);
  try {
    FileSystem fs = FileSystem.get(this.conf);

    if (fs.exists(pidPath)) {
      return false;
    }

    FSDataOutputStream os = fs.create(pidPath, false);
    // write the hostname and the process id in the global cleaner pid file
    final String ID = ManagementFactory.getRuntimeMXBean().getName();
    os.writeUTF(ID);
    os.close();
    // add it to the delete-on-exit to ensure it gets deleted when the JVM
    // exits
    fs.deleteOnExit(pidPath);
  } catch (IOException e) {
    throw new YarnException(e);
  }
  LOG.info("Created the global cleaner pid file at " + pidPath.toString());
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: CleanerService.java
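A side note on the design above: the exists(pidPath) probe alone cannot make the pid file exclusive, because a second SCM instance could create the file between the check and the create. The exclusivity actually comes from fs.create(pidPath, false), whose overwrite=false flag makes the create fail if the file has appeared in the meantime. Below is a minimal sketch of that check-then-create pattern, assuming the underlying file system reports the conflict as FileAlreadyExistsException (as HDFS does); the class and method names are illustrative:

import java.io.IOException;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CreateIfAbsent {
  /** Returns true if this call created the file, false if it already existed. */
  public static boolean tryCreate(FileSystem fs, Path p) throws IOException {
    if (fs.exists(p)) {
      return false; // fast path: the file is already there
    }
    try {
      fs.create(p, false).close(); // overwrite=false: fails if created concurrently
      return true;
    } catch (FileAlreadyExistsException lostRace) {
      return false; // another process created the file between the two calls
    }
  }
}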

Example 12: delete

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Deletes the file at the given path.
 *
 * @param fileSystemInfo
 *            file system connection information
 * @param path
 *            path of the file to delete
 * @return whether the deletion succeeded
 */
public static boolean delete(FileSystemInfo fileSystemInfo, String path) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path uri = new Path(path);
	try {
		if (fs.exists(uri)) {
			return fs.delete(uri, true);
		}
	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}
	return false;
}
 
Developer: zhangjunfang, Project: alluxio, Lines: 24, Source: HdfsAndAlluxioUtils_update.java

Example 13: writeTestDataToFile

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private static void writeTestDataToFile(FileSystem fs) throws IOException {
  OutputStream out = null;
  if (!fs.exists(TEST_PATH)) {
    out = fs.create(TEST_PATH);
  } else {
    out = fs.append(TEST_PATH);
  }
  out.write(PLAIN_TEXT.getBytes());
  out.close();
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: TestEncryptedTransfer.java

Example 14: testRemovesEmptyDirectories

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testRemovesEmptyDirectories() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  // no cleaner policies = delete all files
  conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
  Server server = new DummyServer();
  Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  // setup the cleaner
  FileSystem fs = UTIL.getDFSCluster().getFileSystem();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);

  // make all the directories for archiving files
  Path table = new Path(archivedHfileDir, "table");
  Path region = new Path(table, "regionsomthing");
  Path family = new Path(region, "fam");
  Path file = new Path(family, "file12345");
  fs.mkdirs(family);
  if (!fs.exists(family)) throw new RuntimeException("Couldn't create test family:" + family);
  fs.create(file).close();
  if (!fs.exists(file)) throw new RuntimeException("Test file didn't get created:" + file);

  // run the chore to cleanup the files (and the directories above it)
  cleaner.chore();

  // make sure all the parent directories get removed
  assertFalse("family directory not removed for empty directory", fs.exists(family));
  assertFalse("region directory not removed for empty directory", fs.exists(region));
  assertFalse("table directory not removed for empty directory", fs.exists(table));
  assertTrue("archive directory", fs.exists(archivedHfileDir));
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: TestHFileCleaner.java

Example 15: run

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * The main driver for <code>LoadTypedBytes</code>.
 */
public int run(String[] args) throws Exception {
  if (args.length == 0) {
    System.err.println("Too few arguments!");
    printUsage();
    return 1;
  }
  Path path = new Path(args[0]);
  FileSystem fs = path.getFileSystem(getConf());
  if (fs.exists(path)) {
    System.err.println("given path exists already!");
    return -1;
  }
  TypedBytesInput tbinput = new TypedBytesInput(new DataInputStream(System.in));
  SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path,
    TypedBytesWritable.class, TypedBytesWritable.class);
  try {
    TypedBytesWritable key = new TypedBytesWritable();
    TypedBytesWritable value = new TypedBytesWritable();
    byte[] rawKey = tbinput.readRaw();
    while (rawKey != null) {
      byte[] rawValue = tbinput.readRaw();
      key.set(rawKey, 0, rawKey.length);
      value.set(rawValue, 0, rawValue.length);
      writer.append(key, value);
      rawKey = tbinput.readRaw();
    }
  } finally {
    writer.close();
  }
  return 0;
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: LoadTypedBytes.java


Note: the org.apache.hadoop.fs.FileSystem.exists examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects and remain the copyright of their original authors; consult each project's license before distributing or reusing the code, and do not republish without permission.