

Java FileSystem.listStatus Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.listStatus. If you are wondering what exactly FileSystem.listStatus does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The sections below present 15 code examples of FileSystem.listStatus, sorted by popularity by default.
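Before the collected examples, here is a minimal self-contained sketch of the basic call pattern. The namenode URI hdfs://localhost:9000 and the path /tmp are placeholders, not values from any example below; any Hadoop-supported filesystem behaves the same way:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStatusDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder URI; point this at your own cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
    try {
      // listStatus returns one FileStatus per directory entry
      // (or a single-element array when the path is a regular file).
      FileStatus[] entries = fs.listStatus(new Path("/tmp"));
      for (FileStatus entry : entries) {
        System.out.println((entry.isDirectory() ? "dir  " : "file ")
            + entry.getPath() + " (" + entry.getLen() + " bytes)");
      }
    } finally {
      fs.close();
    }
  }
}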

Example 1: validateMapFileOutputContent

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void validateMapFileOutputContent(
    FileSystem fs, Path dir) throws IOException {
  // map output is a directory with index and data files
  Path expectedMapDir = new Path(dir, partFile);
  assert(fs.getFileStatus(expectedMapDir).isDirectory());    
  FileStatus[] files = fs.listStatus(expectedMapDir);
  int fileCount = 0;
  boolean dataFileFound = false; 
  boolean indexFileFound = false; 
  for (FileStatus f : files) {
    if (f.isFile()) {
      ++fileCount;
      if (f.getPath().getName().equals(MapFile.INDEX_FILE_NAME)) {
        indexFileFound = true;
      }
      else if (f.getPath().getName().equals(MapFile.DATA_FILE_NAME)) {
        dataFileFound = true;
      }
    }
  }
  assert(fileCount > 0);
  assert(dataFileFound && indexFileFound);
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: TestFileOutputCommitter.java

Example 2: ParquetMetadataStat

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Collects metadata from the Parquet files under a directory on HDFS.
 *
 * @param nameNode the hostname of the HDFS namenode
 * @param hdfsPort the port of the HDFS namenode, usually 9000 or 8020
 * @param dirPath the path of the directory containing the parquet files, beginning with /, e.g. /msra/column/order/parquet/
 * @throws IOException
 * @throws MetadataException
 */
public ParquetMetadataStat(String nameNode, int hdfsPort, String dirPath) throws IOException, MetadataException
{
    Configuration conf = new Configuration();
    FileSystem fileSystem = FileSystem.get(URI.create("hdfs://" + nameNode + ":" + hdfsPort), conf);
    Path hdfsDirPath = new Path(dirPath);
    if (! fileSystem.isFile(hdfsDirPath))
    {
        FileStatus[] fileStatuses = fileSystem.listStatus(hdfsDirPath);
        for (FileStatus status : fileStatuses)
        {
            // compatibility for HDFS 1.x
            if (! status.isDir())
            {
                //System.out.println(status.getPath().toString());
                this.fileMetaDataList.add(new ParquetFileMetadata(conf, status.getPath()));
            }
        }
    }
    if (this.fileMetaDataList.size() == 0)
    {
        throw new MetadataException("fileMetaDataList is empty, path is not a dir.");
    }
    this.fields = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFields();
    this.columnCount = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFieldCount();
}
 
Developer ID: dbiir, Project: rainbow, Lines: 34, Source: ParquetMetadataStat.java
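A hypothetical instantiation, reusing the sample directory from the javadoc; the host and port are placeholders, not values taken from the project:

// Placeholder host/port; substitute your own namenode address.
ParquetMetadataStat stat =
    new ParquetMetadataStat("namenode-host", 9000, "/msra/column/order/parquet/");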

Example 3: migrateFsTableDescriptors

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Migrates all snapshots, user tables and system tables that require migration.
 * First migrates snapshots, then migrates each user table in order,
 * then attempts ROOT (which should be gone).
 * Migrates hbase:meta last to indicate migration is complete.
 */
private static void migrateFsTableDescriptors(FileSystem fs, Path rootDir) throws IOException {
  // First migrate snapshots - will migrate any snapshot dir that contains a table info file
  Path snapshotsDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  if (fs.exists(snapshotsDir)) {
    LOG.info("Migrating snapshots");
    FileStatus[] snapshots = fs.listStatus(snapshotsDir,
        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
    for (FileStatus snapshot : snapshots) {
      migrateTable(fs, snapshot.getPath());
    }
  }
  
  LOG.info("Migrating user tables");
  List<Path> userTableDirs = FSUtils.getTableDirs(fs, rootDir);
  for (Path userTableDir : userTableDirs) {
    migrateTable(fs, userTableDir);
  }
  
  LOG.info("Migrating system tables");
  // migrate meta last because that's what we check to see if migration is complete
  migrateTableIfExists(fs, rootDir, TableName.META_TABLE_NAME);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: FSTableDescriptorMigrationToSubdir.java

Example 4: listStatus

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Lists the file statuses under a path.
 *
 * @param fileSystemInfo
 *            file system information
 * @param path
 *            file path
 * @return the FileStatus entries found under the path
 */
public static List<FileStatus> listStatus(FileSystemInfo fileSystemInfo, String path) {
	List<FileStatus> info = new ArrayList<FileStatus>();
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path uri = new Path(path);
	try {
		FileStatus[] list = fs.listStatus(uri);
		for (FileStatus f : list) {
			info.add(f);
		}
	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}
	return info;
}
 
Developer ID: zhangjunfang, Project: alluxio, Lines: 26, Source: HdfsAndAlluxioUtils_update.java
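A hypothetical call site; the class-qualified call follows the source attribution above, and fileSystemInfo is assumed to be an already-configured instance, since constructing FileSystemInfo depends on the surrounding project:

// Hypothetical usage fragment; fileSystemInfo setup is project-specific.
List<FileStatus> entries = HdfsAndAlluxioUtils_update.listStatus(fileSystemInfo, "/user/data");
for (FileStatus f : entries) {
	System.out.println(f.getPath());
}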

Example 5: assertListStatusFinds

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Assert that a FileSystem.listStatus on a dir finds the subdir/child entry
 * @param fs filesystem
 * @param dir directory to scan
 * @param subdir full path to look for
 * @throws IOException IO problems
 */
public static void assertListStatusFinds(FileSystem fs,
                                         Path dir,
                                         Path subdir) throws IOException {
  FileStatus[] stats = fs.listStatus(dir);
  boolean found = false;
  StringBuilder builder = new StringBuilder();
  for (FileStatus stat : stats) {
    builder.append(stat.toString()).append('\n');
    if (stat.getPath().equals(subdir)) {
      found = true;
    }
  }
  assertTrue("Path " + subdir
                    + " not found in directory " + dir + ":" + builder,
                    found);
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: SwiftTestUtils.java
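A hypothetical test fragment exercising this assertion; the paths are for illustration only:

// Create a child directory, then assert listStatus on the parent finds it.
Path dir = new Path("/test");
Path child = new Path(dir, "child");
fs.mkdirs(child);
assertListStatusFinds(fs, dir, child);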

Example 6: changePermissions

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void changePermissions(FileSystem fs, final Path path)
    throws IOException, InterruptedException {
  File f = new File(path.toUri());
  if (FileUtils.isSymlink(f)) {
    // avoid following symlinks when changing permissions
    return;
  }
  boolean isDir = f.isDirectory();
  FsPermission perm = cachePerms;
  // set public perms as 755 or 555 based on dir or file
  if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
    perm = isDir ? PUBLIC_DIR_PERMS : PUBLIC_FILE_PERMS;
  }
  // set private perms as 700 or 500
  else {
    // PRIVATE:
    // APPLICATION:
    perm = isDir ? PRIVATE_DIR_PERMS : PRIVATE_FILE_PERMS;
  }
  LOG.debug("Changing permissions for path " + path + " to perm " + perm);
  final FsPermission fPerm = perm;
  if (null == userUgi) {
    files.setPermission(path, perm);
  } else {
    userUgi.doAs(new PrivilegedExceptionAction<Void>() {
      public Void run() throws Exception {
        files.setPermission(path, fPerm);
        return null;
      }
    });
  }
  if (isDir) {
    FileStatus[] statuses = fs.listStatus(path);
    for (FileStatus status : statuses) {
      changePermissions(fs, status.getPath());
    }
  }
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines: 39, Source: FSDownload.java

Example 7: listRecursive

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private List<FileStatus> listRecursive(FileSystem fs, FileStatus status) throws IOException
{
    List<FileStatus> statusList = Lists.newArrayList();
    if (status.isDirectory()) {
        FileStatus[] entries = fs.listStatus(status.getPath(), HiddenFileFilter.INSTANCE);
        for (FileStatus entry : entries) {
            statusList.addAll(listRecursive(fs, entry));
        }
    }
    else {
        statusList.add(status);
    }
    return statusList;
}
 
Developer ID: CyberAgent, Project: embulk-input-parquet_hadoop, Lines: 15, Source: ParquetHadoopInputPlugin.java
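Flattening a directory tree with this helper might look like the following sketch, called from inside the same class since the method is private; the root path is hypothetical:

// Start the recursion from the FileStatus of the root path.
List<FileStatus> leaves = listRecursive(fs, fs.getFileStatus(new Path("/data/parquet")));
for (FileStatus leaf : leaves) {
    System.out.println(leaf.getPath());
}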

Example 8: setPermission

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Set path permission.
 */
private void setPermission(final FileSystem fs, final Path path, final short filesMode,
    final boolean recursive) throws IOException {
  if (filesMode > 0) {
    FsPermission perm = new FsPermission(filesMode);
    if (recursive && fs.isDirectory(path)) {
      for (FileStatus child : fs.listStatus(path)) {
        setPermission(fs, child.getPath(), filesMode, recursive);
      }
    }
    fs.setPermission(path, perm);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 16, Source: ExportSnapshot.java

Example 9: testWritingPEData

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Run small MR job.
 */
@Test
public void testWritingPEData() throws Exception {
  Configuration conf = util.getConfiguration();
  Path testDir = util.getDataTestDirOnTestFS("testWritingPEData");
  FileSystem fs = testDir.getFileSystem(conf);

  // Set down this value or we OOME in eclipse.
  conf.setInt("mapreduce.task.io.sort.mb", 20);
  // Write a few files.
  conf.setLong(HConstants.HREGION_MAX_FILESIZE, 64 * 1024);

  Job job = new Job(conf, "testWritingPEData");
  setupRandomGeneratorMapper(job);
  // This partitioner doesn't work well for number keys but using it anyways
  // just to demonstrate how to configure it.
  byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];
  byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];

  Arrays.fill(startKey, (byte)0);
  Arrays.fill(endKey, (byte)0xff);

  job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
  // Set start and end rows for partitioner.
  SimpleTotalOrderPartitioner.setStartKey(job.getConfiguration(), startKey);
  SimpleTotalOrderPartitioner.setEndKey(job.getConfiguration(), endKey);
  job.setReducerClass(KeyValueSortReducer.class);
  job.setOutputFormatClass(HFileOutputFormat.class);
  job.setNumReduceTasks(4);
  job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName(),
      KeyValueSerialization.class.getName());

  FileOutputFormat.setOutputPath(job, testDir);
  assertTrue(job.waitForCompletion(false));
  FileStatus [] files = fs.listStatus(testDir);
  assertTrue(files.length > 0);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 41, Source: TestHFileOutputFormat.java

Example 10: listStatus

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
 * This accommodates differences between Hadoop versions: Hadoop 1 does not
 * throw a FileNotFoundException and instead returns an empty FileStatus[],
 * while Hadoop 2 throws FileNotFoundException.
 *
 * @param fs file system
 * @param dir directory
 * @param filter path filter
 * @return null if dir is empty or doesn't exist, otherwise a FileStatus array
 */
public static FileStatus [] listStatus(final FileSystem fs,
    final Path dir, final PathFilter filter) throws IOException {
  FileStatus [] status = null;
  try {
    status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
  } catch (FileNotFoundException fnfe) {
    // if directory doesn't exist, return null
    if (LOG.isTraceEnabled()) {
      LOG.trace(dir + " doesn't exist");
    }
  }
  if (status == null || status.length < 1) return null;
  return status;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: FSUtils.java
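Because this wrapper returns null rather than an empty array, call sites typically guard before iterating; a minimal hypothetical sketch, where regionDir and process() are placeholders:

// Hypothetical call site; null filter means list everything.
FileStatus[] children = FSUtils.listStatus(fs, regionDir, null);
if (children == null) {
  return; // directory missing or empty: nothing to do
}
for (FileStatus child : children) {
  process(child.getPath());
}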

Example 11: getFooters

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public static List<Footer> getFooters(final Configuration conf, List<FileStatus> statuses, int parallelism) throws IOException {
  final List<TimedRunnable<Footer>> readers = Lists.newArrayList();
  List<Footer> foundFooters = Lists.newArrayList();
  for(FileStatus status : statuses){


    if(status.isDirectory()){
      // first we check for summary file.
      FileSystem fs = status.getPath().getFileSystem(conf);

      final Path summaryPath = new Path(status.getPath(), ParquetFileWriter.PARQUET_METADATA_FILE);
      if (fs.exists(summaryPath)){
        FileStatus summaryStatus = fs.getFileStatus(summaryPath);
        foundFooters.addAll(ParquetFileReader.readSummaryFile(conf, summaryStatus));
        continue;
      }

      // else we handle as normal file.
      for(FileStatus inStatus : fs.listStatus(status.getPath(), new DrillPathFilter())){
        readers.add(new FooterReader(conf, inStatus));
      }
    }else{
      readers.add(new FooterReader(conf, status));
    }

  }
  if(!readers.isEmpty()){
    foundFooters.addAll(TimedRunnable.run("Fetch Parquet Footers", logger, readers, parallelism));
  }

  return foundFooters;
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 33, Source: FooterGatherer.java

Example 12: verifyPermsRecursively

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void verifyPermsRecursively(FileSystem fs,
    FileContext files, Path p,
    LocalResourceVisibility vis) throws IOException {
  FileStatus status = files.getFileStatus(p);
  if (status.isDirectory()) {
    if (vis == LocalResourceVisibility.PUBLIC) {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PUBLIC_DIR_PERMS.toShort());
    }
    else {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PRIVATE_DIR_PERMS.toShort());
    }
    if (!status.isSymlink()) {
      FileStatus[] statuses = fs.listStatus(p);
      for (FileStatus stat : statuses) {
        verifyPermsRecursively(fs, files, stat.getPath(), vis);
      }
    }
  }
  else {
    if (vis == LocalResourceVisibility.PUBLIC) {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PUBLIC_FILE_PERMS.toShort());
    }
    else {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PRIVATE_FILE_PERMS.toShort());
    }
  }      
}
 
Developer ID: naver, Project: hadoop, Lines: 32, Source: TestFSDownload.java

Example 13: testTopUsers

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test(timeout=120000)
@SuppressWarnings("unchecked")
public void testTopUsers() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final Path path = new Path("/");
    final int NUM_OPS = 10;
    for (int i=0; i< NUM_OPS; i++) {
      fs.listStatus(path);
      fs.setTimes(path, 0, 1);
    }
    String topUsers =
        (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
    ObjectMapper mapper = new ObjectMapper();
    Map<String, Object> map = mapper.readValue(topUsers, Map.class);
    assertTrue("Could not find map key timestamp", 
        map.containsKey("timestamp"));
    assertTrue("Could not find map key windows", map.containsKey("windows"));
    List<Map<String, List<Map<String, Object>>>> windows =
        (List<Map<String, List<Map<String, Object>>>>) map.get("windows");
    assertEquals("Unexpected num windows", 3, windows.size());
    for (Map<String, List<Map<String, Object>>> window : windows) {
      final List<Map<String, Object>> ops = window.get("ops");
      assertEquals("Unexpected num ops", 3, ops.size());
      for (Map<String, Object> op: ops) {
        final long count = Long.parseLong(op.get("totalCount").toString());
        final String opType = op.get("opType").toString();
        final int expected;
        if (opType.equals(TopConf.ALL_CMDS)) {
          expected = 2*NUM_OPS;
        } else {
          expected = NUM_OPS;
        }
        assertEquals("Unexpected total count", expected, count);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 50, Source: TestNameNodeMXBean.java

Example 14: testRandomCompressedTextDataGenerator

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Test {@link RandomTextDataMapper} via {@link CompressionEmulationUtil}.
 */
@Test
public void testRandomCompressedTextDataGenerator() throws Exception {
  int wordSize = 10;
  int listSize = 20;
  long dataSize = 10*1024*1024;
  
  Configuration conf = new Configuration();
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  
  // configure the RandomTextDataGenerator to generate desired sized data
  conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE, 
              listSize);
  conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_WORDSIZE, 
              wordSize);
  conf.setLong(GenerateData.GRIDMIX_GEN_BYTES, dataSize);
  conf.set("mapreduce.job.hdfs-servers", "");
  
  FileSystem lfs = FileSystem.getLocal(conf);
  
  // define the test's root temp directory
  Path rootTempDir =
      new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
          lfs.getUri(), lfs.getWorkingDirectory());

  Path tempDir = new Path(rootTempDir, "TestRandomCompressedTextDataGenr");
  lfs.delete(tempDir, true);
  
  runDataGenJob(conf, tempDir);
  
  // validate the output data
  FileStatus[] files = 
    lfs.listStatus(tempDir, new Utils.OutputFileUtils.OutputFilesFilter());
  long size = 0;
  long maxLineSize = 0;
  
  for (FileStatus status : files) {
    InputStream in = 
      CompressionEmulationUtil
        .getPossiblyDecompressedInputStream(status.getPath(), conf, 0);
    BufferedReader reader = new BufferedReader(new InputStreamReader(in));
    String line = reader.readLine();
    if (line != null) {
      long lineSize = line.getBytes().length;
      if (lineSize > maxLineSize) {
        maxLineSize = lineSize;
      }
      while (line != null) {
        for (String word : line.split("\\s")) {
          size += word.getBytes().length;
        }
        line = reader.readLine();
      }
    }
    reader.close();
  }

  assertTrue(size >= dataSize);
  assertTrue(size <= dataSize + maxLineSize);
}
 
Developer ID: naver, Project: hadoop, Lines: 64, Source: TestCompressionEmulationUtils.java

Example 15: list

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
void list(FileSystem fs, String name) throws IOException {
  FileSystem.LOG.info("\n\n" + name);
  for(FileStatus s : fs.listStatus(dir)) {
    FileSystem.LOG.info("" + s.getPath());
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 7, Source: TestDFSRemove.java


Note: The org.apache.hadoop.fs.FileSystem.listStatus examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by many developers; copyright for the code remains with the original authors, and its use and distribution are subject to each project's License. Do not reproduce without permission.