当前位置: 首页>>代码示例>>Java>>正文


Java FileStatus.isDir方法代码示例

本文整理汇总了Java中org.apache.hadoop.fs.FileStatus.isDir方法的典型用法代码示例。如果您正苦于以下问题:Java FileStatus.isDir方法的具体用法?Java FileStatus.isDir怎么用?Java FileStatus.isDir使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.fs.FileStatus的用法示例。


在下文中一共展示了FileStatus.isDir方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: ParquetMetadataStat

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Collects Parquet metadata for every regular file directly under a directory on HDFS.
 *
 * @param nameNode the hostname of the HDFS namenode
 * @param hdfsPort the port of the HDFS namenode, usually 9000 or 8020
 * @param dirPath the path of the directory which contains the parquet files, beginning
 *                with /, for example /msra/column/order/parquet/
 * @throws IOException if the filesystem cannot be reached or listed
 * @throws MetadataException if dirPath yields no data files (not a directory, or empty)
 */
public ParquetMetadataStat(String nameNode, int hdfsPort, String dirPath) throws IOException, MetadataException
{
    Configuration conf = new Configuration();
    FileSystem fileSystem = FileSystem.get(URI.create("hdfs://" + nameNode + ":" + hdfsPort), conf);
    Path hdfsDirPath = new Path(dirPath);
    if (! fileSystem.isFile(hdfsDirPath))
    {
        FileStatus[] fileStatuses = fileSystem.listStatus(hdfsDirPath);
        for (FileStatus status : fileStatuses)
        {
            // isDir() (deprecated alias of isDirectory()) kept for HDFS 1.x compatibility.
            if (! status.isDir())
            {
                this.fileMetaDataList.add(new ParquetFileMetadata(conf, status.getPath()));
            }
        }
    }
    if (this.fileMetaDataList.isEmpty())
    {
        // The old message claimed "path is not a dir", which was misleading when the
        // path IS a directory that simply contains no regular files.
        throw new MetadataException(
                "fileMetaDataList is empty: path is not a directory or contains no data files.");
    }
    // Schema information is taken from the first file; assumes all files share one schema
    // -- TODO confirm with callers.
    this.fields = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFields();
    this.columnCount = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFieldCount();
}
 
开发者ID:dbiir,项目名称:rainbow,代码行数:34,代码来源:ParquetMetadataStat.java

示例2: listFiles

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Lists the data files directly under a directory, skipping subdirectories and any
 * file whose name does not match the "part...-NNNNN" naming convention.
 *
 * @param fs the filesystem to list from
 * @param path the directory to scan
 * @return FileStatus for data files only.
 * @throws IOException if the directory cannot be listed
 */
private FileStatus[] listFiles(FileSystem fs, Path path) throws IOException {
  FileStatus[] fileStatuses = fs.listStatus(path);
  // Typed list instead of a raw ArrayList: avoids the unchecked cast on toArray.
  List<FileStatus> files = new ArrayList<>();
  Pattern patt = Pattern.compile("part.*-([0-9][0-9][0-9][0-9][0-9]).*");
  for (FileStatus fstat : fileStatuses) {
    String fname = fstat.getPath().getName();
    // isDir() retained for compatibility with older Hadoop releases.
    if (!fstat.isDir()) {
      Matcher mat = patt.matcher(fname);
      if (mat.matches()) {
        files.add(fstat);
      }
    }
  }
  return files.toArray(new FileStatus[0]);
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:17,代码来源:TestAppendUtils.java

示例3: ensureEmptyWriteDir

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Creates the directory our test files will be written to and clears out any
 * residue from previous runs. Fails the test if the directory contains
 * subdirectories, if a leftover file cannot be deleted, or if the directory
 * could not be created at all.
 */
private void ensureEmptyWriteDir() throws IOException {
  FileSystem fs = FileSystem.getLocal(getConf());
  Path writeDir = getWritePath();

  fs.mkdirs(writeDir);

  for (FileStatus entry : fs.listStatus(writeDir)) {
    if (entry.isDir()) {
      fail("setUp(): Write directory " + writeDir
          + " contains subdirectories");
    }

    LOG.debug("setUp(): Removing " + entry.getPath());
    if (!fs.delete(entry.getPath(), false)) {
      fail("setUp(): Could not delete residual file " + entry.getPath());
    }
  }

  if (!fs.exists(writeDir)) {
    fail("setUp: Could not create " + writeDir);
  }
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:28,代码来源:TestSplittableBufferedWriter.java

示例4: getFileBlockLocations

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Returns block locations for the given file (null for directories). For the
 * one configured file-with-missing-blocks, the first location is replaced by
 * one reporting empty host/name arrays so the missing-block code path can be
 * exercised by tests.
 */
@Override
public BlockLocation[] getFileBlockLocations(
    FileStatus stat, long start, long len) throws IOException {
  if (stat.isDir()) {
    return null;
  }
  System.out.println("File " + stat.getPath());
  String name = stat.getPath().toUri().getPath();
  BlockLocation[] result = super.getFileBlockLocations(stat, start, len);
  if (name.equals(fileWithMissingBlocks)) {
    System.out.println("Returning missing blocks for " + fileWithMissingBlocks);
    // Same offset/length, but no hosts or names: simulates unavailable replicas.
    BlockLocation original = result[0];
    result[0] = new HdfsBlockLocation(
        new BlockLocation(new String[0], new String[0],
            original.getOffset(), original.getLength()),
        null);
  }
  return result;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:TestCombineFileInputFormat.java

示例5: getStoreFiles

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Returns all files belonging to the given region directory. Could return an
 * empty list.
 *
 * @param fs  The file system reference.
 * @param regionDir  The region directory to scan.
 * @return The list of files found.
 * @throws IOException When scanning the files fails.
 */
static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
throws IOException {
  List<Path> storeFiles = new ArrayList<Path>();
  // Only descend one level: the family directories directly under the region.
  PathFilter onlyDirs = new FSUtils.DirFilter(fs);
  for (FileStatus familyDir : fs.listStatus(regionDir, onlyDirs)) {
    for (FileStatus entry : fs.listStatus(familyDir.getPath())) {
      if (!entry.isDir()) {
        storeFiles.add(entry.getPath());
      }
    }
  }
  return storeFiles;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:25,代码来源:IndexFile.java

示例6: create

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Builds a DotDrillFile for the given status if it is a regular file whose
 * name matches one of the known dot-drill types; returns null for directories
 * and for files matching no type.
 */
public static DotDrillFile create(DrillFileSystem fs, FileStatus status){
  if (status.isDir()) {
    // Directories can never be dot-drill files.
    return null;
  }
  for (DotDrillType type : DotDrillType.values()) {
    if (type.matches(status)) {
      return new DotDrillFile(fs, status, type);
    }
  }
  return null;
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:9,代码来源:DotDrillFile.java

示例7: addRecursiveStatus

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Recursively flattens a status into the list: plain files are appended
 * directly, while directories are expanded via a glob over their immediate
 * children and each child is processed the same way.
 */
private void addRecursiveStatus(FileStatus parent, List<FileStatus> listToFill) throws IOException {
  if (!parent.isDir()) {
    listToFill.add(parent);
    return;
  }
  Path childPattern = new Path(parent.getPath(), "*");
  for (FileStatus child : underlyingFs.globStatus(childPattern, new DrillPathFilter())) {
    if (child.isDir()) {
      addRecursiveStatus(child, listToFill);
    } else {
      listToFill.add(child);
    }
  }
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:16,代码来源:DrillFileSystem.java

示例8: getNextPartition

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Returns the greatest partition number available for appending, for data
 * files in targetDir: one past the highest partition number found among the
 * existing data files, or 0 when none match.
 */
private int getNextPartition(FileSystem fs, Path targetDir)
    throws IOException {

  int nextPartition = 0;
  FileStatus[] existingFiles = fs.listStatus(targetDir);
  if (existingFiles != null && existingFiles.length > 0) {
    for (FileStatus fileStat : existingFiles) {
      if (!fileStat.isDir()) {
        Matcher mat = DATA_PART_PATTERN.matcher(fileStat.getPath().getName());
        if (mat.matches()) {
          // Next usable partition is one past the highest number seen so far.
          int candidate = Integer.parseInt(mat.group(1)) + 1;
          nextPartition = Math.max(nextPartition, candidate);
        }
      }
    }
  }

  if (nextPartition > 0) {
    LOG.info("Using found partition " + nextPartition);
  }

  return nextPartition;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:32,代码来源:AppendUtils.java

示例9: getPlan

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Handles SHOW FILES: resolves the optional FROM/IN clause to a workspace
 * schema and returns a direct plan listing that workspace's files.
 *
 * @param sqlNode the parsed SHOW FILES statement
 * @return a direct plan producing one ShowFilesCommandResult row per entry
 * @throws ValidationException if the FROM/IN clause or schema is invalid
 */
@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException {

  SqlIdentifier from = ((SqlShowFiles) sqlNode).getDb();

  DrillFileSystem fs = null;
  String defaultLocation = null;
  String fromDir = "./";

  SchemaPlus defaultSchema = context.getNewDefaultSchema();
  SchemaPlus drillSchema = defaultSchema;

  // Show files can be used without from clause, in which case we display the files in the default schema
  if (from != null) {
    // We are not sure if the full from clause is just the schema or includes table name,
    // first try to see if the full path specified is a schema
    drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names);
    if (drillSchema == null) {
      // Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause.
      drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1));
      // The trailing component is treated as a subdirectory under the schema's location.
      fromDir = fromDir + from.names.get((from.names.size() - 1));
    }

    if (drillSchema == null) {
      throw UserException.validationError()
          .message("Invalid FROM/IN clause [%s]", from.toString())
          .build(logger);
    }
  }

  WorkspaceSchema wsSchema;
  try {
     // Only workspace schemas have a filesystem to list; anything else fails the cast below.
     wsSchema = (WorkspaceSchema) drillSchema.unwrap(AbstractSchema.class).getDefaultSchema();
  } catch (ClassCastException e) {
    throw UserException.validationError()
        .message("SHOW FILES is supported in workspace type schema only. Schema [%s] is not a workspace schema.",
            SchemaUtilites.getSchemaPath(drillSchema))
        .build(logger);
  }

  // Get the file system object
  fs = wsSchema.getFS();

  // Get the default path
  defaultLocation = wsSchema.getDefaultLocation();

  List<ShowFilesCommandResult> rows = new ArrayList<>();

  // Non-recursive listing (first arg false) of the resolved directory.
  for (FileStatus fileStatus : fs.list(false, new Path(defaultLocation, fromDir))) {
    // NOTE(review): isDir() is the deprecated HDFS-1.x alias of isDirectory().
    ShowFilesCommandResult result = new ShowFilesCommandResult(fileStatus.getPath().getName(), fileStatus.isDir(),
                                                               !fileStatus.isDir(), fileStatus.getLen(),
                                                               fileStatus.getOwner(), fileStatus.getGroup(),
                                                               fileStatus.getPermission().toString(),
                                                               fileStatus.getAccessTime(), fileStatus.getModificationTime());
    rows.add(result);
  }
  return DirectPlan.createDirectPlan(context.getCurrentEndpoint(), rows.iterator(), ShowFilesCommandResult.class);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:59,代码来源:ShowFileHandler.java

示例10: getFileType

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Determines the file type of p by inspecting its magic number. If p is a
 * directory, the first visible child file (name not starting with "_") is
 * examined in its place.
 *
 * @param conf configuration used to resolve the filesystem for p
 * @param p the file or directory to examine
 * @return the type of the file represented by p (or the files in p, if a
 * directory); FileType.UNKNOWN when p is missing, empty, or has no visible child
 * @throws IOException if the filesystem cannot be queried (FileNotFound is handled)
 */
public static FileType getFileType(Configuration conf, Path p)
    throws IOException {
  FileSystem fs = p.getFileSystem(conf);

  try {
    FileStatus stat = fs.getFileStatus(p);

    if (null == stat) {
      // Couldn't get the item.
      LOG.warn("Input path " + p + " does not exist");
      return FileType.UNKNOWN;
    }

    if (stat.isDir()) {
      FileStatus [] subitems = fs.listStatus(p);
      if (subitems == null || subitems.length == 0) {
        LOG.warn("Input path " + p + " contains no files");
        return FileType.UNKNOWN; // empty dir.
      }

      // Pick a child entry to examine instead.
      boolean foundChild = false;
      for (int i = 0; i < subitems.length; i++) {
        stat = subitems[i];
        // Skip subdirectories and hidden/bookkeeping entries such as _SUCCESS.
        if (!stat.isDir() && !stat.getPath().getName().startsWith("_")) {
          foundChild = true;
          break; // This item is a visible file. Check it.
        }
      }

      if (!foundChild) {
        stat = null; // Couldn't find a reasonable candidate.
      }
    }

    if (null == stat) {
      LOG.warn("null FileStatus object in isSequenceFiles(); "
          + "assuming false.");
      return FileType.UNKNOWN;
    }

    // Classify by reading the file's magic number.
    Path target = stat.getPath();
    return fromMagicNumber(target, conf);
  } catch (FileNotFoundException fnfe) {
    LOG.warn("Input path " + p + " does not exist");
    return FileType.UNKNOWN; // doesn't exist!
  }
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:53,代码来源:ExportJobBase.java

示例11: toResult

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Handles SHOW FILES: resolves the optional FROM/IN clause to a workspace-type
 * schema and returns one result row per file/directory in that workspace.
 *
 * @param sql the original SQL text (unused here, part of the handler contract)
 * @param sqlNode the parsed SHOW FILES statement
 * @return one ShowFilesCommandResult per entry in the resolved directory
 * @throws ValidationException if the FROM/IN clause or schema type is invalid
 */
@Override
public List<ShowFilesCommandResult> toResult(String sql, SqlNode sqlNode) throws ValidationException, RelConversionException,
IOException, ForemanSetupException {

  SqlIdentifier from = ((SqlShowFiles) sqlNode).getDb();
  List<ShowFilesCommandResult> rows = new ArrayList<>();

  FileSystemWrapper fs = null;
  String defaultLocation = null;
  String fromDir = "./";

  SchemaPlus schemaPlus = defaultSchema;

  // Show files can be used without from clause, in which case we display the files in the default schema
  if (from != null) {
    // We are not sure if the full from clause is just the schema or includes table name,
    // first try to see if the full path specified is a schema
    schemaPlus = SchemaUtilities.findSchema(defaultSchema, from.names);
    if (schemaPlus == null) {
      // Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause.
      schemaPlus = SchemaUtilities.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1));
      fromDir = fromDir + from.names.get((from.names.size() - 1));
    }

    if (schemaPlus == null) {
      throw UserException.validationError()
          .message("Invalid FROM/IN clause [%s]", from.toString())
          .build(logger);
    }
  }

  SimpleSchema schema;
  try {
    schema = schemaPlus.unwrap(SimpleSchema.class);
  } catch (ClassCastException e) {
    throw UserException.validationError()
        .message("SHOW FILES is supported in workspace type schema only. Schema [%s] is not a workspace schema.",
            SchemaUtilities.getSchemaPath(schemaPlus))
        .build(logger);
  }

  // Get the file system object
  fs = schema.getFileSystem();

  // Get the default path
  defaultLocation = schema.getDefaultLocation();

  for (FileStatus fileStatus : fs.list(new Path(defaultLocation, fromDir), false)) {
    // Consistently use the non-deprecated isDirectory(); the original mixed the
    // deprecated isDir() and isDirectory() on adjacent arguments.
    ShowFilesCommandResult result = new ShowFilesCommandResult(fileStatus.getPath().getName(), fileStatus.isDirectory(),
        !fileStatus.isDirectory(), fileStatus.getLen(),
        fileStatus.getOwner(), fileStatus.getGroup(),
        fileStatus.getPermission().toString(),
        fileStatus.getAccessTime(), fileStatus.getModificationTime());
    rows.add(result);
  }

  return rows;
}
 
开发者ID:dremio,项目名称:dremio-oss,代码行数:59,代码来源:ShowFileHandler.java

示例12: setPermission

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Set the file permission of the path of the given fileStatus. If the path
 * is a directory, apply permission recursively to all subdirectories and
 * files.
 *
 * @param fs         the filesystem
 * @param fileStatus containing the path
 * @param permission the permission
 * @throws java.io.IOException
 */
private void setPermission(FileSystem fs, FileStatus fileStatus,
                           FsPermission permission) throws IOException {
  Path target = fileStatus.getPath();
  if (fileStatus.isDir()) {
    // Recurse into children first; the directory itself is updated below.
    for (FileStatus child : fs.listStatus(target)) {
      setPermission(fs, child, permission);
    }
  }
  fs.setPermission(target, permission);
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:20,代码来源:HBaseBulkImportJob.java


注:本文中的org.apache.hadoop.fs.FileStatus.isDir方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。