當前位置: 首頁>>代碼示例>>Java>>正文


Java FileStatus.isDir方法代碼示例

本文整理匯總了Java中org.apache.hadoop.fs.FileStatus.isDir方法的典型用法代碼示例。如果您正苦於以下問題:Java FileStatus.isDir方法的具體用法?Java FileStatus.isDir怎麽用?Java FileStatus.isDir使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在org.apache.hadoop.fs.FileStatus的用法示例。


在下文中一共展示了FileStatus.isDir方法的12個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: ParquetMetadataStat

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * Gathers Parquet metadata for every regular file directly under the given
 * HDFS directory and caches the schema of the first file found.
 *
 * @param nameNode the hostname of hdfs namenode
 * @param hdfsPort the port of hdfs namenode, usually 9000 or 8020
 * @param dirPath the path of the directory which contains the parquet files, begin with /, for gen /msra/column/order/parquet/
 * @throws IOException
 * @throws MetadataException
 */
public ParquetMetadataStat(String nameNode, int hdfsPort, String dirPath) throws IOException, MetadataException
{
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://" + nameNode + ":" + hdfsPort), conf);
    Path dir = new Path(dirPath);
    if (! fs.isFile(dir))
    {
        // isDir() rather than isDirectory(): compatibility for HDFS 1.x
        for (FileStatus status : fs.listStatus(dir))
        {
            if (! status.isDir())
            {
                this.fileMetaDataList.add(new ParquetFileMetadata(conf, status.getPath()));
            }
        }
    }
    if (this.fileMetaDataList.isEmpty())
    {
        throw new MetadataException("fileMetaDataList is empty, path is not a dir.");
    }
    // All files are assumed to share the schema of the first one.
    this.fields = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFields();
    this.columnCount = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFieldCount();
}
 
開發者ID:dbiir,項目名稱:rainbow,代碼行數:34,代碼來源:ParquetMetadataStat.java

示例2: listFiles

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/** @return FileStatus for data files only. */
private FileStatus[] listFiles(FileSystem fs, Path path) throws IOException {
  FileStatus[] fileStatuses = fs.listStatus(path);
  // Typed list instead of a raw ArrayList: removes the unchecked cast on the
  // return and lets the compiler verify element types.
  ArrayList<FileStatus> files = new ArrayList<FileStatus>();
  // Matches MapReduce-style output names such as "part-00000".
  Pattern patt = Pattern.compile("part.*-([0-9][0-9][0-9][0-9][0-9]).*");
  for (FileStatus fstat : fileStatuses) {
    // isDir() retained for compatibility with Hadoop 1.x.
    if (!fstat.isDir() && patt.matcher(fstat.getPath().getName()).matches()) {
      files.add(fstat);
    }
  }
  // Zero-length array argument: toArray allocates the right size itself.
  return files.toArray(new FileStatus[0]);
}
 
開發者ID:aliyun,項目名稱:aliyun-maxcompute-data-collectors,代碼行數:17,代碼來源:TestAppendUtils.java

示例3: ensureEmptyWriteDir

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/** Create the directory where we'll write our test files to; and
 * make sure it has no files in it.
 */
private void ensureEmptyWriteDir() throws IOException {
  FileSystem localFs = FileSystem.getLocal(getConf());
  Path writeDir = getWritePath();

  localFs.mkdirs(writeDir);

  // Remove any leftovers from a previous run; a subdirectory is unexpected
  // and fails the setup outright.
  for (FileStatus entry : localFs.listStatus(writeDir)) {
    if (entry.isDir()) {
      fail("setUp(): Write directory " + writeDir
          + " contains subdirectories");
    }

    LOG.debug("setUp(): Removing " + entry.getPath());
    if (!localFs.delete(entry.getPath(), false)) {
      fail("setUp(): Could not delete residual file " + entry.getPath());
    }
  }

  if (!localFs.exists(writeDir)) {
    fail("setUp: Could not create " + writeDir);
  }
}
 
開發者ID:aliyun,項目名稱:aliyun-maxcompute-data-collectors,代碼行數:28,代碼來源:TestSplittableBufferedWriter.java

示例4: getFileBlockLocations

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * Delegates to the superclass, then rewrites the first block location of the
 * file named by {@code fileWithMissingBlocks} so it reports no hosts
 * (simulating missing block replicas for the test).
 */
@Override
public BlockLocation[] getFileBlockLocations(
    FileStatus stat, long start, long len) throws IOException {
  // Directories have no block locations.
  if (stat.isDir()) {
    return null;
  }
  System.out.println("File " + stat.getPath());
  String name = stat.getPath().toUri().getPath();
  BlockLocation[] result = super.getFileBlockLocations(stat, start, len);
  if (name.equals(fileWithMissingBlocks)) {
    System.out.println("Returning missing blocks for " + fileWithMissingBlocks);
    BlockLocation first = result[0];
    // Same offset/length, but empty host and name arrays.
    result[0] = new HdfsBlockLocation(new BlockLocation(new String[0],
        new String[0], first.getOffset(), first.getLength()), null);
  }
  return result;
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:18,代碼來源:TestCombineFileInputFormat.java

示例5: getStoreFiles

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * Returns all files belonging to the given region directory. Could return an
 * empty list.
 *
 * @param fs  The file system reference.
 * @param regionDir  The region directory to scan.
 * @return The list of files found.
 * @throws IOException When scanning the files fails.
 */
static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
throws IOException {
  List<Path> storeFiles = new ArrayList<Path>();
  // Only descend into column-family subdirectories of the region directory.
  PathFilter familiesOnly = new FSUtils.DirFilter(fs);
  for (FileStatus family : fs.listStatus(regionDir, familiesOnly)) {
    for (FileStatus candidate : fs.listStatus(family.getPath())) {
      // isDir() kept for compatibility with older Hadoop releases.
      if (!candidate.isDir()) {
        storeFiles.add(candidate.getPath());
      }
    }
  }
  return storeFiles;
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:25,代碼來源:IndexFile.java

示例6: create

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * Builds a DotDrillFile for the given file status.
 *
 * @param fs the file system the status came from
 * @param status the file to classify
 * @return a DotDrillFile of the first matching type, or {@code null} when the
 *         status is a directory or matches no known dot-drill type
 */
public static DotDrillFile create(DrillFileSystem fs, FileStatus status){
  // A directory can never be a dot-drill file; check once up front instead of
  // re-evaluating the loop-invariant status.isDir() on every iteration.
  if (status.isDir()) {
    return null;
  }
  for(DotDrillType d : DotDrillType.values()){
    if(d.matches(status)){
      return new DotDrillFile(fs, status, d);
    }
  }
  return null;
}
 
開發者ID:skhalifa,項目名稱:QDrill,代碼行數:9,代碼來源:DotDrillFile.java

示例7: addRecursiveStatus

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * Expands the given status into {@code listToFill}: a plain file is added
 * directly; a directory is walked recursively through a glob listing filtered
 * by DrillPathFilter.
 */
private void addRecursiveStatus(FileStatus parent, List<FileStatus> listToFill) throws IOException {
  // Guard clause: non-directories are leaves.
  if (!parent.isDir()) {
    listToFill.add(parent);
    return;
  }
  Path childPattern = new Path(parent.getPath(), "*");
  for (FileStatus child : underlyingFs.globStatus(childPattern, new DrillPathFilter())) {
    if (child.isDir()) {
      addRecursiveStatus(child, listToFill);
    } else {
      listToFill.add(child);
    }
  }
}
 
開發者ID:skhalifa,項目名稱:QDrill,代碼行數:16,代碼來源:DrillFileSystem.java

示例8: getNextPartition

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * Returns the greatest partition number available for appending, for data
 * files in targetDir.
 */
private int getNextPartition(FileSystem fs, Path targetDir)
    throws IOException {

  int nextPartition = 0;
  FileStatus[] existing = fs.listStatus(targetDir);
  if (existing != null && existing.length > 0) {
    for (FileStatus candidate : existing) {
      // Skip directories; only data files carry partition numbers.
      if (candidate.isDir()) {
        continue;
      }
      Matcher mat = DATA_PART_PATTERN.matcher(candidate.getPath().getName());
      if (!mat.matches()) {
        continue;
      }
      int part = Integer.parseInt(mat.group(1));
      if (part >= nextPartition) {
        // One past the highest partition number seen so far.
        nextPartition = part + 1;
      }
    }
  }

  if (nextPartition > 0) {
    LOG.info("Using found partition " + nextPartition);
  }

  return nextPartition;
}
 
開發者ID:aliyun,項目名稱:aliyun-maxcompute-data-collectors,代碼行數:32,代碼來源:AppendUtils.java

示例9: getPlan

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * Implements SHOW FILES: resolves the optional FROM/IN clause to a workspace
 * schema, lists the target directory, and returns a direct plan over the
 * resulting {@link ShowFilesCommandResult} rows.
 *
 * @param sqlNode expected to be a {@code SqlShowFiles} node
 * @throws UserException (validation) when the FROM clause resolves to no
 *         schema or the schema is not a workspace schema
 */
@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException {

  SqlIdentifier from = ((SqlShowFiles) sqlNode).getDb();

  DrillFileSystem fs = null;
  String defaultLocation = null;
  // Relative directory under the schema's default location; "./" means the
  // schema root itself.
  String fromDir = "./";

  SchemaPlus defaultSchema = context.getNewDefaultSchema();
  SchemaPlus drillSchema = defaultSchema;

  // Show files can be used without from clause, in which case we display the files in the default schema
  if (from != null) {
    // We are not sure if the full from clause is just the schema or includes table name,
    // first try to see if the full path specified is a schema
    drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names);
    if (drillSchema == null) {
      // Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause.
      drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1));
      // The trailing name component is then treated as a subdirectory name.
      fromDir = fromDir + from.names.get((from.names.size() - 1));
    }

    if (drillSchema == null) {
      throw UserException.validationError()
          .message("Invalid FROM/IN clause [%s]", from.toString())
          .build(logger);
    }
  }

  WorkspaceSchema wsSchema;
  try {
     wsSchema = (WorkspaceSchema) drillSchema.unwrap(AbstractSchema.class).getDefaultSchema();
  } catch (ClassCastException e) {
    // Non-workspace schemas (e.g. information_schema) cannot be listed.
    throw UserException.validationError()
        .message("SHOW FILES is supported in workspace type schema only. Schema [%s] is not a workspace schema.",
            SchemaUtilites.getSchemaPath(drillSchema))
        .build(logger);
  }

  // Get the file system object
  fs = wsSchema.getFS();

  // Get the default path
  defaultLocation = wsSchema.getDefaultLocation();

  List<ShowFilesCommandResult> rows = new ArrayList<>();

  // One result row per directory entry; isDir()/!isDir() populate the
  // isDirectory/isFile columns of the result.
  for (FileStatus fileStatus : fs.list(false, new Path(defaultLocation, fromDir))) {
    ShowFilesCommandResult result = new ShowFilesCommandResult(fileStatus.getPath().getName(), fileStatus.isDir(),
                                                               !fileStatus.isDir(), fileStatus.getLen(),
                                                               fileStatus.getOwner(), fileStatus.getGroup(),
                                                               fileStatus.getPermission().toString(),
                                                               fileStatus.getAccessTime(), fileStatus.getModificationTime());
    rows.add(result);
  }
  return DirectPlan.createDirectPlan(context.getCurrentEndpoint(), rows.iterator(), ShowFilesCommandResult.class);
}
 
開發者ID:skhalifa,項目名稱:QDrill,代碼行數:59,代碼來源:ShowFileHandler.java

示例10: getFileType

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * @return the type of the file represented by p (or the files in p, if a
 * directory)
 *
 * For a directory, the type is determined from the first visible child file
 * (names starting with "_" are skipped); returns FileType.UNKNOWN when the
 * path is missing, the directory is empty, or no visible child exists.
 */
public static FileType getFileType(Configuration conf, Path p)
    throws IOException {
  FileSystem fs = p.getFileSystem(conf);

  try {
    FileStatus stat = fs.getFileStatus(p);

    if (null == stat) {
      // Couldn't get the item.
      LOG.warn("Input path " + p + " does not exist");
      return FileType.UNKNOWN;
    }

    if (stat.isDir()) {
      FileStatus [] subitems = fs.listStatus(p);
      if (subitems == null || subitems.length == 0) {
        LOG.warn("Input path " + p + " contains no files");
        return FileType.UNKNOWN; // empty dir.
      }

      // Pick a child entry to examine instead.
      boolean foundChild = false;
      for (int i = 0; i < subitems.length; i++) {
        stat = subitems[i];
        // Skip subdirectories and hidden bookkeeping files such as _SUCCESS.
        if (!stat.isDir() && !stat.getPath().getName().startsWith("_")) {
          foundChild = true;
          break; // This item is a visible file. Check it.
        }
      }

      if (!foundChild) {
        stat = null; // Couldn't find a reasonable candidate.
      }
    }

    if (null == stat) {
      LOG.warn("null FileStatus object in isSequenceFiles(); "
          + "assuming false.");
      return FileType.UNKNOWN;
    }

    // Classify by reading the file's magic number.
    Path target = stat.getPath();
    return fromMagicNumber(target, conf);
  } catch (FileNotFoundException fnfe) {
    LOG.warn("Input path " + p + " does not exist");
    return FileType.UNKNOWN; // doesn't exist!
  }
}
 
開發者ID:aliyun,項目名稱:aliyun-maxcompute-data-collectors,代碼行數:53,代碼來源:ExportJobBase.java

示例11: toResult

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * Implements SHOW FILES: resolves the optional FROM/IN clause to a workspace
 * schema, lists the target directory, and returns one
 * {@link ShowFilesCommandResult} row per entry.
 *
 * @param sqlNode expected to be a {@code SqlShowFiles} node
 * @throws UserException (validation) when the FROM clause resolves to no
 *         schema or the schema is not a workspace schema
 */
@Override
public List<ShowFilesCommandResult> toResult(String sql, SqlNode sqlNode) throws ValidationException, RelConversionException,
IOException, ForemanSetupException {

  SqlIdentifier from = ((SqlShowFiles) sqlNode).getDb();
  List<ShowFilesCommandResult> rows = new ArrayList<>();

  FileSystemWrapper fs = null;
  String defaultLocation = null;
  // Relative directory under the schema's default location; "./" is the root.
  String fromDir = "./";

  SchemaPlus schemaPlus = defaultSchema;

  // Show files can be used without from clause, in which case we display the files in the default schema
  if (from != null) {
    // We are not sure if the full from clause is just the schema or includes table name,
    // first try to see if the full path specified is a schema
    schemaPlus = SchemaUtilities.findSchema(defaultSchema, from.names);
    if (schemaPlus == null) {
      // Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause.
      schemaPlus = SchemaUtilities.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1));
      // The trailing name component is then treated as a subdirectory name.
      fromDir = fromDir + from.names.get((from.names.size() - 1));
    }

    if (schemaPlus == null) {
      throw UserException.validationError()
          .message("Invalid FROM/IN clause [%s]", from.toString())
          .build(logger);
    }
  }

  SimpleSchema schema;
  try {
    schema = schemaPlus.unwrap(SimpleSchema.class);
  } catch (ClassCastException e) {
    // Non-workspace schemas (e.g. information_schema) cannot be listed.
    throw UserException.validationError()
        .message("SHOW FILES is supported in workspace type schema only. Schema [%s] is not a workspace schema.",
            SchemaUtilities.getSchemaPath(schemaPlus))
        .build(logger);
  }

  // Get the file system object
  fs = schema.getFileSystem();

  // Get the default path
  defaultLocation = schema.getDefaultLocation();

  for (FileStatus fileStatus : fs.list(new Path(defaultLocation, fromDir), false)) {
    // Use isDirectory() consistently (the original mixed deprecated isDir()
    // with isDirectory() for the two flags of the same row).
    ShowFilesCommandResult result = new ShowFilesCommandResult(fileStatus.getPath().getName(), fileStatus.isDirectory(),
        !fileStatus.isDirectory(), fileStatus.getLen(),
        fileStatus.getOwner(), fileStatus.getGroup(),
        fileStatus.getPermission().toString(),
        fileStatus.getAccessTime(), fileStatus.getModificationTime());
    rows.add(result);
  }

  return rows;
}
 
開發者ID:dremio,項目名稱:dremio-oss,代碼行數:59,代碼來源:ShowFileHandler.java

示例12: setPermission

import org.apache.hadoop.fs.FileStatus; //導入方法依賴的package包/類
/**
 * Set the file permission of the path of the given fileStatus. If the path
 * is a directory, apply permission recursively to all subdirectories and
 * files.
 *
 * @param fs         the filesystem
 * @param fileStatus containing the path
 * @param permission the permission
 * @throws java.io.IOException
 */
private void setPermission(FileSystem fs, FileStatus fileStatus,
                           FsPermission permission) throws IOException {
  // Depth-first: handle children before the entry itself.
  if (fileStatus.isDir()) {
    FileStatus[] children = fs.listStatus(fileStatus.getPath());
    for (FileStatus child : children) {
      setPermission(fs, child, permission);
    }
  }
  fs.setPermission(fileStatus.getPath(), permission);
}
 
開發者ID:aliyun,項目名稱:aliyun-maxcompute-data-collectors,代碼行數:20,代碼來源:HBaseBulkImportJob.java


注:本文中的org.apache.hadoop.fs.FileStatus.isDir方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。