

Java FileStatus.isDirectory Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileStatus.isDirectory. If you have been wondering what FileStatus.isDirectory does, how to call it, or what real-world uses look like, the curated method examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.fs.FileStatus.


Fifteen code examples of the FileStatus.isDirectory method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
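Before diving into the collected examples, here is a minimal, self-contained sketch of FileStatus.isDirectory in action. It is an illustrative assumption rather than code from any of the projects below: the path /tmp/data is a placeholder, and a default-configured Hadoop FileSystem is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsDirectoryDemo {
  public static void main(String[] args) throws Exception {
    // Obtain the filesystem named by fs.defaultFS (the local FS if unconfigured).
    FileSystem fs = FileSystem.get(new Configuration());
    // List the children of a placeholder path and report which are directories.
    for (FileStatus status : fs.listStatus(new Path("/tmp/data"))) {
      if (status.isDirectory()) {
        System.out.println("dir:  " + status.getPath());
      } else {
        System.out.println("file: " + status.getPath());
      }
    }
  }
}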

Example 1: getFileStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Convert the file information in an LsEntry to a {@link FileStatus} object.
 *
 * @param channel open SFTP channel, used to resolve symlinks
 * @param sftpFile directory entry returned by the SFTP listing
 * @param parentPath path of the directory containing the entry
 * @return file status
 * @throws IOException
 */
private FileStatus getFileStatus(ChannelSftp channel, LsEntry sftpFile,
    Path parentPath) throws IOException {

  SftpATTRS attr = sftpFile.getAttrs();
  long length = attr.getSize();
  boolean isDir = attr.isDir();
  boolean isLink = attr.isLink();
  if (isLink) {
    String link = parentPath.toUri().getPath() + "/" + sftpFile.getFilename();
    try {
      link = channel.realpath(link);

      Path linkParent = new Path("/", link);

      FileStatus fstat = getFileStatus(channel, linkParent);
      isDir = fstat.isDirectory();
      length = fstat.getLen();
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
  int blockReplication = 1;
  // Using default block size since there is no way in SFTP channel to know of
  // block sizes on server. The assumption could be less than ideal.
  long blockSize = DEFAULT_BLOCK_SIZE;
  long modTime = attr.getMTime() * 1000; // convert to milliseconds
  long accessTime = 0;
  FsPermission permission = getPermissions(sftpFile);
  // not be able to get the real user group name, just use the user and group
  // id
  String user = Integer.toString(attr.getUId());
  String group = Integer.toString(attr.getGId());
  Path filePath = new Path(parentPath, sftpFile.getFilename());

  return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
      accessTime, permission, user, group, filePath.makeQualified(
          this.getUri(), this.getWorkingDirectory()));
}
 
Developer ID: nucypher; Project: hadoop-oss; Lines of code: 47; Source: SFTPFileSystem.java

Example 2: initFileDirTables

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/** Build one table containing all directories under the specified path, and
 * another table containing all files under that path whose names start
 * with "_file_".
 */
private void initFileDirTables(Path path) throws IOException {
  FileStatus[] stats = fc.util().listStatus(path);

  for (FileStatus stat : stats) {
    if (stat.isDirectory()) {
      dirs.add(stat.getPath().toString());
      initFileDirTables(stat.getPath());
    } else {
      Path filePath = stat.getPath();
      if (filePath.getName().startsWith(StructureGenerator.FILE_NAME_PREFIX)) {
        files.add(filePath.toString());
      }
    }
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 20; Source: LoadGenerator.java

Example 3: populateRecursiveStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void populateRecursiveStatus(FileStatus[] inputPaths, ImmutableList.Builder<FileStatus> outputPaths, boolean recursive, boolean includeHiddenFiles) throws FileNotFoundException, IOException {
  if(inputPaths == null || inputPaths.length == 0) {
    return;
  }

  for(FileStatus input : inputPaths) {
    outputPaths.add(input);
    if(recursive && input.isDirectory()) {
      final FileStatus[] statuses;
      if(includeHiddenFiles) {
        statuses = underlyingFs.listStatus(input.getPath());
      } else {
        statuses = underlyingFs.listStatus(input.getPath(), DefaultPathFilter.INSTANCE);
      }
      populateRecursiveStatus(statuses, outputPaths, recursive, includeHiddenFiles);
    }
  }
}
 
Developer ID: dremio; Project: dremio-oss; Lines of code: 19; Source: FileSystemWrapper.java

Example 4: open

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public FSDataInputStream open(Path file, int bufferSize) throws IOException {
  FTPClient client = connect();
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  FileStatus fileStat = getFileStatus(client, absolute);
  if (fileStat.isDirectory()) {
    disconnect(client);
    throw new FileNotFoundException("Path " + file + " is a directory.");
  }
  client.allocate(bufferSize);
  Path parent = absolute.getParent();
  // Change to the parent directory on the server; only then can we read the
  // file by opening an InputStream. As a side effect, the working directory
  // on the server is changed to the parent directory of the file. The FTP
  // client connection is closed when close() is called on the FSDataInputStream.
  client.changeWorkingDirectory(parent.toUri().getPath());
  InputStream is = client.retrieveFileStream(file.getName());
  FSDataInputStream fis = new FSDataInputStream(new FTPInputStream(is,
      client, statistics));
  if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
    // The ftpClient is in an inconsistent state. We must close the stream,
    // which in turn will log out and disconnect from the FTP server.
    fis.close();
    throw new IOException("Unable to open file: " + file + ", Aborting");
  }
  return fis;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 32; Source: FTPFileSystem.java

Example 5: run

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * The main driver for <code>DumpTypedBytes</code>.
 */
public int run(String[] args) throws Exception {
  if (args.length == 0) {
    System.err.println("Too few arguments!");
    printUsage();
    return 1;
  }
  Path pattern = new Path(args[0]);
  FileSystem fs = pattern.getFileSystem(getConf());
  fs.setVerifyChecksum(true);
  for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
    List<FileStatus> inputFiles = new ArrayList<FileStatus>();
    FileStatus status = fs.getFileStatus(p);
    if (status.isDirectory()) {
      FileStatus[] files = fs.listStatus(p);
      Collections.addAll(inputFiles, files);
    } else {
      inputFiles.add(status);
    }
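    // Note: this return sits inside the loop, so only the first path matched
    // by the glob pattern is dumped.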
    return dumpTypedBytes(inputFiles);
  }
  return -1;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 26; Source: DumpTypedBytes.java

Example 6: getDictionaryVersion

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * @param fs filesystem to scan
 * @param tableDir root of the Parquet table
 * @return the highest dictionary version found, or -1 if no dictionaries are present
 * @throws IOException
 */
public static long getDictionaryVersion(FileSystem fs, Path tableDir) throws IOException {
  final FileStatus[] statuses = fs.listStatus(tableDir, DICTIONARY_ROOT_FILTER);
  long maxVersion = -1;
  for (FileStatus status : statuses) {
    if (status.isDirectory()) {
      Matcher matcher = DICTIONARY_VERSION_PATTERN.matcher(status.getPath().getName());
      if (matcher.find()) {
        try {
          final long version = Long.parseLong(matcher.group(1));
          if (version > maxVersion) {
            maxVersion = version;
          }
        } catch (NumberFormatException nfe) {
          // Ignore directories whose captured version is not a parseable long.
        }
      }
    }
  }
  return maxVersion;
}
 
Developer ID: dremio; Project: dremio-oss; Lines of code: 26; Source: GlobalDictionaryBuilder.java

Example 7: checkAndDeleteEntries

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Loop over the given directory entries and check whether they can be deleted.
 * If an entry is itself a directory, it is recursively checked and is deleted
 * only if all of its sub-entries were deleted (and no new sub-entries were
 * added in the meantime).
 *
 * @param entries directory entries to check
 * @return true if all entries were successfully deleted
 */
private boolean checkAndDeleteEntries(FileStatus[] entries) {
  if (entries == null) {
    return true;
  }
  boolean allEntriesDeleted = true;
  List<FileStatus> files = Lists.newArrayListWithCapacity(entries.length);
  for (FileStatus child : entries) {
    Path path = child.getPath();
    if (child.isDirectory()) {
      // for each subdirectory delete it and all entries if possible
      if (!checkAndDeleteDirectory(path)) {
        allEntriesDeleted = false;
      }
    } else {
      // collect all files to attempt to delete in one batch
      files.add(child);
    }
  }
  if (!checkAndDeleteFiles(files)) {
    allEntriesDeleted = false;
  }
  return allEntriesDeleted;
}
 
Developer ID: fengchen8086; Project: ditb; Lines of code: 32; Source: CleanerChore.java

Example 8: apply

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public PathMetadata apply(@Nonnull Path location) {
  try {
    FileSystem fs = location.getFileSystem(conf);
    FileStatus fileStatus = fs.getFileStatus(location);
    FileChecksum checksum = null;
    if (fileStatus.isFile()) {
      checksum = fs.getFileChecksum(location);
    }

    List<PathMetadata> childPathDescriptors = new ArrayList<>();
    if (fileStatus.isDirectory()) {
      FileStatus[] childStatuses = fs.listStatus(location);
      for (FileStatus childStatus : childStatuses) {
        childPathDescriptors.add(apply(childStatus.getPath()));
      }
    }

    return new PathMetadata(location, fileStatus.getModificationTime(), checksum, childPathDescriptors);

  } catch (IOException e) {
    throw new CircusTrainException("Unable to compute digest for location " + location.toString(), e);
  }
}
 
Developer ID: HotelsDotCom; Project: circus-train; Lines of code: 25; Source: PathToPathMetadata.java

Example 9: checkPublicPermsForAll

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static boolean checkPublicPermsForAll(FileSystem fs, 
      FileStatus status, FsAction dir, FsAction file) 
  throws IOException {
  FsPermission perms = status.getPermission();
  FsAction otherAction = perms.getOtherAction();
  if (status.isDirectory()) {
    if (!otherAction.implies(dir)) {
      return false;
    }
    
    for (FileStatus child : fs.listStatus(status.getPath())) {
      if(!checkPublicPermsForAll(fs, child, dir, file)) {
        return false;
      }
    }
    return true;
  }
  return (otherAction.implies(file));
}
 
Developer ID: naver; Project: hadoop; Lines of code: 20; Source: FSDownload.java

Example 10: applyNewPermission

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Apply the permission change to the specified file and determine what the
 * new mode would be.
 * @param file file against which to apply the mode
 * @return the file's new mode if applied
 */
public short applyNewPermission(FileStatus file) {
  FsPermission perms = file.getPermission();
  int existing = perms.toShort();
  boolean exeOk = file.isDirectory() || (existing & 0111) != 0;
  
  return (short)combineModes(existing, exeOk);
}
 
Developer ID: nucypher; Project: hadoop-oss; Lines of code: 14; Source: ChmodParser.java
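
A hedged usage sketch for this example: assuming the method lives in Hadoop's org.apache.hadoop.fs.permission.ChmodParser, which is constructed from a chmod-style mode string, it could be driven as follows. The mode string, the path, and the fs handle are placeholder assumptions, not part of the original example.

ChmodParser parser = new ChmodParser("u+x,go-w"); // chmod-style spec (placeholder)
FileStatus status = fs.getFileStatus(new Path("/tmp/somefile")); // placeholder path
short newMode = parser.applyNewPermission(status);
// Apply the computed mode back to the file.
fs.setPermission(status.getPath(), new FsPermission(newMode));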

Example 11: isHomogeneous

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Check whether the table contains homogeneous files that Drill can read, e.g. Parquet, JSON, or CSV.
 * However, if it contains more than one of these formats, or a completely different file format
 * that Drill cannot understand, an exception will be raised.
 * @param tableName name of the table to check for the homogeneity property
 * @return true if every file in the table shares a single Drill-readable format
 * @throws IOException
 */
private boolean isHomogeneous(String tableName) throws IOException {
  FileSelection fileSelection = FileSelection.create(fs, config.getLocation(), tableName);

  if (fileSelection == null) {
    throw UserException
        .validationError()
        .message(String.format("Table [%s] not found", tableName))
        .build(logger);
  }

  FormatMatcher matcher = null;
  Queue<FileStatus> listOfFiles = new LinkedList<>();
  listOfFiles.addAll(fileSelection.getFileStatusList(fs));

  while (!listOfFiles.isEmpty()) {
    FileStatus currentFile = listOfFiles.poll();
    if (currentFile.isDirectory()) {
      listOfFiles.addAll(fs.list(true, currentFile.getPath()));
    } else {
      if (matcher != null) {
        if (!matcher.isFileReadable(fs, currentFile)) {
          return false;
        }
      } else {
        matcher = findMatcher(currentFile);
        // Did not match any of the file patterns, exit
        if (matcher == null) {
          return false;
        }
      }
    }
  }
  return true;
}
 
Developer ID: skhalifa; Project: QDrill; Lines of code: 43; Source: WorkspaceSchemaFactory.java

Example 12: addToPartFilesAndIndex

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void addToPartFilesAndIndex(List<FileStatus> inputFileStatuses) throws IOException {
    for (FileStatus current : inputFileStatuses) {
        if (current.isDirectory()) {
            addDirectoryEntryToIndex(current);
            addToPartFilesAndIndex(getChildrenFromDirectory(current));
        } else if (current.isFile() && !index.containsEntryFor(current)) {
            addToPartFile(current);
        }
    }
}
 
Developer ID: trenner; Project: ahar; Lines of code: 11; Source: PartFileManager.java

Example 13: transform

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static FileStatus transform(FileStatus input, String bucket) {
  String relativePath = removeLeadingSlash(Path.getPathWithoutSchemeAndAuthority(input.getPath()).toString());
  Path bucketPath = new Path(Path.SEPARATOR + bucket);
  Path fullPath = Strings.isEmpty(relativePath) ? bucketPath : new Path(bucketPath, relativePath);
  return new FileStatus(input.getLen(),
          input.isDirectory(),
          input.getReplication(),
          input.getBlockSize(),
          input.getModificationTime(),
          input.getAccessTime(),
          input.getPermission(),
          input.getOwner(),
          input.getGroup(),
          fullPath);
}
 
Developer ID: dremio; Project: dremio-oss; Lines of code: 16; Source: S3FileSystem.java

Example 14: traverseImpl

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static ArrayList<FileStatus> traverseImpl(FileSystem fs, Path path) throws IOException {
  if (!fs.exists(path)) {
    return new ArrayList<>();
  }
  ArrayList<FileStatus> result = new ArrayList<>();
  FileStatus[] statuses = fs.listStatus(path);
  for (FileStatus status : statuses) {
    if (status.isDirectory()) {
      result.addAll(traverseImpl(fs, status.getPath()));
    } else {
      result.add(status);
    }
  }
  return result;
}
 
Developer ID: jiangxiluning; Project: kafka-connect-hdfs; Lines of code: 16; Source: FileUtils.java

Example 15: singleThreadedListStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private List<FileStatus> singleThreadedListStatus(JobContext job, Path[] dirs,
    PathFilter inputFilter, boolean recursive) throws IOException {
  List<FileStatus> result = new ArrayList<FileStatus>();
  List<IOException> errors = new ArrayList<IOException>();
  for (int i=0; i < dirs.length; ++i) {
    Path p = dirs[i];
    FileSystem fs = p.getFileSystem(job.getConfiguration()); 
    FileStatus[] matches = fs.globStatus(p, inputFilter);
    if (matches == null) {
      errors.add(new IOException("Input path does not exist: " + p));
    } else if (matches.length == 0) {
      errors.add(new IOException("Input Pattern " + p + " matches 0 files"));
    } else {
      for (FileStatus globStat: matches) {
        if (globStat.isDirectory()) {
          RemoteIterator<LocatedFileStatus> iter =
              fs.listLocatedStatus(globStat.getPath());
          while (iter.hasNext()) {
            LocatedFileStatus stat = iter.next();
            if (inputFilter.accept(stat.getPath())) {
              if (recursive && stat.isDirectory()) {
                addInputPathRecursively(result, fs, stat.getPath(),
                    inputFilter);
              } else {
                result.add(stat);
              }
            }
          }
        } else {
          result.add(globStat);
        }
      }
    }
  }

  if (!errors.isEmpty()) {
    throw new InvalidInputException(errors);
  }
  return result;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 41; Source: FileInputFormat.java


Note: The org.apache.hadoop.fs.FileStatus.isDirectory method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms; the snippets were selected from open-source projects contributed by many developers. Copyright of the source code belongs to the original authors; consult the corresponding project's License before distributing or using it. Please do not reproduce this article without permission.