Java FileStatus.isDirectory Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileStatus.isDirectory. If you are unsure what FileStatus.isDirectory does, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileStatus.


The following 15 code examples of the FileStatus.isDirectory method are drawn from open-source projects and are sorted by popularity.
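Before the examples, here is a minimal sketch of the typical call pattern. The path /tmp/data and the default Configuration are illustrative assumptions, not part of any example below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsDirectoryDemo {
  public static void main(String[] args) throws Exception {
    // Obtain the FileSystem configured for the default scheme.
    FileSystem fs = FileSystem.get(new Configuration());
    // List the children of a placeholder path and branch on isDirectory().
    for (FileStatus status : fs.listStatus(new Path("/tmp/data"))) {
      if (status.isDirectory()) {
        System.out.println("dir:  " + status.getPath());
      } else {
        System.out.println("file: " + status.getPath());
      }
    }
  }
}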

Example 1: getFileStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Convert the file information in an LsEntry to a {@link FileStatus} object.
 *
 * @param channel the open SFTP channel
 * @param sftpFile the listing entry to convert
 * @param parentPath the parent directory of the entry
 * @return file status
 * @throws IOException
 */
private FileStatus getFileStatus(ChannelSftp channel, LsEntry sftpFile,
    Path parentPath) throws IOException {

  SftpATTRS attr = sftpFile.getAttrs();
  long length = attr.getSize();
  boolean isDir = attr.isDir();
  boolean isLink = attr.isLink();
  if (isLink) {
    String link = parentPath.toUri().getPath() + "/" + sftpFile.getFilename();
    try {
      link = channel.realpath(link);

      Path linkParent = new Path("/", link);

      FileStatus fstat = getFileStatus(channel, linkParent);
      isDir = fstat.isDirectory();
      length = fstat.getLen();
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
  int blockReplication = 1;
  // Using default block size since there is no way in SFTP channel to know of
  // block sizes on server. The assumption could be less than ideal.
  long blockSize = DEFAULT_BLOCK_SIZE;
  long modTime = attr.getMTime() * 1000; // convert to milliseconds
  long accessTime = 0;
  FsPermission permission = getPermissions(sftpFile);
  // We cannot get the real user/group names over SFTP, so use the numeric
  // user and group IDs
  String user = Integer.toString(attr.getUId());
  String group = Integer.toString(attr.getGId());
  Path filePath = new Path(parentPath, sftpFile.getFilename());

  return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
      accessTime, permission, user, group, filePath.makeQualified(
          this.getUri(), this.getWorkingDirectory()));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 47, Source: SFTPFileSystem.java

Example 2: initFileDirTables

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/** Create a table that contains all directories under the specified path and
 * another table that contains all files under the specified path and
 * whose name starts with "_file_".
 */
private void initFileDirTables(Path path) throws IOException {
  FileStatus[] stats = fc.util().listStatus(path);

  for (FileStatus stat : stats) {
    if (stat.isDirectory()) {
      dirs.add(stat.getPath().toString());
      initFileDirTables(stat.getPath());
    } else {
      Path filePath = stat.getPath();
      if (filePath.getName().startsWith(StructureGenerator.FILE_NAME_PREFIX)) {
        files.add(filePath.toString());
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: LoadGenerator.java

Example 3: populateRecursiveStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void populateRecursiveStatus(FileStatus[] inputPaths, ImmutableList.Builder<FileStatus> outputPaths, boolean recursive, boolean includeHiddenFiles) throws FileNotFoundException, IOException {
  if(inputPaths == null || inputPaths.length == 0) {
    return;
  }

  for(FileStatus input : inputPaths) {
    outputPaths.add(input);
    if(recursive && input.isDirectory()) {
      final FileStatus[] statuses;
      if(includeHiddenFiles) {
        statuses = underlyingFs.listStatus(input.getPath());
      } else {
        statuses = underlyingFs.listStatus(input.getPath(), DefaultPathFilter.INSTANCE);
      }
      populateRecursiveStatus(statuses, outputPaths, recursive, includeHiddenFiles);
    }
  }
}
 
Developer: dremio, Project: dremio-oss, Lines: 19, Source: FileSystemWrapper.java

Example 4: open

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public FSDataInputStream open(Path file, int bufferSize) throws IOException {
  FTPClient client = connect();
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  FileStatus fileStat = getFileStatus(client, absolute);
  if (fileStat.isDirectory()) {
    disconnect(client);
    throw new FileNotFoundException("Path " + file + " is a directory.");
  }
  client.allocate(bufferSize);
  Path parent = absolute.getParent();
  // Change to the parent directory on the server; only then can we read the
  // file by opening an InputStream. As a side effect, the working directory
  // on the server is changed to the parent directory of the file. The FTP
  // client connection is closed when close() is called on the
  // FSDataInputStream.
  client.changeWorkingDirectory(parent.toUri().getPath());
  InputStream is = client.retrieveFileStream(file.getName());
  FSDataInputStream fis = new FSDataInputStream(new FTPInputStream(is,
      client, statistics));
  if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
    // The ftpClient is in an inconsistent state. Must close the stream,
    // which in turn will log out and disconnect from the FTP server.
    fis.close();
    throw new IOException("Unable to open file: " + file + ", Aborting");
  }
  return fis;
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: FTPFileSystem.java

Example 5: run

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * The main driver for <code>DumpTypedBytes</code>.
 */
public int run(String[] args) throws Exception {
  if (args.length == 0) {
    System.err.println("Too few arguments!");
    printUsage();
    return 1;
  }
  Path pattern = new Path(args[0]);
  FileSystem fs = pattern.getFileSystem(getConf());
  fs.setVerifyChecksum(true);
  for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
    List<FileStatus> inputFiles = new ArrayList<FileStatus>();
    FileStatus status = fs.getFileStatus(p);
    if (status.isDirectory()) {
      FileStatus[] files = fs.listStatus(p);
      Collections.addAll(inputFiles, files);
    } else {
      inputFiles.add(status);
    }
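    // Note: this returns inside the for loop, so only the first matched path is dumped.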
    return dumpTypedBytes(inputFiles);
  }
  return -1;
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: DumpTypedBytes.java

Example 6: getDictionaryVersion

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * @param fs Filesystem
 * @param tableDir root of parquet table
 * @return the highest dictionary version found, -1 if no dictionaries are present
 * @throws IOException
 */
public static long getDictionaryVersion(FileSystem fs, Path tableDir) throws IOException {
  final FileStatus[] statuses = fs.listStatus(tableDir, DICTIONARY_ROOT_FILTER);
  long maxVersion = -1;
  for (FileStatus status : statuses) {
    if (status.isDirectory()) {
      Matcher matcher = DICTIONARY_VERSION_PATTERN.matcher(status.getPath().getName());
      if (matcher.find()) {
        try {
          final long version = Long.parseLong(matcher.group(1));
          if (version > maxVersion) {
            maxVersion = version;
          }
        } catch (NumberFormatException nfe) {
          // ignore directory names that do not parse as a version number
        }
      }
    }
  }
  return maxVersion;
}
 
Developer: dremio, Project: dremio-oss, Lines: 26, Source: GlobalDictionaryBuilder.java

Example 7: checkAndDeleteEntries

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Loop over the given directory entries and check whether they can be deleted.
 * If an entry is itself a directory, it is recursively checked and deleted
 * iff all of its subentries are deleted (and no new subentries are added in
 * the meantime).
 *
 * @param entries directory entries to check
 * @return true if all entries were successfully deleted
 */
private boolean checkAndDeleteEntries(FileStatus[] entries) {
  if (entries == null) {
    return true;
  }
  boolean allEntriesDeleted = true;
  List<FileStatus> files = Lists.newArrayListWithCapacity(entries.length);
  for (FileStatus child : entries) {
    Path path = child.getPath();
    if (child.isDirectory()) {
      // for each subdirectory delete it and all entries if possible
      if (!checkAndDeleteDirectory(path)) {
        allEntriesDeleted = false;
      }
    } else {
      // collect all files to attempt to delete in one batch
      files.add(child);
    }
  }
  if (!checkAndDeleteFiles(files)) {
    allEntriesDeleted = false;
  }
  return allEntriesDeleted;
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: CleanerChore.java

Example 8: apply

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public PathMetadata apply(@Nonnull Path location) {
  try {
    FileSystem fs = location.getFileSystem(conf);
    FileStatus fileStatus = fs.getFileStatus(location);
    FileChecksum checksum = null;
    if (fileStatus.isFile()) {
      checksum = fs.getFileChecksum(location);
    }

    List<PathMetadata> childPathDescriptors = new ArrayList<>();
    if (fileStatus.isDirectory()) {
      FileStatus[] childStatuses = fs.listStatus(location);
      for (FileStatus childStatus : childStatuses) {
        childPathDescriptors.add(apply(childStatus.getPath()));
      }
    }

    return new PathMetadata(location, fileStatus.getModificationTime(), checksum, childPathDescriptors);

  } catch (IOException e) {
    throw new CircusTrainException("Unable to compute digest for location " + location.toString(), e);
  }
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 25, Source: PathToPathMetadata.java

Example 9: checkPublicPermsForAll

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static boolean checkPublicPermsForAll(FileSystem fs, 
      FileStatus status, FsAction dir, FsAction file) 
  throws IOException {
  FsPermission perms = status.getPermission();
  FsAction otherAction = perms.getOtherAction();
  if (status.isDirectory()) {
    if (!otherAction.implies(dir)) {
      return false;
    }
    
    for (FileStatus child : fs.listStatus(status.getPath())) {
      if(!checkPublicPermsForAll(fs, child, dir, file)) {
        return false;
      }
    }
    return true;
  }
  return (otherAction.implies(file));
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: FSDownload.java

Example 10: applyNewPermission

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Apply the permission change against the specified file and determine
 * what the new mode would be.
 * @param file file against which to apply the mode
 * @return the file's new mode if applied
 */
public short applyNewPermission(FileStatus file) {
  FsPermission perms = file.getPermission();
  int existing = perms.toShort();
  boolean exeOk = file.isDirectory() || (existing & 0111) != 0;
  
  return (short)combineModes(existing, exeOk);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 14, Source: ChmodParser.java

Example 11: isHomogeneous

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Check whether the table contains homogeneous files that Drill can read,
 * e.g. Parquet, JSON, or CSV. Returns false if the table mixes these formats
 * or contains a format Drill cannot understand; throws if the table is not
 * found.
 * @param tableName name of the table to check for the homogeneity property
 * @return true if all files in the table share a single readable format
 * @throws IOException
 */
private boolean isHomogeneous(String tableName) throws IOException {
  FileSelection fileSelection = FileSelection.create(fs, config.getLocation(), tableName);

  if (fileSelection == null) {
    throw UserException
        .validationError()
        .message(String.format("Table [%s] not found", tableName))
        .build(logger);
  }

  FormatMatcher matcher = null;
  Queue<FileStatus> listOfFiles = new LinkedList<>();
  listOfFiles.addAll(fileSelection.getFileStatusList(fs));

  while (!listOfFiles.isEmpty()) {
    FileStatus currentFile = listOfFiles.poll();
    if (currentFile.isDirectory()) {
      listOfFiles.addAll(fs.list(true, currentFile.getPath()));
    } else {
      if (matcher != null) {
        if (!matcher.isFileReadable(fs, currentFile)) {
          return false;
        }
      } else {
        matcher = findMatcher(currentFile);
        // Did not match any of the file patterns, exit
        if (matcher == null) {
          return false;
        }
      }
    }
  }
  return true;
}
 
Developer: skhalifa, Project: QDrill, Lines: 43, Source: WorkspaceSchemaFactory.java

Example 12: addToPartFilesAndIndex

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void addToPartFilesAndIndex(List<FileStatus> inputFileStatuses) throws IOException {
    for (FileStatus current : inputFileStatuses) {
        if (current.isDirectory()) {
            addDirectoryEntryToIndex(current);
            addToPartFilesAndIndex(getChildrenFromDirectory(current));
        } else if (current.isFile() && !index.containsEntryFor(current)) {
            addToPartFile(current);
        }
    }
}
 
Developer: trenner, Project: ahar, Lines: 11, Source: PartFileManager.java

Example 13: transform

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static FileStatus transform(FileStatus input, String bucket) {
  String relativePath = removeLeadingSlash(Path.getPathWithoutSchemeAndAuthority(input.getPath()).toString());
  Path bucketPath  = new Path(Path.SEPARATOR + bucket);
  Path fullPath = Strings.isEmpty(relativePath) ? bucketPath : new Path(bucketPath, relativePath);
  return new FileStatus(input.getLen(),
          input.isDirectory(),
          input.getReplication(),
          input.getBlockSize(),
          input.getModificationTime(),
          input.getAccessTime(),
          input.getPermission(),
          input.getOwner(),
          input.getGroup(),
          fullPath);
}
 
Developer: dremio, Project: dremio-oss, Lines: 16, Source: S3FileSystem.java

Example 14: traverseImpl

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static ArrayList<FileStatus> traverseImpl(FileSystem fs, Path path) throws IOException {
  if (!fs.exists(path)) {
    return new ArrayList<>();
  }
  ArrayList<FileStatus> result = new ArrayList<>();
  FileStatus[] statuses = fs.listStatus(path);
  for (FileStatus status : statuses) {
    if (status.isDirectory()) {
      result.addAll(traverseImpl(fs, status.getPath()));
    } else {
      result.add(status);
    }
  }
  return result;
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 16, Source: FileUtils.java

Example 15: singleThreadedListStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private List<FileStatus> singleThreadedListStatus(JobContext job, Path[] dirs,
    PathFilter inputFilter, boolean recursive) throws IOException {
  List<FileStatus> result = new ArrayList<FileStatus>();
  List<IOException> errors = new ArrayList<IOException>();
  for (int i=0; i < dirs.length; ++i) {
    Path p = dirs[i];
    FileSystem fs = p.getFileSystem(job.getConfiguration()); 
    FileStatus[] matches = fs.globStatus(p, inputFilter);
    if (matches == null) {
      errors.add(new IOException("Input path does not exist: " + p));
    } else if (matches.length == 0) {
      errors.add(new IOException("Input Pattern " + p + " matches 0 files"));
    } else {
      for (FileStatus globStat: matches) {
        if (globStat.isDirectory()) {
          RemoteIterator<LocatedFileStatus> iter =
              fs.listLocatedStatus(globStat.getPath());
          while (iter.hasNext()) {
            LocatedFileStatus stat = iter.next();
            if (inputFilter.accept(stat.getPath())) {
              if (recursive && stat.isDirectory()) {
                addInputPathRecursively(result, fs, stat.getPath(),
                    inputFilter);
              } else {
                result.add(stat);
              }
            }
          }
        } else {
          result.add(globStat);
        }
      }
    }
  }

  if (!errors.isEmpty()) {
    throw new InvalidInputException(errors);
  }
  return result;
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: FileInputFormat.java
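One closing observation: in Hadoop 2.x and later, the older FileStatus.isDir() accessor is deprecated in favor of the more explicit isFile(), isDirectory(), and isSymlink() methods, which is why every example above calls isDirectory(). A minimal sketch of the preferred three-way check, where fs and path stand in for any FileSystem and Path as in the examples above:

FileStatus status = fs.getFileStatus(path);
if (status.isDirectory()) {
  // handle a directory
} else if (status.isSymlink()) {
  // handle a symbolic link
} else if (status.isFile()) {
  // handle a regular file
}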


Note: The org.apache.hadoop.fs.FileStatus.isDirectory method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; the copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.