Java FileStatus.isSymlink Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.fs.FileStatus.isSymlink. If you are wondering how FileStatus.isSymlink is used in practice, or looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.FileStatus.


The following presents 7 code examples of the FileStatus.isSymlink method, sorted by popularity by default.
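
Before the examples, here is a minimal sketch of the basic pattern: obtain a FileStatus with getFileLinkStatus, which does not resolve symlinks, and branch on isSymlink(). The path /tmp/example and the default-configured FileSystem are illustrative assumptions, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsSymlinkSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical path; replace with a file, directory, or symlink on your file system.
    Path p = new Path("/tmp/example");
    FileSystem fs = FileSystem.get(new Configuration());

    // getFileLinkStatus does not follow symlinks, so isSymlink() can return true here;
    // getFileStatus would resolve the link and describe its target instead.
    FileStatus status = fs.getFileLinkStatus(p);
    if (status.isSymlink()) {
      System.out.println(p + " is a symlink to " + status.getSymlink());
    } else {
      System.out.println(p + " is " + (status.isDirectory() ? "a directory" : "a file"));
    }
  }
}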

Example 1: validateToString

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Validates the toString method for FileStatus.
 * @param fileStatus FileStatus to be validated
 */
private void validateToString(FileStatus fileStatus) throws IOException {
  StringBuilder expected = new StringBuilder();
  expected.append("FileStatus{");
  expected.append("path=").append(fileStatus.getPath()).append("; ");
  expected.append("isDirectory=").append(fileStatus.isDirectory()).append("; ");
  if(!fileStatus.isDirectory()) {
    expected.append("length=").append(fileStatus.getLen()).append("; ");
    expected.append("replication=").append(fileStatus.getReplication()).append("; ");
    expected.append("blocksize=").append(fileStatus.getBlockSize()).append("; ");
  }
  expected.append("modification_time=").append(fileStatus.getModificationTime()).append("; ");
  expected.append("access_time=").append(fileStatus.getAccessTime()).append("; ");
  expected.append("owner=").append(fileStatus.getOwner()).append("; ");
  expected.append("group=").append(fileStatus.getGroup()).append("; ");
  expected.append("permission=").append(fileStatus.getPermission()).append("; ");
  if(fileStatus.isSymlink()) {
    expected.append("isSymlink=").append(true).append("; ");
    expected.append("symlink=").append(fileStatus.getSymlink()).append("}");
  } else {
    expected.append("isSymlink=").append(false).append("}");
  }
  
  assertEquals(expected.toString(), fileStatus.toString());
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 29, Source: TestFileStatus.java

Example 2: getFileStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Returns the {@link FileStatus} from the {@link PathData} item. If the
 * current options require links to be followed then the returned file status
 * is that of the linked file.
 *
 * @param item
 *          PathData
 * @param depth
 *          current depth in the directory traversal
 * @return FileStatus
 */
protected FileStatus getFileStatus(PathData item, int depth)
    throws IOException {
  FileStatus fileStatus = item.stat;
  if (fileStatus.isSymlink()) {
    if (options.isFollowLink() || (options.isFollowArgLink() &&
        (depth == 0))) {
      Path linkedFile = item.fs.resolvePath(fileStatus.getSymlink());
      fileStatus = getFileSystem(item).getFileStatus(linkedFile);
    }
  }
  return fileStatus;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 24, Source: BaseExpression.java

Example 3: verifyPermsRecursively

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void verifyPermsRecursively(FileSystem fs,
    FileContext files, Path p,
    LocalResourceVisibility vis) throws IOException {
  FileStatus status = files.getFileStatus(p);
  if (status.isDirectory()) {
    if (vis == LocalResourceVisibility.PUBLIC) {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PUBLIC_DIR_PERMS.toShort());
    }
    else {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PRIVATE_DIR_PERMS.toShort());
    }
    if (!status.isSymlink()) {
      FileStatus[] statuses = fs.listStatus(p);
      for (FileStatus stat : statuses) {
        verifyPermsRecursively(fs, files, stat.getPath(), vis);
      }
    }
  }
  else {
    if (vis == LocalResourceVisibility.PUBLIC) {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PUBLIC_FILE_PERMS.toShort());
    }
    else {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PRIVATE_FILE_PERMS.toShort());
    }
  }      
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source: TestFSDownload.java

Example 4: getType

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public static FILE_TYPE getType(FileStatus fileStatus) {
  if (fileStatus.isFile()) {
    return FILE;
  }
  if (fileStatus.isDirectory()) {
    return DIRECTORY;
  }
  if (fileStatus.isSymlink()) {
    return SYMLINK;
  }
  throw new IllegalArgumentException("Could not determine filetype for: " +
                                     fileStatus.getPath());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source: HttpFSFileSystem.java

Example 5: matches

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public boolean matches(FileSystemWrapper fs, FileStatus status) throws IOException {
  if (ranges.isEmpty() || status.isDirectory()) {
    return false;
  }
  // follow the chain of symlinks until a non-symlink entry is reached
  FileStatus current = status;
  while (current.isSymlink()) {
    current = fs.getFileStatus(current.getSymlink());
  }
  // if the resolved entry is not a regular file (it can no longer be a symlink here), deny the match
  if (!current.isFile()) {
    return false;
  }

  final Range<Long> fileRange = Range.closedOpen( 0L, status.getLen());

  try (FSDataInputStream is = fs.open(status.getPath())) {
    for(RangeMagics rMagic : ranges) {
      Range<Long> r = rMagic.range;
      if (!fileRange.encloses(r)) {
        continue;
      }
      int len = (int) (r.upperEndpoint() - r.lowerEndpoint());
      byte[] bytes = new byte[len];
      is.readFully(r.lowerEndpoint(), bytes);
      for (byte[] magic : rMagic.magics) {
        if (Arrays.equals(magic, bytes)) {
          return true;
        }
      }
    }
  }
  return false;
}
 
Developer ID: dremio, Project: dremio-oss, Lines of code: 35, Source: BasicFormatMatcher.java

Example 6: toProtoFileStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Converts a Hadoop {@link FileStatus} instance into a protobuf
 * {@link DFSProtos.FileStatus}
 *
 * @param status
 *          the Hadoop status instance to convert
 * @return a protobuf status instance
 * @throws IOException
 */
static DFS.FileStatus toProtoFileStatus(FileStatus status) throws IOException {
  DFS.FileStatus.Builder builder = DFS.FileStatus.newBuilder();

  builder
    .setLength(status.getLen())
    .setIsDirectory(status.isDirectory())
    .setBlockReplication(status.getReplication())
    .setBlockSize(status.getBlockSize())
    .setModificationTime(status.getModificationTime())
    .setAccessTime(status.getAccessTime());

  // Handling potential null values
  if (status.getPath() != null) {
    builder = builder.setPath(status.getPath().toUri().getPath());
  }
  if (status.getPermission() != null) {
    builder = builder.setPermission(status.getPermission().toExtendedShort());
  }
  if (status.getOwner() != null) {
    builder = builder.setOwner(status.getOwner());
  }
  if (status.getGroup() != null) {
    builder = builder.setGroup(status.getGroup());
  }
  if (status.isSymlink()) {
    builder = builder.setSymlink(status.getSymlink().toString());
  }

  return builder.build();
}
 
Developer ID: dremio, Project: dremio-oss, Lines of code: 40, Source: RemoteNodeFileSystem.java

Example 7: concat

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Move blocks from srcs to trg and delete srcs afterwards.
 * The file block sizes must be the same.
 * 
 * @param trg existing file to append to
 * @param psrcs list of files (same block size, same replication)
 * @throws IOException
 */
@Override
public void concat(Path trg, Path [] psrcs) throws IOException {
  statistics.incrementWriteOps(1);
  // Make target absolute
  Path absF = fixRelativePart(trg);
  // Make all srcs absolute
  Path[] srcs = new Path[psrcs.length];
  for (int i=0; i<psrcs.length; i++) {
    srcs[i] = fixRelativePart(psrcs[i]);
  }
  // Try the concat without resolving any links
  String[] srcsStr = new String[psrcs.length];
  try {
    for (int i=0; i<psrcs.length; i++) {
      srcsStr[i] = getPathName(srcs[i]);
    }
    dfs.concat(getPathName(trg), srcsStr);
  } catch (UnresolvedLinkException e) {
    // Exception could be from trg or any src.
    // Fully resolve trg and srcs. Fail if any of them are a symlink.
    FileStatus stat = getFileLinkStatus(absF);
    if (stat.isSymlink()) {
      throw new IOException("Cannot concat with a symlink target: "
          + trg + " -> " + stat.getPath());
    }
    absF = fixRelativePart(stat.getPath());
    for (int i=0; i<psrcs.length; i++) {
      stat = getFileLinkStatus(srcs[i]);
      if (stat.isSymlink()) {
        throw new IOException("Cannot concat with a symlink src: "
            + psrcs[i] + " -> " + stat.getPath());
      }
      srcs[i] = fixRelativePart(stat.getPath());
    }
    // Try concat again. Can still race with another symlink.
    for (int i=0; i<psrcs.length; i++) {
      srcsStr[i] = getPathName(srcs[i]);
    }
    dfs.concat(getPathName(absF), srcsStr);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 50, Source: DistributedFileSystem.java
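
For context, here is a minimal usage sketch of the concat call shown in Example 7. The namenode address and paths are hypothetical, and, as the Javadoc above notes, all source files must share the target's block size.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConcatSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical namenode address; adjust to your cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), new Configuration());

    Path target = new Path("/data/combined.bin");
    Path[] parts = { new Path("/data/part-0"), new Path("/data/part-1") };

    // Moves the blocks of the source files onto the target and deletes the sources;
    // the call fails if the target or any source is (or resolves to) a symlink.
    fs.concat(target, parts);
  }
}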


Note: The org.apache.hadoop.fs.FileStatus.isSymlink method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.