

Java FileStatus Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.FileStatus. If you are unsure what the FileStatus class is for, how to use it, or what real-world usage looks like, the curated code examples below may help.


The FileStatus class belongs to the org.apache.hadoop.fs package. A total of 15 FileStatus code examples are shown below, sorted by popularity by default.
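
Before diving into the examples, here is a minimal, self-contained sketch of how a FileStatus is typically obtained and inspected. The path "/tmp/data" and the default Configuration are illustrative placeholders only; this snippet is not taken from any of the projects listed below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusQuickStart {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path dir = new Path("/tmp/data"); // placeholder path, for illustration only

    // Resolve the FileSystem that owns this path (local FS, HDFS, ...)
    FileSystem fs = dir.getFileSystem(conf);

    // getFileStatus returns the metadata of a single path
    FileStatus status = fs.getFileStatus(dir);
    System.out.println("directory? " + status.isDirectory());
    System.out.println("owner:     " + status.getOwner());
    System.out.println("modified:  " + status.getModificationTime());

    // listStatus returns one FileStatus per entry in a directory
    for (FileStatus child : fs.listStatus(dir)) {
      System.out.println(child.getPath() + " (" + child.getLen() + " bytes)");
    }
  }
}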

Example 1: fixFileStatus

import org.apache.hadoop.fs.FileStatus; // import the required package/class
private FileStatus fixFileStatus(String endpoint, FileStatus status) throws IOException {
  final Path remotePath = Path.getPathWithoutSchemeAndAuthority(status.getPath());

  if (status.isDirectory()) {
    return new PDFSFileStatus(makeQualified(remotePath), status);
  }

  String basename = remotePath.getName();
  boolean hidden = isHidden(basename);

  StringBuilder sb = new StringBuilder();
  if (hidden) {
    sb.append(basename.charAt(0));
  }
  sb.append(endpoint).append('@');
  sb.append(hidden ? basename.substring(1) : basename);

  return new PDFSFileStatus(makeQualified(new Path(remotePath.getParent(), sb.toString())), status);
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 20, Source file: PseudoDistributedFileSystem.java

Example 2: call

import org.apache.hadoop.fs.FileStatus; // import the required package/class
@Override
public Result call() throws Exception {
  Result result = new Result();
  FileSystem fs = path.getFileSystem(conf);
  result.fs = fs;
  FileStatus[] matches = fs.globStatus(path, inputFilter);
  if (matches == null) {
    result.addError(new IOException("Input path does not exist: " + path));
  } else if (matches.length == 0) {
    result.addError(new IOException("Input Pattern " + path
        + " matches 0 files"));
  } else {
    result.matchedFileStatuses = matches;
  }
  return result;
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source file: LocatedFileStatusFetcher.java

Example 3: validateMapFileOutputContent

import org.apache.hadoop.fs.FileStatus; // import the required package/class
private void validateMapFileOutputContent(
    FileSystem fs, Path dir) throws IOException {
  // map output is a directory with index and data files
  Path expectedMapDir = new Path(dir, partFile);
  assert(fs.getFileStatus(expectedMapDir).isDirectory());    
  FileStatus[] files = fs.listStatus(expectedMapDir);
  int fileCount = 0;
  boolean dataFileFound = false; 
  boolean indexFileFound = false; 
  for (FileStatus f : files) {
    if (f.isFile()) {
      ++fileCount;
      if (f.getPath().getName().equals(MapFile.INDEX_FILE_NAME)) {
        indexFileFound = true;
      }
      else if (f.getPath().getName().equals(MapFile.DATA_FILE_NAME)) {
        dataFileFound = true;
      }
    }
  }
  assert(fileCount > 0);
  assert(dataFileFound && indexFileFound);
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source file: TestFileOutputCommitter.java

Example 4: open

import org.apache.hadoop.fs.FileStatus; // import the required package/class
@Override
public FSDataInputStream open(Path file, int bufferSize) throws IOException {
  FTPClient client = connect();
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  FileStatus fileStat = getFileStatus(client, absolute);
  if (fileStat.isDirectory()) {
    disconnect(client);
    throw new FileNotFoundException("Path " + file + " is a directory.");
  }
  client.allocate(bufferSize);
  Path parent = absolute.getParent();
  // Change to the parent directory on the server; only then can we read the
  // file on the server by opening an InputStream. As a side effect, the working
  // directory on the server is changed to the parent directory of the file.
  // The FTP client connection is closed when close() is called on the
  // FSDataInputStream.
  client.changeWorkingDirectory(parent.toUri().getPath());
  InputStream is = client.retrieveFileStream(file.getName());
  FSDataInputStream fis = new FSDataInputStream(new FTPInputStream(is,
      client, statistics));
  if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
    // The ftpClient is an inconsistent state. Must close the stream
    // which in turn will logout and disconnect from FTP server
    fis.close();
    throw new IOException("Unable to open file: " + file + ", Aborting");
  }
  return fis;
}
 
Developer ID: naver, Project: hadoop, Lines: 32, Source file: FTPFileSystem.java

Example 5: getStoreFiles

import org.apache.hadoop.fs.FileStatus; // import the required package/class
/**
 * Returns all files belonging to the given region directory. Could return an
 * empty list.
 *
 * @param fs  The file system reference.
 * @param regionDir  The region directory to scan.
 * @return The list of files found.
 * @throws IOException When scanning the files fails.
 */
static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
throws IOException {
  List<Path> res = new ArrayList<Path>();
  PathFilter dirFilter = new FSUtils.DirFilter(fs);
  FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
  for(FileStatus dir : familyDirs) {
    FileStatus[] files = fs.listStatus(dir.getPath());
    for (FileStatus file : files) {
      if (!file.isDir()) {
        res.add(file.getPath());
      }
    }
  }
  return res;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source file: IndexFile.java

Example 6: start

import org.apache.hadoop.fs.FileStatus; // import the required package/class
@Override
public void start(CoprocessorEnvironment env) {
  this.env = (RegionCoprocessorEnvironment)env;
  random = new SecureRandom();
  conf = env.getConfiguration();
  baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
  this.userProvider = UserProvider.instantiate(conf);

  try {
    fs = FileSystem.get(conf);
    fs.mkdirs(baseStagingDir, PERM_HIDDEN);
    fs.setPermission(baseStagingDir, PERM_HIDDEN);
    //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
    fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
    FileStatus status = fs.getFileStatus(baseStagingDir);
    if(status == null) {
      throw new IllegalStateException("Failed to create staging directory");
    }
    if(!status.getPermission().equals(PERM_HIDDEN)) {
      throw new IllegalStateException(
          "Directory already exists but permissions aren't set to '-rwx--x--x' ");
    }
  } catch (IOException e) {
    throw new IllegalStateException("Failed to get FileSystem instance",e);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source file: SecureBulkLoadEndpoint.java

Example 7: call

import org.apache.hadoop.fs.FileStatus; // import the required package/class
@Override
public void call(T ignored) throws IOException {
  Path dataDirPath = new Path(dataDirString + "/*");
  FileSystem fs = FileSystem.get(dataDirPath.toUri(), hadoopConf);
  FileStatus[] inputPathStatuses = fs.globStatus(dataDirPath);
  if (inputPathStatuses != null) {
    long oldestTimeAllowed =
        System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(maxAgeHours, TimeUnit.HOURS);
    Arrays.stream(inputPathStatuses).filter(FileStatus::isDirectory).map(FileStatus::getPath).
        filter(subdir -> {
          Matcher m = dirTimestampPattern.matcher(subdir.getName());
          return m.find() && Long.parseLong(m.group(1)) < oldestTimeAllowed;
        }).forEach(subdir -> {
          log.info("Deleting old data at {}", subdir);
          try {
            fs.delete(subdir, true);
          } catch (IOException e) {
            log.warn("Unable to delete {}; continuing", subdir, e);
          }
        });
  }
}
 
Developer ID: oncewang, Project: oryx2, Lines: 23, Source file: DeleteOldDataFn.java

Example 8: getFileStatus

import org.apache.hadoop.fs.FileStatus; // import the required package/class
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  Path absolutePath = toAbsolutePath(f);
  checkPath(absolutePath);

  // if the path is not a remote file path
  if (!isRemoteFile(absolutePath)) {
    return new GetFileStatusTask(absolutePath).get();
  }

  // Parse top level directory
  try {
    RemotePath remotePath = getRemotePath(absolutePath);

    FileSystem delegate = getDelegateFileSystem(remotePath.address);
    FileStatus status = delegate.getFileStatus(remotePath.path);

    return fixFileStatus(remotePath.address, status);
  } catch (IllegalArgumentException e) {
    throw (FileNotFoundException) (new FileNotFoundException("No file " + absolutePath).initCause(e));
  }
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 23, Source file: PseudoDistributedFileSystem.java

Example 9: writeToFileListing

import org.apache.hadoop.fs.FileStatus; // import the required package/class
private void writeToFileListing(
    SequenceFile.Writer fileListWriter,
    CopyListingFileStatus fileStatus,
    Path sourcePathRoot,
    S3MapReduceCpOptions options)
  throws IOException {
  LOG.debug("REL PATH: {}, FULL PATH: {}", PathUtil.getRelativePath(sourcePathRoot, fileStatus.getPath()),
      fileStatus.getPath());

  FileStatus status = fileStatus;

  if (!shouldCopy(fileStatus.getPath(), options)) {
    return;
  }

  fileListWriter.append(new Text(PathUtil.getRelativePath(sourcePathRoot, fileStatus.getPath())), status);
  fileListWriter.sync();

  if (!fileStatus.isDirectory()) {
    totalBytesToCopy += fileStatus.getLen();
  }
  totalPaths++;
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 24, Source file: SimpleCopyListing.java

Example 10: testToProtobuFileStatusWithDefault

import org.apache.hadoop.fs.FileStatus; // import the required package/class
@Test
public void testToProtobuFileStatusWithDefault() throws IOException {
  FileStatus status = new FileStatus();

  DFS.FileStatus result = RemoteNodeFileSystem.toProtoFileStatus(status);
  assertFalse(result.hasPath());
  assertEquals(0, result.getLength());
  assertFalse(result.getIsDirectory());
  assertEquals(0, result.getBlockReplication());
  assertEquals(0, result.getBlockSize());
  assertEquals(0, result.getAccessTime());
  assertEquals(0, result.getModificationTime());
  assertEquals(FsPermission.getFileDefault().toExtendedShort(), result.getPermission());
  assertEquals("", result.getOwner());
  assertEquals("", result.getGroup());
  assertFalse(result.hasSymlink());
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 18, Source file: TestRemoteNodeFileSystem.java

Example 11: splitFastq

import org.apache.hadoop.fs.FileStatus; // import the required package/class
private static void splitFastq(FileStatus fst, String fqPath, String splitDir, int splitlen, JavaSparkContext sc) throws IOException {
  Path fqpath = new Path(fqPath);
  String fqname = fqpath.getName();
  String[] ns = fqname.split("\\.");
  //TODO: Handle also compressed files
  List<FileSplit> nlif = NLineInputFormat.getSplitsForFile(fst, sc.hadoopConfiguration(), splitlen);

  JavaRDD<FileSplit> splitRDD = sc.parallelize(nlif);

  splitRDD.foreach(split -> {
    FastqRecordReader fqreader = new FastqRecordReader(new Configuration(), split);
    writeFastqFile(fqreader, new Configuration(), splitDir + "/split_" + split.getStart() + "." + ns[1]);
  });
}
 
Developer ID: NGSeq, Project: ViraPipe, Lines: 17, Source file: InterleaveMulti.java

Example 12: testUriEncodingMoreComplexCharacters

import org.apache.hadoop.fs.FileStatus; // import the required package/class
@Test
public void testUriEncodingMoreComplexCharacters() throws Exception {
  // Create a file name with URI reserved characters, plus the percent
  String fileName = "!#$'()*;=[]%";
  String directoryName = "*;=[]%!#$'()";
  fs.create(new Path(directoryName, fileName)).close();
  FileStatus[] listing = fs.listStatus(new Path(directoryName));
  assertEquals(1, listing.length);
  assertEquals(fileName, listing[0].getPath().getName());
  FileStatus status = fs.getFileStatus(new Path(directoryName, fileName));
  assertEquals(fileName, status.getPath().getName());
  InputStream stream = fs.open(new Path(directoryName, fileName));
  assertNotNull(stream);
  stream.close();
  assertTrue(fs.delete(new Path(directoryName, fileName), true));
  assertTrue(fs.delete(new Path(directoryName), true));
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source file: NativeAzureFileSystemBaseTest.java

Example 13: addMockStoreFiles

import org.apache.hadoop.fs.FileStatus; // import the required package/class
private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
    throws IOException {
  // get the existing store files
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  fs.mkdirs(storedir);
  // create the store files in the parent
  for (int i = 0; i < count; i++) {
    Path storeFile = new Path(storedir, "_store" + i);
    FSDataOutputStream dos = fs.create(storeFile, true);
    dos.writeBytes("Some data: " + i);
    dos.close();
  }
  LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
  // make sure the mock store files are there
  FileStatus[] storeFiles = fs.listStatus(storedir);
  assertEquals("Didn't have expected store files", count, storeFiles.length);
  return storeFiles;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source file: TestCatalogJanitor.java

Example 14: newMapTask

import org.apache.hadoop.fs.FileStatus; // import the required package/class
@Override
protected Callable<FileStatus[]> newMapTask(final String address) throws IOException {
  return new Callable<FileStatus[]>() {
    @Override
    public FileStatus[] call() throws Exception {
      // Only directories should be listed with a fork/join task
      final FileSystem fs = getDelegateFileSystem(address);
      FileStatus status = fs.getFileStatus(path);
      if (status.isFile()) {
        throw new FileNotFoundException("Directory not found: " + path);
      }
      FileStatus[] remoteStatuses = fs.listStatus(path);

      FileStatus[] statuses = new FileStatus[remoteStatuses.length];
      for (int i = 0; i < statuses.length; i++) {
        statuses[i] = fixFileStatus(address, remoteStatuses[i]);
      }

      return statuses;
    }
  };
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 23, Source file: PseudoDistributedFileSystem.java

Example 15: doBuildListing

import org.apache.hadoop.fs.FileStatus; // import the required package/class
@Override
public void doBuildListing(Path pathToListFile, DistCpOptions options) throws IOException {
  try (Writer writer = newWriter(pathToListFile)) {

    Path sourceRootPath = getRootPath(getConf());

    for (Path sourcePath : options.getSourcePaths()) {

      FileSystem fileSystem = sourcePath.getFileSystem(getConf());
      FileStatus directory = fileSystem.getFileStatus(sourcePath);

      Map<String, CopyListingFileStatus> children = new FileStatusTreeTraverser(fileSystem)
          .preOrderTraversal(directory)
          .transform(new CopyListingFileStatusFunction(fileSystem, options))
          .uniqueIndex(new RelativePathFunction(sourceRootPath));

      for (Entry<String, CopyListingFileStatus> entry : children.entrySet()) {
        LOG.debug("Adding '{}' with relative path '{}'", entry.getValue().getPath(), entry.getKey());
        writer.append(new Text(entry.getKey()), entry.getValue());
        writer.sync();
      }
    }
  }
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 25, Source file: CircusTrainCopyListing.java


Note: The org.apache.hadoop.fs.FileStatus class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.