当前位置: 首页>>代码示例>>Java>>正文


Java FileStatus类代码示例

本文整理汇总了Java中org.apache.hadoop.fs.FileStatus的典型用法代码示例。如果您正苦于以下问题:Java FileStatus类的具体用法?Java FileStatus怎么用?Java FileStatus使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


FileStatus类属于org.apache.hadoop.fs包,在下文中一共展示了FileStatus类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: fixFileStatus

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Rewrites a remote file status so its path encodes the originating endpoint.
 *
 * <p>Directories are only re-qualified. Files are renamed to
 * {@code <endpoint>@<name>}; for hidden files the leading marker character
 * (e.g. '.' or '_') is kept in front of the endpoint so the result stays hidden.
 *
 * @param endpoint identifier of the node the status came from
 * @param status the status reported by the delegate file system
 * @return a {@code PDFSFileStatus} with the endpoint-qualified path
 * @throws IOException if qualifying the path fails
 */
private FileStatus fixFileStatus(String endpoint, FileStatus status) throws IOException {
  final Path remotePath = Path.getPathWithoutSchemeAndAuthority(status.getPath());

  // Directories keep their original name; only the path is re-qualified.
  if (status.isDirectory()) {
    return new PDFSFileStatus(makeQualified(remotePath), status);
  }

  final String name = remotePath.getName();
  final boolean hiddenFile = isHidden(name);

  final StringBuilder encoded = new StringBuilder();
  if (hiddenFile) {
    // Preserve the hidden-file marker at the front of the new name.
    encoded.append(name.charAt(0));
  }
  encoded.append(endpoint).append('@').append(hiddenFile ? name.substring(1) : name);

  final Path renamed = new Path(remotePath.getParent(), encoded.toString());
  return new PDFSFileStatus(makeQualified(renamed), status);
}
 
开发者ID:dremio,项目名称:dremio-oss,代码行数:20,代码来源:PseudoDistributedFileSystem.java

示例2: call

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Expands this task's input path pattern against its file system.
 *
 * <p>Errors (nonexistent path, pattern matching nothing) are recorded on the
 * returned {@link Result} rather than thrown, so the caller can aggregate them.
 *
 * @return a result holding either the matched statuses or the recorded error
 */
@Override
public Result call() throws Exception {
  final Result result = new Result();
  final FileSystem fs = path.getFileSystem(conf);
  result.fs = fs;

  final FileStatus[] matches = fs.globStatus(path, inputFilter);
  if (matches == null) {
    // globStatus returns null when the path itself does not exist.
    result.addError(new IOException("Input path does not exist: " + path));
  } else if (matches.length == 0) {
    // The path exists but the glob pattern matched nothing.
    result.addError(new IOException("Input Pattern " + path + " matches 0 files"));
  } else {
    result.matchedFileStatuses = matches;
  }
  return result;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:17,代码来源:LocatedFileStatusFetcher.java

示例3: validateMapFileOutputContent

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Asserts that a committed MapFile output directory exists and contains both
 * the index file and the data file.
 *
 * @param fs file system holding the output
 * @param dir committed output directory
 * @throws IOException if listing the output fails
 */
private void validateMapFileOutputContent(
    FileSystem fs, Path dir) throws IOException {
  // MapFile output is a directory holding an index file and a data file.
  Path expectedMapDir = new Path(dir, partFile);
  assert(fs.getFileStatus(expectedMapDir).isDirectory());
  int fileCount = 0;
  boolean dataFileFound = false;
  boolean indexFileFound = false;
  for (FileStatus entry : fs.listStatus(expectedMapDir)) {
    if (!entry.isFile()) {
      continue;
    }
    ++fileCount;
    String name = entry.getPath().getName();
    if (name.equals(MapFile.INDEX_FILE_NAME)) {
      indexFileFound = true;
    } else if (name.equals(MapFile.DATA_FILE_NAME)) {
      dataFileFound = true;
    }
  }
  assert(fileCount > 0);
  assert(dataFileFound && indexFileFound);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:TestFileOutputCommitter.java

示例4: open

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Opens the given file for reading over FTP.
 *
 * <p>A fresh FTP connection is created per call; on success its ownership is
 * transferred to the returned stream (disconnected when the stream is closed),
 * and on failure paths it is disconnected here.
 *
 * @param file the file to open; resolved against the server's current working
 *     directory if not absolute
 * @param bufferSize buffer size hint forwarded to the FTP client
 * @return an input stream over the remote file
 * @throws FileNotFoundException if {@code file} resolves to a directory
 * @throws IOException if the server does not accept the transfer
 */
@Override
public FSDataInputStream open(Path file, int bufferSize) throws IOException {
  FTPClient client = connect();
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  FileStatus fileStat = getFileStatus(client, absolute);
  if (fileStat.isDirectory()) {
    // Directories cannot be opened; release the connection before failing.
    disconnect(client);
    throw new FileNotFoundException("Path " + file + " is a directory.");
  }
  client.allocate(bufferSize);
  Path parent = absolute.getParent();
  // Change to parent directory on the
  // server. Only then can we read the
  // file
  // on the server by opening up an InputStream. As a side effect the working
  // directory on the server is changed to the parent directory of the file.
  // The FTP client connection is closed when close() is called on the
  // FSDataInputStream.
  client.changeWorkingDirectory(parent.toUri().getPath());
  InputStream is = client.retrieveFileStream(file.getName());
  FSDataInputStream fis = new FSDataInputStream(new FTPInputStream(is,
      client, statistics));
  if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
    // The ftpClient is an inconsistent state. Must close the stream
    // which in turn will logout and disconnect from FTP server
    fis.close();
    throw new IOException("Unable to open file: " + file + ", Aborting");
  }
  return fis;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:32,代码来源:FTPFileSystem.java

示例5: getStoreFiles

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Returns all files belonging to the given region directory. Could return an
 * empty list.
 *
 * @param fs  The file system reference.
 * @param regionDir  The region directory to scan.
 * @return The list of files found.
 * @throws IOException When scanning the files fails.
 */
static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
throws IOException {
  List<Path> res = new ArrayList<>();
  PathFilter dirFilter = new FSUtils.DirFilter(fs);
  // Region layout: regionDir/<family>/<store file> — scan each family dir.
  FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
  for (FileStatus dir : familyDirs) {
    FileStatus[] files = fs.listStatus(dir.getPath());
    for (FileStatus file : files) {
      // isDirectory() is the non-deprecated, behavior-identical replacement
      // for the deprecated isDir().
      if (!file.isDirectory()) {
        res.add(file.getPath());
      }
    }
  }
  return res;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:25,代码来源:IndexFile.java

示例6: start

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Initializes the secure bulk load endpoint: creates the hidden staging
 * directory with restrictive permissions and verifies the result.
 *
 * @param env the coprocessor environment; must be a region environment
 * @throws IllegalStateException if the staging directory cannot be set up or
 *     does not end up with the expected {@code PERM_HIDDEN} permissions
 */
@Override
public void start(CoprocessorEnvironment env) {
  this.env = (RegionCoprocessorEnvironment)env;
  random = new SecureRandom();
  conf = env.getConfiguration();
  baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
  this.userProvider = UserProvider.instantiate(conf);

  try {
    fs = FileSystem.get(conf);
    fs.mkdirs(baseStagingDir, PERM_HIDDEN);
    fs.setPermission(baseStagingDir, PERM_HIDDEN);
    //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
    fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
    FileStatus status = fs.getFileStatus(baseStagingDir);
    if(status == null) {
      throw new IllegalStateException("Failed to create staging directory");
    }
    if(!status.getPermission().equals(PERM_HIDDEN)) {
      throw new IllegalStateException(
          "Directory already exists but permissions aren't set to '-rwx--x--x' ");
    }
  } catch (IOException e) {
    // This try block also covers mkdirs/setPermission/getFileStatus, so the
    // old message ("Failed to get FileSystem instance") was misleading for
    // most failures; report the actual activity instead.
    throw new IllegalStateException(
        "Failed to set up bulk load staging directory " + baseStagingDir, e);
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:27,代码来源:SecureBulkLoadEndpoint.java

示例7: call

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Scans the data directory for timestamped subdirectories older than the
 * configured maximum age and deletes them, logging and continuing on failure.
 *
 * @param ignored unused trigger value
 * @throws IOException if the file system cannot be obtained
 */
@Override
public void call(T ignored) throws IOException {
  Path globPath = new Path(dataDirString + "/*");
  FileSystem fs = FileSystem.get(globPath.toUri(), hadoopConf);
  FileStatus[] candidates = fs.globStatus(globPath);
  if (candidates == null) {
    // The data directory itself does not exist; nothing to clean.
    return;
  }
  long oldestTimeAllowed =
      System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(maxAgeHours, TimeUnit.HOURS);
  for (FileStatus candidate : candidates) {
    if (!candidate.isDirectory()) {
      continue;
    }
    Path subdir = candidate.getPath();
    // Keep subdirectories whose embedded timestamp is still within the window.
    Matcher m = dirTimestampPattern.matcher(subdir.getName());
    if (!m.find() || Long.parseLong(m.group(1)) >= oldestTimeAllowed) {
      continue;
    }
    log.info("Deleting old data at {}", subdir);
    try {
      fs.delete(subdir, true);
    } catch (IOException e) {
      // Best-effort cleanup: a failed delete must not abort the sweep.
      log.warn("Unable to delete {}; continuing", subdir, e);
    }
  }
}
 
开发者ID:oncewang,项目名称:oryx2,代码行数:23,代码来源:DeleteOldDataFn.java

示例8: getFileStatus

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Returns the status of a path, delegating remote file paths to the file
 * system of the endpoint encoded in the path.
 *
 * @param f the path to look up
 * @return the (possibly endpoint-fixed) file status
 * @throws FileNotFoundException if the remote path cannot be parsed or found
 * @throws IOException on delegate file system errors
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  final Path absolutePath = toAbsolutePath(f);
  checkPath(absolutePath);

  // Non-remote paths are resolved with the distributed task machinery.
  if (!isRemoteFile(absolutePath)) {
    return new GetFileStatusTask(absolutePath).get();
  }

  try {
    // Remote file: extract the endpoint address and the path on that endpoint,
    // then delegate and re-encode the endpoint into the returned status.
    final RemotePath remotePath = getRemotePath(absolutePath);
    final FileSystem delegate = getDelegateFileSystem(remotePath.address);
    final FileStatus delegateStatus = delegate.getFileStatus(remotePath.path);
    return fixFileStatus(remotePath.address, delegateStatus);
  } catch (IllegalArgumentException e) {
    // An unparseable remote path is reported as a missing file, keeping the
    // parse failure as the cause.
    throw (FileNotFoundException) (new FileNotFoundException("No file " + absolutePath).initCause(e));
  }
}
 
开发者ID:dremio,项目名称:dremio-oss,代码行数:23,代码来源:PseudoDistributedFileSystem.java

示例9: writeToFileListing

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Appends one file status to the copy listing, keyed by its path relative to
 * the source root, and updates the copy totals.
 *
 * @param fileListWriter sequence-file writer for the listing
 * @param fileStatus status to record
 * @param sourcePathRoot root used to compute the relative key
 * @param options copy options consulted to decide whether to record the path
 * @throws IOException if writing to the listing fails
 */
private void writeToFileListing(
    SequenceFile.Writer fileListWriter,
    CopyListingFileStatus fileStatus,
    Path sourcePathRoot,
    S3MapReduceCpOptions options)
  throws IOException {
  Path fullPath = fileStatus.getPath();
  String relativePath = PathUtil.getRelativePath(sourcePathRoot, fullPath);
  LOG.debug("REL PATH: {}, FULL PATH: {}", relativePath, fullPath);

  if (!shouldCopy(fullPath, options)) {
    return;
  }

  // Append under the FileStatus base type, matching the writer's value class.
  FileStatus value = fileStatus;
  fileListWriter.append(new Text(relativePath), value);
  fileListWriter.sync();

  // Directories contribute to the path count but not to the byte total.
  if (!fileStatus.isDirectory()) {
    totalBytesToCopy += fileStatus.getLen();
  }
  totalPaths++;
}
 
开发者ID:HotelsDotCom,项目名称:circus-train,代码行数:24,代码来源:SimpleCopyListing.java

示例10: testToProtobuFileStatusWithDefault

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * A default-constructed FileStatus must convert to a protobuf status with all
 * numeric fields zeroed, empty owner/group, default file permissions, and no
 * path or symlink set.
 */
@Test
public void testToProtobuFileStatusWithDefault() throws IOException {
  FileStatus defaultStatus = new FileStatus();

  DFS.FileStatus proto = RemoteNodeFileSystem.toProtoFileStatus(defaultStatus);
  assertFalse(proto.hasPath());
  assertEquals(0, proto.getLength());
  assertFalse(proto.getIsDirectory());
  assertEquals(0, proto.getBlockReplication());
  assertEquals(0, proto.getBlockSize());
  assertEquals(0, proto.getAccessTime());
  assertEquals(0, proto.getModificationTime());
  assertEquals(FsPermission.getFileDefault().toExtendedShort(), proto.getPermission());
  assertEquals("", proto.getOwner());
  assertEquals("", proto.getGroup());
  assertFalse(proto.hasSymlink());
}
 
开发者ID:dremio,项目名称:dremio-oss,代码行数:18,代码来源:TestRemoteNodeFileSystem.java

示例11: splitFastq

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Splits a FASTQ file into fixed-size line chunks and writes each chunk as a
 * separate file under {@code splitDir}, distributing the work across Spark.
 *
 * @param fst status of the input FASTQ file
 * @param fqPath path string of the input FASTQ file
 * @param splitDir output directory for the split files
 * @param splitlen number of lines per split
 * @param sc Spark context used to parallelize the splits
 * @throws IOException if computing the input splits fails
 */
private static void splitFastq(FileStatus fst, String fqPath, String splitDir, int splitlen, JavaSparkContext sc) throws IOException {
  Path fqpath = new Path(fqPath);
  String fqname = fqpath.getName();
  String[] ns = fqname.split("\\.");
  // Guard: a name without a dot previously threw ArrayIndexOutOfBoundsException
  // on ns[1]; fall back to a conventional FASTQ extension instead.
  final String extension = ns.length > 1 ? ns[1] : "fq";
  //TODO: Handle also compressed files
  List<FileSplit> nlif = NLineInputFormat.getSplitsForFile(fst, sc.hadoopConfiguration(), splitlen);

  JavaRDD<FileSplit> splitRDD = sc.parallelize(nlif);

  splitRDD.foreach( split ->  {

    FastqRecordReader fqreader = new FastqRecordReader(new Configuration(), split);
    writeFastqFile(fqreader, new Configuration(), splitDir + "/split_" + split.getStart() + "." + extension);

   });
}
 
开发者ID:NGSeq,项目名称:ViraPipe,代码行数:17,代码来源:InterleaveMulti.java

示例12: testUriEncodingMoreComplexCharacters

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Round-trips a file whose directory and file names contain URI-reserved
 * characters plus the percent sign: create, list, stat, open, and delete must
 * all see the un-encoded names.
 */
@Test
public void testUriEncodingMoreComplexCharacters() throws Exception {
  // Create a file name with URI reserved characters, plus the percent
  String fileName = "!#$'()*;=[]%";
  String directoryName = "*;=[]%!#$'()";
  Path dirPath = new Path(directoryName);
  Path filePath = new Path(directoryName, fileName);

  fs.create(filePath).close();

  // Listing the directory must surface exactly the one file, un-encoded.
  FileStatus[] entries = fs.listStatus(dirPath);
  assertEquals(1, entries.length);
  assertEquals(fileName, entries[0].getPath().getName());

  // Direct stat must agree with the listing.
  FileStatus single = fs.getFileStatus(filePath);
  assertEquals(fileName, single.getPath().getName());

  // The file must be openable by its reserved-character name.
  InputStream stream = fs.open(filePath);
  assertNotNull(stream);
  stream.close();

  assertTrue(fs.delete(filePath, true));
  assertTrue(fs.delete(dirPath, true));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:NativeAzureFileSystemBaseTest.java

示例13: addMockStoreFiles

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Creates {@code count} small mock store files under {@code storedir} and
 * returns their statuses, asserting that all of them are visible.
 *
 * @param count number of mock store files to create
 * @param services master services providing the file system
 * @param storedir directory to create the files in
 * @return statuses of the created store files
 * @throws IOException if file creation or listing fails
 */
private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
    throws IOException {
  // get the existing store files
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  fs.mkdirs(storedir);
  // create the store files in the parent
  for (int i = 0; i < count; i++) {
    Path storeFile = new Path(storedir, "_store" + i);
    // try-with-resources closes the stream even if writeBytes throws,
    // which the bare dos.close() did not guarantee.
    try (FSDataOutputStream dos = fs.create(storeFile, true)) {
      dos.writeBytes("Some data: " + i);
    }
  }
  LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
  // make sure the mock store files are there
  FileStatus[] storeFiles = fs.listStatus(storedir);
  assertEquals("Didn't have expected store files", count, storeFiles.length);
  return storeFiles;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:19,代码来源:TestCatalogJanitor.java

示例14: newMapTask

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Builds the per-endpoint listing task for a fork/join directory listing.
 *
 * @param address endpoint whose delegate file system should be listed
 * @return a callable that lists {@code path} on that endpoint and rewrites
 *     each status to carry the endpoint in its name
 * @throws IOException never thrown here; declared for the template contract
 */
@Override
protected Callable<FileStatus[]> newMapTask(final String address) throws IOException {
  return () -> {
    // Only directories should be listed with a fork/join task
    final FileSystem fs = getDelegateFileSystem(address);
    final FileStatus rootStatus = fs.getFileStatus(path);
    if (rootStatus.isFile()) {
      throw new FileNotFoundException("Directory not found: " + path);
    }
    final FileStatus[] remoteStatuses = fs.listStatus(path);

    final FileStatus[] fixed = new FileStatus[remoteStatuses.length];
    for (int i = 0; i < fixed.length; i++) {
      fixed[i] = fixFileStatus(address, remoteStatuses[i]);
    }
    return fixed;
  };
}
 
开发者ID:dremio,项目名称:dremio-oss,代码行数:23,代码来源:PseudoDistributedFileSystem.java

示例15: doBuildListing

import org.apache.hadoop.fs.FileStatus; //导入依赖的package包/类
/**
 * Builds the copy listing: traverses each source path pre-order, converts the
 * statuses, and appends every entry keyed by its path relative to the root.
 *
 * @param pathToListFile destination file for the listing
 * @param options distcp options supplying the source paths
 * @throws IOException if traversal or writing fails
 */
@Override
public void doBuildListing(Path pathToListFile, DistCpOptions options) throws IOException {
  try (Writer listingWriter = newWriter(pathToListFile)) {

    final Path rootPath = getRootPath(getConf());

    for (Path source : options.getSourcePaths()) {

      FileSystem sourceFs = source.getFileSystem(getConf());
      FileStatus sourceDir = sourceFs.getFileStatus(source);

      // Pre-order traversal keyed by path relative to the listing root.
      Map<String, CopyListingFileStatus> entries = new FileStatusTreeTraverser(sourceFs)
          .preOrderTraversal(sourceDir)
          .transform(new CopyListingFileStatusFunction(sourceFs, options))
          .uniqueIndex(new RelativePathFunction(rootPath));

      for (Entry<String, CopyListingFileStatus> entry : entries.entrySet()) {
        LOG.debug("Adding '{}' with relative path '{}'", entry.getValue().getPath(), entry.getKey());
        listingWriter.append(new Text(entry.getKey()), entry.getValue());
        listingWriter.sync();
      }
    }
  }
}
 
开发者ID:HotelsDotCom,项目名称:circus-train,代码行数:25,代码来源:CircusTrainCopyListing.java


注:本文中的org.apache.hadoop.fs.FileStatus类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。