当前位置: 首页>>代码示例>>Java>>正文


Java FileStatus.getPath方法代码示例

本文整理汇总了Java中org.apache.hadoop.fs.FileStatus.getPath方法的典型用法代码示例。如果您正苦于以下问题:Java FileStatus.getPath方法的具体用法?Java FileStatus.getPath怎么用?Java FileStatus.getPath使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.fs.FileStatus的用法示例。


在下文中一共展示了FileStatus.getPath方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: isFileDeletable

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
@Override
public boolean isFileDeletable(FileStatus fStat) {
  try {
    // Directories are always safe to remove.
    if (fStat.isDirectory()) {
      return true;
    }

    Path hfilePath = fStat.getPath();
    // Re-list the file; a null result means it no longer exists, so it can be
    // "deleted" (should never happen, since deleted files shouldn't be passed in).
    FileStatus[] current = FSUtils.listStatus(this.fs, hfilePath, null);
    if (current == null) {
      return true;
    }

    // Layout is .../table/region/family/file — walk three levels up for the table name,
    // then ask the archive tracker whether this table's HFiles must be retained.
    String tableName = hfilePath.getParent().getParent().getParent().getName();
    boolean deletable = !archiveTracker.keepHFiles(tableName);
    LOG.debug("Archiver says to [" + (deletable ? "delete" : "keep") + "] files for table:" + tableName);
    return deletable;
  } catch (IOException e) {
    // On lookup failure, err on the side of keeping the file.
    LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
    return false;
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:28,代码来源:LongTermArchivingHFileCleaner.java

示例2: verify

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Asserts that the committed files under the topic directory are exactly
 * {@code expectedFiles}, and that each file holds three Avro records matching
 * the Connect structs in {@code records} (sequentially, one struct per file).
 */
private void verify(Set<Path> expectedFiles, Struct[] records, Schema schema) throws IOException {
  Path topicDir = new Path(FileUtils.topicDirectory(url, topicsDir, TOPIC));
  FileStatus[] statuses = FileUtils.traverse(storage, topicDir, new CommittedFileFilter());
  assertEquals(expectedFiles.size(), statuses.length);
  int recordIndex = 0;
  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    assertTrue(expectedFiles.contains(filePath));
    Collection<Object> avroRecords = schemaFileReader.readData(conf, filePath);
    assertEquals(3, avroRecords.size());
    for (Object avroRecord : avroRecords) {
      assertEquals(avroData.fromConnectData(schema, records[recordIndex]), avroRecord);
    }
    recordIndex++;
  }
}
 
开发者ID:jiangxiluning,项目名称:kafka-connect-hdfs,代码行数:17,代码来源:TopicPartitionWriterTest.java

示例3: checkRegionDir

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Check all column families in a region dir.
 *
 * @param regionDir
 *          region directory
 * @throws IOException
 */
protected void checkRegionDir(Path regionDir) throws IOException {
  FileStatus[] cfs = null;
  try {
    cfs = fs.listStatus(regionDir, new FamilyDirFilter(fs));
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    reportMissingRegionDir(regionDir);
    return;
  }

  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (cfs.length == 0 && !fs.exists(regionDir)) {
    reportMissingRegionDir(regionDir);
    return;
  }

  for (FileStatus cfFs : cfs) {
    checkColFamDir(cfFs.getPath());
  }
}

/**
 * Records a region directory that has vanished, most likely due to a
 * concurrent split/compaction. Shared by both the exception-based (Hadoop
 * 0.23+) and return-based (Hadoop 1.0) missing-directory detection paths.
 */
private void reportMissingRegionDir(Path regionDir) {
  LOG.warn("Region Directory " + regionDir +
      " does not exist.  Likely due to concurrent split/compaction. Skipping.");
  missing.add(regionDir);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:33,代码来源:HFileCorruptionChecker.java

示例4: run

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
@Override
public void run() {
  // Anything older than (now - retention) is eligible for deletion.
  final long cutoffMillis = System.currentTimeMillis() - retentionMillis;
  LOG.info("aggregated log deletion started.");
  try {
    FileSystem fs = remoteRootLogDir.getFileSystem(conf);
    for (FileStatus userDir : fs.listStatus(remoteRootLogDir)) {
      if (!userDir.isDirectory()) {
        continue;
      }
      // Per-user aggregated logs live under <root>/<user>/<suffix>.
      Path userLogDir = new Path(userDir.getPath(), suffix);
      deleteOldLogDirsFrom(userLogDir, cutoffMillis, fs, rmClient);
    }
  } catch (IOException e) {
    logIOException("Error reading root log dir this deletion " +
    		"attempt is being aborted", e);
  }
  LOG.info("aggregated log deletion finished.");
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:AggregatedLogDeletionService.java

示例5: listStatus

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Lists input files for the job, substituting each MapFile directory entry
 * with the status of its internal data file so record reading works directly.
 */
@Override
protected List<FileStatus> listStatus(JobContext job
                                      )throws IOException {

  List<FileStatus> inputs = super.listStatus(job);
  for (int idx = 0, n = inputs.size(); idx < n; ++idx) {
    FileStatus candidate = inputs.get(idx);
    if (!candidate.isDirectory()) {
      continue;
    }
    // A directory here is a MapFile; point at its embedded data file instead.
    Path dir = candidate.getPath();
    FileSystem fs = dir.getFileSystem(job.getConfiguration());
    inputs.set(idx, fs.getFileStatus(new Path(dir, MapFile.DATA_FILE_NAME)));
  }
  return inputs;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:SequenceFileInputFormat.java

示例6: recordStartsWith

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Scans the "part-*" files under {@code dirName} in the local warehouse
 * directory and returns true if any of them contains a line matching
 * {@code record} (per-file check delegated to checkFileForLine).
 * Fails the test if no import files exist at all.
 */
protected boolean recordStartsWith(List<Integer> record, String dirName,
    SqoopOptions.FileLayout fileLayout)
    throws Exception {
  Path targetPath = new Path(new Path(LOCAL_WAREHOUSE_DIR), dirName);

  FileSystem localFs = FileSystem.getLocal(new Configuration());
  FileStatus [] files = localFs.listStatus(targetPath);

  if (files == null || files.length == 0) {
    fail("Got no import files!");
  }

  for (FileStatus stat : files) {
    Path partPath = stat.getPath();
    if (!partPath.getName().startsWith("part-")) {
      continue;  // only MapReduce output parts hold records
    }
    if (checkFileForLine(localFs, partPath, fileLayout, record)) {
      // We found the line. Nothing further to do.
      return true;
    }
  }

  return false;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:30,代码来源:TestMerge.java

示例7: isDirReadable

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Returns true when {@code dir} looks like a readable Parquet directory: it
 * contains a Parquet metadata file, a cached metadata file, or at least one
 * child (filtered) file whose first entry is readable.
 */
boolean isDirReadable(DrillFileSystem fs, FileStatus dir) {
  Path metadataPath = new Path(dir.getPath(), ParquetFileWriter.PARQUET_METADATA_FILE);
  try {
    // Either the standard Parquet metadata file or our cached one suffices.
    if (fs.exists(metadataPath) || metaDataFileExists(fs, dir)) {
      return true;
    }
    // Otherwise probe the first non-hidden child file for readability.
    FileStatus[] children = fs.listStatus(dir.getPath(), new DrillPathFilter());
    return children.length != 0 && super.isFileReadable(fs, children[0]);
  } catch (IOException e) {
    logger.info("Failure while attempting to check for Parquet metadata file.", e);
    return false;
  }
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:24,代码来源:ParquetFormatPlugin.java

示例8: scanDirectory

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Lists the regular files directly under {@code path} that pass
 * {@code pathFilter}. A missing directory is logged and yields an empty list.
 */
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
    PathFilter pathFilter) throws IOException {
  path = fc.makeQualified(path);
  List<FileStatus> matches = new ArrayList<FileStatus>();
  try {
    RemoteIterator<FileStatus> entries = fc.listStatus(path);
    while (entries.hasNext()) {
      FileStatus entry = entries.next();
      // Keep only plain files accepted by the filter (skip subdirectories).
      if (entry.isFile() && pathFilter.accept(entry.getPath())) {
        matches.add(entry);
      }
    }
  } catch (FileNotFoundException fe) {
    LOG.error("Error while scanning directory " + path, fe);
  }
  return matches;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:HistoryFileManager.java

示例9: scanPathHelper

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Recursively scans {@code path}: traces table metadata for directories that
 * are tables, and descends into non-empty non-table directories. Hidden,
 * temporary and system directories are skipped by name.
 *
 * @param path   directory to scan
 * @param scanFs filesystem the path lives on
 */
private static void scanPathHelper(Path path, FileSystem scanFs)
  throws IOException, InterruptedException, SQLException {
  String curPath = path.toUri().getPath();
  // Skip hidden/temp/system directories outright.
  if (path.getName().matches("^(\\.|_|tmp|temp|test|trash|backup|archive|ARCHIVE|storkinternal).*"))
      return;

  logger.info("  -- scanPath(" + curPath + ")\n");
  int x = isTable(path, scanFs);
  if (x > 0) {
    traceTableInfo(path, scanFs);
  } else if (x == 0) { // iterate over each table
    for (FileStatus fstat : scanFs.listStatus(path)) {
      Path n = fstat.getPath();
      curPath = n.toUri().getPath();
      // BUGFIX: compare by value. The original used 'n == path' (reference
      // equality), which never matches a freshly constructed child Path, so
      // the intended self-skip never fired.
      if (n.equals(path)) {
        continue;
      }
      try {
        if (isTable(n, scanFs) > 0) {
          traceTableInfo(n, scanFs);
        } else if (scanFs.listStatus(n).length > 0 || scanFs.getContentSummary(n).getLength() > 0) {
          scanPath(n, scanFs);
        } else {
          logger.info("* scanPath() size = 0: " + curPath);
        }
      } catch (AccessControlException e) {
        // Lack of permission on one subtree shouldn't abort the whole scan;
        // log (with a space fixed before "with") and move on.
        logger.error("* scanPath(e) Permission denied. Cannot access: " + curPath +
            " owner:" + fstat.getOwner() + " group: " + fstat.getGroup() + " with current user " +
            UserGroupInformation.getCurrentUser());
      } // catch
    } // end of for
  } // end else
}
 
开发者ID:thomas-young-2013,项目名称:wherehowsX,代码行数:41,代码来源:SchemaFetch.java

示例10: TextRecordInputStream

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Opens a SequenceFile reader over the given file and prepares reusable
 * key/value instances plus input/output buffers for record-at-a-time
 * text conversion.
 *
 * @param f status of the SequenceFile to read
 * @throws IOException if the reader cannot be opened
 */
public TextRecordInputStream(FileStatus f) throws IOException {
  final Path fpath = f.getPath();
  final Configuration lconf = getConf();
  r = new SequenceFile.Reader(lconf, 
      SequenceFile.Reader.file(fpath));
  // Instantiate reusable key/value holders of the file's declared classes.
  key = ReflectionUtils.newInstance(
      r.getKeyClass().asSubclass(Writable.class), lconf);
  val = ReflectionUtils.newInstance(
      r.getValueClass().asSubclass(Writable.class), lconf);
  inbuf = new DataInputBuffer();
  outbuf = new DataOutputBuffer();
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:13,代码来源:Display.java

示例11: describeUpload

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Builds an S3UploadDescriptor for copying {@code sourceFileStatus} to the
 * bucket/key encoded in {@code targetPath}, with content length and (when
 * configured) AES-256 server-side encryption set on the object metadata.
 */
private S3UploadDescriptor describeUpload(FileStatus sourceFileStatus, Path targetPath) throws IOException {
  URI target = targetPath.toUri();

  ObjectMetadata metadata = new ObjectMetadata();
  metadata.setContentLength(sourceFileStatus.getLen());
  // Honour server-side encryption when enabled in configuration.
  if (conf.getBoolean(ConfigurationVariable.S3_SERVER_SIDE_ENCRYPTION)) {
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  }

  return new S3UploadDescriptor(sourceFileStatus.getPath(), PathUtil.toBucketName(target),
      PathUtil.toBucketKey(target), metadata);
}
 
开发者ID:HotelsDotCom,项目名称:circus-train,代码行数:15,代码来源:CopyMapper.java

示例12: getReferenceFilePaths

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Returns the paths of all reference files found directly under
 * {@code familyDir}, as selected by ReferenceFileFilter.
 */
public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
  FileStatus[] statuses = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
  List<Path> referencePaths = new ArrayList<Path>(statuses.length);
  for (FileStatus status : statuses) {
    referencePaths.add(status.getPath());
  }
  return referencePaths;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:10,代码来源:FSUtils.java

示例13: checkMagicBytes

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Verifies that {@code data[offset .. offset+MAGIC_LENGTH)} holds the Parquet
 * magic bytes; throws an IOException naming the offending file otherwise.
 */
private static void checkMagicBytes(FileStatus status, byte[] data, int offset) throws IOException {
  for (int i = 0; i < MAGIC_LENGTH; i++) {
    if (ParquetFileWriter.MAGIC[i] != data[offset + i]) {
      byte[] found = ArrayUtils.subarray(data, offset, offset + MAGIC_LENGTH);
      throw new IOException(status.getPath() + " is not a Parquet file. expected magic number at tail " + Arrays.toString(ParquetFileWriter.MAGIC) + " but found " + Arrays.toString(found));
    }
  }
}
 
开发者ID:dremio,项目名称:dremio-oss,代码行数:9,代码来源:SingletonParquetFooterCache.java

示例14: listStatus

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Lists input files for the job, replacing each MapFile directory entry with
 * the status of its internal data file so the records can be read directly.
 */
@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
  FileStatus[] inputs = super.listStatus(job);
  for (int idx = 0; idx < inputs.length; idx++) {
    if (!inputs[idx].isDirectory()) {
      continue;
    }
    // A directory here is a MapFile; substitute its data file.
    Path dir = inputs[idx].getPath();
    FileSystem fs = dir.getFileSystem(job);
    inputs[idx] = fs.getFileStatus(new Path(dir, MapFile.DATA_FILE_NAME));
  }
  return inputs;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:SequenceFileInputFormat.java

示例15: readFooter

import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * An updated footer reader that tries to read the entire footer without knowing the length.
 * This should reduce the amount of seek/read roundtrips in most workloads.
 *
 * @param config configuration used to resolve the file's FileSystem
 * @param status status (path and length) of the Parquet file to read
 * @return the parsed footer of the file
 * @throws IOException if the file is too small, lacks the Parquet magic
 *         bytes at its tail, or the footer cannot be read/parsed
 */
public static Footer readFooter(final Configuration config, final FileStatus status) throws IOException {
  final FileSystem fs = status.getPath().getFileSystem(config);
  try(FSDataInputStream file = fs.open(status.getPath())) {

    final long fileLength = status.getLen();
    Preconditions.checkArgument(fileLength >= MIN_FILE_SIZE, "%s is not a Parquet file (too small)", status.getPath());

    // Speculatively read up to DEFAULT_READ_SIZE bytes from the end of the
    // file; in most cases this single read covers the entire footer.
    int len = (int) Math.min( fileLength, (long) DEFAULT_READ_SIZE);
    byte[] footerBytes = new byte[len];
    readFully(file, fileLength - len, footerBytes, 0, len);

    // Parquet tail layout: [metadata][4-byte LE metadata length][magic].
    checkMagicBytes(status, footerBytes, footerBytes.length - ParquetFileWriter.MAGIC.length);
    final int size = BytesUtils.readIntLittleEndian(footerBytes, footerBytes.length - FOOTER_METADATA_SIZE);

    if(size > footerBytes.length - FOOTER_METADATA_SIZE){
      // if the footer is larger than our initial read, we need to read the rest.
      byte[] origFooterBytes = footerBytes;
      // Number of metadata bytes already captured by the first read.
      int origFooterRead = origFooterBytes.length - FOOTER_METADATA_SIZE;

      footerBytes = new byte[size];

      // Read the leading portion of the metadata that precedes the first
      // read, then append the already-read tail bytes behind it.
      readFully(file, fileLength - size - FOOTER_METADATA_SIZE, footerBytes, 0, size - origFooterRead);
      System.arraycopy(origFooterBytes, 0, footerBytes, size - origFooterRead, origFooterRead);
    }else{
      // Footer fits in the initial read: slice out just the metadata bytes.
      int start = footerBytes.length - (size + FOOTER_METADATA_SIZE);
      footerBytes = ArrayUtils.subarray(footerBytes, start, start + size);
    }

    ParquetMetadata metadata = ParquetFormatPlugin.parquetMetadataConverter.readParquetMetadata(new ByteArrayInputStream(footerBytes));
    Footer footer = new Footer(status.getPath(), metadata);
    return footer;
  }
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:42,代码来源:FooterGatherer.java


注:本文中的org.apache.hadoop.fs.FileStatus.getPath方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。