Java FileStatus.getPath Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.fs.FileStatus.getPath method in Java. If you have been wondering exactly what FileStatus.getPath does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.FileStatus.


The sections below present 15 code examples of the FileStatus.getPath method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
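Before the examples, here is a minimal, self-contained sketch of the method itself: every FileStatus returned by FileSystem.listStatus carries the qualified Path of the entry it describes, and getPath() is how you retrieve it. This is illustrative only; the directory /tmp/data is a hypothetical placeholder, and error handling is kept to a minimum.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetPathDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // List the entries of a directory; "/tmp/data" is a placeholder path.
    for (FileStatus status : fs.listStatus(new Path("/tmp/data"))) {
      // getPath() returns the qualified Path of the entry this status describes.
      Path p = status.getPath();
      System.out.println((status.isDirectory() ? "dir:  " : "file: ") + p);
    }
  }
}

As the examples below show, pairing listStatus (or a traversal helper) with getPath is the dominant pattern: list a directory, pull each entry's Path, then filter, recurse, or open the file.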

Example 1: isFileDeletable

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public boolean isFileDeletable(FileStatus fStat) {
  try {
    // if it's a directory, then it can be deleted
    if (fStat.isDirectory()) return true;
    
    Path file = fStat.getPath();
    // check to see if the file still exists
    FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
    // if the file doesn't exist, then it can be deleted (but should never
    // happen since deleted files shouldn't get passed in)
    if (deleteStatus == null) return true;

    // otherwise, we need to check the file's table and see if it is being archived
    Path family = file.getParent();
    Path region = family.getParent();
    Path table = region.getParent();

    String tableName = table.getName();
    boolean ret = !archiveTracker.keepHFiles(tableName);
    LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
    return ret;
  } catch (IOException e) {
    LOG.error("Failed to look up status of: " + fStat.getPath() + ", keeping it just in case.", e);
    return false;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 28, Source: LongTermArchivingHFileCleaner.java

Example 2: verify

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void verify(Set<Path> expectedFiles, Struct[] records, Schema schema) throws IOException {
  Path path = new Path(FileUtils.topicDirectory(url, topicsDir, TOPIC));
  FileStatus[] statuses = FileUtils.traverse(storage, path, new CommittedFileFilter());
  assertEquals(expectedFiles.size(), statuses.length);
  int index = 0;
  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    assertTrue(expectedFiles.contains(filePath));
    Collection<Object> avroRecords = schemaFileReader.readData(conf, filePath);
    assertEquals(3, avroRecords.size());
    for (Object avroRecord: avroRecords) {
      assertEquals(avroData.fromConnectData(schema, records[index]), avroRecord);
    }
    index++;
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 17, Source: TopicPartitionWriterTest.java

Example 3: checkRegionDir

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Check all column families in a region dir.
 *
 * @param regionDir
 *          region directory
 * @throws IOException
 */
protected void checkRegionDir(Path regionDir) throws IOException {
  FileStatus[] cfs = null;
  try {
    cfs = fs.listStatus(regionDir, new FamilyDirFilter(fs));
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    LOG.warn("Region Directory " + regionDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(regionDir);
    return;
  }

  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (cfs.length == 0 && !fs.exists(regionDir)) {
    LOG.warn("Region Directory " + regionDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(regionDir);
    return;
  }

  for (FileStatus cfFs : cfs) {
    Path cfDir = cfFs.getPath();
    checkColFamDir(cfDir);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: HFileCorruptionChecker.java

Example 4: run

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public void run() {
  long cutoffMillis = System.currentTimeMillis() - retentionMillis;
  LOG.info("aggregated log deletion started.");
  try {
    FileSystem fs = remoteRootLogDir.getFileSystem(conf);
    for(FileStatus userDir : fs.listStatus(remoteRootLogDir)) {
      if(userDir.isDirectory()) {
        Path userDirPath = new Path(userDir.getPath(), suffix);
        deleteOldLogDirsFrom(userDirPath, cutoffMillis, fs, rmClient);
      }
    }
  } catch (IOException e) {
    logIOException("Error reading root log dir, this deletion " +
        "attempt is being aborted", e);
  }
  LOG.info("aggregated log deletion finished.");
}
 
Developer ID: naver, Project: hadoop, Lines: 19, Source: AggregatedLogDeletionService.java

Example 5: listStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
protected List<FileStatus> listStatus(JobContext job) throws IOException {
  List<FileStatus> files = super.listStatus(job);
  int len = files.size();
  for(int i=0; i < len; ++i) {
    FileStatus file = files.get(i);
    if (file.isDirectory()) {     // it's a MapFile
      Path p = file.getPath();
      FileSystem fs = p.getFileSystem(job.getConfiguration());
      // use the data file
      files.set(i, fs.getFileStatus(new Path(p, MapFile.DATA_FILE_NAME)));
    }
  }
  return files;
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: SequenceFileInputFormat.java

Example 6: recordStartsWith

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Return true if there's a file in 'dirName' with a line that starts with
 * the values in 'record'.
 */
protected boolean recordStartsWith(List<Integer> record, String dirName,
    SqoopOptions.FileLayout fileLayout)
    throws Exception {
  Path warehousePath = new Path(LOCAL_WAREHOUSE_DIR);
  Path targetPath = new Path(warehousePath, dirName);

  FileSystem fs = FileSystem.getLocal(new Configuration());
  FileStatus [] files = fs.listStatus(targetPath);

  if (null == files || files.length == 0) {
    fail("Got no import files!");
  }

  for (FileStatus stat : files) {
    Path p = stat.getPath();
    if (p.getName().startsWith("part-")) {
      if (checkFileForLine(fs, p, fileLayout, record)) {
        // We found the line. Nothing further to do.
        return true;
      }
    }
  }

  return false;
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 30, Source: TestMerge.java

Example 7: isDirReadable

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
boolean isDirReadable(DrillFileSystem fs, FileStatus dir) {
  Path p = new Path(dir.getPath(), ParquetFileWriter.PARQUET_METADATA_FILE);
  try {
    if (fs.exists(p)) {
      return true;
    } else {
      if (metaDataFileExists(fs, dir)) {
        return true;
      }
      PathFilter filter = new DrillPathFilter();

      FileStatus[] files = fs.listStatus(dir.getPath(), filter);
      if (files.length == 0) {
        return false;
      }
      return super.isFileReadable(fs, files[0]);
    }
  } catch (IOException e) {
    logger.info("Failure while attempting to check for Parquet metadata file.", e);
    return false;
  }
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 24, Source: ParquetFormatPlugin.java

Example 8: scanDirectory

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
    PathFilter pathFilter) throws IOException {
  path = fc.makeQualified(path);
  List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
  try {
    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
    while (fileStatusIter.hasNext()) {
      FileStatus fileStatus = fileStatusIter.next();
      Path filePath = fileStatus.getPath();
      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
        jhStatusList.add(fileStatus);
      }
    }
  } catch (FileNotFoundException fe) {
    LOG.error("Error while scanning directory " + path, fe);
  }
  return jhStatusList;
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: HistoryFileManager.java

Example 9: scanPathHelper

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static void scanPathHelper(Path path, FileSystem scanFs)
  throws IOException, InterruptedException, SQLException {
  String curPath = path.toUri().getPath();
  Path n = path;
  if (path.getName().matches("^(\\.|_|tmp|temp|test|trash|backup|archive|ARCHIVE|storkinternal).*"))
      return;

  logger.info("  -- scanPath(" + curPath + ")\n");
  int x = isTable(path, scanFs);
  if (x > 0) {
    // System.err.println("  traceTable(" + path.toString() + ")");
    traceTableInfo(path, scanFs);
  } else if (x == 0) { // iterate over each table
    // FileStatus[] fslist = scanFs.listStatus(path);
    // System.err.println(" => " + fslist.length + " subdirs");
    for (FileStatus fstat : scanFs.listStatus(path)) {
      n = fstat.getPath();
      curPath = n.toUri().getPath();
      // System.err.println("  traceSubDir(" + curPath + ")");
      if (n.equals(path)) { // compare by value; == only checks reference identity
        continue;
      }
      try {
        if (isTable(n, scanFs) > 0) {
          traceTableInfo(n, scanFs);
        } else if (scanFs.listStatus(n).length > 0 || scanFs.getContentSummary(n).getLength() > 0) {
          scanPath(n, scanFs);
        } else {
          logger.info("* scanPath() size = 0: " + curPath);
        }
      } catch (AccessControlException e) {
        logger.error("* scanPath(e) Permission denied. Cannot access: " + curPath +
            " owner: " + fstat.getOwner() + " group: " + fstat.getGroup() + " with current user " +
            UserGroupInformation.getCurrentUser());
        // System.err.println(e);
        continue;
      } // catch
    } // end of for
  } // end else
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 41, Source: SchemaFetch.java

Example 10: TextRecordInputStream

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public TextRecordInputStream(FileStatus f) throws IOException {
  final Path fpath = f.getPath();
  final Configuration lconf = getConf();
  r = new SequenceFile.Reader(lconf, 
      SequenceFile.Reader.file(fpath));
  key = ReflectionUtils.newInstance(
      r.getKeyClass().asSubclass(Writable.class), lconf);
  val = ReflectionUtils.newInstance(
      r.getValueClass().asSubclass(Writable.class), lconf);
  inbuf = new DataInputBuffer();
  outbuf = new DataOutputBuffer();
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 13, Source: Display.java

Example 11: describeUpload

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private S3UploadDescriptor describeUpload(FileStatus sourceFileStatus, Path targetPath) throws IOException {
  URI targetUri = targetPath.toUri();
  String bucketName = PathUtil.toBucketName(targetUri);
  String key = PathUtil.toBucketKey(targetUri);

  Path sourcePath = sourceFileStatus.getPath();

  ObjectMetadata metadata = new ObjectMetadata();
  metadata.setContentLength(sourceFileStatus.getLen());
  if (conf.getBoolean(ConfigurationVariable.S3_SERVER_SIDE_ENCRYPTION)) {
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  }
  return new S3UploadDescriptor(sourcePath, bucketName, key, metadata);
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 15, Source: CopyMapper.java

Example 12: getReferenceFilePaths

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
  FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
  List<Path> referenceFiles = new ArrayList<Path>(fds.length);
  for (FileStatus fdfs: fds) {
    Path fdPath = fdfs.getPath();
    referenceFiles.add(fdPath);
  }
  return referenceFiles;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 10, Source: FSUtils.java

Example 13: checkMagicBytes

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static void checkMagicBytes(FileStatus status, byte[] data, int offset) throws IOException {
  for (int i = 0, v = offset; i < MAGIC_LENGTH; i++, v++) {
    if (ParquetFileWriter.MAGIC[i] != data[v]) {
      byte[] magic = ArrayUtils.subarray(data, offset, offset + MAGIC_LENGTH);
      throw new IOException(status.getPath() + " is not a Parquet file. expected magic number at tail " + Arrays.toString(ParquetFileWriter.MAGIC) + " but found " + Arrays.toString(magic));
    }
  }
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 9, Source: SingletonParquetFooterCache.java

Example 14: listStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
  FileStatus[] files = super.listStatus(job);
  for (int i = 0; i < files.length; i++) {
    FileStatus file = files[i];
    if (file.isDirectory()) {     // it's a MapFile
      Path dataFile = new Path(file.getPath(), MapFile.DATA_FILE_NAME);
      FileSystem fs = file.getPath().getFileSystem(job);
      // use the data file
      files[i] = fs.getFileStatus(dataFile);
    }
  }
  return files;
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:15,代碼來源:SequenceFileInputFormat.java

Example 15: readFooter

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * An updated footer reader that tries to read the entire footer without knowing the length.
 * This should reduce the amount of seek/read roundtrips in most workloads.
 * @param config the Hadoop configuration used to resolve the file system
 * @param status the status of the Parquet file whose footer should be read
 * @return the parsed footer
 * @throws IOException
 */
public static Footer readFooter(final Configuration config, final FileStatus status) throws IOException {
  final FileSystem fs = status.getPath().getFileSystem(config);
  try(FSDataInputStream file = fs.open(status.getPath())) {

    final long fileLength = status.getLen();
    Preconditions.checkArgument(fileLength >= MIN_FILE_SIZE, "%s is not a Parquet file (too small)", status.getPath());

    int len = (int) Math.min(fileLength, (long) DEFAULT_READ_SIZE);
    byte[] footerBytes = new byte[len];
    readFully(file, fileLength - len, footerBytes, 0, len);

    checkMagicBytes(status, footerBytes, footerBytes.length - ParquetFileWriter.MAGIC.length);
    final int size = BytesUtils.readIntLittleEndian(footerBytes, footerBytes.length - FOOTER_METADATA_SIZE);

    if (size > footerBytes.length - FOOTER_METADATA_SIZE) {
      // if the footer is larger than our initial read, we need to read the rest.
      byte[] origFooterBytes = footerBytes;
      int origFooterRead = origFooterBytes.length - FOOTER_METADATA_SIZE;

      footerBytes = new byte[size];

      readFully(file, fileLength - size - FOOTER_METADATA_SIZE, footerBytes, 0, size - origFooterRead);
      System.arraycopy(origFooterBytes, 0, footerBytes, size - origFooterRead, origFooterRead);
    } else {
      int start = footerBytes.length - (size + FOOTER_METADATA_SIZE);
      footerBytes = ArrayUtils.subarray(footerBytes, start, start + size);
    }

    ParquetMetadata metadata = ParquetFormatPlugin.parquetMetadataConverter.readParquetMetadata(new ByteArrayInputStream(footerBytes));
    Footer footer = new Footer(status.getPath(), metadata);
    return footer;
  }
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 42, Source: FooterGatherer.java


Note: The org.apache.hadoop.fs.FileStatus.getPath method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the source code remains the copyright of its original authors; consult each project's license before using or redistributing the code. Do not reproduce this article without permission.