This article compiles typical usages of the Java method org.apache.hadoop.fs.FileStatus.getPath. If you are wondering what FileStatus.getPath does, how to call it, or where to find examples of it, the curated code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.fs.FileStatus.
The following section presents 15 code examples of FileStatus.getPath, sorted by popularity by default.
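Before stepping through the examples, here is a minimal, self-contained sketch of the typical pattern: list a directory with FileSystem.listStatus and recover each entry's Path via FileStatus.getPath. The directory /tmp/data is only a placeholder for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListPathsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // listStatus returns one FileStatus per directory entry;
    // getPath() recovers the fully qualified Path of that entry.
    for (FileStatus status : fs.listStatus(new Path("/tmp/data"))) {
      Path p = status.getPath();
      System.out.println((status.isDirectory() ? "dir:  " : "file: ") + p);
    }
  }
}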
Example 1: isFileDeletable
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public boolean isFileDeletable(FileStatus fStat) {
  try {
    // if it's a directory, then it can be deleted
    if (fStat.isDirectory()) return true;

    Path file = fStat.getPath();
    // check to see if the file still exists
    FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
    // if the file doesn't exist, then it can be deleted (but should never
    // happen since deleted files shouldn't get passed in)
    if (deleteStatus == null) return true;

    // otherwise, we need to check the file's table and see if it's being archived
    Path family = file.getParent();
    Path region = family.getParent();
    Path table = region.getParent();

    String tableName = table.getName();
    boolean ret = !archiveTracker.keepHFiles(tableName);
    LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
    return ret;
  } catch (IOException e) {
    LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just in case.", e);
    return false;
  }
}
Example 2: verify
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void verify(Set<Path> expectedFiles, Struct[] records, Schema schema) throws IOException {
  Path path = new Path(FileUtils.topicDirectory(url, topicsDir, TOPIC));
  FileStatus[] statuses = FileUtils.traverse(storage, path, new CommittedFileFilter());
  assertEquals(expectedFiles.size(), statuses.length);
  int index = 0;
  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    assertTrue(expectedFiles.contains(status.getPath()));
    Collection<Object> avroRecords = schemaFileReader.readData(conf, filePath);
    assertEquals(3, avroRecords.size());
    for (Object avroRecord : avroRecords) {
      assertEquals(avroData.fromConnectData(schema, records[index]), avroRecord);
    }
    index++;
  }
}
Example 3: checkRegionDir
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Check all column families in a region dir.
 *
 * @param regionDir region directory
 * @throws IOException
 */
protected void checkRegionDir(Path regionDir) throws IOException {
  FileStatus[] cfs = null;
  try {
    cfs = fs.listStatus(regionDir, new FamilyDirFilter(fs));
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    LOG.warn("Region Directory " + regionDir +
        " does not exist. Likely due to concurrent split/compaction. Skipping.");
    missing.add(regionDir);
    return;
  }

  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (cfs.length == 0 && !fs.exists(regionDir)) {
    LOG.warn("Region Directory " + regionDir +
        " does not exist. Likely due to concurrent split/compaction. Skipping.");
    missing.add(regionDir);
    return;
  }

  for (FileStatus cfFs : cfs) {
    Path cfDir = cfFs.getPath();
    checkColFamDir(cfDir);
  }
}
Example 4: run
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public void run() {
  long cutoffMillis = System.currentTimeMillis() - retentionMillis;
  LOG.info("aggregated log deletion started.");
  try {
    FileSystem fs = remoteRootLogDir.getFileSystem(conf);
    for (FileStatus userDir : fs.listStatus(remoteRootLogDir)) {
      if (userDir.isDirectory()) {
        Path userDirPath = new Path(userDir.getPath(), suffix);
        deleteOldLogDirsFrom(userDirPath, cutoffMillis, fs, rmClient);
      }
    }
  } catch (IOException e) {
    logIOException("Error reading root log dir this deletion " +
        "attempt is being aborted", e);
  }
  LOG.info("aggregated log deletion finished.");
}
Example 5: listStatus
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
protected List<FileStatus> listStatus(JobContext job) throws IOException {
  List<FileStatus> files = super.listStatus(job);

  int len = files.size();
  for (int i = 0; i < len; ++i) {
    FileStatus file = files.get(i);
    if (file.isDirectory()) { // it's a MapFile
      Path p = file.getPath();
      FileSystem fs = p.getFileSystem(job.getConfiguration());
      // use the data file
      files.set(i, fs.getFileStatus(new Path(p, MapFile.DATA_FILE_NAME)));
    }
  }
  return files;
}
Example 6: recordStartsWith
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Return true if there's a file in 'dirName' with a line that starts with
 * the values in 'record'.
 */
protected boolean recordStartsWith(List<Integer> record, String dirName,
    SqoopOptions.FileLayout fileLayout)
    throws Exception {
  Path warehousePath = new Path(LOCAL_WAREHOUSE_DIR);
  Path targetPath = new Path(warehousePath, dirName);

  FileSystem fs = FileSystem.getLocal(new Configuration());
  FileStatus[] files = fs.listStatus(targetPath);

  if (null == files || files.length == 0) {
    fail("Got no import files!");
  }

  for (FileStatus stat : files) {
    Path p = stat.getPath();
    if (p.getName().startsWith("part-")) {
      if (checkFileForLine(fs, p, fileLayout, record)) {
        // We found the line. Nothing further to do.
        return true;
      }
    }
  }

  return false;
}
Example 7: isDirReadable
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
boolean isDirReadable(DrillFileSystem fs, FileStatus dir) {
  Path p = new Path(dir.getPath(), ParquetFileWriter.PARQUET_METADATA_FILE);
  try {
    if (fs.exists(p)) {
      return true;
    } else {
      if (metaDataFileExists(fs, dir)) {
        return true;
      }
      PathFilter filter = new DrillPathFilter();

      FileStatus[] files = fs.listStatus(dir.getPath(), filter);
      if (files.length == 0) {
        return false;
      }
      return super.isFileReadable(fs, files[0]);
    }
  } catch (IOException e) {
    logger.info("Failure while attempting to check for Parquet metadata file.", e);
    return false;
  }
}
Example 8: scanDirectory
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
    PathFilter pathFilter) throws IOException {
  path = fc.makeQualified(path);
  List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
  try {
    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
    while (fileStatusIter.hasNext()) {
      FileStatus fileStatus = fileStatusIter.next();
      Path filePath = fileStatus.getPath();
      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
        jhStatusList.add(fileStatus);
      }
    }
  } catch (FileNotFoundException fe) {
    LOG.error("Error while scanning directory " + path, fe);
  }
  return jhStatusList;
}
Example 9: scanPathHelper
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static void scanPathHelper(Path path, FileSystem scanFs)
    throws IOException, InterruptedException, SQLException {
  String curPath = path.toUri().getPath();
  Path n = path;
  if (path.getName().matches("^(\\.|_|tmp|temp|test|trash|backup|archive|ARCHIVE|storkinternal).*"))
    return;

  logger.info(" -- scanPath(" + curPath + ")\n");
  int x = isTable(path, scanFs);
  if (x > 0) {
    // System.err.println("  traceTable(" + path.toString() + ")");
    traceTableInfo(path, scanFs);
  } else if (x == 0) { // iterate over each table
    // FileStatus[] fslist = scanFs.listStatus(path);
    // System.err.println("  => " + fslist.length + " subdirs");
    for (FileStatus fstat : scanFs.listStatus(path)) {
      n = fstat.getPath();
      curPath = n.toUri().getPath();
      // System.err.println("  traceSubDir(" + curPath + ")");
      if (n == path) {
        continue;
      }
      try {
        if (isTable(n, scanFs) > 0) {
          traceTableInfo(n, scanFs);
        } else if (scanFs.listStatus(n).length > 0 || scanFs.getContentSummary(n).getLength() > 0) {
          scanPath(n, scanFs);
        } else {
          logger.info("* scanPath() size = 0: " + curPath);
        }
      } catch (AccessControlException e) {
        logger.error("* scanPath(e) Permission denied. Cannot access: " + curPath +
            " owner:" + fstat.getOwner() + " group: " + fstat.getGroup() +
            " with current user " + UserGroupInformation.getCurrentUser());
        // System.err.println(e);
        continue;
      } // catch
    } // end of for
  } // end else
}
Example 10: TextRecordInputStream
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public TextRecordInputStream(FileStatus f) throws IOException {
  final Path fpath = f.getPath();
  final Configuration lconf = getConf();
  r = new SequenceFile.Reader(lconf,
      SequenceFile.Reader.file(fpath));
  key = ReflectionUtils.newInstance(
      r.getKeyClass().asSubclass(Writable.class), lconf);
  val = ReflectionUtils.newInstance(
      r.getValueClass().asSubclass(Writable.class), lconf);
  inbuf = new DataInputBuffer();
  outbuf = new DataOutputBuffer();
}
Example 11: describeUpload
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private S3UploadDescriptor describeUpload(FileStatus sourceFileStatus, Path targetPath) throws IOException {
  URI targetUri = targetPath.toUri();
  String bucketName = PathUtil.toBucketName(targetUri);
  String key = PathUtil.toBucketKey(targetUri);

  Path sourcePath = sourceFileStatus.getPath();

  ObjectMetadata metadata = new ObjectMetadata();
  metadata.setContentLength(sourceFileStatus.getLen());
  if (conf.getBoolean(ConfigurationVariable.S3_SERVER_SIDE_ENCRYPTION)) {
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  }
  return new S3UploadDescriptor(sourcePath, bucketName, key, metadata);
}
Example 12: getReferenceFilePaths
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
  FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
  List<Path> referenceFiles = new ArrayList<Path>(fds.length);
  for (FileStatus fdfs : fds) {
    Path fdPath = fdfs.getPath();
    referenceFiles.add(fdPath);
  }
  return referenceFiles;
}
Example 13: checkMagicBytes
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static void checkMagicBytes(FileStatus status, byte[] data, int offset) throws IOException {
  for (int i = 0, v = offset; i < MAGIC_LENGTH; i++, v++) {
    if (ParquetFileWriter.MAGIC[i] != data[v]) {
      byte[] magic = ArrayUtils.subarray(data, offset, offset + MAGIC_LENGTH);
      throw new IOException(status.getPath() + " is not a Parquet file. expected magic number at tail "
          + Arrays.toString(ParquetFileWriter.MAGIC) + " but found " + Arrays.toString(magic));
    }
  }
}
Example 14: listStatus
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
  FileStatus[] files = super.listStatus(job);
  for (int i = 0; i < files.length; i++) {
    FileStatus file = files[i];
    if (file.isDirectory()) { // it's a MapFile
      Path dataFile = new Path(file.getPath(), MapFile.DATA_FILE_NAME);
      FileSystem fs = file.getPath().getFileSystem(job);
      // use the data file
      files[i] = fs.getFileStatus(dataFile);
    }
  }
  return files;
}
Example 15: readFooter
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * An updated footer reader that tries to read the entire footer without knowing the length.
 * This should reduce the amount of seek/read roundtrips in most workloads.
 * @param config the Hadoop configuration used to resolve the file system
 * @param status the status of the Parquet file whose footer should be read
 * @return the parsed Parquet footer
 * @throws IOException
 */
public static Footer readFooter(final Configuration config, final FileStatus status) throws IOException {
  final FileSystem fs = status.getPath().getFileSystem(config);
  try (FSDataInputStream file = fs.open(status.getPath())) {

    final long fileLength = status.getLen();
    Preconditions.checkArgument(fileLength >= MIN_FILE_SIZE, "%s is not a Parquet file (too small)", status.getPath());

    int len = (int) Math.min(fileLength, (long) DEFAULT_READ_SIZE);
    byte[] footerBytes = new byte[len];
    readFully(file, fileLength - len, footerBytes, 0, len);

    checkMagicBytes(status, footerBytes, footerBytes.length - ParquetFileWriter.MAGIC.length);
    final int size = BytesUtils.readIntLittleEndian(footerBytes, footerBytes.length - FOOTER_METADATA_SIZE);

    if (size > footerBytes.length - FOOTER_METADATA_SIZE) {
      // if the footer is larger than our initial read, we need to read the rest.
      byte[] origFooterBytes = footerBytes;
      int origFooterRead = origFooterBytes.length - FOOTER_METADATA_SIZE;

      footerBytes = new byte[size];

      readFully(file, fileLength - size - FOOTER_METADATA_SIZE, footerBytes, 0, size - origFooterRead);
      System.arraycopy(origFooterBytes, 0, footerBytes, size - origFooterRead, origFooterRead);
    } else {
      int start = footerBytes.length - (size + FOOTER_METADATA_SIZE);
      footerBytes = ArrayUtils.subarray(footerBytes, start, start + size);
    }

    ParquetMetadata metadata = ParquetFormatPlugin.parquetMetadataConverter.readParquetMetadata(new ByteArrayInputStream(footerBytes));
    Footer footer = new Footer(status.getPath(), metadata);
    return footer;
  }
}
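A rough calling sketch for the footer reader above, assuming readFooter is accessible from the calling class and that /tmp/data.parquet is only a placeholder path:

Configuration conf = new Configuration();
Path parquetPath = new Path("/tmp/data.parquet"); // placeholder path
FileSystem fs = parquetPath.getFileSystem(conf);
FileStatus status = fs.getFileStatus(parquetPath);
// reads the tail of the file once (a second time only for very large footers) and parses the metadata
Footer footer = readFooter(conf, status);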