This page collects typical usage examples of the Java method org.apache.hadoop.fs.LocatedFileStatus.isDirectory. If you are unsure what LocatedFileStatus.isDirectory does, how to call it, or where to find real-world examples, the curated snippets below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.LocatedFileStatus.
Seven code examples of LocatedFileStatus.isDirectory are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
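Before diving into the examples, here is a minimal, self-contained sketch (the path /tmp/demo is a placeholder) that lists a directory with FileSystem.listLocatedStatus and branches on isDirectory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class IsDirectoryDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // List the immediate children of a placeholder directory.
    RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(new Path("/tmp/demo"));
    while (iter.hasNext()) {
      LocatedFileStatus stat = iter.next();
      // isDirectory() tells subdirectories apart from regular files.
      System.out.println((stat.isDirectory() ? "dir:  " : "file: ") + stat.getPath());
    }
  }
}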
Example 1: addInputPathRecursively
import org.apache.hadoop.fs.LocatedFileStatus; // import the package/class this method depends on
/**
* Add files in the input path recursively into the results.
* @param result
* The List to store all files.
* @param fs
* The FileSystem.
* @param path
* The input path.
* @param inputFilter
* The input filter that can be used to filter files/dirs.
* @throws IOException
*/
protected void addInputPathRecursively(List<FileStatus> result,
FileSystem fs, Path path, PathFilter inputFilter)
throws IOException {
RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(path);
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (stat.isDirectory()) {
addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
} else {
result.add(stat);
}
}
}
}
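This helper is protected inside Hadoop's FileInputFormat, so you would normally call it from a subclass; a hypothetical invocation, assuming Java 8+ and a copy of the helper in scope (the root path and filter are made up for illustration):

List<FileStatus> result = new ArrayList<>();
FileSystem fs = FileSystem.get(new Configuration());
// Skip hidden files and directories, mirroring Hadoop's default input filter.
PathFilter filter = p -> !p.getName().startsWith(".") && !p.getName().startsWith("_");
addInputPathRecursively(result, fs, new Path("/data/input"), filter);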
Example 2: call
import org.apache.hadoop.fs.LocatedFileStatus; // import the package/class this method depends on
@Override
public Result call() throws Exception {
Result result = new Result();
result.fs = fs;
if (fileStatus.isDirectory()) {
RemoteIterator<LocatedFileStatus> iter = fs
.listLocatedStatus(fileStatus.getPath());
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (recursive && stat.isDirectory()) {
result.dirsNeedingRecursiveCalls.add(stat);
} else {
result.locatedFileStatuses.add(stat);
}
}
}
} else {
result.locatedFileStatuses.add(fileStatus);
}
return result;
}
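In Hadoop itself this call() body belongs to a Callable used for multi-threaded directory listing (LocatedFileStatusFetcher); the fields fs, fileStatus, inputFilter, and recursive, as well as the Result holder, are supplied by the enclosing class and are not shown in the snippet. Note the design choice: instead of recursing inline, directories are queued in dirsNeedingRecursiveCalls so the caller can fan them out as further tasks on a thread pool.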
Example 3: findTemporaryTableLocation
import org.apache.hadoop.fs.LocatedFileStatus; // import the package/class this method depends on
private List<Path> findTemporaryTableLocation(String tableName) throws IOException {
Path sessionTempLocation = new Path(dirTestWatcher.getDfsTestTmpDir().getAbsolutePath(), session_id.toString());
assertTrue("Session temporary location must exist", fs.exists(sessionTempLocation));
assertEquals("Session temporary location permission should match",
expectedFolderPermission, fs.getFileStatus(sessionTempLocation).getPermission());
String tableUUID = UUID.nameUUIDFromBytes(tableName.getBytes()).toString();
RemoteIterator<LocatedFileStatus> pathList = fs.listLocatedStatus(sessionTempLocation);
List<Path> matchingPath = Lists.newArrayList();
while (pathList.hasNext()) {
LocatedFileStatus path = pathList.next();
if (path.isDirectory() && path.getPath().getName().equals(tableUUID)) {
matchingPath.add(path.getPath());
}
}
return matchingPath;
}
Example 4: singleThreadedListStatus
import org.apache.hadoop.fs.LocatedFileStatus; // import the package/class this method depends on
private List<FileStatus> singleThreadedListStatus(JobConf job, Path[] dirs,
PathFilter inputFilter, boolean recursive) throws IOException {
List<FileStatus> result = new ArrayList<FileStatus>();
List<IOException> errors = new ArrayList<IOException>();
for (Path p: dirs) {
FileSystem fs = p.getFileSystem(job);
FileStatus[] matches = fs.globStatus(p, inputFilter);
if (matches == null) {
errors.add(new IOException("Input path does not exist: " + p));
} else if (matches.length == 0) {
errors.add(new IOException("Input Pattern " + p + " matches 0 files"));
} else {
for (FileStatus globStat: matches) {
if (globStat.isDirectory()) {
RemoteIterator<LocatedFileStatus> iter =
fs.listLocatedStatus(globStat.getPath());
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (recursive && stat.isDirectory()) {
addInputPathRecursively(result, fs, stat.getPath(),
inputFilter);
} else {
result.add(stat);
}
}
}
} else {
result.add(globStat);
}
}
}
}
if (!errors.isEmpty()) {
throw new InvalidInputException(errors);
}
return result;
}
Example 5: singleThreadedListStatus
import org.apache.hadoop.fs.LocatedFileStatus; // import the package/class this method depends on
private List<FileStatus> singleThreadedListStatus(JobContext job, Path[] dirs,
PathFilter inputFilter, boolean recursive) throws IOException {
List<FileStatus> result = new ArrayList<FileStatus>();
List<IOException> errors = new ArrayList<IOException>();
for (int i=0; i < dirs.length; ++i) {
Path p = dirs[i];
FileSystem fs = p.getFileSystem(job.getConfiguration());
FileStatus[] matches = fs.globStatus(p, inputFilter);
if (matches == null) {
errors.add(new IOException("Input path does not exist: " + p));
} else if (matches.length == 0) {
errors.add(new IOException("Input Pattern " + p + " matches 0 files"));
} else {
for (FileStatus globStat: matches) {
if (globStat.isDirectory()) {
RemoteIterator<LocatedFileStatus> iter =
fs.listLocatedStatus(globStat.getPath());
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (recursive && stat.isDirectory()) {
addInputPathRecursively(result, fs, stat.getPath(),
inputFilter);
} else {
result.add(stat);
}
}
}
} else {
result.add(globStat);
}
}
}
}
if (!errors.isEmpty()) {
throw new InvalidInputException(errors);
}
return result;
}
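Examples 4 and 5 are twins: Example 4 targets the old mapred API (JobConf) while Example 5 targets the new mapreduce API (JobContext); the listing logic is otherwise identical. In both, descent into subdirectories only happens when recursive is true, which Hadoop derives from the job configuration. A minimal sketch of enabling it (this is the standard Hadoop property key):

// Equivalent to FileInputFormat.setInputDirRecursive(job, true) in the new API.
job.getConfiguration().setBoolean("mapreduce.input.fileinputformat.input.dir.recursive", true);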
Example 6: getFileStatusOfSegments
import org.apache.hadoop.fs.LocatedFileStatus; // import the package/class this method depends on
private void getFileStatusOfSegments(JobContext job, String[] segmentsToConsider,
List<FileStatus> result) throws IOException {
String[] partitionsToConsider = getValidPartitions(job);
if (partitionsToConsider.length == 0) {
throw new IOException("No partitions/data found");
}
PathFilter inputFilter = getDataFileFilter(job);
CarbonTablePath tablePath = getTablePath(job.getConfiguration());
// get tokens for all the required FileSystem for table path
TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { tablePath },
job.getConfiguration());
//get all data files of valid partitions and segments
for (int i = 0; i < partitionsToConsider.length; ++i) {
String partition = partitionsToConsider[i];
for (int j = 0; j < segmentsToConsider.length; ++j) {
String segmentId = segmentsToConsider[j];
Path segmentPath = new Path(tablePath.getCarbonDataDirectoryPath(partition, segmentId));
FileSystem fs = segmentPath.getFileSystem(job.getConfiguration());
RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(segmentPath);
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (stat.isDirectory()) {
addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
} else {
result.add(stat);
}
}
}
}
}
}
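This variant appears to come from CarbonData's input format: it walks every (partition, segment) directory pair and, when a nested directory shows up, folds it in via the same addInputPathRecursively helper shown in Example 1.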
Example 7: clearFolder
import org.apache.hadoop.fs.LocatedFileStatus; // import the package/class this method depends on
/**
* Deletes all files in <code>folder</code>.
*
* @param folder the folder to delete the files within
* @throws IOException if deleting fails
*/
public static void clearFolder(File folder) throws IOException {
if (!isEmpty(getHdfsUrl())) {
FileSystem fs = getFilesystem();
Path target = new Path(getHdfsPath() + "/" + folder);
if (fs.exists(target)) {
RemoteIterator<LocatedFileStatus> iter = fs.listFiles(target, false);
while (iter.hasNext()) {
LocatedFileStatus file = iter.next();
if (!file.isDirectory()) {
fs.delete(file.getPath(), false);
}
}
}
} else if (!isEmpty(getDfsPath())) {
File targetPath = new File(getDfsPath(), folder.getName());
File[] files = targetPath.listFiles();
if (null != files) {
for (File f : files) {
if (!f.isDirectory()) {
f.delete();
}
}
}
} else {
throw new IOException("Cannot cleanup folder. Check HDFS/DFS configuration.");
}
}
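A hypothetical call, assuming the surrounding helpers (getHdfsUrl, getFilesystem, getHdfsPath, getDfsPath) are configured by the enclosing class:

// Removes all regular files directly under the folder; subdirectories are left in place.
clearFolder(new File("unittest-output"));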