本文整理汇总了Java中org.apache.hadoop.fs.FileStatus.isDir方法的典型用法代码示例。如果您正苦于以下问题:Java FileStatus.isDir方法的具体用法?Java FileStatus.isDir怎么用?Java FileStatus.isDir使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.fs.FileStatus的用法示例。
在下文中一共展示了FileStatus.isDir方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: ParquetMetadataStat
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
*
* @param nameNode the hostname of hdfs namenode
* @param hdfsPort the port of hdfs namenode, usually 9000 or 8020
* @param dirPath the path of the directory which contains the parquet files, begin with /, for gen /msra/column/order/parquet/
* @throws IOException
* @throws MetadataException
*/
/**
 * Collects Parquet file metadata for every regular file directly under a directory on HDFS.
 *
 * @param nameNode the hostname of the HDFS namenode
 * @param hdfsPort the port of the HDFS namenode, usually 9000 or 8020
 * @param dirPath the path of the directory which contains the parquet files, beginning with /, e.g. /msra/column/order/parquet/
 * @throws IOException if the filesystem cannot be contacted or listed
 * @throws MetadataException if no parquet files are found (dirPath is a regular file, or an empty directory)
 */
public ParquetMetadataStat(String nameNode, int hdfsPort, String dirPath) throws IOException, MetadataException
{
    Configuration conf = new Configuration();
    FileSystem fileSystem = FileSystem.get(URI.create("hdfs://" + nameNode + ":" + hdfsPort), conf);
    Path hdfsDirPath = new Path(dirPath);
    if (! fileSystem.isFile(hdfsDirPath))
    {
        FileStatus[] fileStatuses = fileSystem.listStatus(hdfsDirPath);
        for (FileStatus status : fileStatuses)
        {
            // isDir() (deprecated in Hadoop 2.x) is kept for compatibility with HDFS 1.x.
            if (! status.isDir())
            {
                this.fileMetaDataList.add(new ParquetFileMetadata(conf, status.getPath()));
            }
        }
    }
    if (this.fileMetaDataList.isEmpty())
    {
        // Reached when dirPath is a regular file, or a directory with no regular files in it.
        throw new MetadataException("fileMetaDataList is empty, path is not a dir or contains no files.");
    }
    // Cache schema info from the first file; assumes all files share one schema — TODO confirm.
    this.fields = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFields();
    this.columnCount = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFieldCount();
}
示例2: listFiles
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/** @return FileStatus for data files only. */
/**
 * Lists the data files (non-directories whose names match the Hadoop
 * "part-…-NNNNN" output naming scheme) directly under {@code path}.
 *
 * @param fs the filesystem to list from
 * @param path the directory to scan
 * @return FileStatus for data files only.
 * @throws IOException if the directory cannot be listed
 */
private FileStatus[] listFiles(FileSystem fs, Path path) throws IOException {
  FileStatus[] fileStatuses = fs.listStatus(path);
  // Typed list instead of a raw ArrayList: avoids unchecked casts at the return site.
  ArrayList<FileStatus> files = new ArrayList<FileStatus>();
  // Group 1 captures the five-digit partition number, e.g. "part-m-00000".
  Pattern patt = Pattern.compile("part.*-([0-9][0-9][0-9][0-9][0-9]).*");
  for (FileStatus fstat : fileStatuses) {
    String fname = fstat.getPath().getName();
    // isDir() retained for compatibility with pre-2.x Hadoop.
    if (!fstat.isDir()) {
      Matcher mat = patt.matcher(fname);
      if (mat.matches()) {
        files.add(fstat);
      }
    }
  }
  return files.toArray(new FileStatus[files.size()]);
}
示例3: ensureEmptyWriteDir
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/** Create the directory where we'll write our test files to; and
* make sure it has no files in it.
*/
/** Create the directory where we'll write our test files to; and
 * make sure it has no files in it.
 */
private void ensureEmptyWriteDir() throws IOException {
  FileSystem localFs = FileSystem.getLocal(getConf());
  Path dir = getWritePath();
  localFs.mkdirs(dir);
  // Sweep any residue from a previous run; subdirectories are unexpected and fatal.
  for (FileStatus entry : localFs.listStatus(dir)) {
    if (entry.isDir()) {
      fail("setUp(): Write directory " + dir
          + " contains subdirectories");
    }
    LOG.debug("setUp(): Removing " + entry.getPath());
    boolean deleted = localFs.delete(entry.getPath(), false);
    if (!deleted) {
      fail("setUp(): Could not delete residual file " + entry.getPath());
    }
  }
  if (!localFs.exists(dir)) {
    fail("setUp: Could not create " + dir);
  }
}
示例4: getFileBlockLocations
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Reports block locations for a file, substituting an empty (host-less)
 * location for the first block of the designated "missing blocks" file.
 */
@Override
public BlockLocation[] getFileBlockLocations(
    FileStatus stat, long start, long len) throws IOException {
  // Directories have no block locations.
  if (stat.isDir()) {
    return null;
  }
  System.out.println("File " + stat.getPath());
  String pathName = stat.getPath().toUri().getPath();
  BlockLocation[] locations = super.getFileBlockLocations(stat, start, len);
  if (pathName.equals(fileWithMissingBlocks)) {
    System.out.println("Returning missing blocks for " + fileWithMissingBlocks);
    // Replace the first location with one carrying no hosts/names, simulating a missing block.
    locations[0] = new HdfsBlockLocation(new BlockLocation(new String[0],
        new String[0], locations[0].getOffset(), locations[0].getLength()), null);
  }
  return locations;
}
示例5: getStoreFiles
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
* Returns all files belonging to the given region directory. Could return an
* empty list.
*
* @param fs The file system reference.
* @param regionDir The region directory to scan.
* @return The list of files found.
* @throws IOException When scanning the files fails.
*/
/**
 * Returns all files belonging to the given region directory. Could return an
 * empty list.
 *
 * @param fs The file system reference.
 * @param regionDir The region directory to scan.
 * @return The list of files found.
 * @throws IOException When scanning the files fails.
 */
static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
    throws IOException {
  List<Path> storeFiles = new ArrayList<Path>();
  // Only look inside subdirectories of the region (the column-family dirs).
  FileStatus[] familyDirs = fs.listStatus(regionDir, new FSUtils.DirFilter(fs));
  for (FileStatus familyDir : familyDirs) {
    for (FileStatus entry : fs.listStatus(familyDir.getPath())) {
      if (!entry.isDir()) {
        storeFiles.add(entry.getPath());
      }
    }
  }
  return storeFiles;
}
示例6: create
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Wraps {@code status} as a DotDrillFile if it is a regular file matching one
 * of the known dot-drill types; returns null otherwise.
 */
public static DotDrillFile create(DrillFileSystem fs, FileStatus status){
  // Directories can never be dot-drill files; the check is loop-invariant,
  // so do it once instead of once per enum value.
  if (status.isDir()) {
    return null;
  }
  for (DotDrillType d : DotDrillType.values()) {
    if (d.matches(status)) {
      return new DotDrillFile(fs, status, d);
    }
  }
  return null;
}
示例7: addRecursiveStatus
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Depth-first expansion of {@code parent}: plain files are appended to
 * {@code listToFill}; directories are globbed one level and recursed into.
 */
private void addRecursiveStatus(FileStatus parent, List<FileStatus> listToFill) throws IOException {
  // Base case: a plain file is collected directly.
  if (!parent.isDir()) {
    listToFill.add(parent);
    return;
  }
  Path pattern = new Path(parent.getPath(), "*");
  for (FileStatus child : underlyingFs.globStatus(pattern, new DrillPathFilter())) {
    if (child.isDir()) {
      addRecursiveStatus(child, listToFill);
    } else {
      listToFill.add(child);
    }
  }
}
示例8: getNextPartition
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
* Returns the greatest partition number available for appending, for data
* files in targetDir.
*/
/**
 * Returns the greatest partition number available for appending, for data
 * files in targetDir.
 */
private int getNextPartition(FileSystem fs, Path targetDir)
    throws IOException {
  int nextPartition = 0;
  FileStatus[] existing = fs.listStatus(targetDir);
  if (existing != null) {
    for (FileStatus entry : existing) {
      if (entry.isDir()) {
        continue;
      }
      Matcher mat = DATA_PART_PATTERN.matcher(entry.getPath().getName());
      if (mat.matches()) {
        // One past the highest partition number seen so far.
        int part = Integer.parseInt(mat.group(1));
        nextPartition = Math.max(nextPartition, part + 1);
      }
    }
  }
  if (nextPartition > 0) {
    LOG.info("Using found partition " + nextPartition);
  }
  return nextPartition;
}
示例9: getPlan
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException {
SqlIdentifier from = ((SqlShowFiles) sqlNode).getDb();
DrillFileSystem fs = null;
String defaultLocation = null;
String fromDir = "./";
SchemaPlus defaultSchema = context.getNewDefaultSchema();
SchemaPlus drillSchema = defaultSchema;
// Show files can be used without from clause, in which case we display the files in the default schema
if (from != null) {
// We are not sure if the full from clause is just the schema or includes table name,
// first try to see if the full path specified is a schema
drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names);
if (drillSchema == null) {
// Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause.
drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1));
fromDir = fromDir + from.names.get((from.names.size() - 1));
}
if (drillSchema == null) {
throw UserException.validationError()
.message("Invalid FROM/IN clause [%s]", from.toString())
.build(logger);
}
}
WorkspaceSchema wsSchema;
try {
wsSchema = (WorkspaceSchema) drillSchema.unwrap(AbstractSchema.class).getDefaultSchema();
} catch (ClassCastException e) {
throw UserException.validationError()
.message("SHOW FILES is supported in workspace type schema only. Schema [%s] is not a workspace schema.",
SchemaUtilites.getSchemaPath(drillSchema))
.build(logger);
}
// Get the file system object
fs = wsSchema.getFS();
// Get the default path
defaultLocation = wsSchema.getDefaultLocation();
List<ShowFilesCommandResult> rows = new ArrayList<>();
for (FileStatus fileStatus : fs.list(false, new Path(defaultLocation, fromDir))) {
ShowFilesCommandResult result = new ShowFilesCommandResult(fileStatus.getPath().getName(), fileStatus.isDir(),
!fileStatus.isDir(), fileStatus.getLen(),
fileStatus.getOwner(), fileStatus.getGroup(),
fileStatus.getPermission().toString(),
fileStatus.getAccessTime(), fileStatus.getModificationTime());
rows.add(result);
}
return DirectPlan.createDirectPlan(context.getCurrentEndpoint(), rows.iterator(), ShowFilesCommandResult.class);
}
示例10: getFileType
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
* @return the type of the file represented by p (or the files in p, if a
* directory)
*/
/**
 * Determines the type of the file at {@code p} by reading its magic number.
 * If {@code p} is a directory, a visible child file (one that is not a
 * directory and does not start with "_") is examined instead.
 *
 * @param conf Hadoop configuration used to resolve the filesystem
 * @param p path to a file or directory
 * @return the type of the file represented by p (or the files in p, if a
 * directory); {@code FileType.UNKNOWN} when the path is missing, empty, or
 * has no visible child files
 * @throws IOException if the filesystem cannot be accessed
 */
public static FileType getFileType(Configuration conf, Path p)
    throws IOException {
  FileSystem fs = p.getFileSystem(conf);
  try {
    FileStatus stat = fs.getFileStatus(p);
    if (null == stat) {
      // Couldn't get the item.
      LOG.warn("Input path " + p + " does not exist");
      return FileType.UNKNOWN;
    }
    if (stat.isDir()) {
      FileStatus[] subitems = fs.listStatus(p);
      if (subitems == null || subitems.length == 0) {
        LOG.warn("Input path " + p + " contains no files");
        return FileType.UNKNOWN; // empty dir.
      }
      // Pick a visible child file to examine instead (skip directories and
      // "_"-prefixed entries such as _SUCCESS/_logs).
      boolean foundChild = false;
      for (FileStatus subitem : subitems) {
        if (!subitem.isDir() && !subitem.getPath().getName().startsWith("_")) {
          stat = subitem;
          foundChild = true;
          break; // This item is a visible file. Check it.
        }
      }
      if (!foundChild) {
        stat = null; // Couldn't find a reasonable candidate.
      }
    }
    if (null == stat) {
      // Fixed stale message: it referenced a former method name
      // ("isSequenceFiles()") and a boolean result this method never returns.
      LOG.warn("null FileStatus object in getFileType(); "
          + "assuming UNKNOWN.");
      return FileType.UNKNOWN;
    }
    Path target = stat.getPath();
    return fromMagicNumber(target, conf);
  } catch (FileNotFoundException fnfe) {
    LOG.warn("Input path " + p + " does not exist");
    return FileType.UNKNOWN; // doesn't exist!
  }
}
示例11: toResult
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
@Override
public List<ShowFilesCommandResult> toResult(String sql, SqlNode sqlNode) throws ValidationException, RelConversionException,
IOException, ForemanSetupException {
SqlIdentifier from = ((SqlShowFiles) sqlNode).getDb();
List<ShowFilesCommandResult> rows = new ArrayList<>();
FileSystemWrapper fs = null;
String defaultLocation = null;
String fromDir = "./";
SchemaPlus schemaPlus = defaultSchema;
// Show files can be used without from clause, in which case we display the files in the default schema
if (from != null) {
// We are not sure if the full from clause is just the schema or includes table name,
// first try to see if the full path specified is a schema
schemaPlus = SchemaUtilities.findSchema(defaultSchema, from.names);
if (schemaPlus == null) {
// Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause.
schemaPlus = SchemaUtilities.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1));
fromDir = fromDir + from.names.get((from.names.size() - 1));
}
if (schemaPlus == null) {
throw UserException.validationError()
.message("Invalid FROM/IN clause [%s]", from.toString())
.build(logger);
}
}
SimpleSchema schema;
try {
schema = schemaPlus.unwrap(SimpleSchema.class);
} catch (ClassCastException e) {
throw UserException.validationError()
.message("SHOW FILES is supported in workspace type schema only. Schema [%s] is not a workspace schema.",
SchemaUtilities.getSchemaPath(schemaPlus))
.build(logger);
}
// Get the file system object
fs = schema.getFileSystem();
// Get the default path
defaultLocation = schema.getDefaultLocation();
for (FileStatus fileStatus : fs.list(new Path(defaultLocation, fromDir), false)) {
ShowFilesCommandResult result = new ShowFilesCommandResult(fileStatus.getPath().getName(), fileStatus.isDir(),
!fileStatus.isDirectory(), fileStatus.getLen(),
fileStatus.getOwner(), fileStatus.getGroup(),
fileStatus.getPermission().toString(),
fileStatus.getAccessTime(), fileStatus.getModificationTime());
rows.add(result);
}
return rows;
}
示例12: setPermission
import org.apache.hadoop.fs.FileStatus; //导入方法依赖的package包/类
/**
* Set the file permission of the path of the given fileStatus. If the path
* is a directory, apply permission recursively to all subdirectories and
* files.
*
* @param fs the filesystem
* @param fileStatus containing the path
* @param permission the permission
* @throws java.io.IOException
*/
/**
 * Set the file permission of the path of the given fileStatus. If the path
 * is a directory, apply permission recursively to all subdirectories and
 * files.
 *
 * @param fs the filesystem
 * @param fileStatus containing the path
 * @param permission the permission
 * @throws java.io.IOException
 */
private void setPermission(FileSystem fs, FileStatus fileStatus,
    FsPermission permission) throws IOException {
  // Depth-first: handle children before the directory itself.
  if (fileStatus.isDir()) {
    FileStatus[] children = fs.listStatus(fileStatus.getPath());
    for (FileStatus child : children) {
      setPermission(fs, child, permission);
    }
  }
  fs.setPermission(fileStatus.getPath(), permission);
}