本文整理汇总了Java中org.apache.flink.core.fs.FileStatus.isDir方法的典型用法代码示例。如果您正苦于以下问题:Java FileStatus.isDir方法的具体用法?Java FileStatus.isDir怎么用?Java FileStatus.isDir使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flink.core.fs.FileStatus
的用法示例。
在下文中一共展示了FileStatus.isDir方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getFiles
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Collects every file that will contribute to the input splits.
 *
 * @return the regular files under {@code filePath} (non-recursive), or the file itself
 * @throws IOException if the file system cannot be accessed
 */
protected List<FileStatus> getFiles() throws IOException {
	final List<FileStatus> result = new ArrayList<FileStatus>();
	final FileSystem fileSystem = this.filePath.getFileSystem();
	final FileStatus rootStatus = fileSystem.getFileStatus(this.filePath);

	if (!rootStatus.isDir()) {
		// A single regular file was given.
		result.add(rootStatus);
		return result;
	}

	// The path is a directory: enumerate its direct children, skipping sub-directories.
	for (FileStatus child : fileSystem.listStatus(this.filePath)) {
		if (!child.isDir()) {
			result.add(child);
		}
	}
	return result;
}
示例2: getFileStats
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Computes file statistics for the given path, reusing {@code cachedStats} when the
 * files have not been modified since the cache was built.
 *
 * @param cachedStats previously computed statistics, or {@code null} if none exist
 * @param filePath the file or directory to analyze
 * @param fs the file system hosting {@code filePath}
 * @param files collects the enumerated file statuses (mutated in place)
 * @return the cached statistics if still valid, otherwise freshly computed ones
 * @throws IOException if the file status cannot be retrieved
 */
protected FileBaseStatistics getFileStats(FileBaseStatistics cachedStats, Path filePath, FileSystem fs,
		ArrayList<FileStatus> files) throws IOException {
	// Inspect the target; a directory is expanded (non-recursively) into 'files'.
	final FileStatus rootStatus = fs.getFileStatus(filePath);
	long totalLength = 0;
	if (rootStatus.isDir()) {
		totalLength += addFilesInDir(rootStatus.getPath(), files, false);
	} else {
		files.add(rootStatus);
		testForUnsplittable(rootStatus);
		totalLength += rootStatus.getLen();
	}

	// The newest modification time across all files decides cache validity.
	long latestModTime = 0;
	for (FileStatus candidate : files) {
		latestModTime = Math.max(candidate.getModificationTime(), latestModTime);
	}
	if (cachedStats != null && latestModTime <= cachedStats.getLastModificationTime()) {
		return cachedStats;
	}

	// A non-positive total length means the size is unknown.
	if (totalLength <= 0) {
		totalLength = BaseStatistics.SIZE_UNKNOWN;
	}
	return new FileBaseStatistics(latestModTime, totalLength, BaseStatistics.AVG_RECORD_BYTES_UNKNOWN);
}
示例3: addFilesInDir
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Enumerates all accepted files under {@code path}, descending into sub-directories
 * when {@code enumerateNestedFiles} is set.
 *
 * @param path the directory to enumerate
 * @param files collects the accepted file statuses (mutated in place)
 * @param logExcludedFiles whether filter-rejected entries are logged at DEBUG level
 * @return the total length in bytes of all accepted files
 * @throws IOException if listing the directory fails
 */
private long addFilesInDir(Path path, List<FileStatus> files, boolean logExcludedFiles)
		throws IOException {
	final FileSystem fs = path.getFileSystem();
	long length = 0;

	for (FileStatus entry : fs.listStatus(path)) {
		if (entry.isDir()) {
			if (acceptFile(entry) && enumerateNestedFiles) {
				length += addFilesInDir(entry.getPath(), files, logExcludedFiles);
			} else if (logExcludedFiles && LOG.isDebugEnabled()) {
				// Parameterized logging avoids the string concatenation cost when DEBUG is off.
				LOG.debug("Directory {} did not pass the file-filter and is excluded.", entry.getPath());
			}
		} else {
			if (acceptFile(entry)) {
				files.add(entry);
				length += entry.getLen();
				testForUnsplittable(entry);
			} else if (logExcludedFiles && LOG.isDebugEnabled()) {
				// Bug fix: this branch handles plain files, so the message must say "File",
				// not "Directory" (copy-paste defect in the original).
				LOG.debug("File {} did not pass the file-filter and is excluded.", entry.getPath());
			}
		}
	}
	return length;
}
示例4: copy
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Recursively copies the file or directory at {@code sourcePath} to {@code targetPath},
 * marking each copied file as executable when {@code executable} is true.
 * Existing targets are left untouched; single-file copy failures are logged, not thrown.
 */
public static void copy(Path sourcePath, Path targetPath, boolean executable) throws IOException {
// TODO rewrite this to make it participate in the closable registry and the lifecycle of a task.
// we unwrap the file system to get raw streams without safety net
FileSystem sFS = FileSystem.getUnguardedFileSystem(sourcePath.toUri());
FileSystem tFS = FileSystem.getUnguardedFileSystem(targetPath.toUri());
// Only copy when the target does not exist yet (matches NO_OVERWRITE below).
if (!tFS.exists(targetPath)) {
if (sFS.getFileStatus(sourcePath).isDir()) {
// Source is a directory: create the target and copy each child recursively.
tFS.mkdirs(targetPath);
FileStatus[] contents = sFS.listStatus(sourcePath);
for (FileStatus content : contents) {
String distPath = content.getPath().toString();
if (content.isDir()) {
// Strip a trailing slash so that lastIndexOf("/") below isolates the child's own name.
if (distPath.endsWith("/")) {
distPath = distPath.substring(0, distPath.length() - 1);
}
}
// Append the child's last path segment to the target directory path.
String localPath = targetPath.toString() + distPath.substring(distPath.lastIndexOf("/"));
copy(content.getPath(), new Path(localPath), executable);
}
} else {
// Source is a single file: stream its bytes to the target; streams auto-close.
try (FSDataOutputStream lfsOutput = tFS.create(targetPath, FileSystem.WriteMode.NO_OVERWRITE); FSDataInputStream fsInput = sFS.open(sourcePath)) {
IOUtils.copyBytes(fsInput, lfsOutput);
//noinspection ResultOfMethodCallIgnored
new File(targetPath.toString()).setExecutable(executable);
} catch (IOException ioe) {
// Best-effort: a failed file copy is logged but does not fail the caller.
LOG.error("could not copy file to local file cache.", ioe);
}
}
}
}
示例5: getFiles
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Gathers all files that are involved in the input splits.
 *
 * @return the direct regular-file children of {@code filePath}, or the file itself
 * @throws IOException if the file system cannot be accessed
 */
protected List<FileStatus> getFiles() throws IOException {
	final List<FileStatus> collected = new ArrayList<FileStatus>();
	final FileSystem fileSystem = this.filePath.getFileSystem();
	final FileStatus root = fileSystem.getFileStatus(this.filePath);

	if (root.isDir()) {
		// Directory input: keep every direct child that is a regular file.
		for (FileStatus candidate : fileSystem.listStatus(this.filePath)) {
			if (!candidate.isDir()) {
				collected.add(candidate);
			}
		}
	} else {
		collected.add(root);
	}
	return collected;
}
示例6: openAllInputs
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Creates {@link InputFormat}s from the given class for the specified file(s),
 * optionally initializing each one with the given {@link Configuration}.
 *
 * @param <T> the record type produced by the InputFormat
 * @param inputFormatClass the class of the InputFormat to instantiate
 * @param path the path of the file, or of the directory containing the splits
 * @param configuration optional configuration of the InputFormat
 * @return one created {@link InputFormat} per file under the specified path
 * @throws IOException if an I/O error occurred while accessing the files or
 *         initializing the InputFormat
 */
@SuppressWarnings("unchecked")
public static <T, F extends FileInputFormat<T>> List<F> openAllInputs(
		Class<F> inputFormatClass, String path, Configuration configuration) throws IOException {
	final Path nephelePath = new Path(path);
	final FileSystem fs = nephelePath.getFileSystem();

	// A plain file yields exactly one format.
	if (!fs.getFileStatus(nephelePath).isDir()) {
		return Arrays.asList(openInput(inputFormatClass, path, configuration));
	}

	// A directory yields one format per contained entry.
	final List<F> formats = new ArrayList<F>();
	for (FileStatus entry : fs.listStatus(nephelePath)) {
		formats.add(openInput(inputFormatClass, entry.getPath().toString(), configuration));
	}
	return formats;
}
示例7: open
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Opens the S3 object behind {@code f} for reading.
 *
 * @param f the path to open; must exist and must not be a directory
 * @throws IOException if {@code f} is a directory or does not map to a bucket/object pair
 */
@Override
public FSDataInputStream open(final Path f) throws IOException {
	// Throws FileNotFoundException when f does not exist.
	final FileStatus status = getFileStatus(f);

	// Only regular objects can be streamed.
	if (status.isDir()) {
		throw new IOException("Cannot open " + f.toUri() + " because it is a directory");
	}

	final S3BucketObjectPair pair = this.directoryStructure.toBucketObjectPair(f);
	if (!pair.hasBucket() || !pair.hasObject()) {
		throw new IOException(f.toUri() + " cannot be opened");
	}
	return new S3DataInputStream(this.s3Client, pair.getBucket(), pair.getObject());
}
示例8: loadSavepointWithHandle
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Loads the savepoint at the specified path and returns it together with the
 * stream handle pointing at the savepoint's meta data file.
 *
 * @param savepointFileOrDirectory path to the parent savepoint directory or the meta data file
 * @param classLoader class loader used to resolve serialized classes from legacy savepoint formats
 * @return the loaded savepoint and a handle to its metadata
 * @throws IOException failures during load are forwarded
 */
public static Tuple2<Savepoint, StreamStateHandle> loadSavepointWithHandle(
		String savepointFileOrDirectory,
		ClassLoader classLoader) throws IOException {

	checkNotNull(savepointFileOrDirectory, "savepointFileOrDirectory");
	checkNotNull(classLoader, "classLoader");

	Path path = new Path(savepointFileOrDirectory);
	LOG.info("Loading savepoint from {}", path);
	final FileSystem fs = FileSystem.get(path.toUri());

	// When pointed at a directory, resolve the well-known meta data file inside it.
	if (fs.getFileStatus(path).isDir()) {
		final Path candidatePath = new Path(path, SAVEPOINT_METADATA_FILE);
		if (!fs.exists(candidatePath)) {
			throw new IOException("Cannot find meta data file in directory " + path
				+ ". Please try to load the savepoint directly from the meta data file "
				+ "instead of the directory.");
		}
		path = candidatePath;
		LOG.info("Using savepoint file in {}", path);
	}

	// Deserialize the savepoint; the leading magic number selects the versioned serializer.
	final Savepoint savepoint;
	try (DataInputStream dis = new DataInputViewStreamWrapper(fs.open(path))) {
		if (dis.readInt() != MAGIC_NUMBER) {
			throw new RuntimeException("Unexpected magic number. This can have multiple reasons: " +
				"(1) You are trying to load a Flink 1.0 savepoint, which is not supported by this " +
				"version of Flink. (2) The file you were pointing to is not a savepoint at all. " +
				"(3) The savepoint file has been corrupted.");
		}
		final int version = dis.readInt();
		final SavepointSerializer<?> serializer = SavepointSerializers.getSerializer(version);
		savepoint = serializer.deserialize(dis, classLoader);
	}

	// Construct the handle to the metadata file; the size is looked up best-effort.
	long size = 0;
	try {
		size = fs.getFileStatus(path).getLen();
	} catch (Exception ignored) {
		// We don't know the size, but that must not fail the savepoint loading.
	}
	return new Tuple2<>(savepoint, new FileStateHandle(path, size));
}
示例9: getCopyTasks
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Recursively walks {@code p} and appends one {@link FileCopyTask} per regular file,
 * using {@code rel} as the relative target prefix.
 */
private static void getCopyTasks(Path p, String rel, List<FileCopyTask> tasks) throws IOException {
	// A null listing means there is nothing to copy under this path.
	final FileStatus[] children = p.getFileSystem().listStatus(p);
	if (children == null) {
		return;
	}
	for (FileStatus child : children) {
		if (!child.isDir()) {
			// Regular file: schedule a copy task under the current relative prefix.
			final Path childPath = child.getPath();
			tasks.add(new FileCopyTask(childPath, rel + childPath.getName()));
		} else {
			// Directory: recurse with the child's name appended to the prefix.
			getCopyTasks(child.getPath(), rel + child.getPath().getName() + "/", tasks);
		}
	}
}
示例10: copy
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Recursively copies the file or directory at {@code sourcePath} to {@code targetPath},
 * marking each copied file as executable when {@code executable} is true.
 * Existing targets are left untouched; single-file copy failures are logged, not thrown.
 *
 * @throws IOException if file system access or directory enumeration fails
 */
public static void copy(Path sourcePath, Path targetPath, boolean executable) throws IOException {
	FileSystem sFS = sourcePath.getFileSystem();
	FileSystem tFS = targetPath.getFileSystem();
	if (!tFS.exists(targetPath)) {
		if (sFS.getFileStatus(sourcePath).isDir()) {
			// Directory: create the target and copy each child recursively.
			tFS.mkdirs(targetPath);
			FileStatus[] contents = sFS.listStatus(sourcePath);
			for (FileStatus content : contents) {
				String distPath = content.getPath().toString();
				// Strip a trailing slash so lastIndexOf("/") isolates the child's own name.
				if (content.isDir() && distPath.endsWith("/")) {
					distPath = distPath.substring(0, distPath.length() - 1);
				}
				String localPath = targetPath.toString() + distPath.substring(distPath.lastIndexOf("/"));
				copy(content.getPath(), new Path(localPath), executable);
			}
		} else {
			// Bug fix: the original never closed either stream (resource leak);
			// try-with-resources guarantees both are released even on failure.
			try (FSDataOutputStream lfsOutput = tFS.create(targetPath, false);
					FSDataInputStream fsInput = sFS.open(sourcePath)) {
				IOUtils.copyBytes(fsInput, lfsOutput);
				//noinspection ResultOfMethodCallIgnored
				new File(targetPath.toString()).setExecutable(executable);
			} catch (IOException ioe) {
				// Best-effort: a failed file copy is logged but does not fail the caller.
				LOG.error("could not copy file to local file cache.", ioe);
			}
		}
	}
}
示例11: listEligibleFiles
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Returns the files under {@code path} that have not yet been processed.
 *
 * @param fileSystem the filesystem where the monitored directory resides
 * @param path the directory to scan
 * @return a map from file path to its status, empty when the path is missing or
 *         a transient listing error occurs
 */
private Map<Path, FileStatus> listEligibleFiles(FileSystem fileSystem, Path path) throws IOException {
	FileStatus[] statuses;
	try {
		statuses = fileSystem.listStatus(path);
	} catch (IOException e) {
		// Files may be moved while their status is being listed;
		// postpone the eligibility check in that case.
		return Collections.emptyMap();
	}
	if (statuses == null) {
		LOG.warn("Path does not exist: {}", path);
		return Collections.emptyMap();
	}

	final Map<Path, FileStatus> eligible = new HashMap<>();
	for (FileStatus status : statuses) {
		if (status.isDir()) {
			// Descend only when nested enumeration is enabled and the directory passes the filter.
			if (format.getNestedFileEnumeration() && format.acceptFile(status)) {
				eligible.putAll(listEligibleFiles(fileSystem, status.getPath()));
			}
		} else if (!shouldIgnore(status.getPath(), status.getModificationTime())) {
			eligible.put(status.getPath(), status);
		}
	}
	return eligible;
}
示例12: delete
import org.apache.flink.core.fs.FileStatus; //导入方法依赖的package包/类
/**
 * Deletes the file, object, or (bucket-)directory at {@code f}. Non-empty
 * directories require {@code recursive}; the root directory itself is never deleted.
 * AmazonClientException failures are wrapped into IOException.
 */
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
try {
final FileStatus fileStatus = getFileStatus(f); // Will throw a FileNotFoundException if f is invalid
final S3BucketObjectPair bop = this.directoryStructure.toBucketObjectPair(f);
if (fileStatus.isDir()) {
boolean retVal = false;
final FileStatus[] dirContent = listStatus(f);
if (dirContent.length > 0) {
// Directory is not empty
if (!recursive) {
throw new IOException("Found non-empty directory " + f
+ " while performing non-recursive delete");
}
// Depth-first: remove every child before removing the directory itself.
for (final FileStatus entry : dirContent) {
if (delete(entry.getPath(), true)) {
retVal = true;
}
}
}
// Now the directory is empty
if (!bop.hasBucket()) {
// This is the root directory, do not delete this
return retVal;
}
if (!bop.hasObject()) {
// This is a real bucket
this.s3Client.deleteBucket(bop.getBucket());
} else {
// This directory is actually represented by an object in S3
this.s3Client.deleteObject(bop.getBucket(), bop.getObject());
}
} else {
// This is a file
this.s3Client.deleteObject(bop.getBucket(), bop.getObject());
}
} catch (AmazonClientException e) {
// Wrap AWS SDK failures so callers only see the IOException contract.
throw new IOException(StringUtils.stringifyException(e));
}
return true;
}