This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileStatus.isFile. If you are wondering what FileStatus.isFile does, how to call it, or where to find real-world usages, the curated examples below should help. You can also read further about the enclosing class, org.apache.hadoop.fs.FileStatus.
Below are 15 code examples of FileStatus.isFile, sorted by popularity by default.
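Before the examples, here is a minimal, self-contained sketch of the typical check (the class name and path are placeholders of ours; note that getFileStatus throws FileNotFoundException if the path does not exist):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path p = new Path("/tmp/example"); // placeholder path
    FileSystem fs = p.getFileSystem(conf);
    FileStatus status = fs.getFileStatus(p);
    if (status.isFile()) {
      System.out.println(p + " is a file of " + status.getLen() + " bytes");
    } else if (status.isDirectory()) {
      System.out.println(p + " is a directory");
    }
  }
}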
Example 1: convertInputToPaths
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private List<Path> convertInputToPaths() throws IOException {
  List<String> inputs = args.getInput();
  List<Path> paths = new ArrayList<>(inputs.size());
  for (String input : inputs) {
    Path p = new Path(input);
    FileSystem fs = p.getFileSystem(conf);
    FileStatus fstat = fs.getFileStatus(p);
    if (fstat.isFile()) {
      paths.add(p);
    } else if (fstat.isDirectory()) {
      for (FileStatus child : fs.listStatus(p)) {
        if (child.isFile()) {
          paths.add(child.getPath());
        }
      }
    } else {
      throw new IllegalStateException("Unable to handle that which is not file nor directory: " + p);
    }
  }
  return paths;
}
Example 2: findFileAbsPath
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public static String findFileAbsPath(String filePath, String target) {
  // strip a trailing slash so the path concatenation below stays clean
  if (filePath.endsWith("/")) {
    filePath = filePath.substring(0, filePath.length() - 1);
  }
  try {
    // fs is a FileSystem field of the enclosing class
    FileStatus[] status = fs.listStatus(new Path(filePath));
    for (int i = status.length - 1; i >= 0; i--) {
      FileStatus file = status[i];
      if (file.isDirectory()) {
        // depth-first search of subdirectories
        String res = findFileAbsPath(filePath + "/" + file.getPath().getName(), target);
        if (res != null) return res;
      }
      if (file.isFile() && file.getPath().getName().equals(target)) {
        return filePath + "/" + file.getPath().getName();
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
  return null;
}
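For comparison, FileSystem#listFiles(path, true) performs the recursive walk itself and yields only files, so the same lookup can be written without manual recursion. This is a sketch assuming the same static fs field used above; the method name is ours, and it additionally needs org.apache.hadoop.fs.RemoteIterator and org.apache.hadoop.fs.LocatedFileStatus:
public static String findFileAbsPathIterative(String dirPath, String target) throws IOException {
  // listFiles(path, true) walks the whole tree and returns files only,
  // so isFile() is guaranteed true for every returned status
  RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(dirPath), true);
  while (it.hasNext()) {
    LocatedFileStatus file = it.next();
    if (file.getPath().getName().equals(target)) {
      return file.getPath().toString();
    }
  }
  return null;
}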
Example 3: listFiles
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public List<Path> listFiles(Path dirPath)
{
  List<Path> files = new ArrayList<>();
  if (!getFS().isPresent()) {
    throw new FileSystemNotFoundException("No file system is configured for " + dirPath);
  }
  FileStatus[] fileStatuses = new FileStatus[0];
  try {
    fileStatuses = getFS().get().listStatus(dirPath);
  }
  catch (IOException e) {
    log.error(e);
  }
  for (FileStatus f : fileStatuses) {
    if (f.isFile()) {
      files.add(f.getPath());
    }
  }
  return files;
}
Example 4: doBuildListing
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Collect the list of <sourceRelativePath, sourceFileStatus> entries to be copied and write them to the sequence
 * file. In essence, any file or directory that needs to be copied or sync-ed is written as an entry to the sequence
 * file, with the possible exception of the source root: when either the -update (sync) or -overwrite switch is
 * specified, and if the source root is a directory, then the source root entry is not written to the sequence file,
 * because only the contents of the source directory need to be copied in this case. See
 * {@link com.hotels.bdp.circustrain.s3mapreducecp.util.ConfigurationUtil#getRelativePath} for how the relative path
 * is computed. See the computeSourceRootPath method for how the root path of the source is computed.
 *
 * @param fileListWriter sequence file writer that the copy listing is written to
 * @param options options for the copy, including the source paths
 * @throws IOException
 */
@VisibleForTesting
public void doBuildListing(SequenceFile.Writer fileListWriter, S3MapReduceCpOptions options) throws IOException {
  List<Path> globbedPaths = new ArrayList<>(options.getSources().size());
  for (Path sourcePath : options.getSources()) {
    FileSystem fs = sourcePath.getFileSystem(getConf());
    FileStatus sourceFileStatus = fs.getFileStatus(sourcePath);
    if (sourceFileStatus.isFile()) {
      LOG.debug("Adding path {}", sourceFileStatus.getPath());
      globbedPaths.add(sourceFileStatus.getPath());
    } else {
      FileStatus[] inputs = fs.globStatus(sourcePath);
      if (inputs != null && inputs.length > 0) {
        for (FileStatus onePath : inputs) {
          LOG.debug("Adding path {}", onePath.getPath());
          globbedPaths.add(onePath.getPath());
        }
      } else {
        throw new InvalidInputException("Source path " + sourcePath + " doesn't exist");
      }
    }
  }
  doBuildListing(fileListWriter, options, globbedPaths);
}
Example 5: delete
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Convenience method, so that we don't open a new connection when using this
 * method from within another method. Otherwise every API invocation incurs
 * the overhead of opening/closing a TCP connection.
 */
private boolean delete(FTPClient client, Path file, boolean recursive)
    throws IOException {
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  String pathName = absolute.toUri().getPath();
  try {
    FileStatus fileStat = getFileStatus(client, absolute);
    if (fileStat.isFile()) {
      return client.deleteFile(pathName);
    }
  } catch (FileNotFoundException e) {
    // the file is not there
    return false;
  }
  FileStatus[] dirEntries = listStatus(client, absolute);
  if (dirEntries != null && dirEntries.length > 0 && !recursive) {
    throw new IOException("Directory: " + file + " is not empty.");
  }
  for (FileStatus dirEntry : dirEntries) {
    delete(client, new Path(absolute, dirEntry.getPath()), recursive);
  }
  return client.removeDirectory(pathName);
}
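For context, the public delete(Path, boolean) entry point would wrap this helper roughly as follows; this is a sketch assuming connect() and disconnect(FTPClient) connection-management helpers on the same class:
@Override
public boolean delete(Path file, boolean recursive) throws IOException {
  // open one connection, delegate, and always release it
  FTPClient client = connect();
  try {
    return delete(client, file, recursive);
  } finally {
    disconnect(client);
  }
}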
Example 6: traverseNonEmptyDirectory
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void traverseNonEmptyDirectory(
    SequenceFile.Writer fileListWriter,
    FileStatus sourceStatus,
    Path sourcePathRoot,
    S3MapReduceCpOptions options)
    throws IOException {
  FileSystem sourceFS = sourcePathRoot.getFileSystem(getConf());
  Stack<FileStatus> pathStack = new Stack<>();
  pathStack.push(sourceStatus);
  while (!pathStack.isEmpty()) {
    for (FileStatus child : getChildren(sourceFS, pathStack.pop())) {
      if (child.isFile()) {
        // log the child actually being recorded, not the root
        LOG.debug("Recording source-path: {} for copy.", child.getPath());
        CopyListingFileStatus childCopyListingStatus = new CopyListingFileStatus(child);
        writeToFileListing(fileListWriter, childCopyListingStatus, sourcePathRoot, options);
      }
      if (isDirectoryAndNotEmpty(sourceFS, child)) {
        LOG.debug("Traversing non-empty source dir: {}", child.getPath());
        pathStack.push(child);
      }
    }
  }
}
Example 7: printMessage
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public void printMessage(String path) {
  System.out.println("\nprint result:");
  DistributedFileSystem distributedFileSystem = distributedFileSystem();
  try {
    FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(path));
    for (FileStatus fileStatus : fileStatuses) {
      System.out.println(fileStatus);
      if (fileStatus.isFile()) {
        FSDataInputStream fsDataInputStream = distributedFileSystem.open(fileStatus.getPath());
        // available() is not the file length and a bare read() may return fewer
        // bytes than requested; size the buffer from the FileStatus and readFully
        byte[] bs = new byte[(int) fileStatus.getLen()];
        fsDataInputStream.readFully(0, bs);
        fsDataInputStream.close();
        System.out.println(new String(bs));
      }
    }
  } catch (IOException e) {
    e.printStackTrace();
  } finally {
    close(distributedFileSystem);
  }
}
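For files too large to buffer whole, a streaming copy with Hadoop's IOUtils.copyBytes avoids allocating the full byte array; a sketch that could replace the read block above:
try (FSDataInputStream in = distributedFileSystem.open(fileStatus.getPath())) {
  // copy straight to stdout in 4 KB chunks; 'false' leaves System.out open
  org.apache.hadoop.io.IOUtils.copyBytes(in, System.out, 4096, false);
}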
Example 8: newMapTask
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
protected Callable<FileStatus[]> newMapTask(final String address) throws IOException {
  return new Callable<FileStatus[]>() {
    @Override
    public FileStatus[] call() throws Exception {
      // Only directories should be listed with a fork/join task
      final FileSystem fs = getDelegateFileSystem(address);
      FileStatus status = fs.getFileStatus(path);
      if (status.isFile()) {
        throw new FileNotFoundException("Directory not found: " + path);
      }
      FileStatus[] remoteStatuses = fs.listStatus(path);
      FileStatus[] statuses = new FileStatus[remoteStatuses.length];
      for (int i = 0; i < statuses.length; i++) {
        statuses[i] = fixFileStatus(address, remoteStatuses[i]);
      }
      return statuses;
    }
  };
}
Example 9: validateMapFileOutputContent
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void validateMapFileOutputContent(
    FileSystem fs, Path dir) throws IOException {
  // map output is a directory with index and data files
  Path expectedMapDir = new Path(dir, partFile);
  assert(fs.getFileStatus(expectedMapDir).isDirectory());
  FileStatus[] files = fs.listStatus(expectedMapDir);
  int fileCount = 0;
  boolean dataFileFound = false;
  boolean indexFileFound = false;
  for (FileStatus f : files) {
    if (f.isFile()) {
      ++fileCount;
      if (f.getPath().getName().equals(MapFile.INDEX_FILE_NAME)) {
        indexFileFound = true;
      } else if (f.getPath().getName().equals(MapFile.DATA_FILE_NAME)) {
        dataFileFound = true;
      }
    }
  }
  assert(fileCount > 0);
  assert(dataFileFound && indexFileFound);
}
Example 10: mkdir
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private boolean mkdir(Path f) throws IOException {
  try {
    FileStatus fileStatus = getFileStatus(f);
    if (fileStatus.isFile()) {
      throw new FileAlreadyExistsException(String.format(
          "Can't make directory for path '%s' since it is a file.", f));
    }
  } catch (FileNotFoundException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Making dir '" + f + "' in S3");
    }
    String key = pathToKey(f) + FOLDER_SUFFIX;
    store.storeEmptyFile(key);
  }
  return true;
}
Example 11: getExtensions
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public List<String> getExtensions() {
  final List<String> extensions = Lists.newArrayList();
  for (FileStatus fileStatus : statuses) {
    if (fileStatus.isFile()) {
      final String ext = FilenameUtils.getExtension(fileStatus.getPath().getName());
      if (ext != null && !ext.isEmpty()) {
        extensions.add(ext);
      }
    }
  }
  return extensions;
}
Example 12: scanHdfsMember
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void scanHdfsMember(Path hdfsPath, FileSystem hdfsFS)
    throws FileNotFoundException, IOException {
  FileStatus targetPathStatus = hdfsFS.getFileStatus(hdfsPath);
  // a plain file has no members: record it and stop
  if (targetPathStatus.isFile()) {
    fileStatusArry.add(targetPathStatus);
    return;
  }
  // record every member of the directory and recurse into subdirectories
  FileStatus[] memberArry = hdfsFS.listStatus(hdfsPath);
  for (FileStatus member : memberArry) {
    fileStatusArry.add(member);
    if (member.isDirectory()) {
      scanHdfsMember(member.getPath(), hdfsFS);
    }
  }
}
Example 13: checkFileStatus
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
static void checkFileStatus(ChPermissionStatus expected, FileStatus actual) {
  assertEquals(expected.getUserName(), actual.getOwner());
  assertEquals(expected.getGroupName(), actual.getGroup());
  FsPermission perm = expected.getPermission();
  if (actual.isFile() && expected.defaultPerm) {
    perm = perm.applyUMask(UMASK);
  }
  assertEquals(perm, actual.getPermission());
}
Example 14: children
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public Iterable<FileStatus> children(FileStatus root) {
  if (root.isFile()) {
    return ImmutableList.of();
  }
  try {
    FileStatus[] listStatus = fileSystem.listStatus(root.getPath());
    if (listStatus == null || listStatus.length == 0) {
      return ImmutableList.of();
    }
    return ImmutableList.copyOf(listStatus);
  } catch (IOException e) {
    // keep the IOException as the cause instead of swallowing it
    throw new CircusTrainException("Unable to list children for path: " + root.getPath(), e);
  }
}
Example 15: concat
import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public static void concat(String dir) throws IOException {
  String directory = NodeConfig.HDFS_PATH + dir;
  Configuration conf = new Configuration();
  DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(URI.create(directory), conf);
  try {
    FileStatus[] fileList = fs.listStatus(new Path(directory));
    if (fileList.length >= 2) {
      ArrayList<Path> srcs = new ArrayList<Path>(fileList.length);
      for (FileStatus fileStatus : fileList) {
        // pick plain files whose final partial block is less than half full;
        // the original bitmask (len & ~blockSize) only approximated this
        if (fileStatus.isFile()
            && fileStatus.getLen() % fileStatus.getBlockSize() < fileStatus.getBlockSize() / 2) {
          srcs.add(fileStatus.getPath());
        }
      }
      if (srcs.size() >= 2) {
        // concat() appends the remaining sources onto the first path, in order
        Path appended = srcs.get(0);
        Path[] sources = srcs.subList(1, srcs.size()).toArray(new Path[srcs.size() - 1]);
        fs.concat(appended, sources);
        Logger.println("concat to : " + appended.getName());
        Logger.println(Arrays.toString(sources));
      }
    }
  } finally {
    // close the file system even when there was nothing to merge
    fs.close();
  }
}