This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.HdfsFileStatus.isDir. If you are unsure what HdfsFileStatus.isDir does, how to use it, or where to find examples, the curated snippets below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.HdfsFileStatus.
Nine code examples of HdfsFileStatus.isDir are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
Example 1: getNfs3FileAttrFromFileStatus
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class the method depends on
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  /**
   * Some 32-bit Linux clients have a problem with the 64-bit fileId: the
   * client appears to take only the lower 32 bits of the fileId and treat
   * them as a signed int. When bit 32 is 1, the client considers the
   * fileId invalid.
   */
  NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
  fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
  // A directory's link count is its child count plus two ("." and "..").
  int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
  long size = (fileType == NfsFileType.NFSDIR)
      ? getDirSize(fs.getChildrenNum()) : fs.getLen();
  return new Nfs3FileAttributes(fileType, nlink,
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
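getDirSize(...) is a helper defined alongside this method (not shown here) that derives a nominal byte size for a directory from its child count; Example 5 below uses it for the same purpose.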
Example 2: lostFoundInit
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class the method depends on
private void lostFoundInit(DFSClient dfs) {
  lfInited = true;
  try {
    String lfName = "/lost+found";
    final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
    if (lfStatus == null) { // does not exist: create it
      lfInitedOk = dfs.mkdirs(lfName, null, true);
      lostFound = lfName;
    } else if (!lfStatus.isDir()) { // exists but is not a directory
      LOG.warn("Cannot use /lost+found: a regular file with this name exists.");
      lfInitedOk = false;
    } else { // exists and is a directory
      lostFound = lfName;
      lfInitedOk = true;
    }
  } catch (Exception e) {
    LOG.warn("Failed to initialize /lost+found", e);
    lfInitedOk = false;
  }
  if (lostFound == null) {
    LOG.warn("Cannot initialize /lost+found.");
    lfInitedOk = false;
    internalError = true;
  }
}
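The null-check-then-mkdirs logic above is a reusable pattern. Below is a minimal sketch of it in isolation, assuming a connected DFSClient (the helper name ensureDirectory is hypothetical):

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// Hypothetical helper: returns true iff 'path' exists as a directory,
// creating it (and any missing parents) when it is absent.
static boolean ensureDirectory(DFSClient dfs, String path) throws IOException {
  HdfsFileStatus status = dfs.getFileInfo(path); // null when the path does not exist
  if (status == null) {
    return dfs.mkdirs(path, null, true); // null permission: use the filesystem default
  }
  return status.isDir(); // exists, but usable only if it is a directory
}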
Example 3: writeInfo
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class the method depends on
/**
 * Write a node to output.
 * Node information includes path, modification time, access time,
 * permission, owner and group.
 * For files, it also includes size, replication and block-size.
 */
static void writeInfo(final Path fullpath, final HdfsFileStatus i,
    final XMLOutputter doc) throws IOException {
  final SimpleDateFormat ldf = df.get();
  doc.startTag(i.isDir() ? "directory" : "file");
  doc.attribute("path", fullpath.toUri().getPath());
  doc.attribute("modified", ldf.format(new Date(i.getModificationTime())));
  doc.attribute("accesstime", ldf.format(new Date(i.getAccessTime())));
  if (!i.isDir()) {
    doc.attribute("size", String.valueOf(i.getLen()));
    doc.attribute("replication", String.valueOf(i.getReplication()));
    doc.attribute("blocksize", String.valueOf(i.getBlockSize()));
  }
  doc.attribute("permission", (i.isDir() ? "d" : "-") + i.getPermission());
  doc.attribute("owner", i.getOwner());
  doc.attribute("group", i.getGroup());
  doc.endTag();
}
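For a regular file, the element this produces looks roughly like the following (values are illustrative, and the exact timestamp format comes from the thread-local df formatter):

<file path="/user/alice/data.txt" modified="2015-06-01T12:00:00+0000"
  accesstime="2015-06-01T12:30:00+0000" size="1024" replication="3"
  blocksize="134217728" permission="-rw-r--r--" owner="alice" group="users"/>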
Example 4: recoverAllLeases
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class the method depends on
static void recoverAllLeases(DFSClient dfs,
    Path path) throws IOException {
  String pathStr = path.toString();
  // getFileInfo returns null for a nonexistent path; this helper assumes
  // the path exists.
  HdfsFileStatus status = dfs.getFileInfo(pathStr);
  if (!status.isDir()) {
    dfs.recoverLease(pathStr);
    return;
  }
  byte[] prev = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing dirList;
  do {
    dirList = dfs.listPaths(pathStr, prev);
    HdfsFileStatus[] files = dirList.getPartialListing();
    for (HdfsFileStatus f : files) {
      recoverAllLeases(dfs, f.getFullPath(path));
    }
    prev = dirList.getLastName();
  } while (dirList.hasMore());
}
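Note the paging idiom here: listPaths starts from the HdfsFileStatus.EMPTY_NAME cookie, resumes each round from dirList.getLastName(), and loops until hasMore() is false, so arbitrarily large directories are traversed one partial listing at a time.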
Example 5: getWccAttr
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class the method depends on
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
  if (fstat == null) {
    return null;
  }
  long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum())
      : fstat.getLen();
  return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
      new NfsTime(fstat.getModificationTime()));
}
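HdfsFileStatus exposes no separate change time, so the modification time fills both time slots of the returned WccAttr (the NFSv3 weak-cache-consistency triple is size, mtime, ctime).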
Example 6: makeQualified
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class the method depends on
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(),
      f.isSymlink() ? new Path(f.getSymlink()) : null,
      f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
}
Example 7: convert
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class the method depends on
public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
  if (fs == null) {
    return null;
  }
  FileType fType = FileType.IS_FILE;
  if (fs.isDir()) {
    fType = FileType.IS_DIR;
  } else if (fs.isSymlink()) {
    fType = FileType.IS_SYMLINK;
  }
  HdfsFileStatusProto.Builder builder =
      HdfsFileStatusProto.newBuilder().
          setLength(fs.getLen()).
          setFileType(fType).
          setBlockReplication(fs.getReplication()).
          setBlocksize(fs.getBlockSize()).
          setModificationTime(fs.getModificationTime()).
          setAccessTime(fs.getAccessTime()).
          setPermission(PBHelper.convert(fs.getPermission())).
          setOwner(fs.getOwner()).
          setGroup(fs.getGroup()).
          setFileId(fs.getFileId()).
          setChildrenNum(fs.getChildrenNum()).
          setPath(ByteString.copyFrom(fs.getLocalNameInBytes())).
          setStoragePolicy(fs.getStoragePolicy());
  if (fs.isSymlink()) {
    builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
  }
  if (fs.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
  }
  // HdfsLocatedFileStatus additionally carries block locations.
  if (fs instanceof HdfsLocatedFileStatus) {
    final HdfsLocatedFileStatus lfs = (HdfsLocatedFileStatus) fs;
    LocatedBlocks locations = lfs.getBlockLocations();
    if (locations != null) {
      builder.setLocations(PBHelper.convert(locations));
    }
  }
  return builder.build();
}
Example 8: toFileStatus
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class the method depends on
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(),
      f.isSymlink() ? new Path(f.getSymlink()) : null,
      new Path(f.getFullName(parent)));
}
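Unlike Example 6's makeQualified, this variant takes the parent as a String, resolves the child with getFullName, and leaves the resulting Path unqualified (no scheme or authority).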
Example 9: verifyRecursively
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class the method depends on
private void verifyRecursively(final Path parent,
    final HdfsFileStatus status) throws Exception {
  if (status.isDir()) {
    Path fullPath = parent == null ?
        new Path("/") : status.getFullPath(parent);
    DirectoryListing children = dfs.getClient().listPaths(
        fullPath.toString(), HdfsFileStatus.EMPTY_NAME, true);
    for (HdfsFileStatus child : children.getPartialListing()) {
      verifyRecursively(fullPath, child);
    }
  } else if (!status.isSymlink()) { // a regular file
    verifyFile(parent, status, null);
  }
}
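All nine examples ultimately perform the same three-way dispatch on file type. A minimal standalone sketch of that idiom (the describe helper is hypothetical):

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// Hypothetical helper: classify a status the way the examples above do.
// A status is a directory, a symlink, or a regular file; the checks are
// mutually exclusive, and checking isDir() first mirrors Example 7's convert().
static String describe(HdfsFileStatus status) {
  if (status.isDir()) {
    return "directory";
  } else if (status.isSymlink()) {
    return "symlink";
  }
  return "regular file";
}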