This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.getFileStatus. If you are wondering what FileSystem.getFileStatus does, how to use it, or what calling code looks like in practice, the curated examples below should help. You can also explore other usage examples of the declaring class, org.apache.hadoop.fs.FileSystem.
The following shows 15 code examples of the FileSystem.getFileStatus method, ordered by popularity by default.
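As a quick orientation before the examples: getFileStatus returns a FileStatus describing a single path (length, block size, replication, times, owner, permissions) and throws FileNotFoundException if the path does not exist. A minimal sketch, with a hypothetical path and a default configuration:

import java.io.FileNotFoundException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

FileSystem fs = FileSystem.get(new Configuration());
Path p = new Path("/tmp/example.txt"); // hypothetical path
try {
  FileStatus st = fs.getFileStatus(p);
  System.out.println(st.getLen() + " bytes, block size " + st.getBlockSize());
} catch (FileNotFoundException notFound) {
  // getFileStatus signals a missing path with an exception, not a null return
}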
Example 1: testBlockSize
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
@Test
@SuppressWarnings("deprecation")
public void testBlockSize() throws Exception {
  FileSystem fs = getFileSystem();
  long defaultBlockSize = fs.getDefaultBlockSize();
  assertEquals("incorrect blocksize",
      S3AFileSystem.DEFAULT_BLOCKSIZE, defaultBlockSize);
  long newBlockSize = defaultBlockSize * 2;
  fs.getConf().setLong(Constants.FS_S3A_BLOCK_SIZE, newBlockSize);
  Path dir = path("testBlockSize");
  Path file = new Path(dir, "file");
  createFile(fs, file, true, dataset(1024, 'a', 'z' - 'a'));
  FileStatus fileStatus = fs.getFileStatus(file);
  assertEquals("Double default block size in stat(): " + fileStatus,
      newBlockSize,
      fileStatus.getBlockSize());
  // check the listing & assert that the block size is picked up by
  // this route too.
  boolean found = false;
  FileStatus[] listing = fs.listStatus(dir);
  for (FileStatus stat : listing) {
    LOG.info("entry: {}", stat);
    if (file.equals(stat.getPath())) {
      found = true;
      assertEquals("Double default block size in ls(): " + stat,
          newBlockSize,
          stat.getBlockSize());
    }
  }
  assertTrue("Did not find " + fileStatsToString(listing, ", "), found);
}
Example 2: addResource
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
public void addResource(FileSystem fs, Configuration conf, Path destPath,
    Map<String, LocalResource> localResources, LocalResourceType resourceType, String link,
    Map<URI, FileStatus> statCache, boolean appMasterOnly) throws IOException {
  FileStatus destStatus = fs.getFileStatus(destPath);
  LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
  amJarRsrc.setType(resourceType);
  LocalResourceVisibility visibility = getVisibility(conf, destPath.toUri(), statCache);
  amJarRsrc.setVisibility(visibility);
  amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(destPath));
  amJarRsrc.setTimestamp(destStatus.getModificationTime());
  amJarRsrc.setSize(destStatus.getLen());
  if (link == null || link.isEmpty()) {
    throw new IOException("You must specify a valid link name");
  }
  localResources.put(link, amJarRsrc);
}
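A hedged usage sketch of how a caller might invoke this helper when preparing a YARN application's local resources (the path, the link name, and the pre-existing fs/conf variables are assumptions, not from the source):

Map<String, LocalResource> localResources = new HashMap<>();
Map<URI, FileStatus> statCache = new HashMap<>();
Path appJar = new Path("/apps/myapp/app.jar"); // hypothetical artifact already uploaded to HDFS
addResource(fs, conf, appJar, localResources, LocalResourceType.FILE, "app.jar",
    statCache, false);
// localResources can then be attached to a ContainerLaunchContext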
Example 3: getBlockLocationsOutput
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
private static BlockLocation[] getBlockLocationsOutput(int fileSize,
    int blockSize, long start, long len, String blockLocationHost)
    throws Exception {
  Configuration conf = new Configuration();
  conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME, ""
      + blockSize);
  if (blockLocationHost != null) {
    conf.set(NativeAzureFileSystem.AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME,
        blockLocationHost);
  }
  AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount
      .createMock(conf);
  FileSystem fs = testAccount.getFileSystem();
  Path testFile = createTestFile(fs, fileSize);
  FileStatus stat = fs.getFileStatus(testFile);
  BlockLocation[] locations = fs.getFileBlockLocations(stat, start, len);
  testAccount.cleanup();
  return locations;
}
Example 4: testCreate
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
private void testCreate(Path path, boolean override) throws Exception {
  FileSystem fs = getHttpFSFileSystem();
  FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
      (short) 2, 100 * 1024 * 1024, null);
  os.write(1);
  os.close();
  fs.close();
  fs = FileSystem.get(getProxiedFSConf());
  FileStatus status = fs.getFileStatus(path);
  if (!isLocalFS()) {
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
  }
  Assert.assertEquals(status.getPermission(), permission);
  InputStream is = fs.open(path);
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
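For reference, the create overload used here is FileSystem.create(Path, FsPermission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable): the test asks for replication 2 and a 100 MB block size, then re-reads the file's status via getFileStatus through the proxied filesystem to confirm both were honored (skipping those assertions on the local filesystem, which ignores them).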
Example 5: getSplits
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
  final JobConf jobConf = new JobConf(jobCtxt.getConfiguration());
  final JobClient client = new JobClient(jobConf);
  ClusterStatus stat = client.getClusterStatus(true);
  int numTrackers = stat.getTaskTrackers();
  final int fileCount = jobConf.getInt(GRIDMIX_DISTCACHE_FILE_COUNT, -1);
  // Total size of distributed cache files to be generated
  final long totalSize = jobConf.getLong(GRIDMIX_DISTCACHE_BYTE_COUNT, -1);
  // Get the path of the special file
  String distCacheFileList = jobConf.get(GRIDMIX_DISTCACHE_FILE_LIST);
  if (fileCount < 0 || totalSize < 0 || distCacheFileList == null) {
    throw new RuntimeException("Invalid metadata: #files (" + fileCount
        + "), total_size (" + totalSize + "), filelisturi ("
        + distCacheFileList + ")");
  }
  Path sequenceFile = new Path(distCacheFileList);
  FileSystem fs = sequenceFile.getFileSystem(jobConf);
  FileStatus srcst = fs.getFileStatus(sequenceFile);
  // Consider the number of TTs * mapSlotsPerTracker as number of mappers.
  int numMapSlotsPerTracker = jobConf.getInt(TTConfig.TT_MAP_SLOTS, 2);
  int numSplits = numTrackers * numMapSlotsPerTracker;
  List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
  LongWritable key = new LongWritable();
  BytesWritable value = new BytesWritable();
  // Average size of data to be generated by each map task
  final long targetSize = Math.max(totalSize / numSplits,
      DistributedCacheEmulator.AVG_BYTES_PER_MAP);
  long splitStartPosition = 0L;
  long splitEndPosition = 0L;
  long acc = 0L;
  long bytesRemaining = srcst.getLen();
  SequenceFile.Reader reader = null;
  try {
    reader = new SequenceFile.Reader(fs, sequenceFile, jobConf);
    while (reader.next(key, value)) {
      // If adding this file would put this split past the target size,
      // cut the last split and put this file in the next split.
      if (acc + key.get() > targetSize && acc != 0) {
        long splitSize = splitEndPosition - splitStartPosition;
        splits.add(new FileSplit(
            sequenceFile, splitStartPosition, splitSize, (String[]) null));
        bytesRemaining -= splitSize;
        splitStartPosition = splitEndPosition;
        acc = 0L;
      }
      acc += key.get();
      splitEndPosition = reader.getPosition();
    }
  } finally {
    if (reader != null) {
      reader.close();
    }
  }
  if (bytesRemaining != 0) {
    splits.add(new FileSplit(
        sequenceFile, splitStartPosition, bytesRemaining, (String[]) null));
  }
  return splits;
}
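For concreteness (the numbers here are assumed, not from the source): with 10 task trackers and the default 2 map slots per tracker, numSplits is 20, so for 4 GB of requested distributed-cache data the target split size is max(4 GB / 20, AVG_BYTES_PER_MAP), i.e. roughly 205 MB per map task unless the per-map minimum dominates.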
Example 6: copy
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
private Path copy(Path sCopy, Path dstdir) throws IOException {
  FileSystem sourceFs = sCopy.getFileSystem(conf);
  Path dCopy = new Path(dstdir, "tmp_" + sCopy.getName());
  FileStatus sStat = sourceFs.getFileStatus(sCopy);
  if (sStat.getModificationTime() != resource.getTimestamp()) {
    throw new IOException("Resource " + sCopy +
        " changed on src filesystem (expected " + resource.getTimestamp() +
        ", was " + sStat.getModificationTime() + ")");
  }
  if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
    if (!isPublic(sourceFs, sCopy, sStat, statCache)) {
      throw new IOException("Resource " + sCopy +
          " is not publicly accessible and as such cannot be part of the" +
          " public cache.");
    }
  }
  FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
      true, conf); // deleteSource=false, overwrite=true
  return dCopy;
}
Example 7: verifyCopy
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
private void verifyCopy(FileSystem fs, boolean preserveChecksum)
    throws Exception {
  for (Path path : pathList) {
    final Path targetPath = new Path(path.toString().replaceAll(SOURCE_PATH,
        TARGET_PATH));
    Assert.assertTrue(fs.exists(targetPath));
    Assert.assertTrue(fs.isFile(targetPath) == fs.isFile(path));
    FileStatus sourceStatus = fs.getFileStatus(path);
    FileStatus targetStatus = fs.getFileStatus(targetPath);
    Assert.assertEquals(sourceStatus.getReplication(),
        targetStatus.getReplication());
    if (preserveChecksum) {
      Assert.assertEquals(sourceStatus.getBlockSize(),
          targetStatus.getBlockSize());
    }
    Assert.assertTrue(!fs.isFile(targetPath)
        || fs.getFileChecksum(targetPath).equals(fs.getFileChecksum(path)));
  }
}
Example 8: load
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
/**
 * Load from a Hadoop filesystem.
 * @param fs filesystem
 * @param path path
 * @return a loaded instance
 * @throws IOException IO problems
 * @throws EOFException if not enough bytes were read in
 * @throws JsonParseException parse problems
 * @throws JsonMappingException O/J mapping problems
 */
public T load(FileSystem fs, Path path)
    throws IOException, JsonParseException, JsonMappingException {
  FileStatus status = fs.getFileStatus(path);
  long len = status.getLen();
  byte[] b = new byte[(int) len];
  FSDataInputStream dataInputStream = fs.open(path);
  try {
    // a single read() is attempted; a short read is treated as an error
    int count = dataInputStream.read(b);
    if (count != len) {
      throw new EOFException(path.toString() + ": read finished prematurely");
    }
  } finally {
    dataInputStream.close();
  }
  return fromBytes(path.toString(), b);
}
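A hedged usage sketch: the enclosing class is generic in T, so assuming a JSON marshalling helper (the class name JsonSerDeser, its constructor shape, and the MyRecord type are illustrative assumptions, not from the snippet):

JsonSerDeser<MyRecord> marshal = new JsonSerDeser<>(MyRecord.class); // hypothetical construction
MyRecord record = marshal.load(fs, new Path("/registry/records/my-record.json")); // hypothetical path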
Example 9: testPreserveGroupOnFile
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
@Test
public void testPreserveGroupOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.GROUP);
  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);
  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);
  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);
  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 10: testGetFileStatusWithInvalidPath
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
@Test(expected = FileNotFoundException.class)
public void testGetFileStatusWithInvalidPath() throws Exception {
  setupRPC(
      DFS.RpcType.GET_FILE_STATUS_REQUEST, DFS.GetFileStatusRequest.newBuilder().setPath("/foo/bar").build(),
      DFS.GetFileStatusResponse.class, newRPCException(LOCAL_ENDPOINT, new FileNotFoundException("File not found")));
  FileSystem fs = newRemoteNodeFileSystem();
  Path path = new Path("/foo/bar");
  fs.getFileStatus(path);
}
Example 11: touchFile
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
private static void touchFile(String path, boolean createMultipleBlocks,
    ChecksumOpt checksumOpt) throws Exception {
  FileSystem fs;
  DataOutputStream outputStream = null;
  try {
    fs = cluster.getFileSystem();
    final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(),
        fs.getWorkingDirectory());
    final long blockSize = createMultipleBlocks
        ? NON_DEFAULT_BLOCK_SIZE
        : fs.getDefaultBlockSize(qualifiedPath) * 2;
    FsPermission permission = FsPermission.getFileDefault().applyUMask(
        FsPermission.getUMask(fs.getConf()));
    outputStream = fs.create(qualifiedPath, permission,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 0,
        (short) (fs.getDefaultReplication(qualifiedPath) * 2), blockSize,
        null, checksumOpt);
    byte[] bytes = new byte[DEFAULT_FILE_SIZE];
    outputStream.write(bytes);
    long fileSize = DEFAULT_FILE_SIZE;
    if (createMultipleBlocks) {
      while (fileSize < 2 * blockSize) {
        outputStream.write(bytes);
        outputStream.flush();
        fileSize += DEFAULT_FILE_SIZE;
      }
    }
    pathList.add(qualifiedPath);
    ++nFiles;
    FileStatus fileStatus = fs.getFileStatus(qualifiedPath);
    System.out.println(fileStatus.getBlockSize());
    System.out.println(fileStatus.getReplication());
  } finally {
    IOUtils.cleanup(null, outputStream);
  }
}
Example 12: checkExists
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
private boolean checkExists(FileSystem fs, Path path, FsPermission fsPerm)
    throws IOException {
  boolean exists = true;
  try {
    FileStatus appDirStatus = fs.getFileStatus(path);
    if (!APP_DIR_PERMISSIONS.equals(appDirStatus.getPermission())) {
      fs.setPermission(path, APP_DIR_PERMISSIONS);
    }
  } catch (FileNotFoundException fnfe) {
    exists = false;
  }
  return exists;
}
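FileSystem.exists(Path) offers a plain boolean check, but it cannot inspect or fix permissions in the same round trip; this helper calls getFileStatus so a single RPC both detects existence and exposes the current permissions. A hedged sketch of the create-if-missing pattern built on it (the directory path and the mkdirs fallback are assumptions, not from the source):

Path appLogDir = new Path("/tmp/logs/application_0001"); // hypothetical path
if (!checkExists(fs, appLogDir, APP_DIR_PERMISSIONS)) {
  fs.mkdirs(appLogDir, APP_DIR_PERMISSIONS);
}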
Example 13: testListStatus
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
private void testListStatus() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path path = new Path(getProxiedFSTestDir(), "foo.txt");
  OutputStream os = fs.create(path);
  os.write(1);
  os.close();
  FileStatus status1 = fs.getFileStatus(path);
  fs.close();
  fs = getHttpFSFileSystem();
  FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
  fs.close();
  Assert.assertEquals(status2.getPermission(), status1.getPermission());
  Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
  Assert.assertEquals(status2.getReplication(), status1.getReplication());
  Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
  Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
  Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
  Assert.assertEquals(status2.getOwner(), status1.getOwner());
  Assert.assertEquals(status2.getGroup(), status1.getGroup());
  Assert.assertEquals(status2.getLen(), status1.getLen());
  FileStatus[] stati = fs.listStatus(path.getParent());
  Assert.assertEquals(stati.length, 1);
  Assert.assertEquals(stati[0].getPath().getName(), path.getName());
}
Example 14: testPreserveReplicationOnFile
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
@Test
public void testPreserveReplicationOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);
  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);
  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);
  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);
  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
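Note that Examples 9 and 14 are identical apart from the single FileAttribute passed to DistCpUtils.preserve (GROUP vs. REPLICATION); accordingly, the group assertion flips from assertTrue to assertFalse and the replication assertion from assertFalse to assertTrue, confirming that only the requested attribute is copied from source to destination.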
Example 15: testRootDirPermission
import org.apache.hadoop.fs.FileSystem; // import for the package/class the method depends on
@Test
public void testRootDirPermission() throws IOException {
  FileSystem fs = FileSystem.getLocal(new YarnConfiguration());
  FileStatus file = fs.getFileStatus(
      new Path(fsPath.getAbsolutePath(), LeveldbTimelineStore.FILENAME));
  assertNotNull(file);
  assertEquals(LeveldbTimelineStore.LEVELDB_DIR_UMASK, file.getPermission());
}