This page collects typical usage of the Java method org.apache.hadoop.hdfs.DistributedFileSystem.listStatus. If you are unsure what DistributedFileSystem.listStatus does, how to call it, or want working examples, the curated snippets below should help. You can also explore the enclosing class, org.apache.hadoop.hdfs.DistributedFileSystem, for related usage.
Six code examples of DistributedFileSystem.listStatus follow, ordered by popularity by default.
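Before the examples, a quick orientation: obtain a FileSystem bound to an hdfs:// URI, cast it to DistributedFileSystem if you need HDFS-specific operations, and pass a Path to listStatus, which returns one FileStatus per direct child of that path. A minimal, self-contained sketch; the NameNode address is a placeholder:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ListStatusSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // FileSystem.get returns a DistributedFileSystem for hdfs:// URIs;
        // replace the placeholder address with your NameNode.
        DistributedFileSystem fs = (DistributedFileSystem)
                FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
        try {
            // One FileStatus per direct child of "/"; not recursive.
            for (FileStatus status : fs.listStatus(new Path("/"))) {
                System.out.println(status.getPath() + "\t" + status.getLen());
            }
        } finally {
            fs.close();
        }
    }
}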
Example 1: print
import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class this method depends on
/**
 * Print the MapReduce output files under the given path.
 *
 * @param path the HDFS path holding the job output
 */
public void print(String path) {
    log.info("MapReduce output:...................................................");
    DistributedFileSystem distributedFileSystem = distributedFileSystem();
    try {
        FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(path));
        for (FileStatus fs : fileStatuses) {
            log.info(fs);
            // Size the buffer from the FileStatus: available() is not a reliable
            // way to learn a file's length, and a single read() may return fewer bytes.
            byte[] bs = new byte[(int) fs.getLen()];
            try (FSDataInputStream fsDataInputStream = distributedFileSystem.open(fs.getPath())) {
                fsDataInputStream.readFully(bs);
            }
            log.info("\n" + new String(bs) + "\n");
        }
    } catch (IOException e) {
        log.error(e);
    } finally {
        close(distributedFileSystem);
    }
}
Example 2: printMessage
import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class this method depends on
public void printMessage(String path) {
    System.out.println("\nprint result:");
    DistributedFileSystem distributedFileSystem = distributedFileSystem();
    try {
        FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(path));
        for (FileStatus fileStatus : fileStatuses) {
            System.out.println(fileStatus);
            if (fileStatus.isFile()) {
                // getLen() gives the exact file size; readFully() loops until
                // the buffer is filled, unlike a single read() call.
                byte[] bs = new byte[(int) fileStatus.getLen()];
                try (FSDataInputStream fsDataInputStream = distributedFileSystem.open(fileStatus.getPath())) {
                    fsDataInputStream.readFully(bs);
                }
                System.out.println(new String(bs));
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        close(distributedFileSystem);
    }
}
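Both examples above size a byte array from the file and read it in full; for large outputs it may be preferable to stream instead of buffering the whole file in memory. A minimal sketch using org.apache.hadoop.io.IOUtils, assuming an open DistributedFileSystem named distributedFileSystem as above; the output path is a placeholder:

// Stream the file to stdout in 4 KB chunks; the final argument tells
// copyBytes not to close the streams, so try-with-resources handles that.
try (FSDataInputStream in = distributedFileSystem.open(new Path("/output/part-r-00000"))) {
    IOUtils.copyBytes(in, System.out, 4096, false);
}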
Example 3: mapreduce
import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class this method depends on
@Test
public void mapreduce() {
    String inputPath = ParquetConfiguration.HDFS_URI + "//parquet/mapreduce/input";
    String outputPath = ParquetConfiguration.HDFS_URI + "//parquet/mapreduce/output" + DateFormatUtils.format(new Date(), "yyyyMMddHHmmss");
    try {
        MapReduceParquetMapReducer.main(new String[]{inputPath, outputPath});
        // try-with-resources closes the FileSystem even if listStatus throws
        try (DistributedFileSystem distributedFileSystem = new ParquetConfiguration().distributedFileSystem()) {
            FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(outputPath));
            for (FileStatus fileStatus : fileStatuses) {
                System.out.println(fileStatus);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 4: collectFileNames
import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class this method depends on
/**
 * Recursively collect the names of all files under the given path.
 */
private void collectFileNames(DistributedFileSystem fs, String zonepath, List<String> names)
        throws IOException {
    FileStatus[] statuses = fs.listStatus(new Path(zonepath));
    for (FileStatus status : statuses) {
        String fname = zonepath + "/" + status.getPath().getName();
        if (status.isDirectory()) {
            // Descend into subdirectories; only file names are recorded.
            collectFileNames(fs, fname, names);
        } else {
            names.add(fname);
        }
    }
}
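Worth noting: FileSystem also ships a built-in recursive listing, listFiles(path, true), which returns a RemoteIterator<LocatedFileStatus> over files only. A minimal sketch under the same assumptions as the example above (fs, zonepath, and names as in collectFileNames); note that getPath().toString() yields the full URI rather than the bare concatenated string the example builds:

// Built-in recursive walk; directories are traversed but not returned.
RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(zonepath), true);
while (it.hasNext()) {
    names.add(it.next().getPath().toString());
}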
Example 5: concat
import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class this method depends on
public static void concat(String dir) throws IOException {
    String directory = NodeConfig.HDFS_PATH + dir;
    Configuration conf = new Configuration();
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(URI.create(directory), conf);
    try {
        FileStatus[] fileList = fs.listStatus(new Path(directory));
        if (fileList.length >= 2) {
            // Collect files whose size modulo the block size is under half a
            // block, i.e. files that end in a mostly empty block.
            ArrayList<Path> srcs = new ArrayList<Path>(fileList.length);
            for (FileStatus fileStatus : fileList) {
                if (fileStatus.isFile()
                        && fileStatus.getLen() % fileStatus.getBlockSize() < fileStatus.getBlockSize() / 2) {
                    srcs.add(fileStatus.getPath());
                }
            }
            if (srcs.size() >= 2) {
                // Concatenate everything after the first file onto the first.
                Path appended = srcs.get(0);
                Path[] sources = srcs.subList(1, srcs.size()).toArray(new Path[srcs.size() - 1]);
                fs.concat(appended, sources);
                Logger.println("concat to: " + appended.getName());
                Logger.println(Arrays.toString(sources));
            }
        }
    } finally {
        fs.close();
    }
}
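HDFS concat is a metadata-only operation: the blocks of the source files are appended to the target and the source files are then removed, so no file data is copied. A brief usage sketch; the paths are placeholders for existing files assumed to live in the same HDFS directory:

// Hypothetical paths: append part-1 and part-2 onto part-0 by moving
// their blocks; the two source files disappear afterwards.
Path target = new Path("/data/part-0");
Path[] sources = {new Path("/data/part-1"), new Path("/data/part-2")};
fs.concat(target, sources);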
Example 6: checkSnapshotCreation
import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class this method depends on
/**
 * Check the functionality of a snapshot.
 *
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The root of the snapshot
 * @param snapshottedDir The snapshotted directory
 */
public static void checkSnapshotCreation(DistributedFileSystem hdfs,
        Path snapshotRoot, Path snapshottedDir) throws Exception {
    // Currently we only check if the snapshot was created successfully
    assertTrue(hdfs.exists(snapshotRoot));
    // Compare the snapshot with the current dir
    FileStatus[] currentFiles = hdfs.listStatus(snapshottedDir);
    FileStatus[] snapshotFiles = hdfs.listStatus(snapshotRoot);
    assertEquals("snapshottedDir=" + snapshottedDir
            + ", snapshotRoot=" + snapshotRoot,
            currentFiles.length, snapshotFiles.length);
}
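To exercise this helper end to end, the directory must first be made snapshottable. A brief usage sketch; the directory path and snapshot name are hypothetical:

// Hypothetical directory and snapshot name.
Path dir = new Path("/user/alice/data");
hdfs.allowSnapshot(dir);                            // requires superuser privileges
Path snapshotRoot = hdfs.createSnapshot(dir, "s0"); // returns <dir>/.snapshot/s0
checkSnapshotCreation(hdfs, snapshotRoot, dir);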