当前位置: 首页>>代码示例>>Java>>正文


Java DistributedFileSystem.listStatus方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.DistributedFileSystem.listStatus方法的典型用法代码示例。如果您正苦于以下问题:Java DistributedFileSystem.listStatus方法的具体用法?Java DistributedFileSystem.listStatus怎么用?Java DistributedFileSystem.listStatus使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.DistributedFileSystem的用法示例。


在下文中一共展示了DistributedFileSystem.listStatus方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: print

import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * 查看输出结果
 *
 * @param path
 */
/**
 * Prints every output file produced by the MapReduce job under the given HDFS path.
 *
 * @param path HDFS directory that contains the job output files
 */
public void print(String path) {
    log.info("mapreduce输出结果:...................................................");
    DistributedFileSystem distributedFileSystem = distributedFileSystem();
    try {
        FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(path));
        for (FileStatus fs : fileStatuses) {
            log.info(fs);
            // Skip directories: open() on a directory throws. Mirrors the
            // isFile() guard used by printMessage elsewhere in this file.
            if (!fs.isFile()) {
                continue;
            }
            // Size the buffer from the file length; available() only reports
            // locally buffered bytes, not the file size. Assumes the output
            // file fits in an int-sized array -- fine for job output here.
            byte[] bs = new byte[(int) fs.getLen()];
            // try-with-resources closes the stream (the original leaked it).
            try (FSDataInputStream fsDataInputStream = distributedFileSystem.open(fs.getPath())) {
                // readFully loops until the buffer is filled; a bare read()
                // may legally return fewer bytes than requested.
                fsDataInputStream.readFully(0, bs);
            }
            // Decode explicitly as UTF-8 instead of the platform default charset.
            log.info("\n" + new String(bs, "UTF-8") + "\n");
        }
    } catch (IOException e) {
        log.error(e);
    } finally {
        close(distributedFileSystem);
    }
}
 
开发者ID:mumuhadoop,项目名称:mumu-mapreduce,代码行数:24,代码来源:MapReduceConfiguration.java

示例2: printMessage

import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Prints the content of every regular file found under the given HDFS path.
 *
 * @param path HDFS directory whose files are printed to stdout
 */
public void printMessage(String path) {
    System.out.println("\nprint result:");
    DistributedFileSystem distributedFileSystem = distributedFileSystem();
    try {
        FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(path));
        for (FileStatus fileStatus : fileStatuses) {
            System.out.println(fileStatus);
            if (fileStatus.isFile()) {
                // Size the buffer from the file length; available() only reports
                // locally buffered bytes, not the file size.
                byte[] bs = new byte[(int) fileStatus.getLen()];
                // try-with-resources guarantees close even if readFully throws
                // (the original leaked the stream on any exception).
                try (FSDataInputStream fsDataInputStream = distributedFileSystem.open(fileStatus.getPath())) {
                    // readFully loops until the buffer is filled; a single
                    // read() call may return fewer bytes than requested.
                    fsDataInputStream.readFully(0, bs);
                }
                // Decode explicitly as UTF-8 instead of the platform default charset.
                System.out.println(new String(bs, "UTF-8"));
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        close(distributedFileSystem);
    }
}
 
开发者ID:mumuhadoop,项目名称:mumu-pig,代码行数:22,代码来源:MumuPigConfiguration.java

示例3: mapreduce

import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
@Test
public void mapreduce() throws Exception {
    // Run the parquet MapReduce job and list the files it produced.
    String inputPath = ParquetConfiguration.HDFS_URI + "//parquet/mapreduce/input";
    String outputPath = ParquetConfiguration.HDFS_URI + "//parquet/mapreduce/output" + DateFormatUtils.format(new Date(), "yyyyMMddHHmmss");
    MapReduceParquetMapReducer.main(new String[]{inputPath, outputPath});
    // Let any exception propagate so JUnit marks the test as failed.
    // The original caught Exception and printStackTrace'd it, which made
    // this test pass even when the job or the listing blew up.
    // try-with-resources also closes the filesystem on every path.
    try (DistributedFileSystem distributedFileSystem = new ParquetConfiguration().distributedFileSystem()) {
        FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(outputPath));
        for (FileStatus fileStatus : fileStatuses) {
            System.out.println(fileStatus);
        }
    }
}
 
开发者ID:mumuhadoop,项目名称:mumu-parquet,代码行数:17,代码来源:MapReduceParquetMapReducerTest.java

示例4: collectFileNames

import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Recursively walks the tree rooted at {@code zonepath} and appends the full
 * path of every regular file to {@code names}.
 *
 * @param fs       filesystem to list from
 * @param zonepath directory to start the walk at
 * @param names    accumulator receiving the discovered file paths
 * @throws IOException if a directory listing fails
 */
private  void collectFileNames(DistributedFileSystem fs, String zonepath, List<String> names)
		throws IOException
{
	for (FileStatus entry : fs.listStatus(new Path(zonepath))) {
		String childPath = zonepath + "/" + entry.getPath().getName();
		if (entry.isDirectory()) {
			// Descend into subdirectories.
			collectFileNames(fs, childPath, names);
		} else {
			// Leaf: record the file's full path.
			names.add(childPath);
		}
	}
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:14,代码来源:ApplicationMasterKMS.java

示例5: collectFileNames

import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Recursively collects the full paths of all regular files under
 * {@code zonepath} into {@code names}.
 *
 * @param fs       filesystem to list from
 * @param zonepath directory to start from
 * @param names    list that receives each discovered file path
 * @throws IOException if listing a directory fails
 */
public static void collectFileNames(DistributedFileSystem fs, String zonepath, List<String> names)
			throws IOException
	{
		FileStatus[] children = fs.listStatus(new Path(zonepath));
		for (FileStatus child : children) {
			String fullPath = zonepath + "/" + child.getPath().getName();
			if (!child.isDirectory()) {
				// Regular file: record it.
				names.add(fullPath);
			} else {
				// Directory: recurse one level deeper.
				collectFileNames(fs, fullPath, names);
			}
		}
	}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:14,代码来源:KeyRotationBC.java

示例6: concat

import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Merges small files under {@code HDFS_PATH + dir} into a single file using
 * {@link DistributedFileSystem#concat}. Only regular files whose length,
 * relative to their block size, leaves a small remainder are merged; the
 * remaining candidates are concatenated into the first one.
 *
 * @param dir sub-directory (relative to NodeConfig.HDFS_PATH) to compact
 * @throws IOException if any HDFS operation fails
 */
public static void concat(String dir) throws IOException {
    String directory = NodeConfig.HDFS_PATH + dir;
    Configuration conf = new Configuration();
    // try-with-resources: the original only closed fs when fileList.length >= 2,
    // leaking the filesystem handle otherwise and on any exception.
    try (DistributedFileSystem fs =
            (DistributedFileSystem) FileSystem.get(URI.create(directory), conf)) {
        FileStatus[] fileList = fs.listStatus(new Path(directory));
        if (fileList.length < 2) {
            return; // nothing to merge
        }

        ArrayList<Path> srcs = new ArrayList<Path>(fileList.length);
        for (FileStatus fileStatus : fileList) {
            // NOTE(review): (len & ~blockSize) clears only the single blockSize
            // bit, which is NOT len % blockSize; the intent looks like "tail
            // smaller than half a block" -- preserved as-is, confirm before
            // changing to len % blockSize.
            if (fileStatus.isFile()
                    && (fileStatus.getLen() & ~fileStatus.getBlockSize()) < fileStatus.getBlockSize() / 2) {
                srcs.add(fileStatus.getPath());
            }
        }

        if (srcs.size() >= 2) {
            // First candidate absorbs all the others.
            Path appended = srcs.get(0);
            Path[] sources = srcs.subList(1, srcs.size()).toArray(new Path[0]);
            fs.concat(appended, sources);
            Logger.println("concat to : " + appended.getName());
            Logger.println(Arrays.toString(sources));
        }
    }
}
 
开发者ID:cuiods,项目名称:WIFIProbe,代码行数:39,代码来源:HDFSTool.java

示例7: checkSnapshotCreation

import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Check the functionality of a snapshot.
 * 
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The root of the snapshot
 * @param snapshottedDir The snapshotted directory
 */
/**
 * Check the functionality of a snapshot.
 *
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The root of the snapshot
 * @param snapshottedDir The snapshotted directory
 */
public static void checkSnapshotCreation(DistributedFileSystem hdfs,
    Path snapshotRoot, Path snapshottedDir) throws Exception {
  // The snapshot root must exist once creation succeeded.
  assertTrue(hdfs.exists(snapshotRoot));
  // A freshly taken snapshot should hold exactly as many entries as the
  // directory it captured; deeper content comparison is not performed here.
  FileStatus[] snapshotFiles = hdfs.listStatus(snapshotRoot);
  FileStatus[] currentFiles = hdfs.listStatus(snapshottedDir);
  String context = "snapshottedDir=" + snapshottedDir
      + ", snapshotRoot=" + snapshotRoot;
  assertEquals(context, currentFiles.length, snapshotFiles.length);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:SnapshotTestHelper.java


注:本文中的org.apache.hadoop.hdfs.DistributedFileSystem.listStatus方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。