This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.getContentSummary: what the method does, how to call it, and where it appears in real code. If those are your questions, the curated code samples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.FileSystem.
Five code examples of FileSystem.getContentSummary are shown below, sorted by popularity by default.
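Before the examples, here is a minimal, self-contained sketch of the basic call (a sketch only: the path /tmp/demo and the default Configuration are placeholders, not taken from the examples below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ContentSummaryDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // picks up core-site.xml / hdfs-site.xml
    FileSystem fs = FileSystem.get(conf);
    try {
      // getContentSummary walks the subtree rooted at the path and
      // aggregates length, file/directory counts, and quota usage
      ContentSummary summary = fs.getContentSummary(new Path("/tmp/demo"));
      System.out.println("length:          " + summary.getLength());
      System.out.println("file count:      " + summary.getFileCount());
      System.out.println("directory count: " + summary.getDirectoryCount());
      System.out.println("space consumed:  " + summary.getSpaceConsumed());
    } finally {
      fs.close();
    }
  }
}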
Example 1: testContentSummary
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void testContentSummary() throws Exception {
  // Write a one-byte file through the proxied (HDFS) file system
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path path = new Path(getProxiedFSTestDir(), "foo.txt");
  OutputStream os = fs.create(path);
  os.write(1);
  os.close();
  ContentSummary hdfsContentSummary = fs.getContentSummary(path);
  fs.close();
  // Fetch the same summary through HttpFS and verify the two match
  fs = getHttpFSFileSystem();
  ContentSummary httpContentSummary = fs.getContentSummary(path);
  fs.close();
  Assert.assertEquals(httpContentSummary.getDirectoryCount(), hdfsContentSummary.getDirectoryCount());
  Assert.assertEquals(httpContentSummary.getFileCount(), hdfsContentSummary.getFileCount());
  Assert.assertEquals(httpContentSummary.getLength(), hdfsContentSummary.getLength());
  Assert.assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
  Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
  Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
}
Example 2: getContentSummary
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Gets the ContentSummary of the file at the given path.
 *
 * @param fileSystemInfo
 *            file system information
 * @param path
 *            file path
 * @return the ContentSummary, or null if an IOException occurs
 */
public static ContentSummary getContentSummary(FileSystemInfo fileSystemInfo, String path) {
  FileSystem fs = getFileSystem(fileSystemInfo);
  Path uri = new Path(path);
  try {
    pathNotExistCheck(path, fs, uri);
    return fs.getContentSummary(uri);
  } catch (IOException e) {
    e.printStackTrace();
  } finally {
    closeFileSystem(fs);
  }
  return null;
}
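Since FileSystemInfo, getFileSystem, pathNotExistCheck, and closeFileSystem are project-specific helpers whose definitions are not shown in this article, a self-contained equivalent might look like the following sketch (assuming the helpers behave as their names suggest):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public static ContentSummary getContentSummary(Configuration conf, String path) {
  FileSystem fs = null;
  try {
    fs = FileSystem.get(conf);
    Path p = new Path(path);
    // assumed behavior of pathNotExistCheck: fail fast on a missing path
    if (!fs.exists(p)) {
      throw new IOException("Path does not exist: " + path);
    }
    return fs.getContentSummary(p);
  } catch (IOException e) {
    e.printStackTrace();
    return null;
  } finally {
    // assumed behavior of closeFileSystem: best-effort close
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}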
Example 3: testTargetDir
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/** test target-dir contains imported files. */
public void testTargetDir() throws IOException {
  try {
    String targetDir = getWarehouseDir() + "/tempTargetDir";

    ArrayList args = getOutputArgv(true);
    args.add("--target-dir");
    args.add(targetDir);

    // delete target-dir if exists and recreate it
    FileSystem fs = FileSystem.get(getConf());
    Path outputPath = new Path(targetDir);
    if (fs.exists(outputPath)) {
      fs.delete(outputPath, true);
    }

    String[] argv = (String[]) args.toArray(new String[0]);
    runImport(argv);

    ContentSummary summ = fs.getContentSummary(outputPath);
    assertTrue("There's no new imported files in target-dir",
        summ.getFileCount() > 0);
  } catch (Exception e) {
    LOG.error("Got Exception: " + StringUtils.stringifyException(e));
    fail(e.toString());
  }
}
Example 4: testBlockAllocationAdjustsUsageConservatively
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Violate a space quota using files of size < 1 block. Test that block
 * allocation conservatively assumes that for quota checking the entire
 * space of the block is used.
 */
@Test
public void testBlockAllocationAdjustsUsageConservatively()
    throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int BLOCK_SIZE = 6 * 1024;
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  DFSAdmin admin = new DFSAdmin(conf);

  final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr;
  System.out.println("webhdfsuri=" + webhdfsuri);
  final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);

  try {
    Path dir = new Path("/test");
    Path file1 = new Path("/test/test1");
    Path file2 = new Path("/test/test2");
    boolean exceededQuota = false;
    final int QUOTA_SIZE = 3 * BLOCK_SIZE; // total space usage including repl.
    final int FILE_SIZE = BLOCK_SIZE / 2;
    ContentSummary c;

    // Create the directory and set the quota
    assertTrue(fs.mkdirs(dir));
    runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE),
        dir.toString());

    // Creating a file should use half the quota
    DFSTestUtil.createFile(fs, file1, FILE_SIZE, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 3);
    c = fs.getContentSummary(dir);
    checkContentSummary(c, webhdfs.getContentSummary(dir));
    assertEquals("Quota is half consumed", QUOTA_SIZE / 2,
        c.getSpaceConsumed());

    // We cannot create the 2nd file: even though the total space used by the
    // two files (2 * 3 * BLOCK_SIZE/2) would fit within the quota
    // (3 * BLOCK_SIZE), when a block for a file is allocated the space used
    // is adjusted conservatively (3 * BLOCK_SIZE, i.e. it assumes a full
    // block is written), which violates the quota since we've already used
    // half of it for the first file.
    try {
      DFSTestUtil.createFile(fs, file2, FILE_SIZE, (short) 3, 1L);
    } catch (QuotaExceededException e) {
      exceededQuota = true;
    }
    assertTrue("Quota not exceeded", exceededQuota);
  } finally {
    cluster.shutdown();
  }
}
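To make the arithmetic in the comments concrete: with BLOCK_SIZE = 6144 bytes, the quota is 3 * 6144 = 18432 bytes. The first file consumes 3 replicas * 3072 bytes = 9216 bytes, exactly half the quota. Allocating the first block of the second file is conservatively charged as 3 replicas * 6144 bytes = 18432 bytes, bringing the tentative usage to 27648 bytes, which exceeds the quota and raises QuotaExceededException.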
Example 5: execute
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return a Map object (JSON friendly) with the content-summary.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Map execute(FileSystem fs) throws IOException {
  ContentSummary contentSummary = fs.getContentSummary(path);
  return contentSummaryToJSON(contentSummary);
}
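The contentSummaryToJSON helper is defined elsewhere in the class and is not shown in this excerpt; as a rough sketch (the key names here are assumptions, not necessarily the exact keys HttpFS emits), it could be implemented as:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.fs.ContentSummary;

// Sketch only: packs the ContentSummary fields into a JSON-friendly Map.
private static Map<String, Object> contentSummaryToJSON(ContentSummary cs) {
  Map<String, Object> json = new LinkedHashMap<String, Object>();
  json.put("directoryCount", cs.getDirectoryCount());
  json.put("fileCount", cs.getFileCount());
  json.put("length", cs.getLength());
  json.put("quota", cs.getQuota());
  json.put("spaceConsumed", cs.getSpaceConsumed());
  json.put("spaceQuota", cs.getSpaceQuota());
  return json;
}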