This article collects typical usage examples of the Java class org.apache.hadoop.fs.ContentSummary. If you are wondering what ContentSummary is for, how it is used, or what real code that uses it looks like, the curated examples below should help.
The ContentSummary class belongs to the org.apache.hadoop.fs package. Fifteen code examples are shown below, sorted by popularity by default.
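Before the examples, here is a minimal, self-contained sketch of the typical call pattern (written for this article rather than taken from the examples below; the path "/some/dir" and the default Configuration are assumptions): obtain a FileSystem, call getContentSummary(Path), and read the aggregated counters off the returned ContentSummary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ContentSummaryDemo {
  public static void main(String[] args) throws Exception {
    // Picks up core-site.xml / hdfs-site.xml from the classpath.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // "/some/dir" is a hypothetical path; the summary covers the whole subtree.
    ContentSummary cs = fs.getContentSummary(new Path("/some/dir"));
    System.out.println("length:          " + cs.getLength());
    System.out.println("file count:      " + cs.getFileCount());
    System.out.println("directory count: " + cs.getDirectoryCount());
    System.out.println("space consumed:  " + cs.getSpaceConsumed());
    System.out.println("name quota:      " + cs.getQuota());      // -1 when unset
    System.out.println("space quota:     " + cs.getSpaceQuota()); // -1 when unset
    fs.close();
  }
}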
Example 1: testContentSummary
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
private void testContentSummary() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path path = new Path(getProxiedFSTestDir(), "foo.txt");
  OutputStream os = fs.create(path);
  os.write(1);
  os.close();
  ContentSummary hdfsContentSummary = fs.getContentSummary(path);
  fs.close();
  fs = getHttpFSFileSystem();
  ContentSummary httpContentSummary = fs.getContentSummary(path);
  fs.close();
  Assert.assertEquals(httpContentSummary.getDirectoryCount(), hdfsContentSummary.getDirectoryCount());
  Assert.assertEquals(httpContentSummary.getFileCount(), hdfsContentSummary.getFileCount());
  Assert.assertEquals(httpContentSummary.getLength(), hdfsContentSummary.getLength());
  Assert.assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
  Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
  Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
}
Example 2: canBeSafelyDeleted
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
private boolean canBeSafelyDeleted(PathData item)
    throws IOException {
  boolean shouldDelete = true;
  if (safeDelete) {
    final long deleteLimit = getConf().getLong(
        HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES,
        HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT);
    if (deleteLimit > 0) {
      ContentSummary cs = item.fs.getContentSummary(item.path);
      final long numFiles = cs.getFileCount();
      if (numFiles > deleteLimit) {
        if (!ToolRunner.confirmPrompt("Proceed deleting " + numFiles +
            " files?")) {
          System.err.println("Delete aborted at user request.\n");
          shouldDelete = false;
        }
      }
    }
  }
  return shouldDelete;
}
Example 3: getContentSummary
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
  Map<String, String> params = new HashMap<String, String>();
  params.put(OP_PARAM, Operation.GETCONTENTSUMMARY.toString());
  HttpURLConnection conn =
      getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
  HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  JSONObject json = (JSONObject) ((JSONObject)
      HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
  return new ContentSummary.Builder().
      length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON)).
      fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON)).
      directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON)).
      quota((Long) json.get(CONTENT_SUMMARY_QUOTA_JSON)).
      spaceConsumed((Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON)).
      spaceQuota((Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)).build();
}
Example 4: getContentSummary
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
/**
 * Connect to the name node and get content summary.
 * @param path The path
 * @return The content summary for the path, or null if the server does
 *         not support the operation.
 * @throws IOException if the connection fails or the reply is malformed
 */
private ContentSummary getContentSummary(String path) throws IOException {
  final HttpURLConnection connection = openConnection(
      "/contentSummary" + ServletUtil.encodePath(path),
      "ugi=" + getEncodedUgiParameter());
  InputStream in = null;
  try {
    in = connection.getInputStream();
    final XMLReader xr = XMLReaderFactory.createXMLReader();
    xr.setContentHandler(this);
    xr.parse(new InputSource(in));
  } catch (FileNotFoundException fnfe) {
    // the server may not support getContentSummary
    return null;
  } catch (SAXException saxe) {
    final Exception embedded = saxe.getException();
    if (embedded instanceof IOException) {
      throw (IOException) embedded;
    }
    throw new IOException("Invalid xml format", saxe);
  } finally {
    if (in != null) {
      in.close();
    }
    connection.disconnect();
  }
  // populated by the SAX ContentHandler callbacks while parsing the reply
  return contentsummary;
}
Example 5: getContentSummaryInt
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
private static ContentSummary getContentSummaryInt(FSDirectory fsd,
    INodesInPath iip) throws IOException {
  fsd.readLock();
  try {
    INode targetNode = iip.getLastINode();
    if (targetNode == null) {
      throw new FileNotFoundException("File does not exist: " + iip.getPath());
    } else {
      // Make it relinquish locks every time contentCountLimit entries are
      // processed. 0 means disabled, i.e. blocking for the entire duration.
      ContentSummaryComputationContext cscc =
          new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
              fsd.getContentCountLimit());
      ContentSummary cs = targetNode.computeAndConvertContentSummary(cscc);
      fsd.addYieldCount(cscc.getYieldCount());
      return cs;
    }
  } finally {
    fsd.readUnlock();
  }
}
Example 6: computeAndConvertContentSummary
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
/**
 * Compute {@link ContentSummary}.
 */
public final ContentSummary computeAndConvertContentSummary(
    ContentSummaryComputationContext summary) {
  ContentCounts counts = computeContentSummary(summary).getCounts();
  final QuotaCounts q = getQuotaCounts();
  return new ContentSummary.Builder().
      length(counts.getLength()).
      fileCount(counts.getFileCount() + counts.getSymlinkCount()).
      directoryCount(counts.getDirectoryCount()).
      quota(q.getNameSpace()).
      spaceConsumed(counts.getStoragespace()).
      spaceQuota(q.getStorageSpace()).
      typeConsumed(counts.getTypeSpaces()).
      typeQuota(q.getTypeSpaces().asArray()).
      build();
}
Example 7: convert
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
public static ContentSummary convert(ContentSummaryProto cs) {
  if (cs == null) return null;
  ContentSummary.Builder builder = new ContentSummary.Builder();
  builder.length(cs.getLength()).
      fileCount(cs.getFileCount()).
      directoryCount(cs.getDirectoryCount()).
      quota(cs.getQuota()).
      spaceConsumed(cs.getSpaceConsumed()).
      spaceQuota(cs.getSpaceQuota());
  if (cs.hasTypeQuotaInfos()) {
    for (HdfsProtos.StorageTypeQuotaInfoProto info :
        cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
      StorageType type = PBHelper.convertStorageType(info.getType());
      builder.typeConsumed(type, info.getConsumed());
      builder.typeQuota(type, info.getQuota());
    }
  }
  return builder.build();
}
Example 8: testTruncate
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  Path dir = getTestRootPath(fSys, "test/hadoop");
  Path file = getTestRootPath(fSys, "test/hadoop/file");
  final byte[] data = getFileData(numOfBlocks, blockSize);
  createFile(fSys, file, data, blockSize, repl);
  final int newLength = blockSize;
  boolean isReady = fSys.truncate(file, newLength);
  Assert.assertTrue("Recovery is not expected.", isReady);
  FileStatus fileStatus = fSys.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());
  ContentSummary cs = fSys.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue("Deleted", fSys.delete(dir, true));
}
Example 9: testContentSummaryWithoutQuotaByStorageType
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
@Test(timeout = 60000)
public void testContentSummaryWithoutQuotaByStorageType() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);
  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(!fnode.isQuotaSet());
  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      REPLICATION, seed);
  // Verify getContentSummary without any quota set: with ONESSD,
  // one replica lands on SSD and the remaining two on DISK
  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
Example 10: testContentSummaryWithoutStoragePolicy
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
@Test(timeout = 60000)
public void testContentSummaryWithoutStoragePolicy() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(!fnode.isQuotaSet());
  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      REPLICATION, seed);
  // Verify getContentSummary without any quota set
  // Expect no type quota and usage information available
  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  for (StorageType t : StorageType.values()) {
    assertEquals(cs.getTypeConsumed(t), 0);
    assertEquals(cs.getTypeQuota(t), -1);
  }
}
Example 11: convert
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
public static ContentSummary convert(ContentSummaryProto cs) {
  if (cs == null) return null;
  ContentSummary.Builder builder = new ContentSummary.Builder();
  builder.length(cs.getLength()).
      fileCount(cs.getFileCount()).
      directoryCount(cs.getDirectoryCount()).
      quota(cs.getQuota()).
      spaceConsumed(cs.getSpaceConsumed()).
      spaceQuota(cs.getSpaceQuota());
  if (cs.hasTypeQuotaInfos()) {
    for (HdfsProtos.StorageTypeQuotaInfoProto info :
        cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
      StorageType type = convertStorageType(info.getType());
      builder.typeConsumed(type, info.getConsumed());
      builder.typeQuota(type, info.getQuota());
    }
  }
  return builder.build();
}
Example 12: getContentSummaryInt
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
private static ContentSummary getContentSummaryInt(FSDirectory fsd,
    INodesInPath iip) throws IOException {
  fsd.readLock();
  try {
    INode targetNode = iip.getLastINode();
    if (targetNode == null) {
      throw new FileNotFoundException("File does not exist: " + iip.getPath());
    } else {
      // Make it relinquish locks every time contentCountLimit entries are
      // processed. 0 means disabled, i.e. blocking for the entire duration.
      ContentSummaryComputationContext cscc =
          new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
              fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
      ContentSummary cs = targetNode.computeAndConvertContentSummary(
          iip.getPathSnapshotId(), cscc);
      fsd.addYieldCount(cscc.getYieldCount());
      return cs;
    }
  } finally {
    fsd.readUnlock();
  }
}
Example 13: getContentSummary
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
/**
 * Get the content summary for a specific file/dir.
 *
 * @param src The string representation of the path to the file
 *
 * @throws AccessControlException if access is denied
 * @throws UnresolvedLinkException if a symlink is encountered.
 * @throws FileNotFoundException if no file exists
 * @throws StandbyException
 * @throws IOException for issues with writing to the audit log
 *
 * @return object containing information regarding the file
 *         or null if file not found
 */
ContentSummary getContentSummary(final String src) throws IOException {
  checkOperation(OperationCategory.READ);
  readLock();
  boolean success = true;
  try {
    checkOperation(OperationCategory.READ);
    return FSDirStatAndListingOp.getContentSummary(dir, src);
  } catch (AccessControlException ace) {
    success = false;
    throw ace;
  } finally {
    readUnlock();
    logAuditEvent(success, "contentSummary", src);
  }
}
Example 14: computeAndConvertContentSummary
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
/**
 * Compute {@link ContentSummary}.
 */
public final ContentSummary computeAndConvertContentSummary(int snapshotId,
    ContentSummaryComputationContext summary) {
  ContentCounts counts = computeContentSummary(snapshotId, summary)
      .getCounts();
  final QuotaCounts q = getQuotaCounts();
  return new ContentSummary.Builder().
      length(counts.getLength()).
      fileCount(counts.getFileCount() + counts.getSymlinkCount()).
      directoryCount(counts.getDirectoryCount()).
      quota(q.getNameSpace()).
      spaceConsumed(counts.getStoragespace()).
      spaceQuota(q.getStorageSpace()).
      typeConsumed(counts.getTypeSpaces()).
      typeQuota(q.getTypeSpaces().asArray()).
      build();
}
Example 15: testContentSummary
import org.apache.hadoop.fs.ContentSummary; // import the required package/class
@Test
public void testContentSummary() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  final Path path = new Path("/QuotaDir");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
        conf, WebHdfsConstants.WEBHDFS_SCHEME);
    final DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.mkdirs(path);
    dfs.setQuotaByStorageType(path, StorageType.DISK, 100000);
    ContentSummary contentSummary = webHdfs.getContentSummary(path);
    Assert.assertTrue((contentSummary.getTypeQuota(
        StorageType.DISK) == 100000));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}