本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota类的典型用法代码示例。如果您正苦于以下问题:Java INodeDirectoryWithQuota类的具体用法?Java INodeDirectoryWithQuota怎么用?Java INodeDirectoryWithQuota使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
INodeDirectoryWithQuota类属于org.apache.hadoop.hdfs.server.namenode包,在下文中一共展示了INodeDirectoryWithQuota类的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: checkQuotaUsageComputation
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Asserts that the directory at {@code dirPath} carries a quota and that both
 * its cached and freshly computed namespace/diskspace usage match the
 * expected values. The recursively dumped tree is used as the failure message.
 */
private void checkQuotaUsageComputation(final Path dirPath,
    final long expectedNs, final long expectedDs) throws IOException {
  final INode inode = fsdir.getINode(dirPath.toString());
  assertTrue(inode.isDirectory() && inode.isQuotaSet());
  final INodeDirectoryWithQuota quotaDir = (INodeDirectoryWithQuota) inode;
  // Check the usage values cached on the directory node itself.
  assertEquals(quotaDir.dumpTreeRecursively().toString(), expectedNs,
      quotaDir.getNamespace());
  assertEquals(quotaDir.dumpTreeRecursively().toString(), expectedDs,
      quotaDir.getDiskspace());
  // Recompute usage from scratch and verify it agrees with the cache.
  final Quota.Counts computed = Quota.Counts.newInstance();
  quotaDir.computeQuotaUsage(computed, false);
  assertEquals(quotaDir.dumpTreeRecursively().toString(), expectedNs,
      computed.get(Quota.NAMESPACE));
  assertEquals(quotaDir.dumpTreeRecursively().toString(), expectedDs,
      computed.get(Quota.DISKSPACE));
}
示例2: testSpaceCommands
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Exercises HDFS operations that change the disk space charged to a
 * directory tree: create, rename, delete, append, and setReplication.
 *
 * This is based on testNamespaceCommands() above.
 */
public void testSpaceCommands() throws Exception {
  final Configuration conf = new Configuration();
  // A small block size lets tiny files exercise the diskspace quotas.
  conf.set("dfs.block.size", "512");
  conf.setBoolean("dfs.support.append", true);

  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  final FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
  final DistributedFileSystem dfs = (DistributedFileSystem) fs;

  final FSDirectory fsd = cluster.getNameNode().namesystem.dir;
  final INodeDirectoryWithQuota rootDir =
      (INodeDirectoryWithQuota) (fsd.getExistingPathINodes("/")[0]);
  try {
    // Two passes: one exactly block-aligned, one just under a block.
    generateFiles(dfs, rootDir, 1024, 512);
    generateFiles(dfs, rootDir, 1019, 512);
  } finally {
    // Always tear the mini-cluster down, even on assertion failure.
    cluster.shutdown();
  }
}
示例3: testSpaceCommands
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Exercises HDFS operations that change the disk space charged to a
 * directory tree: create, rename, delete, append, and setReplication.
 *
 * This is based on testNamespaceCommands() above.
 */
@Test
public void testSpaceCommands() throws Exception {
  // Bring the cluster up with a small block size and append support.
  setUp(true, true);
  final FSDirectory fsd = cluster.getNameNode().namesystem.dir;
  final INodeDirectoryWithQuota rootDir =
      (INodeDirectoryWithQuota) (fsd.getExistingPathINodes("/")[0]);
  // Two passes: one exactly block-aligned, one just under a block.
  generateFiles(dfs, rootDir, 1024, 512);
  generateFiles(dfs, rootDir, 1019, 512);
}
示例4: acquireINodeAttributes
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Acquires attribute locks for every resolved inode that is a quota
 * directory, keyed by inode id.
 */
protected void acquireINodeAttributes()
    throws StorageException, TransactionContextException {
  // Collect the primary keys of all quota-carrying directories.
  final List<INodeCandidatePrimaryKey> quotaDirKeys = new ArrayList<>();
  for (final INode resolved : getAllResolvedINodes()) {
    if (resolved instanceof INodeDirectoryWithQuota) {
      quotaDirKeys.add(new INodeCandidatePrimaryKey(resolved.getId()));
    }
  }
  acquireLockList(DEFAULT_LOCK_TYPE, INodeAttributes.Finder.ByINodeIds,
      quotaDirKeys);
}
示例5: convertHDFStoDAL
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Converts an HDFS namenode inode into its DAL (database) representation.
 *
 * @param inode the HDFS inode to convert; may be null
 * @return the populated DAL inode, or null when {@code inode} is null
 * @throws StorageException on storage-layer failure
 */
@Override
public INode convertHDFStoDAL(
    org.apache.hadoop.hdfs.server.namenode.INode inode)
    throws StorageException {
  // BUG FIX: setHeader/return previously sat outside the null guard, so a
  // null input dereferenced both inode and hopINode and threw NPE instead
  // of returning null as the original null-initialization intended.
  if (inode == null) {
    return null;
  }
  INode hopINode = new INode();
  // Fields common to every inode kind.
  hopINode.setModificationTime(inode.getModificationTime());
  hopINode.setAccessTime(inode.getAccessTime());
  hopINode.setName(inode.getLocalName());
  hopINode.setUserID(inode.getUserID());
  hopINode.setGroupID(inode.getGroupID());
  hopINode.setPermission(inode.getFsPermission().toShort());
  hopINode.setParentId(inode.getParentId());
  hopINode.setId(inode.getId());
  hopINode.setIsDir(inode.isDirectory());
  hopINode.setPartitionId(inode.getPartitionId());
  hopINode.setLogicalTime(inode.getLogicalTime());
  if (inode.isDirectory()) {
    hopINode.setUnderConstruction(false);
    hopINode.setDirWithQuota(inode instanceof INodeDirectoryWithQuota);
    hopINode.setMetaEnabled(((INodeDirectory) inode).isMetaEnabled());
  }
  if (inode instanceof INodeFile) {
    hopINode.setUnderConstruction(inode.isUnderConstruction());
    hopINode.setDirWithQuota(false);
    if (inode instanceof INodeFileUnderConstruction) {
      // Lease/client information only exists for files being written.
      hopINode.setClientName(
          ((INodeFileUnderConstruction) inode).getClientName());
      hopINode.setClientMachine(
          ((INodeFileUnderConstruction) inode).getClientMachine());
      hopINode.setClientNode(
          ((INodeFileUnderConstruction) inode).getClientNode() == null ?
              null : ((INodeFileUnderConstruction) inode).getClientNode()
              .getXferAddr());
    }
    hopINode.setGenerationStamp(((INodeFile) inode).getGenerationStamp());
    hopINode.setFileSize(((INodeFile) inode).getSize());
    hopINode.setFileStoredInDB(((INodeFile) inode).isFileStoredInDB());
  }
  if (inode instanceof INodeSymlink) {
    hopINode.setUnderConstruction(false);
    hopINode.setDirWithQuota(false);
    String linkValue =
        DFSUtil.bytes2String(((INodeSymlink) inode).getSymlink());
    hopINode.setSymlink(linkValue);
  }
  hopINode.setSubtreeLocked(inode.isSubtreeLocked());
  hopINode.setSubtreeLockOwner(inode.getSubtreeLockOwner());
  hopINode.setHeader(inode.getHeader());
  return hopINode;
}