当前位置: 首页>>代码示例>>Java>>正文


Java INodeDirectoryWithQuota类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota的典型用法代码示例。如果您正苦于以下问题:Java INodeDirectoryWithQuota类的具体用法?Java INodeDirectoryWithQuota怎么用?Java INodeDirectoryWithQuota使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


INodeDirectoryWithQuota类属于org.apache.hadoop.hdfs.server.namenode包,在下文中一共展示了INodeDirectoryWithQuota类的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: checkQuotaUsageComputation

import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Asserts that the directory at {@code dirPath} has a quota set and that both
 * its cached quota usage and a freshly recomputed usage match the expected
 * namespace and diskspace counts.
 *
 * @param dirPath    path of the quota-carrying directory to check
 * @param expectedNs expected namespace (inode count) usage
 * @param expectedDs expected diskspace usage in bytes
 * @throws IOException if the inode lookup fails
 */
private void checkQuotaUsageComputation(final Path dirPath,
    final long expectedNs, final long expectedDs) throws IOException {
  final INode inode = fsdir.getINode(dirPath.toString());
  assertTrue(inode.isDirectory() && inode.isQuotaSet());
  final INodeDirectoryWithQuota quotaDir = (INodeDirectoryWithQuota) inode;
  // The recursively dumped tree serves as the assertion message so a failure
  // shows the full directory state.
  final String tree = quotaDir.dumpTreeRecursively().toString();
  // First verify the cached usage values on the inode itself.
  assertEquals(tree, expectedNs, quotaDir.getNamespace());
  assertEquals(tree, expectedDs, quotaDir.getDiskspace());
  // Then recompute usage from scratch (useCache = false) and verify again.
  final Quota.Counts recomputed = Quota.Counts.newInstance();
  quotaDir.computeQuotaUsage(recomputed, false);
  assertEquals(tree, expectedNs, recomputed.get(Quota.NAMESPACE));
  assertEquals(tree, expectedDs, recomputed.get(Quota.DISKSPACE));
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:17,代码来源:TestSnapshotDeletion.java

示例2: testSpaceCommands

import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Tests HDFS operations that change the disk space consumed by a directory
 * tree, namely create, rename, delete, append, and setReplication.
 *
 * This is based on testNamespaceCommands() above.
 */
public void testSpaceCommands() throws Exception {
  final Configuration conf = new Configuration();
  // Use a small block size so that diskspace quotas can be exercised with
  // small files.
  conf.set("dfs.block.size", "512");
  conf.setBoolean("dfs.support.append", true);
  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  try {
    // All setup after cluster creation now sits inside the try block: the
    // original leaked the MiniDFSCluster if getFileSystem(), the assertion,
    // or the root-inode lookup threw before the try was entered.
    final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: "+fs.getUri(),
                fs instanceof DistributedFileSystem);
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;

    final FSDirectory fsd = cluster.getNameNode().namesystem.dir;
    final INodeDirectoryWithQuota rootDir =
        (INodeDirectoryWithQuota) fsd.getExistingPathINodes("/")[0];
    // Fill the root with files sized to straddle block boundaries.
    generateFiles(dfs, rootDir, 1024, 512);
    generateFiles(dfs, rootDir, 1019, 512);
  } finally {
    cluster.shutdown();
  }
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:29,代码来源:TestQuota.java

示例3: testSpaceCommands

import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Tests HDFS operations that change the disk space consumed by a directory
 * tree, namely create, rename, delete, append, and setReplication.
 *
 * This is based on testNamespaceCommands() above.
 */
@Test
 public void testSpaceCommands() throws Exception {
   // Bring the cluster up with a smaller block size and append support, as
   // this test exercises diskspace quotas via small appended files.
   setUp(true, true);

   final FSDirectory fsd = cluster.getNameNode().namesystem.dir;
   final INodeDirectoryWithQuota rootDir =
       (INodeDirectoryWithQuota) fsd.getExistingPathINodes("/")[0];
   // Generate files sized to straddle block boundaries under the root.
   generateFiles(dfs, rootDir, 1024, 512);
   generateFiles(dfs, rootDir, 1019, 512);
 }
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:18,代码来源:TestQuota.java

示例4: acquireINodeAttributes

import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Acquires locks on the INodeAttributes rows of every resolved inode that is
 * a quota-carrying directory ({@code INodeDirectoryWithQuota}).
 *
 * @throws StorageException            on a storage-layer failure
 * @throws TransactionContextException on a transaction-context failure
 */
protected void acquireINodeAttributes()
    throws StorageException, TransactionContextException {
  final List<INodeCandidatePrimaryKey> primaryKeys = new ArrayList<>();
  for (final INode resolved : getAllResolvedINodes()) {
    // Only quota directories have attribute rows that need locking.
    if (resolved instanceof INodeDirectoryWithQuota) {
      primaryKeys.add(new INodeCandidatePrimaryKey(resolved.getId()));
    }
  }
  acquireLockList(DEFAULT_LOCK_TYPE, INodeAttributes.Finder.ByINodeIds,
      primaryKeys);
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:14,代码来源:BaseINodeLock.java

示例5: convertHDFStoDAL

import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; //导入依赖的package包/类
/**
 * Converts a namenode inode into its DAL (data-access-layer) representation.
 *
 * @param inode the namenode inode to convert; may be {@code null}
 * @return the DAL inode, or {@code null} when {@code inode} is {@code null}
 * @throws StorageException on a storage-layer failure
 */
@Override
public INode convertHDFStoDAL(
    org.apache.hadoop.hdfs.server.namenode.INode inode)
    throws StorageException {
  // Bug fix: the original called hopINode.setHeader(inode.getHeader())
  // OUTSIDE the null guard, so a null argument always threw a
  // NullPointerException instead of honoring null-in/null-out.
  if (inode == null) {
    return null;
  }
  INode hopINode = new INode();
  // Attributes common to every inode kind.
  hopINode.setModificationTime(inode.getModificationTime());
  hopINode.setAccessTime(inode.getAccessTime());
  hopINode.setName(inode.getLocalName());

  hopINode.setUserID(inode.getUserID());
  hopINode.setGroupID(inode.getGroupID());
  hopINode.setPermission(inode.getFsPermission().toShort());
  hopINode.setParentId(inode.getParentId());
  hopINode.setId(inode.getId());
  hopINode.setIsDir(inode.isDirectory());
  hopINode.setPartitionId(inode.getPartitionId());
  hopINode.setLogicalTime(inode.getLogicalTime());

  if (inode.isDirectory()) {
    hopINode.setUnderConstruction(false);
    hopINode.setDirWithQuota(inode instanceof INodeDirectoryWithQuota);
    hopINode.setMetaEnabled(((INodeDirectory) inode).isMetaEnabled());
  }
  if (inode instanceof INodeFile) {
    INodeFile file = (INodeFile) inode;
    hopINode.setUnderConstruction(inode.isUnderConstruction());
    hopINode.setDirWithQuota(false);
    if (inode instanceof INodeFileUnderConstruction) {
      INodeFileUnderConstruction uc = (INodeFileUnderConstruction) inode;
      hopINode.setClientName(uc.getClientName());
      hopINode.setClientMachine(uc.getClientMachine());
      // The client node may be unset; guard before taking its address.
      hopINode.setClientNode(
          uc.getClientNode() == null ? null : uc.getClientNode().getXferAddr());
    }
    hopINode.setGenerationStamp(file.getGenerationStamp());
    hopINode.setFileSize(file.getSize());
    hopINode.setFileStoredInDB(file.isFileStoredInDB());
  }
  if (inode instanceof INodeSymlink) {
    hopINode.setUnderConstruction(false);
    hopINode.setDirWithQuota(false);

    String linkValue =
        DFSUtil.bytes2String(((INodeSymlink) inode).getSymlink());
    hopINode.setSymlink(linkValue);
  }
  hopINode.setSubtreeLocked(inode.isSubtreeLocked());
  hopINode.setSubtreeLockOwner(inode.getSubtreeLockOwner());
  hopINode.setHeader(inode.getHeader());
  return hopINode;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:59,代码来源:INodeDALAdaptor.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。