

Java Quota.Counts Method Code Examples

This article collects typical usage examples of the Java Quota.Counts method from org.apache.hadoop.hdfs.server.namenode.Quota. If you are unsure how Quota.Counts is used in practice, or are looking for concrete examples, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.Quota.


The sections below present 15 code examples of the Quota.Counts method, sorted by popularity by default.
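Before the examples, here is a minimal sketch of the Quota.Counts idioms that recur throughout them: creating a counter with newInstance(), accumulating values with add(), and reading them back with get(). Only those calls are taken from the examples below; the wrapper class and method names are hypothetical, and the sketch assumes it is compiled alongside the Hadoop 2.x namenode sources where Quota.Counts is defined.

import org.apache.hadoop.hdfs.server.namenode.Quota;

// Hypothetical helper showing the recurring Quota.Counts calls; it only
// compiles against the Hadoop 2.x namenode sources that define Quota.
class QuotaCountsSketch {
  static Quota.Counts tally() {
    // Start a counter with both quota dimensions at zero.
    Quota.Counts counts = Quota.Counts.newInstance();
    // Record one namespace item (e.g. an INode) and some consumed disk space.
    counts.add(Quota.NAMESPACE, 1);
    counts.add(Quota.DISKSPACE, 1024L);
    // Read the accumulated values back.
    long ns = counts.get(Quota.NAMESPACE);
    long ds = counts.get(Quota.DISKSPACE);
    // A counter can also be created with explicit initial values,
    // as in Quota.Counts.newInstance(0, dsDelta) in Example 2.
    return Quota.Counts.newInstance(ns, ds);
  }
}

Note that Quota.Counts carries two counters keyed by the Quota enum (NAMESPACE and DISKSPACE), which is why the examples pass a Quota constant to both add() and get().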

Example 1: combinePosteriorAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
Quota.Counts combinePosteriorAndCollectBlocks(
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final Quota.Counts counts = Quota.Counts.newInstance();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(counts, false);
        inode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
 
Developer: yncxcw, Project: FlexMap, Lines: 19, Source: DirectoryWithSnapshotFeature.java

Example 2: updateQuotaAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  long oldDiskspace = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = file.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {  
      oldDiskspace = oldDiskspace / file.getBlockReplication() * replication;
    }
  }
  
  collectBlocksAndClear(file, collectedBlocks, removedINodes);
  
  long dsDelta = oldDiskspace - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
 
Developer: yncxcw, Project: FlexMap, Lines: 20, Source: FileWithSnapshotFeature.java

Example 3: computeQuotaUsage

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  if ((useCache && isQuotaSet()) || lastSnapshotId == Snapshot.INVALID_ID) {
    return super.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }
  
  Snapshot lastSnapshot = diffs.getSnapshotById(lastSnapshotId);
  
  ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
  for (INode child : childrenList) {
    child.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }
  
  counts.add(Quota.NAMESPACE, 1);
  return counts;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: INodeDirectoryWithSnapshot.java

Example 4: updateQuotaAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
private static Quota.Counts updateQuotaAndCollectBlocks(
    INodeFile currentINode, FileDiff removed,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
  long oldDiskspace = currentINode.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = currentINode.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = currentINode.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {  
      oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
          * replication;
    }
  }
  
  Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);
  
  long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 22, Source: FileWithSnapshot.java

Example 5: cleanSubtree

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final boolean countDiffChange) 
    throws QuotaExceededException {
  if (snapshot == null) { // delete the current file
    recordModification(prior, null);
    isCurrentFileDeleted = true;
    Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete a snapshot
    prior = getDiffs().updatePrior(snapshot, prior);
    return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
        removedINodes, countDiffChange);
  }
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 17, Source: INodeFileWithSnapshot.java

Example 6: destroyCreatedList

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/** clear the created list */
private Quota.Counts destroyCreatedList(
    final INodeDirectoryWithSnapshot currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  Quota.Counts counts = Quota.Counts.newInstance();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(counts, true);
    c.destroyAndCollectBlocks(collectedBlocks, removedINodes);
    // c should be contained in the children list, remove it
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 17, Source: INodeDirectoryWithSnapshot.java

Example 7: undoRename4DstParent

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Undo the rename operation for the dst tree, i.e., if the rename operation
 * (with OVERWRITE option) removes a file/dir from the dst tree, add it back
 * and delete possible record in the deleted list.  
 */
public void undoRename4DstParent(final INode deletedChild,
    Snapshot latestSnapshot) throws QuotaExceededException {
  boolean removeDeletedChild = diffs.removeChild(ListType.DELETED,
      deletedChild);
  // pass null for inodeMap since the parent node will not get replaced when
  // undoing rename
  final boolean added = addChild(deletedChild, true, removeDeletedChild ? null
      : latestSnapshot, null);
  // update quota usage if adding is successfully and the old child has not
  // been stored in deleted list before
  if (added && !removeDeletedChild) {
    final Quota.Counts counts = deletedChild.computeQuotaUsage();
    addSpaceConsumed(counts.get(Quota.NAMESPACE),
        counts.get(Quota.DISKSPACE), false);
  }
}
 
Developer: huiyi-learning, Project: hardfs, Lines: 22, Source: INodeDirectoryWithSnapshot.java

Example 8: combinePosteriorAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
Quota.Counts combinePosteriorAndCollectBlocks(INodeFile currentINode,
    FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  return currentINode.getFileWithSnapshotFeature()
      .updateQuotaAndCollectBlocks(currentINode, posterior, collectedBlocks,
          removedINodes);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 9, Source: FileDiff.java

Example 9: destroyDiffAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  return currentINode.getFileWithSnapshotFeature()
      .updateQuotaAndCollectBlocks(currentINode, this, collectedBlocks,
          removedINodes);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 8, Source: FileDiff.java

Example 10: removeSnapshot

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Remove the snapshot with the given name from {@link #snapshotsByNames},
 * and delete all the corresponding DirectoryDiff.
 *
 * @param snapshotRoot The directory where we take snapshots
 * @param snapshotName The name of the snapshot to be removed
 * @param collectedBlocks Used to collect information to update blocksMap
 * @return The removed snapshot. Null if no snapshot with the given name
 *         exists.
 */
public Snapshot removeSnapshot(INodeDirectory snapshotRoot,
    String snapshotName, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws SnapshotException {
  final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
  if (i < 0) {
    throw new SnapshotException("Cannot delete snapshot " + snapshotName
        + " from path " + snapshotRoot.getFullPathName()
        + ": the snapshot does not exist.");
  } else {
    final Snapshot snapshot = snapshotsByNames.get(i);
    int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
    try {
      Quota.Counts counts = snapshotRoot.cleanSubtree(snapshot.getId(),
          prior, collectedBlocks, removedINodes);
      INodeDirectory parent = snapshotRoot.getParent();
      if (parent != null) {
        // there will not be any WithName node corresponding to the deleted
        // snapshot, thus only update the quota usage in the current tree
        parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
            -counts.get(Quota.DISKSPACE), true);
      }
    } catch(QuotaExceededException e) {
      INode.LOG.error("BUG: removeSnapshot increases namespace usage.", e);
    }
    // remove from snapshotsByNames after successfully cleaning the subtree
    snapshotsByNames.remove(i);
    return snapshot;
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 40, Source: DirectorySnapshottableFeature.java

Example 11: destroyCreatedList

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/** clear the created list */
private Quota.Counts destroyCreatedList(final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  Quota.Counts counts = Quota.Counts.newInstance();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(counts, true);
    c.destroyAndCollectBlocks(collectedBlocks, removedINodes);
    // c should be contained in the children list, remove it
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 16, Source: DirectoryWithSnapshotFeature.java

Example 12: testRenameExceedQuota

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Test the rename undo when quota of dst tree is exceeded after rename.
 */
@Test
public void testRenameExceedQuota() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subfile_dir2 = new Path(sub_dir2, "subfile");
  hdfs.mkdirs(dir1);
  DFSTestUtil.createFile(hdfs, subfile_dir2, BLOCKSIZE, REPL, SEED);
  
  final Path foo = new Path(dir1, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // set ns quota of dir2 to 4, so the current remaining is 1 (already has
  // dir2, sub_dir2, subfile_dir2, and s2)
  hdfs.setQuota(dir2, 5, Long.MAX_VALUE - 1);
  
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subfile_dir2. 
  // FSDirectory#verifyQuota4Rename will pass since foo only be counted 
  // as 1 in NS quota. The rename operation will succeed while the real quota 
  // of dir2 will become 7 (dir2, s2 in dir2, sub_dir2, s2 in sub_dir2,
  // subfile_dir2 in deleted list, new subfile, s1 in new subfile).
  hdfs.rename(foo, subfile_dir2, Rename.OVERWRITE);
  
  // check dir2
  INode dir2Node = fsdir.getINode4Write(dir2.toString());
  assertTrue(dir2Node.asDirectory().isSnapshottable());
  Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(7, counts.get(Quota.NAMESPACE));
  assertEquals(BLOCKSIZE * REPL * 2, counts.get(Quota.DISKSPACE));
}
 
Developer: yncxcw, Project: FlexMap, Lines: 38, Source: TestRenameWithSnapshots.java

Example 13: testRenameExceedQuota

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Test the rename undo when quota of dst tree is exceeded after rename.
 */
@Test
public void testRenameExceedQuota() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subfile_dir2 = new Path(sub_dir2, "subfile");
  hdfs.mkdirs(dir1);
  DFSTestUtil.createFile(hdfs, subfile_dir2, BLOCKSIZE, REPL, SEED);
  
  final Path foo = new Path(dir1, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // set ns quota of dir2 to 4, so the current remaining is 1 (already has
  // dir2, sub_dir2, subfile_dir2, and s2)
  hdfs.setQuota(dir2, 5, Long.MAX_VALUE - 1);
  
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subfile_dir2. 
  // FSDirectory#verifyQuota4Rename will pass since foo only be counted 
  // as 1 in NS quota. The rename operation will succeed while the real quota 
  // of dir2 will become 7 (dir2, s2 in dir2, sub_dir2, s2 in sub_dir2,
  // subfile_dir2 in deleted list, new subfile, s1 in new subfile).
  hdfs.rename(foo, subfile_dir2, Rename.OVERWRITE);
  
  // check dir2
  INode dir2Node = fsdir.getINode4Write(dir2.toString());
  assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
  Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(7, counts.get(Quota.NAMESPACE));
  assertEquals(BLOCKSIZE * REPL * 2, counts.get(Quota.DISKSPACE));
}
 
Developer: huiyi-learning, Project: hardfs, Lines: 38, Source: TestRenameWithSnapshots.java

Example 14: removeSnapshot

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Remove the snapshot with the given name from {@link #snapshotsByNames},
 * and delete all the corresponding DirectoryDiff.
 * 
 * @param snapshotName The name of the snapshot to be removed
 * @param collectedBlocks Used to collect information to update blocksMap
 * @return The removed snapshot. Null if no snapshot with the given name 
 *         exists.
 */
Snapshot removeSnapshot(String snapshotName,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
    throws SnapshotException {
  final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
  if (i < 0) {
    throw new SnapshotException("Cannot delete snapshot " + snapshotName
        + " from path " + this.getFullPathName()
        + ": the snapshot does not exist.");
  } else {
    final Snapshot snapshot = snapshotsByNames.get(i);
    Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot);
    try {
      Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks,
          removedINodes, true);
      INodeDirectory parent = getParent();
      if (parent != null) {
        // there will not be any WithName node corresponding to the deleted 
        // snapshot, thus only update the quota usage in the current tree
        parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
            -counts.get(Quota.DISKSPACE), true);
      }
    } catch(QuotaExceededException e) {
      LOG.error("BUG: removeSnapshot increases namespace usage.", e);
    }
    // remove from snapshotsByNames after successfully cleaning the subtree
    snapshotsByNames.remove(i);
    return snapshot;
  }
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 39, Source: INodeDirectorySnapshottable.java

Example 15: computeQuotaUsage4CurrentDirectory

import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) {
  for(DirectoryDiff d : diffs) {
    for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
      deleted.computeQuotaUsage(counts, false, Snapshot.CURRENT_STATE_ID);
    }
  }
  counts.add(Quota.NAMESPACE, diffs.asList().size());
  return counts;
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 10, Source: DirectoryWithSnapshotFeature.java


Note: The org.apache.hadoop.hdfs.server.namenode.Quota.Counts method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.