当前位置: 首页>>代码示例>>Java>>正文


Java INodeFile.computeFileSize方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.INodeFile.computeFileSize方法的典型用法代码示例。如果您正苦于以下问题:Java INodeFile.computeFileSize方法的具体用法?Java INodeFile.computeFileSize怎么用?Java INodeFile.computeFileSize使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.namenode.INodeFile的用法示例。


在下文中一共展示了INodeFile.computeFileSize方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: collectBlocksAndClear

import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * If some blocks at the end of the block list no longer belongs to
 * any inode, collect them and update the block list.
 */
/**
 * Reclaims blocks past the end of the block list that are no longer
 * referenced by the current file or any snapshot diff, recording them in
 * {@code info}. Destroys the file outright when nothing references it.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // Fully deleted (no current file, no remaining diffs): reclaim everything.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }

  // Longest length still visible to any reader (live file or last diff).
  final FileDiff latest = getDiffs().getLast();
  final long maxLength;
  if (!isCurrentFileDeleted()) {
    maxLength = file.computeFileSize();
  } else {
    maxLength = latest != null ? latest.getFileSize() : 0;
  }

  // If the newest diff recorded an explicit block list, trim against that
  // list; otherwise fall back to trimming by length.
  final FileDiff newest = diffs.getLast();
  final BlockInfoContiguous[] snapshotBlocks =
      newest == null ? null : newest.getBlocks();
  if (snapshotBlocks != null) {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  } else {
    file.collectBlocksBeyondMax(maxLength, info);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:29,代码来源:FileWithSnapshotFeature.java

示例2: collectBlocksAndClear

import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * If some blocks at the end of the block list no longer belongs to
 * any inode, collect them and update the block list.
 */
/**
 * Collects blocks past the longest length still referenced by the current
 * file or any snapshot diff, updating the block list via the supplied
 * reclaim context. Clears the whole file when nothing references it.
 */
public void collectBlocksAndClear(
    INode.ReclaimContext reclaimContext, final INodeFile file) {
  // Everything gone (current file deleted, no diffs left): release the file.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.clearFile(reclaimContext);
    return;
  }

  // Longest length any remaining reader can still observe.
  final FileDiff latest = getDiffs().getLast();
  final long maxLength = isCurrentFileDeleted()
      ? (latest == null ? 0 : latest.getFileSize())
      : file.computeFileSize();

  // Prefer trimming against the newest diff's explicit block list when one
  // was recorded; otherwise trim by length.
  final FileDiff newest = diffs.getLast();
  final BlockInfo[] snapshotBlocks = newest == null ? null : newest.getBlocks();
  if (snapshotBlocks != null) {
    file.collectBlocksBeyondSnapshot(snapshotBlocks,
        reclaimContext.collectedBlocks());
  } else {
    file.collectBlocksBeyondMax(maxLength, reclaimContext.collectedBlocks());
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:30,代码来源:FileWithSnapshotFeature.java

示例3: updateQuotaAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Computes the disk-space quota change caused by removing the snapshot diff
 * {@code removed}, and collects any blocks freed by the removal.
 *
 * @return quota delta: namespace 0, diskspace = usage before minus usage after
 */
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  long spaceBefore = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    final short snapshotRepl = removed.snapshotINode.getFileReplication();
    final short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      // No live replication recorded; base the old usage on the snapshot
      // copy's replication factor.
      spaceBefore = file.computeFileSize(true, true) * snapshotRepl;
    } else if (snapshotRepl > currentRepl) {
      // The snapshot held a higher replication; scale the usage up to it.
      spaceBefore = spaceBefore / currentRepl * snapshotRepl;
    }
  }

  collectBlocksAndClear(file, collectedBlocks, removedINodes);

  final long spaceDelta = spaceBefore - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, spaceDelta);
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:20,代码来源:FileWithSnapshotFeature.java

示例4: collectBlocksAndClear

import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * If some blocks at the end of the block list no longer belongs to
 * any inode, collect them and update the block list.
 */
/**
 * Trims blocks past the largest file length still referenced by the current
 * file or its snapshot diffs; destroys the file entirely when neither the
 * current file nor any diff remains.
 */
private void collectBlocksAndClear(final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // Fully deleted (no current file, no diffs): reclaim everything.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(info, removedINodes);
    return;
  }

  // Determine the longest length still visible to any reader.
  final long maxLength;
  if (!isCurrentFileDeleted()) {
    maxLength = file.computeFileSize();
  } else {
    final FileDiff latest = getDiffs().getLast();
    maxLength = latest != null ? latest.getFileSize() : 0;
  }

  collectBlocksBeyondMax(file, maxLength, info);
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:23,代码来源:FileWithSnapshotFeature.java

示例5: updateQuotaAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Computes the disk-space quota change caused by removing the snapshot diff
 * {@code removed} from {@code currentINode}, collecting any blocks freed by
 * the removal.
 *
 * @return quota delta: namespace 0, diskspace = usage before minus usage after
 */
private static Quota.Counts updateQuotaAndCollectBlocks(
    INodeFile currentINode, FileDiff removed,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  final FileWithSnapshot snapshotFile = (FileWithSnapshot) currentINode;
  long spaceBefore = currentINode.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    final short snapshotRepl = removed.snapshotINode.getFileReplication();
    final short currentRepl = currentINode.getBlockReplication();
    if (currentRepl == 0) {
      // No live replication recorded; base the old usage on the snapshot
      // copy's replication factor.
      spaceBefore = currentINode.computeFileSize(true, true) * snapshotRepl;
    } else if (snapshotRepl > currentRepl) {
      // The snapshot held a higher replication; scale the usage up to it.
      spaceBefore = spaceBefore / currentRepl * snapshotRepl;
    }
  }

  Util.collectBlocksAndClear(snapshotFile, collectedBlocks, removedINodes);

  final long spaceDelta = spaceBefore - currentINode.diskspaceConsumed();
  return Quota.Counts.newInstance(0, spaceDelta);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:22,代码来源:FileWithSnapshot.java

示例6: changedBetweenSnapshots

import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Decides whether the file changed between two snapshots, either in length
 * or in the metadata recorded by the snapshot copies.
 *
 * @return true iff the file differs between {@code from} and {@code to}
 */
boolean changedBetweenSnapshots(INodeFile file, Snapshot from, Snapshot to) {
  final int[] indexPair = diffs.changedBetweenSnapshots(from, to);
  if (indexPair == null) {
    // No diffs recorded in the interval.
    return false;
  }
  final int earlier = indexPair[0];
  final int later = indexPair[1];

  // Compare lengths first: when the later index is past the diff list,
  // compare against the live file's current size.
  final List<FileDiff> diffList = diffs.asList();
  final long earlierLength = diffList.get(earlier).getFileSize();
  final long laterLength = (later == diffList.size())
      ? file.computeFileSize(true, false)
      : diffList.get(later).getFileSize();
  if (earlierLength != laterLength) {
    return true; // length changed between the two snapshots
  }

  // Find the first snapshot copy of the inode in [earlier, later).
  INodeFileAttributes earlierAttr = null;
  for (int i = earlier; i < later; i++) {
    final FileDiff d = diffList.get(i);
    if (d.snapshotINode != null) {
      earlierAttr = d.snapshotINode;
      break;
    }
  }
  if (earlierAttr == null) {
    // No metadata recorded at all in the interval.
    return false;
  }
  final INodeFileAttributes laterAttr = diffs.getSnapshotINode(
      Math.max(Snapshot.getSnapshotId(from), Snapshot.getSnapshotId(to)),
      file);
  return !earlierAttr.metadataEquals(laterAttr);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:34,代码来源:FileWithSnapshotFeature.java

示例7: FileDiff

import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Creates a diff recording the file's length at snapshot creation time.
 * No block list is captured at construction (starts as null).
 *
 * @param snapshotId id of the snapshot this diff belongs to
 * @param file       the file whose current length is recorded
 */
FileDiff(int snapshotId, INodeFile file) {
  super(snapshotId, null, null);
  blocks = null;
  fileSize = file.computeFileSize();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:6,代码来源:FileDiff.java

示例8: FileDiff

import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Creates a diff recording the file's length at snapshot creation time.
 *
 * @param snapshotId id of the snapshot this diff belongs to
 * @param file       the file whose current length is recorded
 */
FileDiff(int snapshotId, INodeFile file) {
  super(snapshotId, null, null);
  // Snapshot copy of the file's length at construction time.
  fileSize = file.computeFileSize();
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:5,代码来源:FileDiff.java

示例9: FileDiff

import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Creates a diff recording the file's length at snapshot creation time.
 *
 * @param snapshot the snapshot this diff belongs to
 * @param file     the file whose current length is recorded
 */
private FileDiff(Snapshot snapshot, INodeFile file) {
  super(snapshot, null, null);
  // Snapshot copy of the file's length at construction time.
  fileSize = file.computeFileSize();
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:5,代码来源:FileWithSnapshot.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.INodeFile.computeFileSize方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。