

Java FileWithSnapshotFeature Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature. If you are wondering what FileWithSnapshotFeature is, what it is used for, or how to use it, the curated class examples below should help.


The FileWithSnapshotFeature class belongs to the org.apache.hadoop.hdfs.server.namenode.snapshot package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
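
Before the individual examples, here is a minimal, hypothetical sketch (not taken from any of the projects cited below) of the access pattern that nearly all of them share: obtain the feature from an INodeFile via getFileWithSnapshotFeature(), check for null, since a file that has never been captured in a snapshot carries no such feature, and only then read its FileDiffList. The helper class SnapshotFeatureProbe and the passed-in INodeFile are assumptions made for illustration.

import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;

public final class SnapshotFeatureProbe {
  /** Print basic snapshot-diff information for a file (hypothetical helper). */
  public static void printSnapshotDiffInfo(INodeFile file) {
    // The feature is only attached once the file appears in a snapshot.
    FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
    if (sf == null) {
      System.out.println("File has never been captured in a snapshot");
      return;
    }
    // The diff list records one FileDiff per snapshot that touched the file.
    FileDiffList diffs = sf.getDiffs();
    System.out.println("Recorded snapshot diffs: " + diffs.asList().size());
    System.out.println("Current file deleted: " + sf.isCurrentFileDeleted());
  }
}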

Example 1: destroyAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(BlockInfoContiguous.EMPTY_ARRAY);
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    sf.clearDiffs();
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source file: INodeFile.java

Example 2: excludeSnapshotBlocks

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
/** Exclude blocks collected for deletion that belong to a snapshot. */
void excludeSnapshotBlocks(int snapshotId,
                           BlocksMapUpdateInfo collectedBlocks) {
  if(collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty())
    return;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if(sf == null)
    return;
  BlockInfoContiguous[] snapshotBlocks =
      getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if(snapshotBlocks == null)
    return;
  List<Block> toDelete = collectedBlocks.getToDeleteList();
  for(Block blk : snapshotBlocks) {
    if(toDelete.contains(blk))
      collectedBlocks.removeDeleteBlock(blk);
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source file: INodeFile.java

Example 3: getPreferredBlockReplication

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
public short getPreferredBlockReplication() {
  short max = getFileReplication(CURRENT_STATE_ID);
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf != null) {
    short maxInSnapshot = sf.getMaxBlockRepInDiffs(null);
    if (sf.isCurrentFileDeleted()) {
      return maxInSnapshot;
    }
    max = maxInSnapshot > max ? maxInSnapshot : max;
  }
  if(!isStriped()){
    return max;
  }
  // TODO support more policies based on policyId
  ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source file: INodeFile.java

Example 4: destroyAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
@Override
public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
  // TODO pass in the storage policy
  reclaimContext.quotaDelta().add(computeQuotaUsage(reclaimContext.bsps,
      false));
  clearFile(reclaimContext);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(
        reclaimContext.collectedBlocks);
    sf.clearDiffs();
  }
  if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
    reclaimContext.removedUCFiles.add(getId());
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 17, Source file: INodeFile.java

Example 5: excludeSnapshotBlocks

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
/** Exclude blocks collected for deletion that belong to a snapshot. */
void excludeSnapshotBlocks(int snapshotId,
                           BlocksMapUpdateInfo collectedBlocks) {
  if(collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty())
    return;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if(sf == null)
    return;
  BlockInfo[] snapshotBlocks =
      getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if(snapshotBlocks == null)
    return;
  List<BlockInfo> toDelete = collectedBlocks.getToDeleteList();
  for(BlockInfo blk : snapshotBlocks) {
    if(toDelete.contains(blk))
      collectedBlocks.removeDeleteBlock(blk);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source file: INodeFile.java

Example 6: destroyAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    sf.clearDiffs();
  }
}
 
Developer: yncxcw, Project: big-c, Lines: 22, Source file: INodeFile.java

Example 7: destroyAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
@Override
public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfo blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  clear();
  removedINodes.add(this);
  
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.clearDiffs();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 19, Source file: INodeFile.java

Example 8: computeQuotaUsage

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  long nsDelta = 1;
  final long dsDelta;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();

    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      dsDelta = diskspaceConsumed();
    } else if (last < lastSnapshotId) {
      dsDelta = computeFileSize(true, false) * getFileReplication();
    } else {      
      int sid = fileDiffList.getSnapshotById(lastSnapshotId);
      dsDelta = diskspaceConsumed(sid);
    }
  } else {
    dsDelta = diskspaceConsumed();
  }
  counts.add(Quota.NAMESPACE, nsDelta);
  counts.add(Quota.DISKSPACE, dsDelta);
  return counts;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 27, Source file: INodeFile.java

Example 9: computeContentSummary4Snapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
private void computeContentSummary4Snapshot(final Content.Counts counts) {
  // file length and diskspace only counted for the latest state of the file
  // i.e. either the current state or the last snapshot
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.add(Content.FILE, n);
    if (n > 0 && sf.isCurrentFileDeleted()) {
      counts.add(Content.LENGTH, diffs.getLast().getFileSize());
    }

    if (sf.isCurrentFileDeleted()) {
      final long lastFileSize = diffs.getLast().getFileSize();
      counts.add(Content.DISKSPACE, lastFileSize * getBlockReplication());
    }
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 19, Source file: INodeFile.java

Example 10: cleanSubtree

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
@Override
public Quota.Counts cleanSubtree(final int snapshot, int priorSnapshotId,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final boolean countDiffChange)
    throws QuotaExceededException {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    return sf.cleanFile(this, snapshot, priorSnapshotId, collectedBlocks,
        removedINodes, countDiffChange);
  }
  Quota.Counts counts = Quota.Counts.newInstance();
  if (snapshot == CURRENT_STATE_ID && priorSnapshotId == NO_SNAPSHOT_ID) {
    // this only happens when deleting the current file and the file is not
    // in any snapshot
    computeQuotaUsage(counts, false);
    destroyAndCollectBlocks(collectedBlocks, removedINodes);
  } else if (snapshot == CURRENT_STATE_ID && priorSnapshotId != NO_SNAPSHOT_ID) {
    // when deleting the current file and the file is in snapshot, we should
    // clean the 0-sized block if the file is UC
    FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
    if (uc != null) {
      uc.cleanZeroSizeBlock(this, collectedBlocks);
    }
  }
  return counts;
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 27, Source file: INodeFile.java

Example 11: addSnapshotFeature

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
public FileWithSnapshotFeature addSnapshotFeature(FileDiffList diffs) {
  Preconditions.checkState(!isWithSnapshot(), 
      "File is already with snapshot");
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
  this.addFeature(sf);
  return sf;
}
 
Developer: naver, Project: hadoop, Lines: 8, Source file: INodeFile.java

Example 12: getSnapshotINode

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
@Override
public INodeFileAttributes getSnapshotINode(final int snapshotId) {
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf != null) {
    return sf.getDiffs().getSnapshotINode(snapshotId, this);
  } else {
    return this;
  }
}
 
Developer: naver, Project: hadoop, Lines: 10, Source file: INodeFile.java

Example 13: recordModification

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
public void recordModification(final int latestSnapshotId, boolean withBlocks) {
  if (isInLatestSnapshot(latestSnapshotId)
      && !shouldRecordInSrcSnapshot(latestSnapshotId)) {
    // the file is in snapshot, create a snapshot feature if it does not have
    FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
    if (sf == null) {
      sf = addSnapshotFeature(null);
    }
    // record self in the diff list if necessary
    sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null, withBlocks);
  }
}
 
Developer: naver, Project: hadoop, Lines: 13, Source file: INodeFile.java

Example 14: getDiffs

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
public FileDiffList getDiffs() {
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf != null) {
    return sf.getDiffs();
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 8, Source file: INodeFile.java

Example 15: getBlockReplication

import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; // import the required package/class
@Override // BlockCollection
public short getBlockReplication() {
  short max = getFileReplication(CURRENT_STATE_ID);
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf != null) {
    short maxInSnapshot = sf.getMaxBlockRepInDiffs();
    if (sf.isCurrentFileDeleted()) {
      return maxInSnapshot;
    }
    max = maxInSnapshot > max ? maxInSnapshot : max;
  }
  return max;
}
 
Developer: naver, Project: hadoop, Lines: 14, Source file: INodeFile.java


Note: The org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.