

Java INodeFile Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.INodeFile. If you are wondering what INodeFile is for, or how to use it in your own code, the curated examples below may help.


The INodeFile class belongs to the org.apache.hadoop.hdfs.server.namenode package. Fifteen code examples are shown below, sorted by popularity by default.
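For orientation before the examples: INodeFile is the NameNode's in-memory representation of a file in the HDFS namespace tree. Inside NameNode code it is usually reached by resolving a path through an FSDirectory and narrowing the generic INode to a file. A minimal sketch of that pattern, assuming a method with access to an FSDirectory field named fsdir (the field name and the path are illustrative placeholders, not part of any example below):

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

// Resolve a path to its inode and narrow it to a file; asFile() throws
// if the inode is actually a directory or a symlink.
INode inode = fsdir.getINode("/user/alice/data.txt");
if (inode != null && inode.isFile()) {
  INodeFile file = inode.asFile();
  long size = file.computeFileSize(); // current length of the file in bytes
}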

Example 1: cleanFile

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
 
Developer: naver | Project: hadoop | Lines: 19 | Source: FileWithSnapshotFeature.java

Example 2: collectBlocksAndClear

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
 
Developer: naver | Project: hadoop | Lines: 29 | Source: FileWithSnapshotFeature.java

Example 3: checkSnapshotFileReplication

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
    final INodeFile ssInode = iip.getLastINode().asFile();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
 
Developer: naver | Project: hadoop | Lines: 31 | Source: TestSnapshotReplication.java
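The test above relies on a getINodeFile helper that is not part of the snippet. A plausible sketch consistent with how fsdir is used here (the exact body in the original test may differ):

private INodeFile getINodeFile(Path p) throws Exception {
  // Resolve the path through the test's FSDirectory and insist on a file
  // inode; INodeFile.valueOf throws FileNotFoundException otherwise.
  final String path = p.toString();
  return INodeFile.valueOf(fsdir.getINode(path), path);
}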

Example 4: cleanFile

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public void cleanFile(INode.ReclaimContext reclaimContext,
    final INodeFile file, final int snapshotId, int priorSnapshotId,
    byte storagePolicyId) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    final BlockStoragePolicy policy = reclaimContext.storagePolicySuite()
        .getPolicy(storagePolicyId);
    QuotaCounts old = file.storagespaceConsumed(policy);
    collectBlocksAndClear(reclaimContext, file);
    QuotaCounts current = file.storagespaceConsumed(policy);
    reclaimContext.quotaDelta().add(old.subtract(current));
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId,
        file);
  }
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 22 | Source: FileWithSnapshotFeature.java

Example 5: collectBlocksAndClear

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(
    INode.ReclaimContext reclaimContext, final INodeFile file) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.clearFile(reclaimContext);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfo[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks());
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks,
        reclaimContext.collectedBlocks());
  }
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 30 | Source: FileWithSnapshotFeature.java

Example 6: acquire

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
@Override
protected void acquire(TransactionLocks locks) throws IOException {
  BlockLock blockLock = (BlockLock) locks.getLock(Type.Block);
  for (INodeFile iNodeFile : blockLock.getFiles()) {
    Block lastBlock = iNodeFile.getLastBlock();
    if (lastBlock != null) { // reuse the value fetched above instead of a second lookup
      List<Replica> replicas = (List<Replica>) EntityManager
          .findList(Replica.Finder.ByBlockIdAndINodeId,
              lastBlock.getBlockId(),
              iNodeFile.getId());
      if (replicas != null) {
        Collections.sort(replicas, new Comparator<Replica>() {
          @Override
          public int compare(Replica o1, Replica o2) {
            return Integer.compare(o1.getBucketId(), o2.getBucketId());
          }
        });

        for (Replica replica : replicas) {
          EntityManager.find(HashBucket.Finder.ByStorageIdAndBucketId, replica
              .getStorageId(), replica.getBucketId());
        }
      }
    }
  }
}
 
Developer: hopshadoop | Project: hops | Lines: 27 | Source: LastBlockReplicasHashBucketLock.java
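On Java 8 and later the anonymous Comparator above collapses to a one-liner; an equivalent form, assuming getBucketId() returns an int:

// Equivalent, boxing-free ordering by bucket id (Java 8+).
Collections.sort(replicas, Comparator.comparingInt(Replica::getBucketId));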

Example 7: cleanFile

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 18 | Source: FileWithSnapshotFeature.java

Example 8: updateQuotaAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  long oldDiskspace = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = file.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {
      oldDiskspace = oldDiskspace / currentRepl * replication;
    }
  }
  
  collectBlocksAndClear(file, collectedBlocks, removedINodes);
  
  long dsDelta = oldDiskspace - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 20 | Source: FileWithSnapshotFeature.java
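The scaling branch in the middle is easiest to read with numbers: if the file currently consumes oldDiskspace = 600 bytes at currentRepl = 2, and the snapshot copy recorded replication = 3, then 600 / 2 * 3 = 900 bytes is the space the file occupied when the snapshot was taken. That recomputed baseline is what the quota delta (dsDelta) is measured against after the snapshot's blocks are collected.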

Example 9: collectBlocksAndClear

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
private void collectBlocksAndClear(final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  if (isCurrentFileDeleted()) {
    final FileDiff last = getDiffs().getLast();
    max = last == null ? 0 : last.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  collectBlocksBeyondMax(file, max, info);
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 23 | Source: FileWithSnapshotFeature.java

Example 10: serializeFileDiffList

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
private void serializeFileDiffList(INodeFile file, OutputStream out)
    throws IOException {
  FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
  if (sf != null) {
    List<FileDiff> diffList = sf.getDiffs().asList();
    SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
        .newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
        .setNumOfDiff(diffList.size()).build();
    entry.writeDelimitedTo(out);
    for (int i = diffList.size() - 1; i >= 0; i--) {
      FileDiff diff = diffList.get(i);
      SnapshotDiffSection.FileDiff.Builder fb = SnapshotDiffSection.FileDiff
          .newBuilder().setSnapshotId(diff.getSnapshotId())
          .setFileSize(diff.getFileSize());
      INodeFileAttributes copy = diff.snapshotINode;
      if (copy != null) {
        fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
            .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
      }
      fb.build().writeDelimitedTo(out);
    }
  }
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 24 | Source: FSImageFormatPBSnapshot.java

Example 11: checkSnapshotFileReplication

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number that should be returned by
 *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
    final INodeFile ssInode = (INodeFile)iip.getLastINode();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 32 | Source: TestSnapshotReplication.java

Example 12: checkRaidProgress

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Count the number of live replicas of each parity block in the raided file.
 * If any stripe does not have enough parity block replicas, add the stripe
 * to raidEncodingTasks to schedule encoding.
 * If forceAdd is true, we always add the stripe to raidEncodingTasks
 * without checking.
 * @param sourceINode
 * @param raidEncodingTasks
 * @param fs
 * @param forceAdd
 * @return true if all parity blocks of the file have enough replicas
 * @throws IOException
 */
public boolean checkRaidProgress(INodeFile sourceINode, 
    LightWeightLinkedSet<RaidBlockInfo> raidEncodingTasks, FSNamesystem fs,
    boolean forceAdd) throws IOException {
  boolean result = true;
  BlockInfo[] blocks = sourceINode.getBlocks();
  for (int i = 0; i < blocks.length; i += numStripeBlocks) {
    boolean hasParity = true;
    if (!forceAdd) {
      for (int j = 0; j < numParityBlocks; j++) {
        if (fs.countLiveNodes(blocks[i + j]) < this.parityReplication) {
          hasParity = false;
          break;
        }
      }
    }
    if (!hasParity || forceAdd) {
      raidEncodingTasks.add(new RaidBlockInfo(blocks[i], parityReplication, i));
      result = false; 
    }
  }
  return result;
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 37 | Source: RaidCodec.java
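The loop walks the block list one stripe at a time. With hypothetical values numStripeBlocks = 10 and numParityBlocks = 4, iterations i = 0, 10, 20, ... inspect blocks[i] through blocks[i + 3], i.e. the parity blocks that this codec lays out at the head of each stripe. A stripe is queued for re-encoding only when one of those parity blocks has fewer than parityReplication live replicas, or unconditionally when forceAdd is set.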

Example 13: updateQuotaAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
private static Quota.Counts updateQuotaAndCollectBlocks(
    INodeFile currentINode, FileDiff removed,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
  long oldDiskspace = currentINode.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = currentINode.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = currentINode.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {
      oldDiskspace = oldDiskspace / currentRepl * replication;
    }
  }
  
  Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);
  
  long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 22 | Source: FileWithSnapshot.java

Example 14: checkSnapshotFileReplication

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number that should be returned by
 *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
    final INodeFile ssInode = (INodeFile)iip.getLastINode();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshot()));
  }
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 32 | Source: TestSnapshotReplication.java

Example 15: cleanFile

import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final boolean countDiffChange)
    throws QuotaExceededException {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes, countDiffChange);
  }
}
 
Developer: yncxcw | Project: FlexMap | Lines: 19 | Source: FileWithSnapshotFeature.java
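Read together, examples 1, 4, 7, and 15 trace the evolution of the same FileWithSnapshotFeature#cleanFile API across Hadoop lineages: the older variants (examples 7 and 15) return Quota.Counts and thread collectedBlocks/removedINodes through every call, example 1 switches to QuotaCounts plus an explicit BlockStoragePolicySuite, and example 4 folds all of that bookkeeping into a single INode.ReclaimContext. Comparing them side by side is a quick way to see how the snapshot-deletion bookkeeping was progressively consolidated.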


Note: the org.apache.hadoop.hdfs.server.namenode.INodeFile examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not republish without permission.