

Java BlockStoragePolicy.chooseStorageTypes Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.BlockStoragePolicy.chooseStorageTypes. If you are wondering what BlockStoragePolicy.chooseStorageTypes does, how to call it, or where to find real-world uses of it, the curated examples below may help. You can also explore further usage examples of org.apache.hadoop.hdfs.protocol.BlockStoragePolicy, the class this method belongs to.


The sections below present 12 code examples of BlockStoragePolicy.chooseStorageTypes, sorted by popularity by default.
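Before the examples, here is a minimal, self-contained sketch of the pattern they all share: resolve a BlockStoragePolicy, call chooseStorageTypes with the replication factor to get one StorageType per replica, and count quota only for types where supportTypeQuota() is true. This sketch is not taken from the examples below; it assumes the default-suite factory BlockStoragePolicySuite.createDefaultSuite() and the built-in policy name "HOT" are available in your Hadoop version.

import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class ChooseStorageTypesSketch {
  public static void main(String[] args) {
    // Resolve the built-in "HOT" policy from the default suite (assumed names).
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    BlockStoragePolicy policy = suite.getPolicy("HOT");

    // One StorageType per replica; for HOT this is typically [DISK, DISK, DISK].
    final short replication = 3;
    List<StorageType> types = policy.chooseStorageTypes(replication);

    // Charge type quota only for quota-eligible storage types, mirroring what
    // the examples below do with QuotaCounts.addTypeSpace.
    long blockBytes = 128L * 1024 * 1024; // one full 128 MB block
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        System.out.println(t + ": charge " + blockBytes + " bytes of type quota");
      }
    }
  }
}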

Example 1: verifyQuotaForTruncate

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: FSDirectory.java

Example 2: computeQuotaDeltaForUCBlock

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
/** Compute quota change for converting a complete block to a UC block */
private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
  final QuotaCounts delta = new QuotaCounts.Builder().build();
  final BlockInfoContiguous lastBlock = file.getLastBlock();
  if (lastBlock != null) {
    final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
    final short repl = file.getBlockReplication();
    delta.addStorageSpace(diff * repl);
    final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
        .getPolicy(file.getStoragePolicyID());
    List<StorageType> types = policy.chooseStorageTypes(repl);
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        delta.addTypeSpace(t, diff);
      }
    }
  }
  return delta;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: FSNamesystem.java

Example 3: verifyFile

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: TestStorageMover.java

Example 4: computeQuotaDeltaForUCBlock

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
/** Compute quota change for converting a complete block to a UC block. */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn,
    INodeFile file) {
  final QuotaCounts delta = new QuotaCounts.Builder().build();
  final BlockInfo lastBlock = file.getLastBlock();
  if (lastBlock != null) {
    final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
    final short repl = lastBlock.getReplication();
    delta.addStorageSpace(diff * repl);
    final BlockStoragePolicy policy = fsn.getFSDirectory()
        .getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
    List<StorageType> types = policy.chooseStorageTypes(repl);
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        delta.addTypeSpace(t, diff);
      }
    }
  }
  return delta;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 21, Source: FSDirAppendOp.java

Example 5: computeContentSummary

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
@Override
public final ContentSummaryComputationContext computeContentSummary(
    int snapshotId, final ContentSummaryComputationContext summary) {
  final ContentCounts counts = summary.getCounts();
  counts.addContent(Content.FILE, 1);
  final long fileLen = computeFileSize(snapshotId);
  counts.addContent(Content.LENGTH, fileLen);
  counts.addContent(Content.DISKSPACE, storagespaceConsumed(null)
      .getStorageSpace());

  if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
    BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
        getPolicy(getStoragePolicyID());
    List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, fileLen);
    }
  }
  return summary;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 24, Source: INodeFile.java

Example 6: computeContentSummary

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
@Override
public final ContentSummaryComputationContext computeContentSummary(
    final ContentSummaryComputationContext summary) {
  final ContentCounts counts = summary.getCounts();
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  long fileLen = 0;
  if (sf == null) {
    fileLen = computeFileSize();
    counts.addContent(Content.FILE, 1);
  } else {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.addContent(Content.FILE, n);
    if (n > 0 && sf.isCurrentFileDeleted()) {
      fileLen = diffs.getLast().getFileSize();
    } else {
      fileLen = computeFileSize();
    }
  }
  counts.addContent(Content.LENGTH, fileLen);
  counts.addContent(Content.DISKSPACE, storagespaceConsumed());

  if (getStoragePolicyID() != ID_UNSPECIFIED){
    BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
        getPolicy(getStoragePolicyID());
    List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, fileLen);
    }
  }
  return summary;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 36, Source: INodeFile.java

Example 7: storagespaceConsumedContiguous

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
public final QuotaCounts storagespaceConsumedContiguous(
    BlockStoragePolicy bsp) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final Iterable<BlockInfo> blocks;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    blocks = Arrays.asList(getBlocks());
  } else {
    // Collect all distinct blocks
    Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
    List<FileDiff> diffs = sf.getDiffs().asList();
    for(FileDiff diff : diffs) {
      BlockInfo[] diffBlocks = diff.getBlocks();
      if (diffBlocks != null) {
        allBlocks.addAll(Arrays.asList(diffBlocks));
      }
    }
    blocks = allBlocks;
  }

  final short replication = getPreferredBlockReplication();
  for (BlockInfo b : blocks) {
    long blockSize = b.isComplete() ? b.getNumBytes() :
        getPreferredBlockSize();
    counts.addStorageSpace(blockSize * replication);
    if (bsp != null) {
      List<StorageType> types = bsp.chooseStorageTypes(replication);
      for (StorageType t : types) {
        if (t.supportTypeQuota()) {
          counts.addTypeSpace(t, blockSize);
        }
      }
    }
  }
  return counts;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 37, Source: INodeFile.java

Example 8: computeQuotaUsage

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
@Override
public final QuotaCounts computeQuotaUsage(
    BlockStoragePolicySuite bsps, byte blockStoragePolicyId,
    QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  long nsDelta = 1;
  final long ssDeltaNoReplication;
  short replication;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();

    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      ssDeltaNoReplication = storagespaceConsumedNoReplication();
      replication = getBlockReplication();
    } else if (last < lastSnapshotId) {
      ssDeltaNoReplication = computeFileSize(true, false);
      replication = getFileReplication();
    } else {
      int sid = fileDiffList.getSnapshotById(lastSnapshotId);
      ssDeltaNoReplication = storagespaceConsumedNoReplication(sid);
      replication = getReplication(sid);
    }
  } else {
    ssDeltaNoReplication = storagespaceConsumedNoReplication();
    replication = getBlockReplication();
  }
  counts.addNameSpace(nsDelta);
  counts.addStorageSpace(ssDeltaNoReplication * replication);

  if (blockStoragePolicyId != ID_UNSPECIFIED){
    BlockStoragePolicy bsp = bsps.getPolicy(blockStoragePolicyId);
    List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, ssDeltaNoReplication);
    }
  }
  return counts;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 45, Source: INodeFile.java

Example 9: chooseTarget

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
@Override
DatanodeStorageInfo[] chooseTarget(String src,
    int numOfReplicas,
    Node writer,
    Set<Node> excludedNodes,
    long blocksize,
    List<DatanodeDescriptor> favoredNodes,
    BlockStoragePolicy storagePolicy) {
  try {
    if (favoredNodes == null || favoredNodes.size() == 0) {
      // Favored nodes not specified, fall back to regular block placement.
      return chooseTarget(src, numOfReplicas, writer,
          new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, 
          excludedNodes, blocksize, storagePolicy);
    }

    Set<Node> favoriteAndExcludedNodes = excludedNodes == null ?
        new HashSet<Node>() : new HashSet<Node>(excludedNodes);
    final List<StorageType> requiredStorageTypes = storagePolicy
        .chooseStorageTypes((short)numOfReplicas);
    final EnumMap<StorageType, Integer> storageTypes =
        getRequiredStorageTypes(requiredStorageTypes);

    // Choose favored nodes
    List<DatanodeStorageInfo> results = new ArrayList<DatanodeStorageInfo>();
    boolean avoidStaleNodes = stats != null
        && stats.isAvoidingStaleDataNodesForWrite();

    int maxNodesAndReplicas[] = getMaxNodesPerRack(0, numOfReplicas);
    numOfReplicas = maxNodesAndReplicas[0];
    int maxNodesPerRack = maxNodesAndReplicas[1];

    for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas; i++) {
      DatanodeDescriptor favoredNode = favoredNodes.get(i);
      // Choose a single node which is local to favoredNode.
      // 'results' is updated within chooseLocalNode
      final DatanodeStorageInfo target = chooseLocalStorage(favoredNode,
          favoriteAndExcludedNodes, blocksize, maxNodesPerRack,
          results, avoidStaleNodes, storageTypes, false);
      if (target == null) {
        LOG.warn("Could not find a target for file " + src
            + " with favored node " + favoredNode); 
        continue;
      }
      favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
    }

    if (results.size() < numOfReplicas) {
      // Not enough favored nodes, choose other nodes.
      numOfReplicas -= results.size();
      DatanodeStorageInfo[] remainingTargets = 
          chooseTarget(src, numOfReplicas, writer, results,
              false, favoriteAndExcludedNodes, blocksize, storagePolicy);
      for (int i = 0; i < remainingTargets.length; i++) {
        results.add(remainingTargets[i]);
      }
    }
    return getPipeline(writer,
        results.toArray(new DatanodeStorageInfo[results.size()]));
  } catch (NotEnoughReplicasException nr) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to choose with favored nodes (=" + favoredNodes
          + "), disregard favored nodes hint and retry.", nr);
    }
    // Fall back to regular block placement disregarding favored nodes hint
    return chooseTarget(src, numOfReplicas, writer, 
        new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, 
        excludedNodes, blocksize, storagePolicy);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 71, Source: BlockPlacementPolicyDefault.java

Example 10: computeQuotaUsage

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
@Override
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, boolean useCache, int lastSnapshotId) {
  final QuotaCounts counts = new QuotaCounts.Builder().nameSpace(1).build();

  final BlockStoragePolicy bsp = (blockStoragePolicyId ==
      BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) ? null :
      bsps.getPolicy(blockStoragePolicyId);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    counts.add(storagespaceConsumed(bsp));
    return counts;
  }

  FileDiffList fileDiffList = sf.getDiffs();
  int last = fileDiffList.getLastSnapshotId();

  if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
      || last == Snapshot.CURRENT_STATE_ID) {
    counts.add(storagespaceConsumed(bsp));
    return counts;
  }

  final long ssDeltaNoReplication;
  short replication;
  if (isStriped()) {
    return computeQuotaUsageWithStriped(bsp, counts);
  }

  if (last < lastSnapshotId) {
    ssDeltaNoReplication = computeFileSize(true, false);
    replication = getFileReplication();
  } else {
    int sid = fileDiffList.getSnapshotById(lastSnapshotId);
    ssDeltaNoReplication = computeFileSize(sid);
    replication = getFileReplication(sid);
  }

  counts.addStorageSpace(ssDeltaNoReplication * replication);
  if (bsp != null) {
    List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, ssDeltaNoReplication);
    }
  }
  return counts;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 51, Source: INodeFile.java

Example 11: computeQuotaDeltaForTruncate

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
/**
 * Compute the quota usage change for a truncate op.
 * @param newLength the length for truncation
 * TODO: properly handle striped blocks (HDFS-7622)
 **/
void computeQuotaDeltaForTruncate(
    long newLength, BlockStoragePolicy bsps,
    QuotaCounts delta) {
  final BlockInfo[] blocks = getBlocks();
  if (blocks == null || blocks.length == 0) {
    return;
  }

  long size = 0;
  for (BlockInfo b : blocks) {
    size += b.getNumBytes();
  }

  BlockInfo[] sblocks = null;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiff diff = sf.getDiffs().getLast();
    sblocks = diff != null ? diff.getBlocks() : null;
  }

  for (int i = blocks.length - 1; i >= 0 && size > newLength;
       size -= blocks[i].getNumBytes(), --i) {
    BlockInfo bi = blocks[i];
    long truncatedBytes;
    if (size - newLength < bi.getNumBytes()) {
      // Record a full block as the last block will be copied during
      // recovery
      truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
    } else {
      truncatedBytes = bi.getNumBytes();
    }

    // The block exists in a snapshot, so add back the truncated bytes for
    // the existing file
    if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
      truncatedBytes -= bi.getNumBytes();
    }

    delta.addStorageSpace(-truncatedBytes * bi.getReplication());
    if (bsps != null) {
      List<StorageType> types = bsps.chooseStorageTypes(bi.getReplication());
      for (StorageType t : types) {
        if (t.supportTypeQuota()) {
          delta.addTypeSpace(t, -truncatedBytes);
        }
      }
    }
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 55, Source: INodeFile.java

Example 12: chooseTarget

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the package/class the method depends on
@Override
DatanodeStorageInfo[] chooseTarget(String src,
    int numOfReplicas,
    Node writer,
    Set<Node> excludedNodes,
    long blocksize,
    List<DatanodeDescriptor> favoredNodes,
    BlockStoragePolicy storagePolicy) {
  try {
    if (favoredNodes == null || favoredNodes.size() == 0) {
      // Favored nodes not specified, fall back to regular block placement.
      return chooseTarget(src, numOfReplicas, writer,
          new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, 
          excludedNodes, blocksize, storagePolicy);
    }

    Set<Node> favoriteAndExcludedNodes = excludedNodes == null ?
        new HashSet<Node>() : new HashSet<>(excludedNodes);
    final List<StorageType> requiredStorageTypes = storagePolicy
        .chooseStorageTypes((short)numOfReplicas);
    final EnumMap<StorageType, Integer> storageTypes =
        getRequiredStorageTypes(requiredStorageTypes);

    // Choose favored nodes
    List<DatanodeStorageInfo> results = new ArrayList<>();
    boolean avoidStaleNodes = stats != null
        && stats.isAvoidingStaleDataNodesForWrite();

    int maxNodesAndReplicas[] = getMaxNodesPerRack(0, numOfReplicas);
    numOfReplicas = maxNodesAndReplicas[0];
    int maxNodesPerRack = maxNodesAndReplicas[1];

    chooseFavouredNodes(src, numOfReplicas, favoredNodes,
        favoriteAndExcludedNodes, blocksize, maxNodesPerRack, results,
        avoidStaleNodes, storageTypes);

    if (results.size() < numOfReplicas) {
      // Not enough favored nodes, choose other nodes, based on block
      // placement policy (HDFS-9393).
      numOfReplicas -= results.size();
      for (DatanodeStorageInfo storage : results) {
        // add localMachine and related nodes to favoriteAndExcludedNodes
        addToExcludedNodes(storage.getDatanodeDescriptor(),
            favoriteAndExcludedNodes);
      }
      DatanodeStorageInfo[] remainingTargets =
          chooseTarget(src, numOfReplicas, writer,
              new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
              favoriteAndExcludedNodes, blocksize, storagePolicy);
      for (int i = 0; i < remainingTargets.length; i++) {
        results.add(remainingTargets[i]);
      }
    }
    return getPipeline(writer,
        results.toArray(new DatanodeStorageInfo[results.size()]));
  } catch (NotEnoughReplicasException nr) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to choose with favored nodes (=" + favoredNodes
          + "), disregard favored nodes hint and retry.", nr);
    }
    // Fall back to regular block placement disregarding favored nodes hint
    return chooseTarget(src, numOfReplicas, writer, 
        new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, 
        excludedNodes, blocksize, storagePolicy);
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 67, Source: BlockPlacementPolicyDefault.java


Note: The org.apache.hadoop.hdfs.protocol.BlockStoragePolicy.chooseStorageTypes method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and copyright remains with the original authors; consult the license of the corresponding project before redistributing or using the code. Do not reproduce this article without permission.