

Java ReplicaInfo.getVolume Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getVolume. If you have been wondering what ReplicaInfo.getVolume does, how to call it, or where to find real-world examples of it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.


The following presents 8 code examples of the ReplicaInfo.getVolume method, sorted by popularity by default.
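Before the examples, here is a minimal sketch of the lookup-then-getVolume pattern that all of them share. The helper name volumeOf and the volumeMap field are assumptions standing in for members of FsDatasetImpl; treat this as an illustration rather than a drop-in method.

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

// Hypothetical helper: look up the replica for a block and return the volume
// hosting it, or null if the block is unknown to this datanode.
// `volumeMap` stands in for the dataset's ReplicaMap field.
private FsVolumeSpi volumeOf(String bpid, long blockId) {
  ReplicaInfo info = volumeMap.get(bpid, blockId);  // null if no such replica
  return info != null ? info.getVolume() : null;
}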

Example 1: removeOldReplica

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the package/class this method depends on
private void removeOldReplica(ReplicaInfo replicaInfo,
    ReplicaInfo newReplicaInfo, File blockFile, File metaFile,
    long blockFileUsed, long metaFileUsed, final String bpid) {
  // Before deleting the files from old storage we must notify the
  // NN that the files are on the new storage. Else a blockReport from
  // the transient storage might cause the NN to think the blocks are lost.
  // Replicas must be evicted from client short-circuit caches, because the
  // storage will no longer be same, and thus will require validating
  // checksum.  This also stops a client from holding file descriptors,
  // which would prevent the OS from reclaiming the memory.
  ExtendedBlock extendedBlock =
      new ExtendedBlock(bpid, newReplicaInfo);
  datanode.getShortCircuitRegistry().processBlockInvalidation(
      ExtendedBlockId.fromExtendedBlock(extendedBlock));
  datanode.notifyNamenodeReceivedBlock(
      extendedBlock, null, newReplicaInfo.getStorageUuid());

  // Remove the old replicas
  if (blockFile.delete() || !blockFile.exists()) {
    FsVolumeImpl volume = (FsVolumeImpl) replicaInfo.getVolume();
    volume.onBlockFileDeletion(bpid, blockFileUsed);
    if (metaFile.delete() || !metaFile.exists()) {
      volume.onMetaFileDeletion(bpid, metaFileUsed);
    }
  }

  // If deletion failed then the directory scanner will cleanup the blocks
  // eventually.
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 30, Source: FsDatasetImpl.java

Example 2: getHdfsBlocksMetadata

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the package/class this method depends on
@Override // FsDatasetSpi
public HdfsBlocksMetadata getHdfsBlocksMetadata(String poolId,
    long[] blockIds) throws IOException {
  // List of VolumeIds, one per volume on the datanode
  List<byte[]> blocksVolumeIds = new ArrayList<byte[]>(volumes.volumes.size());
  // List of indexes into the list of VolumeIds, pointing at the VolumeId of
  // the volume that the block is on
  List<Integer> blocksVolumeIndexes = new ArrayList<Integer>(blockIds.length);
  // Initialize the list of VolumeIds simply by enumerating the volumes
  for (int i = 0; i < volumes.volumes.size(); i++) {
    blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
  }
  // Determine the index of the VolumeId of each block's volume, by comparing 
  // the block's volume against the enumerated volumes
  for (int i = 0; i < blockIds.length; i++) {
    long blockId = blockIds[i];
    boolean isValid = false;

    ReplicaInfo info = volumeMap.get(poolId, blockId);
    int volumeIndex = 0;
    if (info != null) {
      FsVolumeSpi blockVolume = info.getVolume();
      for (FsVolumeImpl volume : volumes.volumes) {
        // This comparison of references should be safe
        if (blockVolume == volume) {
          isValid = true;
          break;
        }
        volumeIndex++;
      }
    }
    // Indicates that the block is not present, or not found in a data dir
    if (!isValid) {
      volumeIndex = Integer.MAX_VALUE;
    }
    blocksVolumeIndexes.add(volumeIndex);
  }
  return new HdfsBlocksMetadata(poolId, blockIds,
      blocksVolumeIds, blocksVolumeIndexes);
}
 
Developer: yncxcw, Project: FlexMap, Lines of code: 41, Source: FsDatasetImpl.java
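Since each VolumeId above is just a 4-byte big-endian encoding of the volume's position (ByteBuffer.allocate(4).putInt(i).array()), a caller can recover the volume index from the returned metadata. Below is a hedged sketch, assuming the two lists are the same ones built above; HdfsBlocksMetadata's real accessors may differ.

import java.nio.ByteBuffer;
import java.util.List;

// Map a block's position in the request back to a volume index, or -1 when
// the server marked the block as not found (Integer.MAX_VALUE sentinel).
static int volumeIndexForBlock(List<byte[]> volumeIds,
    List<Integer> volumeIndexes, int blockPos) {
  int idx = volumeIndexes.get(blockPos);
  if (idx == Integer.MAX_VALUE) {
    return -1;  // block not present on any enumerated volume
  }
  // Each VolumeId was written with ByteBuffer.putInt, i.e. big-endian.
  return ByteBuffer.wrap(volumeIds.get(idx)).getInt();
}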

Example 3: getVolume

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the package/class this method depends on
@Override
public synchronized FsVolumeImpl getVolume(final ExtendedBlock b) {
  final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
  return r != null ? (FsVolumeImpl) r.getVolume() : null;
}
 
Developer: naver, Project: hadoop, Lines of code: 6, Source: FsDatasetImpl.java

Example 4: cacheBlock

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the package/class this method depends on
/**
 * Asynchronously attempts to cache a single block via {@link FsDatasetCache}.
 */
private void cacheBlock(String bpid, long blockId) {
  FsVolumeImpl volume;
  String blockFileName;
  long length, genstamp;
  Executor volumeExecutor;

  synchronized (this) {
    ReplicaInfo info = volumeMap.get(bpid, blockId);
    boolean success = false;
    try {
      if (info == null) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
            bpid + ": ReplicaInfo not found.");
        return;
      }
      if (info.getState() != ReplicaState.FINALIZED) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
            bpid + ": replica is not finalized; it is in state " +
            info.getState());
        return;
      }
      try {
        volume = (FsVolumeImpl)info.getVolume();
        if (volume == null) {
          LOG.warn("Failed to cache block with id " + blockId + ", pool " +
              bpid + ": volume not found.");
          return;
        }
      } catch (ClassCastException e) {
        LOG.warn("Failed to cache block with id " + blockId +
            ": volume was not an instance of FsVolumeImpl.");
        return;
      }
      if (volume.isTransientStorage()) {
        LOG.warn("Caching not supported on block with id " + blockId +
            " since the volume is backed by RAM.");
        return;
      }
      success = true;
    } finally {
      if (!success) {
        cacheManager.numBlocksFailedToCache.incrementAndGet();
      }
    }
    blockFileName = info.getBlockFile().getAbsolutePath();
    length = info.getVisibleLength();
    genstamp = info.getGenerationStamp();
    volumeExecutor = volume.getCacheExecutor();
  }
  cacheManager.cacheBlock(blockId, bpid, 
      blockFileName, length, genstamp, volumeExecutor);
}
 
Developer: naver, Project: hadoop, Lines of code: 56, Source: FsDatasetImpl.java
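Note the locking pattern in this example: everything the cache operation needs (the block file name, visible length, generation stamp, and the volume's cache executor) is captured while holding the dataset lock, but cacheManager.cacheBlock itself runs after the synchronized block, so the potentially slow caching work never executes under the lock.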

Example 5: getHdfsBlocksMetadata

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the package/class this method depends on
@Override // FsDatasetSpi
public HdfsBlocksMetadata getHdfsBlocksMetadata(String poolId,
    long[] blockIds) throws IOException {
  List<FsVolumeImpl> curVolumes = getVolumes();
  // List of VolumeIds, one per volume on the datanode
  List<byte[]> blocksVolumeIds = new ArrayList<>(curVolumes.size());
  // List of indexes into the list of VolumeIds, pointing at the VolumeId of
  // the volume that the block is on
  List<Integer> blocksVolumeIndexes = new ArrayList<Integer>(blockIds.length);
  // Initialize the list of VolumeIds simply by enumerating the volumes
  for (int i = 0; i < curVolumes.size(); i++) {
    blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
  }
  // Determine the index of the VolumeId of each block's volume, by comparing 
  // the block's volume against the enumerated volumes
  for (int i = 0; i < blockIds.length; i++) {
    long blockId = blockIds[i];
    boolean isValid = false;

    ReplicaInfo info = volumeMap.get(poolId, blockId);
    int volumeIndex = 0;
    if (info != null) {
      FsVolumeSpi blockVolume = info.getVolume();
      for (FsVolumeImpl volume : curVolumes) {
        // This comparison of references should be safe
        if (blockVolume == volume) {
          isValid = true;
          break;
        }
        volumeIndex++;
      }
    }
    // Indicates that the block is not present, or not found in a data dir
    if (!isValid) {
      volumeIndex = Integer.MAX_VALUE;
    }
    blocksVolumeIndexes.add(volumeIndex);
  }
  return new HdfsBlocksMetadata(poolId, blockIds,
      blocksVolumeIds, blocksVolumeIndexes);
}
 
Developer: naver, Project: hadoop, Lines of code: 42, Source: FsDatasetImpl.java

Example 6: invalidate

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the package/class this method depends on
/**
 * We're informed that a block is no longer valid.  We
 * could lazily garbage-collect the block, but why bother?
 * just get rid of it.
 */
@Override // FsDatasetSpi
public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
  boolean error = false;
  for (int i = 0; i < invalidBlks.length; i++) {
    final File f;
    final FsVolumeImpl v;
    synchronized (this) {
      f = getFile(bpid, invalidBlks[i].getBlockId());
      ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
      if (info == null) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            + ": ReplicaInfo not found.");
        error = true;
        continue;
      }
      if (info.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            + ": GenerationStamp not matched, info=" + info);
        error = true;
        continue;
      }
      v = (FsVolumeImpl)info.getVolume();
      if (f == null) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            +  ": File not found, volume=" + v);
        error = true;
        continue;
      }
      if (v == null) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            +  ". No volume for this replica, file=" + f + ".");
        error = true;
        continue;
      }
      File parent = f.getParentFile();
      if (parent == null) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            +  ". Parent not found for file " + f + ".");
        error = true;
        continue;
      }
      ReplicaState replicaState = info.getState();
      if (replicaState == ReplicaState.FINALIZED || 
          (replicaState == ReplicaState.RUR && 
              ((ReplicaUnderRecovery)info).getOriginalReplica().getState() == 
                ReplicaState.FINALIZED)) {
        v.clearPath(bpid, parent);
      }
      volumeMap.remove(bpid, invalidBlks[i]);
    }

    // Delete the block asynchronously to make sure we can do it fast enough
    asyncDiskService.deleteAsync(v, f,
        FsDatasetUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp()),
        new ExtendedBlock(bpid, invalidBlks[i]));
  }
  if (error) {
    throw new IOException("Error in deleting blocks.");
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 66, Source: FsDatasetImpl.java

Example 7: getVolume

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the package/class this method depends on
@Override
public synchronized FsVolumeImpl getVolume(final ExtendedBlock b) {
  final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
  return r != null ? (FsVolumeImpl) r.getVolume() : null;
}
 
Developer: hopshadoop, Project: hops, Lines of code: 6, Source: FsDatasetImpl.java

Example 8: invalidate

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the package/class this method depends on
/**
 * We're informed that a block is no longer valid.  We
 * could lazily garbage-collect the block, but why bother?
 * just get rid of it.
 */
@Override // FsDatasetSpi
public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
  boolean error = false;
  for (Block invalidBlk : invalidBlks) {
    final File f;
    final FsVolumeImpl v;
    synchronized (this) {
      f = getFile(bpid, invalidBlk.getBlockId());
      ReplicaInfo info = volumeMap.get(bpid, invalidBlk);
      if (info == null) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ": ReplicaInfo not found.");
        error = true;
        continue;
      }
      if (info.getGenerationStamp() != invalidBlk.getGenerationStamp()) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ": GenerationStamp not matched, info=" + info);
        error = true;
        continue;
      }
      v = (FsVolumeImpl) info.getVolume();
      if (f == null) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ": File not found, volume=" + v);
        error = true;
        continue;
      }
      if (v == null) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ". No volume for this replica, file=" + f + ".");
        error = true;
        continue;
      }
      File parent = f.getParentFile();
      if (parent == null) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ". Parent not found for file " + f + ".");
        error = true;
        continue;
      }
      ReplicaState replicaState = info.getState();
      if (replicaState == ReplicaState.FINALIZED ||
          (replicaState == ReplicaState.RUR &&
              ((ReplicaUnderRecovery) info).getOriginalReplica().getState() ==
                  ReplicaState.FINALIZED)) {
        v.clearPath(bpid, parent);
      }
      volumeMap.remove(bpid, invalidBlk);
    }
  
    // Delete the block asynchronously to make sure we can do it fast enough
    asyncDiskService.deleteAsync(v, f,
        FsDatasetUtil.getMetaFile(f, invalidBlk.getGenerationStamp()),
        new ExtendedBlock(bpid, invalidBlk));
  }
  if (error) {
    throw new IOException("Error in deleting blocks.");
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 66, Source: FsDatasetImpl.java

