Java ReplicaInfo.getBlockFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getBlockFile, drawn from open-source projects. If you are wondering what ReplicaInfo.getBlockFile does or how to use it, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.


The 14 code examples of ReplicaInfo.getBlockFile below are sorted by popularity by default; examples that readers found useful rank higher.

Example 1: checkReplicaFiles

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/** Check the files of a replica. */
static void checkReplicaFiles(final ReplicaInfo r) throws IOException {
  //check replica's file
  final File f = r.getBlockFile();
  if (!f.exists()) {
    throw new FileNotFoundException("File " + f + " not found, r=" + r);
  }
  if (r.getBytesOnDisk() != f.length()) {
    throw new IOException("File length mismatched.  The length of "
        + f + " is " + f.length() + " but r=" + r);
  }

  //check replica's meta file
  final File metafile = FsDatasetUtil.getMetaFile(f, r.getGenerationStamp());
  if (!metafile.exists()) {
    throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
  }
  if (metafile.length() == 0) {
    throw new IOException("Metafile " + metafile + " is empty, r=" + r);
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: FsDatasetImpl.java
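The meta file that FsDatasetUtil.getMetaFile resolves follows the standard HDFS on-disk layout: a block file named blk_<blockId> is paired with blk_<blockId>_<generationStamp>.meta, which holds the block's checksums. A minimal sketch of that convention (metaFileFor is a hypothetical helper, not part of the Hadoop API):

import java.io.File;

// Hypothetical helper mirroring the naming scheme: for a block file
// "blk_123" and generation stamp 1001, returns "blk_123_1001.meta"
// in the same directory.
static File metaFileFor(File blockFile, long genStamp) {
  return new File(blockFile.getParentFile(),
      blockFile.getName() + "_" + genStamp + ".meta");
}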

Example 2: createUnlinkTmpFile

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo, 
    boolean changeBlockFile, 
    boolean isRename) throws IOException {
  File src;
  if (changeBlockFile) {
    src = replicaInfo.getBlockFile();
  } else {
    src = replicaInfo.getMetaFile();
  }
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    src.renameTo(dst);
  } else {
    FileInputStream in = new FileInputStream(src);
    try {
      FileOutputStream out = new FileOutputStream(dst);
      try {
        IOUtils.copyBytes(in, out, 1);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestDatanodeRestart.java
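The nested try/finally blocks above predate try-with-resources; on Java 7+ the copy branch can be written more compactly with the same behavior (a sketch, not taken from the Hadoop test itself):

try (FileInputStream in = new FileInputStream(src);
     FileOutputStream out = new FileOutputStream(dst)) {
  // same 1-byte copy buffer as the test above
  IOUtils.copyBytes(in, out, 1);
}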

Example 3: getTmpInputStreams

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                        long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  FsVolumeReference ref = info.getVolume().obtainReference();
  try {
    File blockFile = info.getBlockFile();
    RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
    if (blkOffset > 0) {
      blockInFile.seek(blkOffset);
    }
    File metaFile = info.getMetaFile();
    RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
    if (ckoff > 0) {
      metaInFile.seek(ckoff);
    }
    return new ReplicaInputStreams(
        blockInFile.getFD(), metaInFile.getFD(), ref);
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 27, Source: FsDatasetImpl.java
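Note the resource handling in this variant: an FsVolumeReference is obtained before the files are opened and is handed off to the returned ReplicaInputStreams, so the volume cannot be removed out from under a reader; IOUtils.cleanup releases the reference only on the failure path.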

Example 4: getTmpInputStreams

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                        long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  File blockFile = info.getBlockFile();
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = info.getMetaFile();
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new ReplicaInputStreams(blockInFile.getFD(), metaInFile.getFD());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 20, Source: FsDatasetImpl.java

Example 5: getTmpInputStreams

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
    long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  File blockFile = info.getBlockFile();
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = info.getMetaFile();
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new ReplicaInputStreams(blockInFile.getFD(), metaInFile.getFD());
}
 
Developer: hopshadoop, Project: hops, Lines: 20, Source: FsDatasetImpl.java
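Examples 4 and 5 are byte-for-byte identical variants from the hadoop-plus and hops forks. Unlike Example 3, they acquire no FsVolumeReference, so nothing pins the volume while the returned streams are in use.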

Example 6: checkReplicaFiles

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * Check the files of a replica.
 */
static void checkReplicaFiles(final ReplicaInfo r) throws IOException {
  //check replica's file
  final File f = r.getBlockFile();
  if (!f.exists()) {
    throw new FileNotFoundException("File " + f + " not found, r=" + r);
  }
  if (r.getBytesOnDisk() != f.length()) {
    throw new IOException(
        "File length mismatched.  The length of " + f + " is " + f.length() +
            " but r=" + r);
  }

  //check replica's meta file
  final File metafile = FsDatasetUtil.getMetaFile(f, r.getGenerationStamp());
  if (!metafile.exists()) {
    throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
  }
  if (metafile.length() == 0) {
    throw new IOException("Metafile " + metafile + " is empty, r=" + r);
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 25, Source: FsDatasetImpl.java

Example 7: createUnlinkTmpFile

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
    boolean changeBlockFile, boolean isRename) throws IOException {
  File src;
  if (changeBlockFile) {
    src = replicaInfo.getBlockFile();
  } else {
    src = replicaInfo.getMetaFile();
  }
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    src.renameTo(dst);
  } else {
    FileInputStream in = new FileInputStream(src);
    try {
      FileOutputStream out = new FileOutputStream(dst);
      try {
        IOUtils.copyBytes(in, out, 1);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: TestDatanodeRestart.java

Example 8: getFile

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * Turn the block identifier into a filename
 * @param bpid Block pool Id
 * @param blockId a block's id
 * @return on disk data file path; null if the replica does not exist
 */
File getFile(final String bpid, final long blockId, boolean touch) {
  ReplicaInfo info = volumeMap.get(bpid, blockId);
  if (info != null) {
    if (touch && info.getVolume().isTransientStorage()) {
      ramDiskReplicaTracker.touch(bpid, blockId);
      datanode.getMetrics().incrRamDiskBlocksReadHits();
    }
    return info.getBlockFile();
  }
  return null;    
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: FsDatasetImpl.java
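A minimal caller sketch (getFile is package-private in FsDatasetImpl, so this assumes code in the same package; dataset, bpid, and blockId are placeholders):

// Resolve a block id to its on-disk data file. Passing touch = true
// refreshes the replica in the RAM-disk tracker and bumps the
// read-hit metric when the replica is on transient storage.
File blockFile = dataset.getFile(bpid, blockId, true);
if (blockFile == null) {
  // no replica for this block in the volume map
}

Examples 10 and 14 below show the same lookup without the touch parameter, i.e. without the RAM-disk bookkeeping.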

Example 9: deleteReplica

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
private void deleteReplica(final ReplicaInfo replicaToDelete) {
  // Delete the files on disk. Failure here is okay.
  final File blockFile = replicaToDelete.getBlockFile();
  if (!blockFile.delete()) {
    LOG.warn("Failed to delete block file " + blockFile);
  }
  final File metaFile = replicaToDelete.getMetaFile();
  if (!metaFile.delete()) {
    LOG.warn("Failed to delete meta file " + metaFile);
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: BlockPoolSlice.java

Example 10: getFile

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * Turn the block identifier into a filename
 * @param bpid Block pool Id
 * @param blockId a block's id
 * @return on disk data file path; null if the replica does not exist
 */
File getFile(final String bpid, final long blockId) {
  ReplicaInfo info = volumeMap.get(bpid, blockId);
  if (info != null) {
    return info.getBlockFile();
  }
  return null;    
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 14, Source: FsDatasetImpl.java

Example 11: moveBlockAcrossStorage

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * Move block files from one storage to another storage.
 * @return Returns the Old replicaInfo
 * @throws IOException
 */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
    StorageType targetStorageType) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  if (replicaInfo.getNumBytes() != block.getNumBytes()) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaInfo.getNumBytes()
        + " expected length is " + block.getNumBytes());
  }
  if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
    throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
        + " already exists on storage " + targetStorageType);
  }

  if (replicaInfo.isOnTransientStorage()) {
    // Block movement from RAM_DISK will be done by LazyPersist mechanism
    throw new IOException("Replica " + replicaInfo
        + " cannot be moved from storageType : "
        + replicaInfo.getVolume().getStorageType());
  }

  try (FsVolumeReference volumeRef = volumes.getNextVolume(
      targetStorageType, block.getNumBytes())) {
    File oldBlockFile = replicaInfo.getBlockFile();
    File oldMetaFile = replicaInfo.getMetaFile();
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    File[] blockFiles = copyBlockFiles(block.getBlockId(),
        block.getGenerationStamp(), oldMetaFile, oldBlockFile,
        targetVolume.getTmpDir(block.getBlockPoolId()),
        replicaInfo.isOnTransientStorage());

    ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
        replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
        targetVolume, blockFiles[0].getParentFile(), 0);
    newReplicaInfo.setNumBytes(blockFiles[1].length());
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);

    removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
        oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
  }

  // Replace the old block if any to reschedule the scanning.
  return replicaInfo;
}
 
Developer: naver, Project: hadoop, Lines: 56, Source: FsDatasetImpl.java
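The flow here is: verify that the replica is FINALIZED, that its length matches the block, that it is not already on the target storage type, and that it is not on RAM_DISK (the lazy-persist mechanism handles that case); then reserve a volume of the target type, copy the block and meta files into that volume's tmp directory, finalize the copy, and remove the old replica. A hedged caller sketch (the HDFS Mover drives this path; ARCHIVE is just one possible target):

// Migrate a finalized block to an ARCHIVE volume. Throws if the replica
// is unfinalized, corrupt, already on ARCHIVE, or on transient storage.
ReplicaInfo oldReplica =
    dataset.moveBlockAcrossStorage(block, StorageType.ARCHIVE);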

Example 12: moveBlockAcrossStorage

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * Move block files from one storage to another storage.
 * @return Returns the Old replicaInfo
 * @throws IOException
 */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
    StorageType targetStorageType) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  if (replicaInfo.getNumBytes() != block.getNumBytes()) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaInfo.getNumBytes()
        + " expected length is " + block.getNumBytes());
  }
  if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
    throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
        + " already exists on storage " + targetStorageType);
  }

  if (replicaInfo.isOnTransientStorage()) {
    // Block movement from RAM_DISK will be done by LazyPersist mechanism
    throw new IOException("Replica " + replicaInfo
        + " cannot be moved from storageType : "
        + replicaInfo.getVolume().getStorageType());
  }

  try (FsVolumeReference volumeRef = volumes.getNextVolume(
      targetStorageType, block.getNumBytes())) {
    File oldBlockFile = replicaInfo.getBlockFile();
    File oldMetaFile = replicaInfo.getMetaFile();
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    File[] blockFiles = copyBlockFiles(block.getBlockId(),
        block.getGenerationStamp(), oldMetaFile, oldBlockFile,
        targetVolume.getTmpDir(block.getBlockPoolId()),
        replicaInfo.isOnTransientStorage(), smallBufferSize, conf);

    ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
        replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
        targetVolume, blockFiles[0].getParentFile(), 0);
    newReplicaInfo.setNumBytes(blockFiles[1].length());
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);

    removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
        oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
  }

  // Replace the old block if any to reschedule the scanning.
  return replicaInfo;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 56, Source: FsDatasetImpl.java
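Example 12 is the same method from a newer branch; the only difference is that copyBlockFiles additionally receives smallBufferSize and conf, making the copy buffer size configurable instead of hard-coded.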

Example 13: resolveDuplicateReplicas

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * This method is invoked during DN startup when volumes are scanned to
 * build up the volumeMap.
 *
 * Given two replicas, decide which one to keep. The preference is as
 * follows:
 *   1. Prefer the replica with the higher generation stamp.
 *   2. If generation stamps are equal, prefer the replica with the
 *      larger on-disk length.
 *   3. If on-disk length is the same, prefer the replica on persistent
 *      storage volume.
 *   4. All other factors being equal, keep replica1.
 *
 * The other replica is removed from the volumeMap and is deleted from
 * its storage volume.
 *
 * @param replica1
 * @param replica2
 * @param volumeMap
 * @return the replica that is retained.
 * @throws IOException
 */
ReplicaInfo resolveDuplicateReplicas(
    final ReplicaInfo replica1, final ReplicaInfo replica2,
    final ReplicaMap volumeMap) throws IOException {

  if (!deleteDuplicateReplicas) {
    // Leave both block replicas in place.
    return replica1;
  }

  ReplicaInfo replicaToKeep;
  ReplicaInfo replicaToDelete;

  if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
    replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
        ? replica1 : replica2;
  } else if (replica1.getNumBytes() != replica2.getNumBytes()) {
    replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes() ?
        replica1 : replica2;
  } else if (replica1.getVolume().isTransientStorage() &&
             !replica2.getVolume().isTransientStorage()) {
    replicaToKeep = replica2;
  } else {
    replicaToKeep = replica1;
  }

  replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;

  if (LOG.isDebugEnabled()) {
    LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
        + ".  Will try to delete " + replicaToDelete);
  }

  // Update volumeMap.
  volumeMap.add(bpid, replicaToKeep);

  // Delete the files on disk. Failure here is okay.
  final File blockFile = replicaToDelete.getBlockFile();
  if (!blockFile.delete()) {
    LOG.warn("Failed to delete block file " + blockFile);
  }
  final File metaFile = replicaToDelete.getMetaFile();
  if (!metaFile.delete()) {
    LOG.warn("Failed to delete meta file " + metaFile);
  }

  return replicaToKeep;
}
 
Developer: yncxcw, Project: FlexMap, Lines: 70, Source: BlockPoolSlice.java
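A concrete illustration of the preference order: a replica with generation stamp 1002 beats one with stamp 1001 even if the older replica is longer on disk; only when the stamps tie does on-disk length decide, and only when both tie does the persistent-storage copy win over a transient one.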

Example 14: getFile

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the dependent package/class
/**
 * Turn the block identifier into a filename
 *
 * @param bpid
 *     Block pool Id
 * @param blockId
 *     a block's id
 * @return on disk data file path; null if the replica does not exist
 */
File getFile(final String bpid, final long blockId) {
  ReplicaInfo info = volumeMap.get(bpid, blockId);
  if (info != null) {
    return info.getBlockFile();
  }
  return null;
}
 
Developer: hopshadoop, Project: hops, Lines: 17, Source: FsDatasetImpl.java


Note: The org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getBlockFile examples in this article were collected by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar platforms. The code snippets are drawn from community-contributed open-source projects; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce this article without permission.