

Java ReplicaInfo.getMetaFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getMetaFile. If you are asking how ReplicaInfo.getMetaFile works, how to call it, or where to find it used in practice, the curated examples below may help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.


The following presents 11 code examples of the ReplicaInfo.getMetaFile method, sorted by popularity by default.
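
Before diving in, here is a minimal, self-contained sketch (plain Java, no Hadoop dependencies) of the convention these examples rely on: a replica's meta file name encodes the block ID and the replica's current generation stamp (blk_<blockId>_<genStamp>.meta), so getMetaFile returns a different path before and after the generation stamp changes. The class and helper names below are hypothetical illustrations, not Hadoop APIs.

import java.io.File;

/**
 * Illustrative sketch only -- mimics the DataNode's on-disk naming
 * convention behind ReplicaInfo.getMetaFile; this is not Hadoop code.
 */
public class MetaFileNameSketch {
  // Builds "blk_<blockId>_<genStamp>.meta" inside the given directory.
  static File metaFile(File dir, long blockId, long genStamp) {
    return new File(dir, "blk_" + blockId + "_" + genStamp + ".meta");
  }

  public static void main(String[] args) {
    File dir = new File("/data/bp-1/current/finalized"); // hypothetical path
    long blockId = 1073741825L;
    File oldMeta = metaFile(dir, blockId, 1001L); // before a GS bump
    File newMeta = metaFile(dir, blockId, 1002L); // after a GS bump
    System.out.println("rename " + oldMeta + " -> " + newMeta);
  }
}

This is exactly the pattern Example 1 uses: call getMetaFile(), change the generation stamp, call getMetaFile() again, and rename the old path to the new one.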

Example 1: bumpReplicaGS

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * Bump a replica's generation stamp to a new one.
 * Its on-disk meta file name is renamed to be the new one too.
 * 
 * @param replicaInfo a replica
 * @param newGS new generation stamp
 * @throws IOException if rename fails
 */
private void bumpReplicaGS(ReplicaInfo replicaInfo, 
    long newGS) throws IOException { 
  long oldGS = replicaInfo.getGenerationStamp();
  File oldmeta = replicaInfo.getMetaFile();
  replicaInfo.setGenerationStamp(newGS);
  File newmeta = replicaInfo.getMetaFile();

  // rename meta file to new GS
  if (LOG.isDebugEnabled()) {
    LOG.debug("Renaming " + oldmeta + " to " + newmeta);
  }
  try {
    NativeIO.renameTo(oldmeta, newmeta);
  } catch (IOException e) {
    replicaInfo.setGenerationStamp(oldGS); // restore old GS
    throw new IOException("Block " + replicaInfo + " reopen failed. " +
                          " Unable to move meta file  " + oldmeta +
                          " to " + newmeta, e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: FsDatasetImpl.java
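
Note the rollback in the catch block: if the on-disk rename fails, the in-memory generation stamp is restored before the exception is rethrown, keeping the ReplicaInfo consistent with the meta file that actually exists on disk.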

Example 2: createUnlinkTmpFile

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo, 
    boolean changeBlockFile, 
    boolean isRename) throws IOException {
  File src;
  if (changeBlockFile) {
    src = replicaInfo.getBlockFile();
  } else {
    src = replicaInfo.getMetaFile();
  }
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    src.renameTo(dst);
  } else {
    FileInputStream in = new FileInputStream(src);
    try {
      FileOutputStream out = new FileOutputStream(dst);
      try {
        IOUtils.copyBytes(in, out, 1);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestDatanodeRestart.java

Example 3: getTmpInputStreams

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                        long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  FsVolumeReference ref = info.getVolume().obtainReference();
  try {
    File blockFile = info.getBlockFile();
    RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
    if (blkOffset > 0) {
      blockInFile.seek(blkOffset);
    }
    File metaFile = info.getMetaFile();
    RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
    if (ckoff > 0) {
      metaInFile.seek(ckoff);
    }
    return new ReplicaInputStreams(
        blockInFile.getFD(), metaInFile.getFD(), ref);
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 27, Source: FsDatasetImpl.java
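
Unlike the variants in Examples 4 and 5 below, this version first obtains an FsVolumeReference on the replica's volume and releases it with IOUtils.cleanup if opening either file fails; holding the reference keeps the volume from being removed out from under the open streams.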

Example 4: getTmpInputStreams

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                        long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  File blockFile = info.getBlockFile();
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = info.getMetaFile();
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new ReplicaInputStreams(blockInFile.getFD(), metaInFile.getFD());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 20, Source: FsDatasetImpl.java

Example 5: getTmpInputStreams

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
    long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  File blockFile = info.getBlockFile();
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = info.getMetaFile();
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new ReplicaInputStreams(blockInFile.getFD(), metaInFile.getFD());
}
 
Developer: hopshadoop, Project: hops, Lines: 20, Source: FsDatasetImpl.java

Example 6: bumpReplicaGS

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * Bump a replica's generation stamp to a new one.
 * Its on-disk meta file name is renamed to be the new one too.
 *
 * @param replicaInfo
 *     a replica
 * @param newGS
 *     new generation stamp
 * @throws IOException
 *     if rename fails
 */
private void bumpReplicaGS(ReplicaInfo replicaInfo, long newGS)
    throws IOException {
  long oldGS = replicaInfo.getGenerationStamp();
  File oldmeta = replicaInfo.getMetaFile();
  replicaInfo.setGenerationStampNoPersistance(newGS);
  File newmeta = replicaInfo.getMetaFile();

  // rename meta file to new GS
  if (LOG.isDebugEnabled()) {
    LOG.debug("Renaming " + oldmeta + " to " + newmeta);
  }
  try {
    NativeIO.renameTo(oldmeta, newmeta);
  } catch (IOException e) {
    replicaInfo.setGenerationStampNoPersistance(oldGS); // restore old GS
    throw new IOException("Block " + replicaInfo + " reopen failed. " +
        " Unable to move meta file  " + oldmeta +
        " to " + newmeta, e);
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 32, Source: FsDatasetImpl.java

Example 7: createUnlinkTmpFile

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
    boolean changeBlockFile, boolean isRename) throws IOException {
  File src;
  if (changeBlockFile) {
    src = replicaInfo.getBlockFile();
  } else {
    src = replicaInfo.getMetaFile();
  }
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    src.renameTo(dst);
  } else {
    FileInputStream in = new FileInputStream(src);
    try {
      FileOutputStream out = new FileOutputStream(dst);
      try {
        IOUtils.copyBytes(in, out, 1);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: TestDatanodeRestart.java

Example 8: deleteReplica

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
private void deleteReplica(final ReplicaInfo replicaToDelete) {
  // Delete the files on disk. Failure here is okay.
  final File blockFile = replicaToDelete.getBlockFile();
  if (!blockFile.delete()) {
    LOG.warn("Failed to delete block file " + blockFile);
  }
  final File metaFile = replicaToDelete.getMetaFile();
  if (!metaFile.delete()) {
    LOG.warn("Failed to delete meta file " + metaFile);
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: BlockPoolSlice.java

Example 9: moveBlockAcrossStorage

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * Move block files from one storage to another storage.
 * @return Returns the Old replicaInfo
 * @throws IOException
 */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
    StorageType targetStorageType) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  if (replicaInfo.getNumBytes() != block.getNumBytes()) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaInfo.getNumBytes()
        + " expected length is " + block.getNumBytes());
  }
  if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
    throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
        + " already exists on storage " + targetStorageType);
  }

  if (replicaInfo.isOnTransientStorage()) {
    // Block movement from RAM_DISK will be done by LazyPersist mechanism
    throw new IOException("Replica " + replicaInfo
        + " cannot be moved from storageType : "
        + replicaInfo.getVolume().getStorageType());
  }

  try (FsVolumeReference volumeRef = volumes.getNextVolume(
      targetStorageType, block.getNumBytes())) {
    File oldBlockFile = replicaInfo.getBlockFile();
    File oldMetaFile = replicaInfo.getMetaFile();
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    File[] blockFiles = copyBlockFiles(block.getBlockId(),
        block.getGenerationStamp(), oldMetaFile, oldBlockFile,
        targetVolume.getTmpDir(block.getBlockPoolId()),
        replicaInfo.isOnTransientStorage());

    ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
        replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
        targetVolume, blockFiles[0].getParentFile(), 0);
    newReplicaInfo.setNumBytes(blockFiles[1].length());
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);

    removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
        oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
  }

  // Replace the old block if any to reschedule the scanning.
  return replicaInfo;
}
 
Developer: naver, Project: hadoop, Lines: 56, Source: FsDatasetImpl.java

Example 10: moveBlockAcrossStorage

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * Move block files from one storage to another storage.
 * @return Returns the Old replicaInfo
 * @throws IOException
 */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
    StorageType targetStorageType) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  if (replicaInfo.getNumBytes() != block.getNumBytes()) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaInfo.getNumBytes()
        + " expected length is " + block.getNumBytes());
  }
  if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
    throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
        + " already exists on storage " + targetStorageType);
  }

  if (replicaInfo.isOnTransientStorage()) {
    // Block movement from RAM_DISK will be done by LazyPersist mechanism
    throw new IOException("Replica " + replicaInfo
        + " cannot be moved from storageType : "
        + replicaInfo.getVolume().getStorageType());
  }

  try (FsVolumeReference volumeRef = volumes.getNextVolume(
      targetStorageType, block.getNumBytes())) {
    File oldBlockFile = replicaInfo.getBlockFile();
    File oldMetaFile = replicaInfo.getMetaFile();
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    File[] blockFiles = copyBlockFiles(block.getBlockId(),
        block.getGenerationStamp(), oldMetaFile, oldBlockFile,
        targetVolume.getTmpDir(block.getBlockPoolId()),
        replicaInfo.isOnTransientStorage(), smallBufferSize, conf);

    ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
        replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
        targetVolume, blockFiles[0].getParentFile(), 0);
    newReplicaInfo.setNumBytes(blockFiles[1].length());
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);

    removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
        oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
  }

  // Replace the old block if any to reschedule the scanning.
  return replicaInfo;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 56, Source: FsDatasetImpl.java

Example 11: resolveDuplicateReplicas

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * This method is invoked during DN startup when volumes are scanned to
 * build up the volumeMap.
 *
 * Given two replicas, decide which one to keep. The preference is as
 * follows:
 *   1. Prefer the replica with the higher generation stamp.
 *   2. If generation stamps are equal, prefer the replica with the
 *      larger on-disk length.
 *   3. If on-disk length is the same, prefer the replica on persistent
 *      storage volume.
 *   4. All other factors being equal, keep replica1.
 *
 * The other replica is removed from the volumeMap and is deleted from
 * its storage volume.
 *
 * @param replica1
 * @param replica2
 * @param volumeMap
 * @return the replica that is retained.
 * @throws IOException
 */
ReplicaInfo resolveDuplicateReplicas(
    final ReplicaInfo replica1, final ReplicaInfo replica2,
    final ReplicaMap volumeMap) throws IOException {

  if (!deleteDuplicateReplicas) {
    // Leave both block replicas in place.
    return replica1;
  }

  ReplicaInfo replicaToKeep;
  ReplicaInfo replicaToDelete;

  if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
    replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
        ? replica1 : replica2;
  } else if (replica1.getNumBytes() != replica2.getNumBytes()) {
    replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes() ?
        replica1 : replica2;
  } else if (replica1.getVolume().isTransientStorage() &&
             !replica2.getVolume().isTransientStorage()) {
    replicaToKeep = replica2;
  } else {
    replicaToKeep = replica1;
  }

  replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;

  if (LOG.isDebugEnabled()) {
    LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
        + ".  Will try to delete " + replicaToDelete);
  }

  // Update volumeMap.
  volumeMap.add(bpid, replicaToKeep);

  // Delete the files on disk. Failure here is okay.
  final File blockFile = replicaToDelete.getBlockFile();
  if (!blockFile.delete()) {
    LOG.warn("Failed to delete block file " + blockFile);
  }
  final File metaFile = replicaToDelete.getMetaFile();
  if (!metaFile.delete()) {
    LOG.warn("Failed to delete meta file " + metaFile);
  }

  return replicaToKeep;
}
 
Developer: yncxcw, Project: FlexMap, Lines: 70, Source: BlockPoolSlice.java


Note: the org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getMetaFile method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from community-contributed open-source projects; copyright in the source code belongs to the original authors, and distribution or use should follow the corresponding project's License. Do not republish without permission.