

Java ReplicaUnderRecovery Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery. If you are wondering what ReplicaUnderRecovery is for or how to use it, the curated examples below should help.


The ReplicaUnderRecovery class belongs to the org.apache.hadoop.hdfs.server.datanode package. Twelve code examples for the class are shown below, sorted by popularity by default.
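
Before the examples, a quick orientation: a ReplicaUnderRecovery pairs an existing replica with the recovery id issued for a block-recovery attempt, and reports its state as RUR. The sketch below is illustrative only; it uses just the constructor and accessors that appear in the snippets further down this page, and is not taken from any of the listed projects.

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;

/**
 * Minimal sketch (not project code): wrap a finalized replica
 * for block recovery, using only calls visible in the examples below.
 */
static ReplicaUnderRecovery startRecovery(FinalizedReplica finalized,
    long recoveryId) {
  // Pairs the original replica with the recovery id; later updates
  // must present the same id (see examples 7-10).
  ReplicaUnderRecovery rur = new ReplicaUnderRecovery(finalized, recoveryId);
  assert rur.getState() == ReplicaState.RUR;      // replica now reports RUR
  assert rur.getRecoveryID() == recoveryId;
  assert rur.getOriginalReplica() == finalized;   // original kept (examples 11-12)
  return rur;
}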

Example 1: fetchReplicaInfo

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
/**
 * This should be primarily used for testing.
 * @param bpid block pool id
 * @param blockId id of the block whose replica is requested
 * @return a clone of the replica as stored in datanode memory, or null
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  ReplicaInfo r = volumeMap.get(bpid, blockId);
  if(r == null)
    return null;
  switch(r.getState()) {
  case FINALIZED:
    return new FinalizedReplica((FinalizedReplica)r);
  case RBW:
    return new ReplicaBeingWritten((ReplicaBeingWritten)r);
  case RWR:
    return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered)r);
  case RUR:
    return new ReplicaUnderRecovery((ReplicaUnderRecovery)r);
  case TEMPORARY:
    return new ReplicaInPipeline((ReplicaInPipeline)r);
  }
  return null;
}
 
Developer ID: naver, Project: hadoop, Lines: 23, Source: FsDatasetImpl.java

Example 2: fetchReplicaInfo

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
/**
 * Returns a clone of a replica stored in data-node memory.
 * Should be primarily used for testing.
 * @param bpid block pool id
 * @param blockId id of the block whose replica is requested
 * @return a clone of the replica, or null if it is not found
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  ReplicaInfo r = volumeMap.get(bpid, blockId);
  if(r == null)
    return null;
  switch(r.getState()) {
  case FINALIZED:
    return new FinalizedReplica((FinalizedReplica)r);
  case RBW:
    return new ReplicaBeingWritten((ReplicaBeingWritten)r);
  case RWR:
    return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered)r);
  case RUR:
    return new ReplicaUnderRecovery((ReplicaUnderRecovery)r);
  case TEMPORARY:
    return new ReplicaInPipeline((ReplicaInPipeline)r);
  }
  return null;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 25, Source: FsDatasetImpl.java

Example 3: fetchReplicaInfo

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
/**
 * Returns a clone of a replica stored in data-node memory.
 * Should be primarily used for testing.
 *
 * @param bpid block pool id
 * @param blockId id of the block whose replica is requested
 * @return a clone of the replica, or null if it is not found
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  ReplicaInfo r = volumeMap.get(bpid, blockId);
  if (r == null) {
    return null;
  }
  switch (r.getState()) {
    case FINALIZED:
      return new FinalizedReplica((FinalizedReplica) r);
    case RBW:
      return new ReplicaBeingWritten((ReplicaBeingWritten) r);
    case RWR:
      return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered) r);
    case RUR:
      return new ReplicaUnderRecovery((ReplicaUnderRecovery) r);
    case TEMPORARY:
      return new ReplicaInPipeline((ReplicaInPipeline) r);
  }
  return null;
}
 
Developer ID: hopshadoop, Project: hops, Lines: 27, Source: FsDatasetImpl.java
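
All three variants above clone the replica, so callers can inspect it without touching the live volumeMap entry. A hypothetical test-side helper might use the method like this; since fetchReplicaInfo is package-private, this assumes the helper sits in the same package as FsDatasetImpl. None of this code comes from the listed projects.

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;

/** Hypothetical test helper: true if the block is currently in RUR state. */
static boolean isUnderRecovery(FsDatasetImpl dataset, String bpid, long blockId) {
  // fetchReplicaInfo returns a clone (or null), so inspecting the result
  // cannot disturb the datanode's live replica map.
  ReplicaInfo r = dataset.fetchReplicaInfo(bpid, blockId);
  if (r == null || r.getState() != ReplicaState.RUR) {
    return false;
  }
  long id = ((ReplicaUnderRecovery) r).getRecoveryID();
  System.out.println("block " + blockId + " is under recovery, recoveryId=" + id);
  return true;
}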

Example 4: copyReplicaWithNewBlockIdAndGS

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  FsVolumeReference v = volumes.getNextVolume(
      replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes());
  final File tmpDir = ((FsVolumeImpl) v.getVolume())
      .getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  return copyBlockFiles(replicaInfo.getMetaFile(), replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true);
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: FsDatasetImpl.java

Example 5: copyReplicaWithNewBlockIdAndGS

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  FsVolumeImpl v = (FsVolumeImpl) replicaInfo.getVolume();
  final File tmpDir = v.getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  return copyBlockFiles(replicaInfo.getMetaFile(),
      replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true, smallBufferSize, conf);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 14, Source: FsDatasetImpl.java
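
In both variants the copy lands under the block pool's tmp directory, with files named after the new block id and generation stamp; copyBlockFiles returns the pair with the meta file at index 0 and the block file at index 1, which is how examples 9-10 below unpack the result. A small illustrative sketch of the naming, assuming the standard HDFS convention that Block.BLOCK_FILE_PREFIX is "blk_" and metadata files end in "_<genstamp>.meta":

/**
 * Illustrative naming sketch (not project code), assuming the standard
 * HDFS convention: blk_<id> for block files, blk_<id>_<gs>.meta for meta.
 */
static String[] copiedFileNames(long newBlkId, long newGS) {
  String blockFileName = "blk_" + newBlkId;  // Block.BLOCK_FILE_PREFIX + id
  String metaFileName = blockFileName + "_" + newGS + ".meta";
  // Index 0 = meta, index 1 = block, matching how examples 9-10
  // unpack the result of copyReplicaWithNewBlockIdAndGS.
  return new String[] { metaFileName, blockFileName };
}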

Example 6: createReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
@Override
public Replica createReplicaUnderRecovery(
    ExtendedBlock block, long recoveryId) throws IOException {
  try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
    FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
    ReplicaUnderRecovery rur = new ReplicaUnderRecovery(new FinalizedReplica(
        block.getLocalBlock(), volume, volume.getCurrentDir().getParentFile()),
        recoveryId
    );
    dataset.volumeMap.add(block.getBlockPoolId(), rur);
    return rur;
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 14, Source: FsDatasetImplTestUtils.java

Example 7: updateReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
private FinalizedReplica updateReplicaUnderRecovery(
                                        String bpid,
                                        ReplicaUnderRecovery rur,
                                        long recoveryId,
                                        long newlength) throws IOException {
  //check recovery id
  if (rur.getRecoveryID() != recoveryId) {
    throw new IOException("rur.getRecoveryID() != recoveryId = " + recoveryId
        + ", rur=" + rur);
  }

  // bump rur's GS to be recovery id
  bumpReplicaGS(rur, recoveryId);

  //update length
  final File replicafile = rur.getBlockFile();
  if (rur.getNumBytes() < newlength) {
    throw new IOException("rur.getNumBytes() < newlength = " + newlength
        + ", rur=" + rur);
  }
  if (rur.getNumBytes() > newlength) {
    rur.unlinkBlock(1);
    truncateBlock(replicafile, rur.getMetaFile(), rur.getNumBytes(), newlength);
    // update RUR with the new length
    rur.setNumBytes(newlength);
  }

  // finalize the block
  return finalizeReplica(bpid, rur);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 31, Source: FsDatasetImpl.java

Example 8: updateReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
private FinalizedReplica updateReplicaUnderRecovery(String bpid,
    ReplicaUnderRecovery rur, long recoveryId, long newlength)
    throws IOException {
  //check recovery id
  if (rur.getRecoveryID() != recoveryId) {
    throw new IOException(
        "rur.getRecoveryID() != recoveryId = " + recoveryId + ", rur=" + rur);
  }

  // bump rur's GS to be recovery id
  bumpReplicaGS(rur, recoveryId);

  //update length
  final File replicafile = rur.getBlockFile();
  if (rur.getNumBytes() < newlength) {
    throw new IOException(
        "rur.getNumBytes() < newlength = " + newlength + ", rur=" + rur);
  }
  if (rur.getNumBytes() > newlength) {
    rur.unlinkBlock(1);
    truncateBlock(replicafile, rur.getMetaFile(), rur.getNumBytes(),
        newlength);
    // update RUR with the new length
    rur.setNumBytesNoPersistance(newlength);
  }

  // finalize the block
  return finalizeReplica(bpid, rur);
}
 
Developer ID: hopshadoop, Project: hops, Lines: 30, Source: FsDatasetImpl.java

Example 9: updateReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
private FinalizedReplica updateReplicaUnderRecovery(
                                        String bpid,
                                        ReplicaUnderRecovery rur,
                                        long recoveryId,
                                        long newBlockId,
                                        long newlength) throws IOException {
  //check recovery id
  if (rur.getRecoveryID() != recoveryId) {
    throw new IOException("rur.getRecoveryID() != recoveryId = " + recoveryId
        + ", rur=" + rur);
  }

  boolean copyOnTruncate = newBlockId > 0L && rur.getBlockId() != newBlockId;
  File blockFile;
  File metaFile;
  // bump rur's GS to be recovery id
  if(!copyOnTruncate) {
    bumpReplicaGS(rur, recoveryId);
    blockFile = rur.getBlockFile();
    metaFile = rur.getMetaFile();
  } else {
    File[] copiedReplicaFiles =
        copyReplicaWithNewBlockIdAndGS(rur, bpid, newBlockId, recoveryId);
    blockFile = copiedReplicaFiles[1];
    metaFile = copiedReplicaFiles[0];
  }

  //update length
  if (rur.getNumBytes() < newlength) {
    throw new IOException("rur.getNumBytes() < newlength = " + newlength
        + ", rur=" + rur);
  }
  if (rur.getNumBytes() > newlength) {
    rur.unlinkBlock(1);
    truncateBlock(blockFile, metaFile, rur.getNumBytes(), newlength);
    if(!copyOnTruncate) {
      // update RUR with the new length
      rur.setNumBytes(newlength);
    } else {
      // Copying block to a new block with new blockId.
      // Not truncating original block.
      ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
          newBlockId, recoveryId, rur.getVolume(), blockFile.getParentFile(),
          newlength);
      newReplicaInfo.setNumBytes(newlength);
      volumeMap.add(bpid, newReplicaInfo);
      finalizeReplica(bpid, newReplicaInfo);
    }
  }

  // finalize the block
  return finalizeReplica(bpid, rur);
}
 
Developer ID: naver, Project: hadoop, Lines: 54, Source: FsDatasetImpl.java

Example 10: updateReplicaUnderRecovery

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
private FinalizedReplica updateReplicaUnderRecovery(
                                        String bpid,
                                        ReplicaUnderRecovery rur,
                                        long recoveryId,
                                        long newBlockId,
                                        long newlength) throws IOException {
  //check recovery id
  if (rur.getRecoveryID() != recoveryId) {
    throw new IOException("rur.getRecoveryID() != recoveryId = " + recoveryId
        + ", rur=" + rur);
  }

  boolean copyOnTruncate = newBlockId > 0L && rur.getBlockId() != newBlockId;
  File blockFile;
  File metaFile;
  // bump rur's GS to be recovery id
  if(!copyOnTruncate) {
    bumpReplicaGS(rur, recoveryId);
    blockFile = rur.getBlockFile();
    metaFile = rur.getMetaFile();
  } else {
    File[] copiedReplicaFiles =
        copyReplicaWithNewBlockIdAndGS(rur, bpid, newBlockId, recoveryId);
    blockFile = copiedReplicaFiles[1];
    metaFile = copiedReplicaFiles[0];
  }

  //update length
  if (rur.getNumBytes() < newlength) {
    throw new IOException("rur.getNumBytes() < newlength = " + newlength
        + ", rur=" + rur);
  }
  if (rur.getNumBytes() > newlength) {
    rur.breakHardLinksIfNeeded();
    truncateBlock(blockFile, metaFile, rur.getNumBytes(), newlength);
    if(!copyOnTruncate) {
      // update RUR with the new length
      rur.setNumBytes(newlength);
    } else {
      // Copying block to a new block with new blockId.
      // Not truncating original block.
      FsVolumeSpi volume = rur.getVolume();
      String blockPath = blockFile.getAbsolutePath();
      String volumePath = volume.getBasePath();
      assert blockPath.startsWith(volumePath) :
          "New block file: " + blockPath + " must be on " +
              "same volume as recovery replica: " + volumePath;
      ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
          newBlockId, recoveryId, volume, blockFile.getParentFile(),
          newlength);
      newReplicaInfo.setNumBytes(newlength);
      volumeMap.add(bpid, newReplicaInfo);
      finalizeReplica(bpid, newReplicaInfo);
    }
  }

  // finalize the block
  return finalizeReplica(bpid, rur);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 60, Source: FsDatasetImpl.java
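
Examples 7-10 share one length contract: newlength may never exceed the replica's current byte count; an equal length finalizes the replica as-is; a shorter length truncates, either in place or, when a distinct newBlockId is supplied (the copy-on-truncate case in examples 9-10), on a fresh copy so the original block is left untouched. The standalone method below merely restates that decision table for clarity; it is illustrative and not part of any listed project.

/** Illustrative restatement of the length/copy-on-truncate logic above. */
static String recoveryAction(long currentBytes, long newLength,
    long currentBlockId, long newBlockId) {
  if (currentBytes < newLength) {
    // Mirrors the IOException thrown in examples 7-10.
    return "error: recovered length " + newLength + " exceeds replica length";
  }
  boolean copyOnTruncate = newBlockId > 0L && currentBlockId != newBlockId;
  if (currentBytes == newLength) {
    return "finalize as-is";  // the truncation branch is not taken
  }
  return copyOnTruncate
      ? "copy block+meta under the new id, truncate the copy, finalize it"
      : "truncate original block+meta in place, then finalize";
}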

Example 11: invalidate

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
/**
 * We're informed that a block is no longer valid. We
 * could lazily garbage-collect the block, but why bother?
 * Just get rid of it.
 */
@Override // FsDatasetSpi
public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
  boolean error = false;
  for (int i = 0; i < invalidBlks.length; i++) {
    final File f;
    final FsVolumeImpl v;
    synchronized (this) {
      f = getFile(bpid, invalidBlks[i].getBlockId());
      ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
      if (info == null) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            + ": ReplicaInfo not found.");
        error = true;
        continue;
      }
      if (info.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            + ": GenerationStamp not matched, info=" + info);
        error = true;
        continue;
      }
      v = (FsVolumeImpl)info.getVolume();
      if (f == null) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            +  ": File not found, volume=" + v);
        error = true;
        continue;
      }
      if (v == null) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            +  ". No volume for this replica, file=" + f + ".");
        error = true;
        continue;
      }
      File parent = f.getParentFile();
      if (parent == null) {
        LOG.warn("Failed to delete replica " + invalidBlks[i]
            +  ". Parent not found for file " + f + ".");
        error = true;
        continue;
      }
      ReplicaState replicaState = info.getState();
      if (replicaState == ReplicaState.FINALIZED || 
          (replicaState == ReplicaState.RUR && 
              ((ReplicaUnderRecovery)info).getOriginalReplica().getState() == 
                ReplicaState.FINALIZED)) {
        v.clearPath(bpid, parent);
      }
      volumeMap.remove(bpid, invalidBlks[i]);
    }

    // Delete the block asynchronously to make sure we can do it fast enough
    asyncDiskService.deleteAsync(v, f,
        FsDatasetUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp()),
        new ExtendedBlock(bpid, invalidBlks[i]));
  }
  if (error) {
    throw new IOException("Error in deleting blocks.");
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 66, Source: FsDatasetImpl.java

Example 12: invalidate

import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; // import the required package/class
/**
 * We're informed that a block is no longer valid. We
 * could lazily garbage-collect the block, but why bother?
 * Just get rid of it.
 */
@Override // FsDatasetSpi
public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
  boolean error = false;
  for (Block invalidBlk : invalidBlks) {
    final File f;
    final FsVolumeImpl v;
    synchronized (this) {
      f = getFile(bpid, invalidBlk.getBlockId());
      ReplicaInfo info = volumeMap.get(bpid, invalidBlk);
      if (info == null) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ": ReplicaInfo not found.");
        error = true;
        continue;
      }
      if (info.getGenerationStamp() != invalidBlk.getGenerationStamp()) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ": GenerationStamp not matched, info=" + info);
        error = true;
        continue;
      }
      v = (FsVolumeImpl) info.getVolume();
      if (f == null) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ": File not found, volume=" + v);
        error = true;
        continue;
      }
      if (v == null) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ". No volume for this replica, file=" + f + ".");
        error = true;
        continue;
      }
      File parent = f.getParentFile();
      if (parent == null) {
        LOG.warn("Failed to delete replica " + invalidBlk +
            ". Parent not found for file " + f + ".");
        error = true;
        continue;
      }
      ReplicaState replicaState = info.getState();
      if (replicaState == ReplicaState.FINALIZED ||
          (replicaState == ReplicaState.RUR &&
              ((ReplicaUnderRecovery) info).getOriginalReplica().getState() ==
                  ReplicaState.FINALIZED)) {
        v.clearPath(bpid, parent);
      }
      volumeMap.remove(bpid, invalidBlk);
    }
  
    // Delete the block asynchronously to make sure we can do it fast enough
    asyncDiskService.deleteAsync(v, f,
        FsDatasetUtil.getMetaFile(f, invalidBlk.getGenerationStamp()),
        new ExtendedBlock(bpid, invalidBlk));
  }
  if (error) {
    throw new IOException("Error in deleting blocks.");
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines: 66, Source: FsDatasetImpl.java
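
Both invalidate variants apply the same guard before calling clearPath: directory bookkeeping is updated only for replicas that are finalized, or under recovery on top of a finalized original. Restated as a standalone helper for readability (illustrative, not project code):

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;

/** Illustrative restatement of the clearPath guard in examples 11-12. */
static boolean shouldClearPath(ReplicaInfo info) {
  ReplicaState state = info.getState();
  return state == ReplicaState.FINALIZED
      || (state == ReplicaState.RUR
          && ((ReplicaUnderRecovery) info).getOriginalReplica().getState()
              == ReplicaState.FINALIZED);
}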


Note: The org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.