

Java ReplicaInfo.getNumBytes Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getNumBytes, gathered from open-source projects. If you are wondering what ReplicaInfo.getNumBytes does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.


The following presents 10 code examples of the ReplicaInfo.getNumBytes method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
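
Before the examples, here is a minimal sketch, not Hadoop code but a distillation of the recurring pattern you will see below: read the replica's recorded length with getNumBytes() and fail fast when it does not match the length the caller expects. The class and method names here are hypothetical.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;

// A minimal sketch (not part of Hadoop): the length-validation pattern
// that examples 1, 4, 5, 6, 7, 8 and 10 below all repeat.
public final class ReplicaLengthCheck {
  private ReplicaLengthCheck() {}

  /** Throws if the replica's recorded length differs from the expected one. */
  static void checkExpectedLength(ReplicaInfo replicaInfo, long expectedBlockLen)
      throws IOException {
    if (replicaInfo.getNumBytes() != expectedBlockLen) {
      throw new IOException("Corrupted replica " + replicaInfo
          + " with a length of " + replicaInfo.getNumBytes()
          + " expected length is " + expectedBlockLen);
    }
  }
}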

Example 1: append

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
@Override  // FsDatasetSpi
public synchronized ReplicaHandler append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // One reason we get here: the block was successfully finalized because
  // all packets were processed at the DataNode, but the client never
  // received the acks for some of them, so it re-opens the connection
  // and retries sending those packets.
  // The other reason is that an "append" is occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica = null;
  try {
    replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
        b.getNumBytes());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 38, Source file: FsDatasetImpl.java

Example 2: selectReplicaToDelete

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
@VisibleForTesting
static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
    final ReplicaInfo replica2) {
  ReplicaInfo replicaToKeep;
  ReplicaInfo replicaToDelete;

  // it's the same block so don't ever delete it, even if GS or size
  // differs.  caller should keep the one it just discovered on disk
  if (replica1.getBlockFile().equals(replica2.getBlockFile())) {
    return null;
  }
  if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
    replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
        ? replica1 : replica2;
  } else if (replica1.getNumBytes() != replica2.getNumBytes()) {
    replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes() ?
        replica1 : replica2;
  } else if (replica1.getVolume().isTransientStorage() &&
             !replica2.getVolume().isTransientStorage()) {
    replicaToKeep = replica2;
  } else {
    replicaToKeep = replica1;
  }

  replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;

  if (LOG.isDebugEnabled()) {
    LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
        + ".  Will try to delete " + replicaToDelete);
  }
  return replicaToDelete;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source file: BlockPoolSlice.java
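
Because selectReplicaToDelete is annotated @VisibleForTesting and is a package-private static method, a test in the same package can call it directly. The sketch below is an illustration under stated assumptions, not actual Hadoop test code: the directories are hypothetical, the volume is a Mockito mock, and the FinalizedReplica constructor signature is assumed from the Hadoop 2.x source. With equal lengths, the replica with the lower generation stamp is the one selected for deletion.

package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

import java.io.File;

import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;

// A hedged sketch: it lives in the fsdataset.impl package only because
// selectReplicaToDelete is package-private; paths are hypothetical.
public class SelectReplicaToDeleteSketch {
  @Test
  public void lowerGenStampReplicaIsDeleted() {
    FsVolumeSpi vol = Mockito.mock(FsVolumeSpi.class);
    // Same block id and length, different generation stamps and directories,
    // so getBlockFile() differs and the generation-stamp branch is taken.
    ReplicaInfo older = new FinalizedReplica(1, 100, 1, vol, new File("d1"));
    ReplicaInfo newer = new FinalizedReplica(1, 100, 2, vol, new File("d2"));
    // The replica with the smaller generation stamp should be deleted.
    Assert.assertSame(older, BlockPoolSlice.selectReplicaToDelete(newer, older));
  }
}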

Example 3: selectReplicaToDelete

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
    final ReplicaInfo replica2) {
  ReplicaInfo replicaToKeep;
  ReplicaInfo replicaToDelete;

  // it's the same block so don't ever delete it, even if GS or size
  // differs.  caller should keep the one it just discovered on disk
  if (replica1.getBlockFile().equals(replica2.getBlockFile())) {
    return null;
  }
  if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
    replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
        ? replica1 : replica2;
  } else if (replica1.getNumBytes() != replica2.getNumBytes()) {
    replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes() ?
        replica1 : replica2;
  } else if (replica1.getVolume().isTransientStorage() &&
             !replica2.getVolume().isTransientStorage()) {
    replicaToKeep = replica2;
  } else {
    replicaToKeep = replica1;
  }

  replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;

  if (LOG.isDebugEnabled()) {
    LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
        + ".  Will try to delete " + replicaToDelete);
  }
  return replicaToDelete;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 32, Source file: BlockPoolSlice.java

Example 4: append

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
@Override  // FsDatasetSpi
public synchronized ReplicaInPipeline append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // One reason we get here: the block was successfully finalized because
  // all packets were processed at the DataNode, but the client never
  // received the acks for some of them, so it re-opens the connection
  // and retries sending those packets.
  // The other reason is that an "append" is occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  return append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
      b.getNumBytes());
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 30, Source file: FsDatasetImpl.java

Example 5: append

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
@Override  // FsDatasetSpi
public synchronized ReplicaInPipeline append(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException {
  // One reason we get here: the block was successfully finalized because
  // all packets were processed at the DataNode, but the client never
  // received the acks for some of them, so it re-opens the connection
  // and retries sending those packets.
  // The other reason is that an "append" is occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS +
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo +
        " with a length of " + replicaInfo.getNumBytes() +
        " expected length is " + expectedBlockLen);
  }

  return append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo, newGS,
      b.getNumBytes());
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 30, Source file: FsDatasetImpl.java

Example 6: moveBlockAcrossStorage

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * Move block files from one storage to another storage.
 * @return the old replicaInfo
 * @throws IOException
 */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
    StorageType targetStorageType) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  if (replicaInfo.getNumBytes() != block.getNumBytes()) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaInfo.getNumBytes()
        + " expected length is " + block.getNumBytes());
  }
  if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
    throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
        + " already exists on storage " + targetStorageType);
  }

  if (replicaInfo.isOnTransientStorage()) {
    // Block movement from RAM_DISK will be done by LazyPersist mechanism
    throw new IOException("Replica " + replicaInfo
        + " cannot be moved from storageType : "
        + replicaInfo.getVolume().getStorageType());
  }

  try (FsVolumeReference volumeRef = volumes.getNextVolume(
      targetStorageType, block.getNumBytes())) {
    File oldBlockFile = replicaInfo.getBlockFile();
    File oldMetaFile = replicaInfo.getMetaFile();
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    File[] blockFiles = copyBlockFiles(block.getBlockId(),
        block.getGenerationStamp(), oldMetaFile, oldBlockFile,
        targetVolume.getTmpDir(block.getBlockPoolId()),
        replicaInfo.isOnTransientStorage());

    ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
        replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
        targetVolume, blockFiles[0].getParentFile(), 0);
    newReplicaInfo.setNumBytes(blockFiles[1].length());
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);

    removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
        oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
  }

  // Replace the old block if any to reschedule the scanning.
  return replicaInfo;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 56, Source file: FsDatasetImpl.java
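
A side note on resource handling: FsVolumeReference is a reference-counted, Closeable handle that pins a volume while its block files are in use. Example 1 above releases it manually with IOUtils.cleanup on failure, while example 6 relies on try-with-resources. Here is a hedged sketch of the try-with-resources form; obtainReference and getVolume are the real FsVolumeSpi API, while the class name and doCopy are hypothetical placeholders.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

public class VolumePinningSketch {
  // FsVolumeReference implements Closeable, so try-with-resources releases
  // the volume's reference count even if the work throws; this is the same
  // guarantee example 1 provides manually via IOUtils.cleanup.
  void withPinnedVolume(ReplicaInfo replicaInfo) throws IOException {
    try (FsVolumeReference ref = replicaInfo.getVolume().obtainReference()) {
      doCopy(ref.getVolume());
    }
  }

  // Hypothetical placeholder for the actual block-file copy.
  private void doCopy(FsVolumeSpi volume) throws IOException {
  }
}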

Example 7: recoverCheck

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS, 
    long expectedBlockLen) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  
  // check state
  if (replicaInfo.getState() != ReplicaState.FINALIZED &&
      replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
  }

  // check generation stamp
  long replicaGenerationStamp = replicaInfo.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp() ||
      replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + replicaGenerationStamp
        + ". Expected GS range is [" + b.getGenerationStamp() + ", " + 
        newGS + "].");
  }
  
  // stop the previous writer before checking the replica's length
  long replicaLen = replicaInfo.getNumBytes();
  if (replicaInfo.getState() == ReplicaState.RBW) {
    ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
    // kill the previous writer
    rbw.stopWriter(datanode.getDnConf().getXceiverStopTimeout());
    rbw.setWriter(Thread.currentThread());
    // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
    if (replicaLen != rbw.getBytesOnDisk() 
        || replicaLen != rbw.getBytesAcked()) {
      throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo + 
          "bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" + 
          rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() +
          ") are not the same.");
    }
  }
  
  // check block length
  if (replicaLen != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaLen + 
        " expected length is " + expectedBlockLen);
  }
  
  return replicaInfo;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 48, Source file: FsDatasetImpl.java
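
The generation-stamp check in recoverCheck accepts a replica only when its GS lies in the closed interval [b.getGenerationStamp(), newGS]: at least as new as the block the client named, but no newer than the stamp the recovery will assign. A hedged, self-contained restatement of just that predicate (the method name is hypothetical):

// A restatement (an assumption-labeled sketch, not Hadoop code) of
// recoverCheck's generation-stamp window: a replica qualifies for
// recovery only when oldGS <= replicaGS <= newGS.
static boolean isInRecoveryGsWindow(long replicaGS, long oldGS, long newGS) {
  return replicaGS >= oldGS && replicaGS <= newGS;
}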

Example 8: moveBlockAcrossStorage

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * Move block files from one storage to another storage.
 * @return the old replicaInfo
 * @throws IOException
 */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
    StorageType targetStorageType) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  if (replicaInfo.getNumBytes() != block.getNumBytes()) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaInfo.getNumBytes()
        + " expected length is " + block.getNumBytes());
  }
  if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
    throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
        + " already exists on storage " + targetStorageType);
  }

  if (replicaInfo.isOnTransientStorage()) {
    // Block movement from RAM_DISK will be done by LazyPersist mechanism
    throw new IOException("Replica " + replicaInfo
        + " cannot be moved from storageType : "
        + replicaInfo.getVolume().getStorageType());
  }

  try (FsVolumeReference volumeRef = volumes.getNextVolume(
      targetStorageType, block.getNumBytes())) {
    File oldBlockFile = replicaInfo.getBlockFile();
    File oldMetaFile = replicaInfo.getMetaFile();
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    File[] blockFiles = copyBlockFiles(block.getBlockId(),
        block.getGenerationStamp(), oldMetaFile, oldBlockFile,
        targetVolume.getTmpDir(block.getBlockPoolId()),
        replicaInfo.isOnTransientStorage(), smallBufferSize, conf);

    ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
        replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
        targetVolume, blockFiles[0].getParentFile(), 0);
    newReplicaInfo.setNumBytes(blockFiles[1].length());
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);

    removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
        oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
  }

  // Replace the old block if any to reschedule the scanning.
  return replicaInfo;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 56, Source file: FsDatasetImpl.java

Example 9: resolveDuplicateReplicas

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
/**
 * This method is invoked during DN startup when volumes are scanned to
 * build up the volumeMap.
 *
 * Given two replicas, decide which one to keep. The preference is as
 * follows:
 *   1. Prefer the replica with the higher generation stamp.
 *   2. If generation stamps are equal, prefer the replica with the
 *      larger on-disk length.
 *   3. If on-disk length is the same, prefer the replica on persistent
 *      storage volume.
 *   4. All other factors being equal, keep replica1.
 *
 * The other replica is removed from the volumeMap and is deleted from
 * its storage volume.
 *
 * @param replica1 the first of the two duplicate replicas
 * @param replica2 the second of the two duplicate replicas
 * @param volumeMap the map in which the retained replica is recorded
 * @return the replica that is retained.
 * @throws IOException
 */
ReplicaInfo resolveDuplicateReplicas(
    final ReplicaInfo replica1, final ReplicaInfo replica2,
    final ReplicaMap volumeMap) throws IOException {

  if (!deleteDuplicateReplicas) {
    // Leave both block replicas in place.
    return replica1;
  }

  ReplicaInfo replicaToKeep;
  ReplicaInfo replicaToDelete;

  if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
    replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
        ? replica1 : replica2;
  } else if (replica1.getNumBytes() != replica2.getNumBytes()) {
    replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes() ?
        replica1 : replica2;
  } else if (replica1.getVolume().isTransientStorage() &&
             !replica2.getVolume().isTransientStorage()) {
    replicaToKeep = replica2;
  } else {
    replicaToKeep = replica1;
  }

  replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;

  if (LOG.isDebugEnabled()) {
    LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
        + ".  Will try to delete " + replicaToDelete);
  }

  // Update volumeMap.
  volumeMap.add(bpid, replicaToKeep);

  // Delete the files on disk. Failure here is okay.
  final File blockFile = replicaToDelete.getBlockFile();
  if (!blockFile.delete()) {
    LOG.warn("Failed to delete block file " + blockFile);
  }
  final File metaFile = replicaToDelete.getMetaFile();
  if (!metaFile.delete()) {
    LOG.warn("Failed to delete meta file " + metaFile);
  }

  return replicaToKeep;
}
 
Developer ID: yncxcw, Project: FlexMap, Lines of code: 70, Source file: BlockPoolSlice.java

Example 10: recoverCheck

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; // import the class this method depends on
private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException {
  ReplicaInfo replicaInfo =
      getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  
  // check state
  if (replicaInfo.getState() != ReplicaState.FINALIZED &&
      replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA +
            replicaInfo);
  }

  // check generation stamp
  long replicaGenerationStamp = replicaInfo.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp() ||
      replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA +
            replicaGenerationStamp + ". Expected GS range is [" +
            b.getGenerationStamp() + ", " +
            newGS + "].");
  }
  
  // stop the previous writer before checking the replica's length
  long replicaLen = replicaInfo.getNumBytes();
  if (replicaInfo.getState() == ReplicaState.RBW) {
    ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaInfo;
    // kill the previous writer
    rbw.stopWriter();
    rbw.setWriter(Thread.currentThread());
    // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
    if (replicaLen != rbw.getBytesOnDisk() ||
        replicaLen != rbw.getBytesAcked()) {
      throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo +
          "bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" +
          rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() +
          ") are not the same.");
    }
  }
  
  // check block length
  if (replicaLen != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo +
        " with a length of " + replicaLen +
        " expected length is " + expectedBlockLen);
  }
  
  return replicaInfo;
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 51, Source file: FsDatasetImpl.java


Note: The org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getNumBytes examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code fragments were selected from open-source projects and their copyright remains with the original authors; consult the corresponding project's License before distributing or using the code. Do not republish without permission.