

Java ReplicaBeingWritten.getBytesAcked Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten.getBytesAcked. If you are wondering what ReplicaBeingWritten.getBytesAcked does or how it is used in practice, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten.


The sections below present 5 code examples of the ReplicaBeingWritten.getBytesAcked method, sorted by popularity by default.
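Before diving in, a quick orientation: getBytesAcked() returns the number of bytes of an under-construction (RBW) replica that have been acknowledged by the downstream pipeline. The recovery code below repeatedly compares it against getBytesOnDisk() and getNumBytes(). The following minimal sketch illustrates that consistency check; the helper name lengthsConsistent is hypothetical, and the ReplicaBeingWritten instance is assumed to have been obtained from the dataset, as in the real examples.

import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;

// Hypothetical helper: the invariant the recovery paths below rely on is
// bytesAcked <= bytesOnDisk <= bytesRcvd; recoverCheck additionally requires
// all three to be equal before an append/close recovery may proceed.
static boolean lengthsConsistent(ReplicaBeingWritten rbw) {
  long acked = rbw.getBytesAcked();    // bytes acknowledged by downstream nodes
  long onDisk = rbw.getBytesOnDisk();  // bytes flushed to local disk
  long received = rbw.getNumBytes();   // bytes received from the client so far
  return acked == onDisk && onDisk == received;
}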

Example 1: recoverCheck

import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; // import the package/class this method depends on
private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS, 
    long expectedBlockLen) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  
  // check state
  if (replicaInfo.getState() != ReplicaState.FINALIZED &&
      replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
  }

  // check generation stamp
  long replicaGenerationStamp = replicaInfo.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp() ||
      replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + replicaGenerationStamp
        + ". Expected GS range is [" + b.getGenerationStamp() + ", " + 
        newGS + "].");
  }
  
  // stop the previous writer before checking the replica's length
  long replicaLen = replicaInfo.getNumBytes();
  if (replicaInfo.getState() == ReplicaState.RBW) {
    ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
    // kill the previous writer
    rbw.stopWriter(datanode.getDnConf().getXceiverStopTimeout());
    rbw.setWriter(Thread.currentThread());
    // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
    if (replicaLen != rbw.getBytesOnDisk() 
        || replicaLen != rbw.getBytesAcked()) {
      throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo + 
          "bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" + 
          rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() +
          ") are not the same.");
    }
  }
  
  // check block length
  if (replicaLen != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaLen + 
        " expected length is " + expectedBlockLen);
  }
  
  return replicaInfo;
}
 
Developer: naver, Project: hadoop, Lines: 48, Source: FsDatasetImpl.java
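Design note: recoverCheck insists that bytesRcvd, bytesOnDisk, and bytesAcked all agree before recovery proceeds. Once the previous writer thread has been stopped, no bytes can still be in flight, so any disagreement among the three lengths indicates an inconsistent replica and the method aborts with ReplicaAlreadyExistsException.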

Example 2: recoverRbw

import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; // import the package/class this method depends on
@Override // FsDatasetSpi
public synchronized ReplicaHandler recoverRbw(
    ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
    throws IOException {
  LOG.info("Recover RBW replica " + b);

  ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  
  // check the replica's state
  if (replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
  }
  ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
  
  LOG.info("Recovering " + rbw);

  // Stop the previous writer
  rbw.stopWriter(datanode.getDnConf().getXceiverStopTimeout());
  rbw.setWriter(Thread.currentThread());

  // check generation stamp
  long replicaGenerationStamp = rbw.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp() ||
      replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b +
        ". Expected GS range is [" + b.getGenerationStamp() + ", " + 
        newGS + "].");
  }
  
  // check replica length
  long bytesAcked = rbw.getBytesAcked();
  long numBytes = rbw.getNumBytes();
  if (bytesAcked < minBytesRcvd || numBytes > maxBytesRcvd){
    throw new ReplicaNotFoundException("Unmatched length replica " + 
        replicaInfo + ": BytesAcked = " + bytesAcked + 
        " BytesRcvd = " + numBytes + " are not in the range of [" + 
        minBytesRcvd + ", " + maxBytesRcvd + "].");
  }

  FsVolumeReference ref = rbw.getVolume().obtainReference();
  try {
    // Truncate the potentially corrupt portion.
    // If the source was client and the last node in the pipeline was lost,
    // any corrupt data written after the acked length can go unnoticed.
    if (numBytes > bytesAcked) {
      final File replicafile = rbw.getBlockFile();
      truncateBlock(replicafile, rbw.getMetaFile(), numBytes, bytesAcked);
      rbw.setNumBytes(bytesAcked);
      rbw.setLastChecksumAndDataLen(bytesAcked, null);
    }

    // bump the replica's generation stamp to newGS
    bumpReplicaGS(rbw, newGS);
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(rbw, ref);
}
 
Developer: naver, Project: hadoop, Lines: 62, Source: FsDatasetImpl.java
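Design note: recoverRbw trusts only the acknowledged length. If the replica holds more bytes than were acked (numBytes > bytesAcked), the tail may be corrupt, since a pipeline failure after the last ack leaves later writes unverified, so the block and meta files are truncated back to bytesAcked before the generation stamp is bumped. This variant also obtains an FsVolumeReference for the returned ReplicaHandler so the volume cannot be removed while recovery is in progress.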

Example 3: recoverRbw

import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; // import the package/class this method depends on
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline recoverRbw(ExtendedBlock b,
    long newGS, long minBytesRcvd, long maxBytesRcvd)
    throws IOException {
  LOG.info("Recover RBW replica " + b);

  ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  
  // check the replica's state
  if (replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
  }
  ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
  
  LOG.info("Recovering " + rbw);

  // Stop the previous writer
  rbw.stopWriter(datanode.getDnConf().getXceiverStopTimeout());
  rbw.setWriter(Thread.currentThread());

  // check generation stamp
  long replicaGenerationStamp = rbw.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp() ||
      replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b +
        ". Expected GS range is [" + b.getGenerationStamp() + ", " + 
        newGS + "].");
  }
  
  // check replica length
  long bytesAcked = rbw.getBytesAcked();
  long numBytes = rbw.getNumBytes();
  if (bytesAcked < minBytesRcvd || numBytes > maxBytesRcvd){
    throw new ReplicaNotFoundException("Unmatched length replica " + 
        replicaInfo + ": BytesAcked = " + bytesAcked + 
        " BytesRcvd = " + numBytes + " are not in the range of [" + 
        minBytesRcvd + ", " + maxBytesRcvd + "].");
  }

  // Truncate the potentially corrupt portion.
  // If the source was client and the last node in the pipeline was lost,
  // any corrupt data written after the acked length can go unnoticed. 
  if (numBytes > bytesAcked) {
    final File replicafile = rbw.getBlockFile();
    truncateBlock(replicafile, rbw.getMetaFile(), numBytes, bytesAcked);
    rbw.setNumBytes(bytesAcked);
    rbw.setLastChecksumAndDataLen(bytesAcked, null);
  }

  // bump the replica's generation stamp to newGS
  bumpReplicaGS(rbw, newGS);
  
  return rbw;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 57, Source: FsDatasetImpl.java
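Note: Example 3 is the same recovery flow from an older codebase. It returns the ReplicaInPipeline directly and performs the truncation without first acquiring an FsVolumeReference, the safeguard that the newer code in Example 2 added to keep the volume alive for the duration of the recovery.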

Example 4: recoverCheck

import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; // import the package/class this method depends on
private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException {
  ReplicaInfo replicaInfo =
      getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  
  // check state
  if (replicaInfo.getState() != ReplicaState.FINALIZED &&
      replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA +
            replicaInfo);
  }

  // check generation stamp
  long replicaGenerationStamp = replicaInfo.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp() ||
      replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA +
            replicaGenerationStamp + ". Expected GS range is [" +
            b.getGenerationStamp() + ", " +
            newGS + "].");
  }
  
  // stop the previous writer before checking the replica's length
  long replicaLen = replicaInfo.getNumBytes();
  if (replicaInfo.getState() == ReplicaState.RBW) {
    ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaInfo;
    // kill the previous writer
    rbw.stopWriter();
    rbw.setWriter(Thread.currentThread());
    // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
    if (replicaLen != rbw.getBytesOnDisk() ||
        replicaLen != rbw.getBytesAcked()) {
      throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo +
          "bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" +
          rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() +
          ") are not the same.");
    }
  }
  
  // check block length
  if (replicaLen != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo +
        " with a length of " + replicaLen +
        " expected length is " + expectedBlockLen);
  }
  
  return replicaInfo;
}
 
Developer: hopshadoop, Project: hops, Lines: 51, Source: FsDatasetImpl.java

Example 5: recoverRbw

import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; // import the package/class this method depends on
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline recoverRbw(ExtendedBlock b, long newGS,
    long minBytesRcvd, long maxBytesRcvd) throws IOException {
  LOG.info("Recover RBW replica " + b);

  ReplicaInfo replicaInfo =
      getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
  
  // check the replica's state
  if (replicaInfo.getState() != ReplicaState.RBW) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
  }
  ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaInfo;
  
  LOG.info("Recovering " + rbw);

  // Stop the previous writer
  rbw.stopWriter();
  rbw.setWriter(Thread.currentThread());

  // check generation stamp
  long replicaGenerationStamp = rbw.getGenerationStamp();
  if (replicaGenerationStamp < b.getGenerationStamp() ||
      replicaGenerationStamp > newGS) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b +
            ". Expected GS range is [" + b.getGenerationStamp() + ", " +
            newGS + "].");
  }
  
  // check replica length
  if (rbw.getBytesAcked() < minBytesRcvd ||
      rbw.getNumBytes() > maxBytesRcvd) {
    throw new ReplicaNotFoundException("Unmatched length replica " +
        replicaInfo + ": BytesAcked = " + rbw.getBytesAcked() +
        " BytesRcvd = " + rbw.getNumBytes() + " are not in the range of [" +
        minBytesRcvd + ", " + maxBytesRcvd + "].");
  }

  // bump the replica's generation stamp to newGS
  bumpReplicaGS(rbw, newGS);
  
  return rbw;
}
 
Developer: hopshadoop, Project: hops, Lines: 46, Source: FsDatasetImpl.java
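Note: the hops variants (Examples 4 and 5) call stopWriter() without an explicit xceiver-stop timeout and, in recoverRbw, omit the truncation step entirely: the generation stamp is bumped even when numBytes exceeds bytesAcked. Whether unacknowledged tail bytes are handled elsewhere in that codebase is not shown here.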


Note: The org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten.getBytesAcked method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects and copyright remains with their original authors; consult each project's License before distributing or using the code. Do not reproduce this article without permission.