

Java ReplicaInPipeline Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline. If you are wondering what ReplicaInPipeline is for and how to use it, the curated examples below should help.


ReplicaInPipeline belongs to the org.apache.hadoop.hdfs.server.datanode package. The 15 code examples below show typical uses of the class, ordered by popularity.

Example 1: fetchReplicaInfo

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
/**
 * This should be used primarily for testing.
 * @param bpid the block pool ID
 * @param blockId the block ID
 * @return a clone of the replica stored in datanode memory, or null
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  ReplicaInfo r = volumeMap.get(bpid, blockId);
  if(r == null)
    return null;
  switch(r.getState()) {
  case FINALIZED:
    return new FinalizedReplica((FinalizedReplica)r);
  case RBW:
    return new ReplicaBeingWritten((ReplicaBeingWritten)r);
  case RWR:
    return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered)r);
  case RUR:
    return new ReplicaUnderRecovery((ReplicaUnderRecovery)r);
  case TEMPORARY:
    return new ReplicaInPipeline((ReplicaInPipeline)r);
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: FsDatasetImpl.java
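
As a quick illustration of the cloning contract above, here is a minimal JUnit-style sketch (not from the article; the dataset fixture, BPID, and BLOCK_ID are hypothetical test setup):

// Hypothetical test: fetchReplicaInfo must return a defensive copy,
// not the live object held in the dataset's volumeMap.
@Test
public void testFetchReplicaInfoReturnsClone() throws Exception {
  ReplicaInfo live = dataset.volumeMap.get(BPID, BLOCK_ID);
  ReplicaInfo copy = dataset.fetchReplicaInfo(BPID, BLOCK_ID);
  assertNotNull(copy);
  assertNotSame(live, copy);                       // a clone, not the same instance
  assertEquals(live.getState(), copy.getState());  // but with identical contents
  assertEquals(live.getNumBytes(), copy.getNumBytes());
}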

Example 2: fetchReplicaInfo

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
/**
 * Returns a clone of a replica stored in data-node memory.
 * Should be primarily used for testing.
 * @param bpid the block pool ID
 * @param blockId the block ID
 * @return a clone of the replica, or null if none is found
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  ReplicaInfo r = volumeMap.get(bpid, blockId);
  if(r == null)
    return null;
  switch(r.getState()) {
  case FINALIZED:
    return new FinalizedReplica((FinalizedReplica)r);
  case RBW:
    return new ReplicaBeingWritten((ReplicaBeingWritten)r);
  case RWR:
    return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered)r);
  case RUR:
    return new ReplicaUnderRecovery((ReplicaUnderRecovery)r);
  case TEMPORARY:
    return new ReplicaInPipeline((ReplicaInPipeline)r);
  }
  return null;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 25, Source: FsDatasetImpl.java

Example 3: recoverAppend

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override  // FsDatasetSpi
public synchronized ReplicaInPipeline recoverAppend(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);

  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);

  // change the replica's state/gs etc.
  if (replicaInfo.getState() == ReplicaState.FINALIZED) {
    return append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo, newGS, 
        b.getNumBytes());
  } else { //RBW
    bumpReplicaGS(replicaInfo, newGS);
    return (ReplicaBeingWritten)replicaInfo;
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 17, Source: FsDatasetImpl.java
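
The bumpReplicaGS helper called in the RBW branch is not part of this snippet. As a rough sketch of what such a generation-stamp bump involves (based on my reading of FsDatasetImpl; the actual code may differ between versions), it renames the on-disk meta file and updates the in-memory stamp:

// Sketch only -- the real FsDatasetImpl#bumpReplicaGS may differ.
private void bumpReplicaGS(ReplicaInfo replicaInfo, long newGS)
    throws IOException {
  long oldGS = replicaInfo.getGenerationStamp();
  File oldmeta = replicaInfo.getMetaFile();   // blk_<id>_<oldGS>.meta
  replicaInfo.setGenerationStamp(newGS);
  File newmeta = replicaInfo.getMetaFile();   // now blk_<id>_<newGS>.meta
  if (!oldmeta.renameTo(newmeta)) {
    replicaInfo.setGenerationStamp(oldGS);    // restore the old GS on failure
    throw new IOException("Block " + replicaInfo + ": failed to move meta file "
        + oldmeta + " to " + newmeta);
  }
}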

Example 4: createRbw

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline createRbw(ExtendedBlock b)
    throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
      b.getBlockId());
  if (replicaInfo != null) {
    throw new ReplicaAlreadyExistsException("Block " + b +
    " already exists in state " + replicaInfo.getState() +
    " and thus cannot be created.");
  }
  // create a new block
  FsVolumeImpl v = volumes.getNextVolume(b.getNumBytes());
  // create an rbw file to hold the block in the designated volume
  File f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
  ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(), 
      b.getGenerationStamp(), v, f.getParentFile());
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  return newReplicaInfo;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 20, Source: FsDatasetImpl.java
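
To situate createRbw in the write path, here is a simplified, hedged sketch of the surrounding flow (method names follow FsDatasetSpi; block setup and error handling are omitted):

// Simplified sketch of a DataNode-side block write:
// allocate the RBW replica, stream data into it, then finalize.
ReplicaInPipeline rbw = dataset.createRbw(block);
// ... BlockReceiver writes packets into the RBW block/meta files,
//     updating the replica's bytesOnDisk/bytesAcked as data arrives ...
dataset.finalizeBlock(block);   // RBW -> FINALIZED once fully acked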

Example 5: createTemporary

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline createTemporary(ExtendedBlock b)
    throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
  if (replicaInfo != null) {
    throw new ReplicaAlreadyExistsException("Block " + b +
        " already exists in state " + replicaInfo.getState() +
        " and thus cannot be created.");
  }
  
  FsVolumeImpl v = volumes.getNextVolume(b.getNumBytes());
  // create a temporary file to hold block in the designated volume
  File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
  ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(), 
      b.getGenerationStamp(), v, f.getParentFile());
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  
  return newReplicaInfo;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 20, Source: FsDatasetImpl.java

Example 6: createTemporary

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline createTemporary(StorageType storageType,
    ExtendedBlock b) throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
  if (replicaInfo != null) {
    if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
        && replicaInfo instanceof ReplicaInPipeline) {
      // Stop the previous writer
      ((ReplicaInPipeline)replicaInfo)
                    .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
      invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
    } else {
      throw new ReplicaAlreadyExistsException("Block " + b +
          " already exists in state " + replicaInfo.getState() +
          " and thus cannot be created.");
    }
  }
  
  FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());
  // create a temporary file to hold block in the designated volume
  File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
  ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(), 
      b.getGenerationStamp(), v, f.getParentFile(), 0);
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  return newReplicaInfo;
}
 
Developer: yncxcw, Project: FlexMap, Lines: 27, Source: FsDatasetImpl.java
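
The stopWriter timeout used above comes from the DataNode configuration; to my knowledge the key is dfs.datanode.xceiver.stop.timeout.millis (default 60000 ms). A hedged sketch of tuning it, e.g. to speed up pipeline-recovery tests:

// Assumed config key; verify against your Hadoop version's DFSConfigKeys.
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
    1000L);   // wait at most 1s for the previous writer thread to stop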

Example 7: fetchReplicaInfo

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
/**
 * Returns a clone of a replica stored in data-node memory.
 * Should be primarily used for testing.
 *
 * @param bpid the block pool ID
 * @param blockId the block ID
 * @return a clone of the replica, or null if none is found
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  ReplicaInfo r = volumeMap.get(bpid, blockId);
  if (r == null) {
    return null;
  }
  switch (r.getState()) {
    case FINALIZED:
      return new FinalizedReplica((FinalizedReplica) r);
    case RBW:
      return new ReplicaBeingWritten((ReplicaBeingWritten) r);
    case RWR:
      return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered) r);
    case RUR:
      return new ReplicaUnderRecovery((ReplicaUnderRecovery) r);
    case TEMPORARY:
      return new ReplicaInPipeline((ReplicaInPipeline) r);
  }
  return null;
}
 
Developer: hopshadoop, Project: hops, Lines: 27, Source: FsDatasetImpl.java

Example 8: recoverAppend

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override  // FsDatasetSpi
public synchronized ReplicaInPipeline recoverAppend(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);

  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);

  // change the replica's state/gs etc.
  if (replicaInfo.getState() == ReplicaState.FINALIZED) {
    return append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo, newGS,
        b.getNumBytes());
  } else { //RBW
    bumpReplicaGS(replicaInfo, newGS);
    return (ReplicaBeingWritten) replicaInfo;
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 17, Source: FsDatasetImpl.java

Example 9: createRbw

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline createRbw(ExtendedBlock b)
    throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
  if (replicaInfo != null) {
    throw new ReplicaAlreadyExistsException("Block " + b +
        " already exists in state " + replicaInfo.getState() +
        " and thus cannot be created.");
  }
  // create a new block
  FsVolumeImpl v = volumes.getNextVolume(b.getNumBytes());
  // create an rbw file to hold the block in the designated volume
  File f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
  ReplicaBeingWritten newReplicaInfo =
      new ReplicaBeingWritten(b.getBlockId(), b.getGenerationStamp(), v,
          f.getParentFile());
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  return newReplicaInfo;
}
 
Developer: hopshadoop, Project: hops, Lines: 20, Source: FsDatasetImpl.java

Example 10: createTemporary

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline createTemporary(ExtendedBlock b)
    throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
  if (replicaInfo != null) {
    throw new ReplicaAlreadyExistsException("Block " + b +
        " already exists in state " + replicaInfo.getState() +
        " and thus cannot be created.");
  }
  
  FsVolumeImpl v = volumes.getNextVolume(b.getNumBytes());
  // create a temporary file to hold block in the designated volume
  File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
  ReplicaInPipeline newReplicaInfo =
      new ReplicaInPipeline(b.getBlockId(), b.getGenerationStamp(), v,
          f.getParentFile());
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  
  return newReplicaInfo;
}
 
Developer: hopshadoop, Project: hops, Lines: 21, Source: FsDatasetImpl.java

Example 11: createReplicaInPipeline

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override
public Replica createReplicaInPipeline(
    FsVolumeSpi volume, ExtendedBlock block) throws IOException {
  FsVolumeImpl vol = (FsVolumeImpl) volume;
  ReplicaInPipeline rip = new ReplicaInPipeline(
      block.getBlockId(), block.getGenerationStamp(), volume,
      vol.createTmpFile(
          block.getBlockPoolId(), block.getLocalBlock()).getParentFile(),
      0);
  dataset.volumeMap.add(block.getBlockPoolId(), rip);
  return rip;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 13, Source: FsDatasetImplTestUtils.java
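
A hedged sketch of how this test utility might be used; note that the way the FsDatasetTestUtils instance and the volume are obtained varies by Hadoop version and is left to the caller here:

// Hypothetical test helper: plant a TEMPORARY replica directly in the
// dataset, bypassing the client write pipeline entirely.
static Replica plantTemporaryReplica(FsDatasetTestUtils utils,
    FsVolumeSpi volume, String bpid, long blockId, long genStamp)
    throws IOException {
  ExtendedBlock block = new ExtendedBlock(bpid, blockId, 0, genStamp);
  return utils.createReplicaInPipeline(volume, block);
}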

Example 12: createTemporary

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override // FsDatasetSpi
public synchronized ReplicaHandler createTemporary(
    StorageType storageType, ExtendedBlock b) throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
  if (replicaInfo != null) {
    if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
        && replicaInfo instanceof ReplicaInPipeline) {
      // Stop the previous writer
      ((ReplicaInPipeline)replicaInfo)
                    .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
      invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
    } else {
      throw new ReplicaAlreadyExistsException("Block " + b +
          " already exists in state " + replicaInfo.getState() +
          " and thus cannot be created.");
    }
  }

  FsVolumeReference ref = volumes.getNextVolume(storageType, b.getNumBytes());
  FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
  // create a temporary file to hold block in the designated volume
  File f;
  try {
    f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }

  ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(), 
      b.getGenerationStamp(), v, f.getParentFile(), 0);
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  return new ReplicaHandler(newReplicaInfo, ref);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 35, Source: FsDatasetImpl.java
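
Compared with the older overloads, this variant returns a ReplicaHandler that pairs the new replica with an FsVolumeReference pinning the chosen volume (for example against hot-swap removal). As I understand it, the caller must close the handler to release that reference once the write finishes; a hedged usage sketch:

ReplicaHandler handler = dataset.createTemporary(StorageType.DEFAULT, block);
try {
  // handler.getReplica() exposes the in-flight replica for writing
  // ... stream data into the replica ...
} finally {
  IOUtils.cleanup(null, handler);   // ReplicaHandler is Closeable; closing
                                    // it releases the held volume reference
}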

Example 13: append

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override  // FsDatasetSpi
public synchronized ReplicaInPipeline append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // There are two ways to get here: either the block was successfully
  // finalized (all packets were processed at the DataNode) but the acks
  // for some packets never reached the client, which then re-opens the
  // connection and retries sending those packets; or an "append" is
  // occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  return append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
      b.getNumBytes());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 30, Source: FsDatasetImpl.java

Example 14: createRbw

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline createRbw(StorageType storageType,
    ExtendedBlock b, boolean allowLazyPersist) throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
      b.getBlockId());
  if (replicaInfo != null) {
    throw new ReplicaAlreadyExistsException("Block " + b +
    " already exists in state " + replicaInfo.getState() +
    " and thus cannot be created.");
  }
  // create a new block
  FsVolumeImpl v;
  while (true) {
    try {
      if (allowLazyPersist) {
        // First try to place the block on a transient volume.
        v = volumes.getNextTransientVolume(b.getNumBytes());
        datanode.getMetrics().incrRamDiskBlocksWrite();
      } else {
        v = volumes.getNextVolume(storageType, b.getNumBytes());
      }
    } catch (DiskOutOfSpaceException de) {
      if (allowLazyPersist) {
        datanode.getMetrics().incrRamDiskBlocksWriteFallback();
        allowLazyPersist = false;
        continue;
      }
      throw de;
    }
    break;
  }
  // create an rbw file to hold block in the designated volume
  File f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
  ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(), 
      b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes());
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);

  return newReplicaInfo;
}
 
Developer: yncxcw, Project: FlexMap, Lines: 40, Source: FsDatasetImpl.java
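
The allowLazyPersist branch only succeeds when the DataNode has a transient (RAM_DISK) volume configured; otherwise getNextTransientVolume throws DiskOutOfSpaceException and the loop falls back to ordinary storage. On the client side, lazy-persist writes are requested with CreateFlag.LAZY_PERSIST; a hedged sketch (the path and data are hypothetical, and per the HDFS memory-storage docs replication should be 1):

// Hypothetical client-side request for a lazy-persist (RAM_DISK-first) write.
FileSystem fs = FileSystem.get(conf);
Path p = new Path("/tmp/hot-data");
FSDataOutputStream out = fs.create(p,
    FsPermission.getFileDefault(),
    EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST),
    4096,                          // buffer size
    (short) 1,                     // lazy persist supports replication 1
    fs.getDefaultBlockSize(p),
    null);                         // no progress callback
out.writeBytes("hot data");
out.close();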

Example 15: append

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; // import the required class
@Override  // FsDatasetSpi
public synchronized ReplicaInPipeline append(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException {
  // There are two ways to get here: either the block was successfully
  // finalized (all packets were processed at the DataNode) but the acks
  // for some packets never reached the client, which then re-opens the
  // connection and retries sending those packets; or an "append" is
  // occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS +
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo +
        " with a length of " + replicaInfo.getNumBytes() +
        " expected length is " + expectedBlockLen);
  }

  return append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo, newGS,
      b.getNumBytes());
}
 
Developer: hopshadoop, Project: hops, Lines: 30, Source: FsDatasetImpl.java


Note: The org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.