

Java BlockConstructionStage Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage. If you are wondering what BlockConstructionStage is for, how to use it, or where to find real-world examples of it, the curated class examples below should help.


The BlockConstructionStage class belongs to the org.apache.hadoop.hdfs.protocol.datatransfer package. Fifteen code examples of the class are presented below, ordered by popularity by default.
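For orientation before diving in: BlockConstructionStage is an enum describing which phase of the HDFS write pipeline a block is in. A minimal sketch, listing only the constants that actually appear in the snippets on this page (the enum defines further setup and recovery stages not shown here):

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;

public class StageTour {
  public static void main(String[] args) {
    // Constants taken directly from the examples below; not the full enum.
    BlockConstructionStage[] stagesSeenBelow = {
        BlockConstructionStage.PIPELINE_SETUP_CREATE,   // Examples 11 and 15
        BlockConstructionStage.DATA_STREAMING,          // Examples 10 and 14
        BlockConstructionStage.PIPELINE_CLOSE_RECOVERY, // Examples 2, 6, 8, 9
        BlockConstructionStage.TRANSFER_RBW,            // Example 12
        BlockConstructionStage.TRANSFER_FINALIZED,      // Example 12
    };
    for (BlockConstructionStage stage : stagesSeenBelow) {
      System.out.println(stage);
    }
  }
}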

Example 1: DataTransfer

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
/**
 * Connect to the first item in the target list.  Pass along the 
 * entire target list, the block, and the data.
 */
DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
    ExtendedBlock b, BlockConstructionStage stage,
    final String clientname)  {
  if (DataTransferProtocol.LOG.isDebugEnabled()) {
    DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
        + b + " (numBytes=" + b.getNumBytes() + ")"
        + ", stage=" + stage
        + ", clientname=" + clientname
        + ", targets=" + Arrays.asList(targets)
        + ", target storage types=" + (targetStorageTypes == null ? "[]" :
        Arrays.asList(targetStorageTypes)));
  }
  this.targets = targets;
  this.targetStorageTypes = targetStorageTypes;
  this.b = b;
  this.stage = stage;
  BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
  bpReg = bpos.bpRegistration;
  this.clientname = clientname;
  this.cachingStrategy =
      new CachingStrategy(true, getDnConf().readaheadLength);
}
 
Developer ID: naver; Project: hadoop; Lines of code: 27; Source file: DataNode.java
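A usage note: Example 12 further down invokes this constructor directly and runs the transfer on the calling thread:

new DataTransfer(targets, targetStorageTypes, b, stage, client).run();

In the full DataNode source the class implements Runnable, so it can equally be handed off to a daemon thread rather than run inline.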

Example 2: testWrite

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 17; Source file: TestDataTransferProtocol.java
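For context, a caller could exercise all three branches of testWrite as below. This is a hypothetical sketch: the block pool id, block id, generation stamps, and descriptions are invented values, not taken from the real test suite.

ExtendedBlock block = new ExtendedBlock("bp-demo", 1001L, 0L, 100L); // assumed block
long newGS = block.getGenerationStamp() + 1;

// Branch 1: the test asserts an error response (eofExcepted = true).
testWrite(block, BlockConstructionStage.PIPELINE_SETUP_STREAMING_RECOVERY,
    0L, "recovery with stale generation stamp", true);

// Branch 2: PIPELINE_CLOSE_RECOVERY, a zero-length close answered with SUCCESS.
testWrite(block, BlockConstructionStage.PIPELINE_CLOSE_RECOVERY,
    newGS, "close recovery", false);

// Branch 3: the default path, verified by writing a zero-length packet.
testWrite(block, BlockConstructionStage.PIPELINE_SETUP_CREATE,
    0L, "create", false);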

Example 3: getBlockReceiver

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
/**
 * Separated for testing.
 */
@VisibleForTesting
BlockReceiver getBlockReceiver(
    final ExtendedBlock block, final StorageType storageType,
    final DataInputStream in,
    final String inAddr, final String myAddr,
    final BlockConstructionStage stage,
    final long newGs, final long minBytesRcvd, final long maxBytesRcvd,
    final String clientname, final DatanodeInfo srcDataNode,
    final DataNode dn, DataChecksum requestedChecksum,
    CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning) throws IOException {
  return new BlockReceiver(block, storageType, in,
      inAddr, myAddr, stage, newGs, minBytesRcvd, maxBytesRcvd,
      clientname, srcDataNode, dn, requestedChecksum,
      cachingStrategy, allowLazyPersist, pinning);
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines of code: 21; Source file: DataXceiver.java
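Because getBlockReceiver is marked @VisibleForTesting, a test can intercept it and substitute a canned receiver. A hypothetical sketch using a Mockito spy; realXceiver and mockReceiver are assumed to be constructed elsewhere in the test, and the matcher style follows the Mockito version bundled with Hadoop of this era:

// Stub the factory so the write path uses mockReceiver instead of touching disk.
DataXceiver xceiver = Mockito.spy(realXceiver);
Mockito.doReturn(mockReceiver).when(xceiver).getBlockReceiver(
    Mockito.any(ExtendedBlock.class), Mockito.any(StorageType.class),
    Mockito.any(DataInputStream.class), Mockito.anyString(), Mockito.anyString(),
    Mockito.any(BlockConstructionStage.class), Mockito.anyLong(), Mockito.anyLong(),
    Mockito.anyLong(), Mockito.anyString(), Mockito.any(DatanodeInfo.class),
    Mockito.any(DataNode.class), Mockito.any(DataChecksum.class),
    Mockito.any(CachingStrategy.class), Mockito.anyBoolean(), Mockito.anyBoolean());

Every later call into the spied xceiver then receives mockReceiver, letting the test observe the receive path without creating a real BlockReceiver.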

Example 4: DataTransfer

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
/**
 * Connect to the first item in the target list.  Pass along the 
 * entire target list, the block, and the data.
 */
DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
    ExtendedBlock b, BlockConstructionStage stage,
    final String clientname) {
  if (DataTransferProtocol.LOG.isDebugEnabled()) {
    DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
        + b + " (numBytes=" + b.getNumBytes() + ")"
        + ", stage=" + stage
        + ", clientname=" + clientname
        + ", targets=" + Arrays.asList(targets)
        + ", target storage types=" + (targetStorageTypes == null ? "[]" :
        Arrays.asList(targetStorageTypes)));
  }
  this.targets = targets;
  this.targetStorageTypes = targetStorageTypes;
  this.b = b;
  this.stage = stage;
  BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
  bpReg = bpos.bpRegistration;
  this.clientname = clientname;
  this.cachingStrategy =
      new CachingStrategy(true, getDnConf().readaheadLength);
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines of code: 27; Source file: DataNode.java

Example 5: DataTransfer

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
/**
 * Connect to the first item in the target list.  Pass along the 
 * entire target list, the block, and the data.
 */
DataTransfer(DatanodeInfo targets[], ExtendedBlock b, BlockConstructionStage stage,
    final String clientname)  {
  if (DataTransferProtocol.LOG.isDebugEnabled()) {
    DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
        + b + " (numBytes=" + b.getNumBytes() + ")"
        + ", stage=" + stage
        + ", clientname=" + clientname
        + ", targests=" + Arrays.asList(targets));
  }
  this.targets = targets;
  this.b = b;
  this.stage = stage;
  BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
  bpReg = bpos.bpRegistration;
  this.clientname = clientname;
}
 
Developer ID: ict-carch; Project: hadoop-plus; Lines of code: 21; Source file: DataNode.java

Example 6: testWrite

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
      new DatanodeInfo[1], null, stage,
      0, block.getNumBytes(), block.getNumBytes(), newGS,
      DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
 
Developer ID: ict-carch; Project: hadoop-plus; Lines of code: 20; Source file: TestDataTransferProtocol.java

Example 7: DataTransfer

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
/**
 * Connect to the first item in the target list.  Pass along the
 * entire target list, the block, and the data.
 */
DataTransfer(DatanodeInfo targets[], ExtendedBlock b,
    BlockConstructionStage stage, final String clientname) {
  if (DataTransferProtocol.LOG.isDebugEnabled()) {
    DataTransferProtocol.LOG.debug(
        getClass().getSimpleName() + ": " + b + " (numBytes=" +
            b.getNumBytes() + ")" + ", stage=" + stage + ", clientname=" +
            clientname + ", targests=" + Arrays.asList(targets));
  }
  this.targets = targets;
  this.b = b;
  this.stage = stage;
  BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
  bpReg = bpos.bpRegistration;
  this.clientname = clientname;
}
 
Developer ID: hopshadoop; Project: hops; Lines of code: 20; Source file: DataNode.java

Example 8: testWrite

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
private void testWrite(ExtendedBlock block, BlockConstructionStage stage,
    long newGS, String description, boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
      new DatanodeInfo[1], null, stage, 0, block.getNumBytes(),
      block.getNumBytes(), newGS, DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
 
Developer ID: hopshadoop; Project: hops; Lines of code: 19; Source file: TestDataTransferProtocol.java

Example 9: testWrite

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
      new DatanodeInfo[1], null, stage,
      0, block.getNumBytes(), block.getNumBytes(), newGS,
      DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
 
Developer ID: chendave; Project: hadoop-TCP; Lines of code: 20; Source file: TestDataTransferProtocol.java

Example 10: initDataStreaming

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
/**
 * Initialize for data streaming
 */
private void initDataStreaming() {
  this.setName("DataStreamer for file " + src +
      " block " + block);
  response = new ResponseProcessor(nodes);
  response.start();
  stage = BlockConstructionStage.DATA_STREAMING;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 11; Source file: DFSOutputStream.java

Example 11: endBlock

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
private void endBlock() {
  if(DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Closing old block " + block);
  }
  this.setName("DataStreamer for file " + src);
  closeResponder();
  closeStream();
  setPipeline(null, null, null);
  stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 11; Source file: DFSOutputStream.java
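Taken together, Examples 10, 11, and 15 trace the stage machine a streamer walks through for each block. A minimal paraphrase of just the transitions visible on this page (the full state machine also has append and recovery paths not shown here):

BlockConstructionStage stage;
stage = BlockConstructionStage.PIPELINE_SETUP_CREATE; // new streamer / new block (Examples 11 and 15)
// ...the datanode pipeline is established...
stage = BlockConstructionStage.DATA_STREAMING;        // initDataStreaming (Example 10)
// ...packets are streamed and acknowledged...
stage = BlockConstructionStage.PIPELINE_SETUP_CREATE; // endBlock resets for the next block (Example 11)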

Example 12: transferReplicaForPipelineRecovery

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
/**
 * Transfer a replica to the datanode targets.
 * @param b the block to transfer.
 *          The corresponding replica must be an RBW or a Finalized.
 *          Its GS and numBytes will be set to
 *          the stored GS and the visible length. 
 * @param targets targets to transfer the block to
 * @param client client name
 */
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
    final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
    final String client) throws IOException {
  final long storedGS;
  final long visible;
  final BlockConstructionStage stage;

  //get replica information
  synchronized(data) {
    Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
        b.getBlockId());
    if (null == storedBlock) {
      throw new IOException(b + " not found in datanode.");
    }
    storedGS = storedBlock.getGenerationStamp();
    if (storedGS < b.getGenerationStamp()) {
      throw new IOException(storedGS
          + " = storedGS < b.getGenerationStamp(), b=" + b);
    }
    // Update the genstamp with storedGS
    b.setGenerationStamp(storedGS);
    if (data.isValidRbw(b)) {
      stage = BlockConstructionStage.TRANSFER_RBW;
    } else if (data.isValidBlock(b)) {
      stage = BlockConstructionStage.TRANSFER_FINALIZED;
    } else {
      final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
      throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
    }
    visible = data.getReplicaVisibleLength(b);
  }
  //set visible length
  b.setNumBytes(visible);

  if (targets.length > 0) {
    new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 48; Source file: DataNode.java

Example 13: writeBlock

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
void writeBlock(ExtendedBlock block, BlockConstructionStage stage,
    long newGS, DataChecksum checksum) throws IOException {
  sender.writeBlock(block, StorageType.DEFAULT,
      BlockTokenSecretManager.DUMMY_TOKEN, "cl",
      new DatanodeInfo[1], new StorageType[1], null, stage,
      0, block.getNumBytes(), block.getNumBytes(), newGS,
      checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
}
 
Developer ID: naver; Project: hadoop; Lines of code: 9; Source file: TestDataTransferProtocol.java

Example 14: markExternalErrorOnStreamers

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
private Set<StripedDataStreamer> markExternalErrorOnStreamers() {
  Set<StripedDataStreamer> healthySet = new HashSet<>();
  for (int i = 0; i < numAllBlocks; i++) {
    final StripedDataStreamer streamer = getStripedDataStreamer(i);
    if (streamer.isHealthy() && isStreamerWriting(i)) {
      Preconditions.checkState(
          streamer.getStage() == BlockConstructionStage.DATA_STREAMING,
          "streamer: " + streamer);
      streamer.setExternalError();
      healthySet.add(streamer);
    }
  }
  return healthySet;
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines of code: 15; Source file: DFSStripedOutputStream.java

Example 15: DataStreamer

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; // import the dependent package/class
/**
 * construction with tracing info
 */
DataStreamer(HdfsFileStatus stat, ExtendedBlock block, DFSClient dfsClient,
             String src, Progressable progress, DataChecksum checksum,
             AtomicReference<CachingStrategy> cachingStrategy,
             ByteArrayManager byteArrayManage, String[] favoredNodes) {
  this(stat, block, dfsClient, src, progress, checksum, cachingStrategy,
      byteArrayManage, false, favoredNodes);
  stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines of code: 12; Source file: DataStreamer.java


Note: The org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their authors; copyright in the source code remains with the original authors. Consult each project's license before distributing or reusing the code, and do not reproduce this article without permission.