

Java BlockConstructionStage.TRANSFER_RBW Code Examples

This article collects typical usage examples of the Java enum constant org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.TRANSFER_RBW. If you have been wondering what BlockConstructionStage.TRANSFER_RBW is and how it is used, the code examples selected here may help. You can also explore further usage examples of the enclosing enum, org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.


Five code examples using BlockConstructionStage.TRANSFER_RBW are shown below, ordered by popularity by default.
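
Before the examples, here is a minimal hypothetical sketch of the decision that runs through all of them: a replica that is still being written (RBW) is transferred with TRANSFER_RBW, while an already finalized replica is transferred with TRANSFER_FINALIZED. The helper class and method name below are invented for illustration only; the FsDatasetSpi calls (isValidRbw, isValidBlock) and the enum constants are the same ones used in the examples that follow.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

class TransferStageChooser { // hypothetical helper, not part of Hadoop
  /**
   * Picks the transfer stage for a replica: TRANSFER_RBW for a valid
   * replica-being-written, TRANSFER_FINALIZED for a finalized replica.
   */
  static BlockConstructionStage chooseStage(FsDatasetSpi<?> data, ExtendedBlock b)
      throws IOException {
    if (data.isValidRbw(b)) {
      // replica is still being written -> transfer it as RBW
      return BlockConstructionStage.TRANSFER_RBW;
    } else if (data.isValidBlock(b)) {
      // replica is finalized -> transfer it as a finalized block
      return BlockConstructionStage.TRANSFER_FINALIZED;
    }
    throw new IOException(b + " is neither an RBW nor a finalized replica");
  }
}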

Example 1: transferReplicaForPipelineRecovery

/**
 * Transfer a replica to the datanode targets.
 * @param b the block to transfer.
 *          The corresponding replica must be an RBW or a Finalized.
 *          Its GS and numBytes will be set to
 *          the stored GS and the visible length. 
 * @param targets targets to transfer the block to
 * @param client client name
 */
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
    final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
    final String client) throws IOException {
  final long storedGS;
  final long visible;
  final BlockConstructionStage stage;

  //get replica information
  synchronized(data) {
    Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
        b.getBlockId());
    if (null == storedBlock) {
      throw new IOException(b + " not found in datanode.");
    }
    storedGS = storedBlock.getGenerationStamp();
    if (storedGS < b.getGenerationStamp()) {
      throw new IOException(storedGS
          + " = storedGS < b.getGenerationStamp(), b=" + b);
    }
    // Update the genstamp with storedGS
    b.setGenerationStamp(storedGS);
    if (data.isValidRbw(b)) {
      stage = BlockConstructionStage.TRANSFER_RBW;
    } else if (data.isValidBlock(b)) {
      stage = BlockConstructionStage.TRANSFER_FINALIZED;
    } else {
      final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
      throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
    }
    visible = data.getReplicaVisibleLength(b);
  }
  //set visible length
  b.setNumBytes(visible);

  if (targets.length > 0) {
    new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
  }
}
 
Developer: naver, Project: hadoop, Lines: 47, Source: DataNode.java

Example 2: transferReplicaForPipelineRecovery

/**
 * Transfer a replica to the datanode targets.
 * @param b the block to transfer.
 *          The corresponding replica must be an RBW or a Finalized.
 *          Its GS and numBytes will be set to
 *          the stored GS and the visible length. 
 * @param targets targets to transfer the block to
 * @param client client name
 */
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
    final DatanodeInfo[] targets, final String client) throws IOException {
  final long storedGS;
  final long visible;
  final BlockConstructionStage stage;

  //get replica information
  synchronized(data) {
    Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
        b.getBlockId());
    if (null == storedBlock) {
      throw new IOException(b + " not found in datanode.");
    }
    storedGS = storedBlock.getGenerationStamp();
    if (storedGS < b.getGenerationStamp()) {
      throw new IOException(storedGS
          + " = storedGS < b.getGenerationStamp(), b=" + b);
    }
    // Update the genstamp with storedGS
    b.setGenerationStamp(storedGS);
    if (data.isValidRbw(b)) {
      stage = BlockConstructionStage.TRANSFER_RBW;
    } else if (data.isValidBlock(b)) {
      stage = BlockConstructionStage.TRANSFER_FINALIZED;
    } else {
      final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
      throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
    }
    visible = data.getReplicaVisibleLength(b);
  }
  //set visible length
  b.setNumBytes(visible);

  if (targets.length > 0) {
    new DataTransfer(targets, b, stage, client).run();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 46, Source: DataNode.java

Example 3: transferReplicaForPipelineRecovery

/**
 * Transfer a replica to the datanode targets.
 *
 * @param b
 *     the block to transfer.
 *     The corresponding replica must be an RBW or a Finalized.
 *     Its GS and numBytes will be set to
 *     the stored GS and the visible length.
 * @param targets targets to transfer the block to
 * @param client client name
 */
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
    final DatanodeInfo[] targets, final String client) throws IOException {
  final long storedGS;
  final long visible;
  final BlockConstructionStage stage;

  //get replica information
  synchronized (data) {
    Block storedBlock =
        data.getStoredBlock(b.getBlockPoolId(), b.getBlockId());
    if (null == storedBlock) {
      throw new IOException(b + " not found in datanode.");
    }
    storedGS = storedBlock.getGenerationStamp();
    if (storedGS < b.getGenerationStamp()) {
      throw new IOException(
          storedGS + " = storedGS < b.getGenerationStamp(), b=" + b);
    }
    // Update the genstamp with storedGS
    b.setGenerationStamp(storedGS);
    if (data.isValidRbw(b)) {
      stage = BlockConstructionStage.TRANSFER_RBW;
    } else if (data.isValidBlock(b)) {
      stage = BlockConstructionStage.TRANSFER_FINALIZED;
    } else {
      final String r =
          data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
      throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
    }
    visible = data.getReplicaVisibleLength(b);
  }
  //set visible length
  b.setNumBytes(visible);

  if (targets.length > 0) {
    new DataTransfer(targets, b, stage, client).run();
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 49, Source: DataNode.java

Example 4: receiveBlock

void receiveBlock(
    DataOutputStream mirrOut, // output to next datanode
    DataInputStream mirrIn,   // input from next datanode
    DataOutputStream replyOut,  // output to previous datanode
    String mirrAddr, DataTransferThrottler throttlerArg,
    DatanodeInfo[] downstreams) throws IOException {

  syncOnClose = datanode.getDnConf().syncOnClose;
  boolean responderClosed = false;
  mirrorOut = mirrOut;
  mirrorAddr = mirrAddr;
  throttler = throttlerArg;

  try {
    if (isClient && !isTransfer) {
      responder = new Daemon(datanode.threadGroup, 
          new PacketResponder(replyOut, mirrIn, downstreams));
      responder.start(); // start thread to process responses
    }

    /* 
     * Receive until the last packet.
     */
    while (receivePacket() >= 0) {}

    // wait for all outstanding packet responses. And then
    // indicate responder to gracefully shutdown.
    // Mark that responder has been closed for future processing
    if (responder != null) {
      ((PacketResponder)responder.getRunnable()).close();
      responderClosed = true;
    }

    // If this write is for a replication or transfer-RBW/Finalized,
    // then finalize block or convert temporary to RBW.
    // For client-writes, the block is finalized in the PacketResponder.
    if (isDatanode || isTransfer) {
      // close the block/crc files
      close();
      block.setNumBytes(replicaInfo.getNumBytes());

      if (stage == BlockConstructionStage.TRANSFER_RBW) {
        // for TRANSFER_RBW, convert temporary to RBW
        datanode.data.convertTemporaryToRbw(block);
      } else {
        // for isDatanode or TRANSFER_FINALIZED
        // Finalize the block.
        datanode.data.finalizeBlock(block);
      }
      datanode.metrics.incrBlocksWritten();
    }

  } catch (IOException ioe) {
    LOG.info("Exception for " + block, ioe);
    throw ioe;
  } finally {
    if (!responderClosed) { // Abnormal termination of the flow above
      IOUtils.closeStream(this);
      if (responder != null) {
        responder.interrupt();
      }
      cleanupBlock();
    }
    if (responder != null) {
      try {
        responder.join(datanode.getDnConf().getXceiverStopTimeout());
        if (responder.isAlive()) {
          String msg = "Join on responder thread " + responder
              + " timed out";
          LOG.warn(msg + "\n" + StringUtils.getStackTrace(responder));
          throw new IOException(msg);
        }
      } catch (InterruptedException e) {
        responder.interrupt();
        throw new IOException("Interrupted receiveBlock");
      }
      responder = null;
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 80, Source: BlockReceiver.java

Example 5: receiveBlock

void receiveBlock(DataOutputStream mirrOut, // output to next datanode
    DataInputStream mirrIn,   // input from next datanode
    DataOutputStream replyOut,  // output to previous datanode
    String mirrAddr, DataTransferThrottler throttlerArg,
    DatanodeInfo[] downstreams) throws IOException {

  syncOnClose = datanode.getDnConf().syncOnClose;
  boolean responderClosed = false;
  mirrorOut = mirrOut;
  mirrorAddr = mirrAddr;
  throttler = throttlerArg;

  try {
    if (isClient && !isTransfer) {
      responder = new Daemon(datanode.threadGroup,
          new PacketResponder(replyOut, mirrIn, downstreams));
      responder.start(); // start thread to process responses
    }

    /* 
     * Receive until the last packet.
     */
    while (receivePacket() >= 0) {
    }

    // wait for all outstanding packet responses. And then
    // indicate responder to gracefully shutdown.
    // Mark that responder has been closed for future processing
    if (responder != null) {
      ((PacketResponder) responder.getRunnable()).close();
      responderClosed = true;
    }

    // If this write is for a replication or transfer-RBW/Finalized,
    // then finalize block or convert temporary to RBW.
    // For client-writes, the block is finalized in the PacketResponder.
    if (isDatanode || isTransfer) {
      // close the block/crc files
      close();
      block.setNumBytes(replicaInfo.getNumBytes());

      if (stage == BlockConstructionStage.TRANSFER_RBW) {
        // for TRANSFER_RBW, convert temporary to RBW
        datanode.data.convertTemporaryToRbw(block);
      } else {
        // for isDatanode or TRANSFER_FINALIZED
        // Finalize the block.
        datanode.data.finalizeBlock(block);
      }
      datanode.metrics.incrBlocksWritten();
    }

  } catch (IOException ioe) {
    LOG.info("Exception for " + block, ioe);
    throw ioe;
  } finally {
    if (!responderClosed) { // Abnormal termination of the flow above
      IOUtils.closeStream(this);
      if (responder != null) {
        responder.interrupt();
      }
      cleanupBlock();
    }
    if (responder != null) {
      try {
        responder.join();
      } catch (InterruptedException e) {
        responder.interrupt();
        throw new IOException("Interrupted receiveBlock");
      }
      responder = null;
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 74, Source: BlockReceiver.java


Note: The org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.TRANSFER_RBW examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not repost without permission.