

Java OpWriteBlockProto Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto, drawn from open-source projects. If you are looking for concrete guidance on what OpWriteBlockProto does and how it is used in practice, the curated examples below should help.


The OpWriteBlockProto class belongs to the org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos package. Fifteen code examples of the class are shown below, ordered by popularity by default.
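Before the examples, one orientation note: OpWriteBlockProto is the protobuf message behind the OP_WRITE_BLOCK data transfer operation, and on the wire it is varint-length-delimited. The sketch below is a hypothetical helper, not code from any project shown here; it reads one such message off a stream using the stock protobuf parseDelimitedFrom method, which is equivalent in effect to the vintPrefixed(in) + parseFrom pairing used in the Receiver examples that follow.

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;

// Hypothetical helper: read a single varint-delimited OpWriteBlockProto
// from an input stream; parseDelimitedFrom returns null at end of stream.
final class OpWriteBlockReadSketch {
  static OpWriteBlockProto readOne(InputStream in) throws IOException {
    OpWriteBlockProto proto = OpWriteBlockProto.parseDelimitedFrom(in);
    if (proto == null) {
      throw new IOException("unexpected end of stream");
    }
    return proto;
  }
}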

Example 1: initialize

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    Enum<?> storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
    DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
    throws IOException {
  Promise<Void> saslPromise = channel.eventLoop().newPromise();
  trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
  saslPromise.addListener(new FutureListener<Void>() {

    @Override
    public void operationComplete(Future<Void> future) throws Exception {
      if (future.isSuccess()) {
        // setup response processing pipeline first, then send request.
        processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
        requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
      } else {
        promise.tryFailure(future.cause());
      }
    }
  });
}
 
Developer: apache, Project: hbase, Lines: 21, Source: FanOutOneBlockAsyncDFSOutputHelper.java
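A design note on this example: the response-processing handler is installed on the channel before the write request is sent, so an early DataNode reply cannot race past an unregistered handler. The sketch below is a hypothetical standalone helper using only real Netty types; it isolates the chain-on-success pattern the listener implements.

import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.Promise;

// Hypothetical helper: run sendRequest only once the SASL promise succeeds;
// otherwise propagate the failure to the caller's promise.
final class ChainOnSuccessSketch {
  static <T> void chain(Promise<Void> saslPromise, Runnable sendRequest,
      Promise<T> callerPromise) {
    saslPromise.addListener(new FutureListener<Void>() {
      @Override
      public void operationComplete(Future<Void> future) {
        if (future.isSuccess()) {
          sendRequest.run(); // the response handler is already in the pipeline
        } else {
          callerPromise.tryFailure(future.cause()); // surface the SASL error
        }
      }
    });
  }
}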

Example 2: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      proto.getPipelineSize(),
      proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
      proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()),
      (proto.hasCachingStrategy() ?
          getCachingStrategy(proto.getCachingStrategy()) :
        CachingStrategy.newDefaultStrategy()));
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 18, Source: Receiver.java

Example 3: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
          CachingStrategy.newDefaultStrategy()),
        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
        (proto.hasPinning() ? proto.getPinning() : false),
        (PBHelper.convertBooleanList(proto.getTargetPinningsList())));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: Receiver.java

Example 4: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelperClient.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelperClient.convertStorageType(proto.getStorageType()),
        PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelperClient.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
          CachingStrategy.newDefaultStrategy()),
        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
        (proto.hasPinning() ? proto.getPinning() : false),
        (PBHelperClient.convertBooleanList(proto.getTargetPinningsList())));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 30, Source: Receiver.java

Example 5: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
          CachingStrategy.newDefaultStrategy()),
        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 28, Source: Receiver.java

Example 6: writeBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);
  
  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .addAllTargets(PBHelper.convert(targets, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto);
  
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 35, Source: Sender.java

Example 7: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      proto.getPipelineSize(),
      proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
      proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()));
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 15, Source: Receiver.java

Example 8: writeBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken, final String clientName,
    final DatanodeInfo[] targets, final DatanodeInfo source,
    final BlockConstructionStage stage, final int pipelineSize,
    final long minBytesRcvd, final long maxBytesRcvd,
    final long latestGenerationStamp, DataChecksum requestedChecksum)
    throws IOException {
  ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  
  ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto =
      OpWriteBlockProto.newBuilder().setHeader(header)
          .addAllTargets(PBHelper.convert(targets, 1))
          .setStage(toProto(stage)).setPipelineSize(pipelineSize)
          .setMinBytesRcvd(minBytesRcvd).setMaxBytesRcvd(maxBytesRcvd)
          .setLatestGenerationStamp(latestGenerationStamp)
          .setRequestedChecksum(checksumProto);
  
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
 
Developer: hopshadoop, Project: hops, Lines: 29, Source: Sender.java

Example 9: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
/**
 * Receive OP_WRITE_BLOCK
 */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto =
      OpWriteBlockProto.parseFrom(vintPrefixed(in));
  writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()), fromProto(proto.getStage()),
      proto.getPipelineSize(), proto.getMinBytesRcvd(),
      proto.getMaxBytesRcvd(), proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()));
}
 
Developer: hopshadoop, Project: hops, Lines: 16, Source: Receiver.java

Example 10: requestWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
private static void requestWriteBlock(Channel channel, Enum<?> storageType,
    OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
  OpWriteBlockProto proto = STORAGE_TYPE_SETTER.set(writeBlockProtoBuilder, storageType).build();
  int protoLen = proto.getSerializedSize();
  ByteBuf buffer =
      channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen);
  buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  buffer.writeByte(Op.WRITE_BLOCK.code);
  proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
  channel.writeAndFlush(buffer);
}
 
Developer: apache, Project: hbase, Lines: 12, Source: FanOutOneBlockAsyncDFSOutputHelper.java
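For context on the three bytes reserved ahead of the message here: they are the 2-byte data transfer protocol version plus the 1-byte opcode, followed by the varint length prefix and the OpWriteBlockProto body. The sketch below is a hypothetical helper that writes the same frame with plain java.io instead of Netty; the constants and the writeDelimitedTo call are the real HDFS and protobuf APIs.

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;

// Hypothetical helper mirroring requestWriteBlock's frame layout:
// [2-byte version][1-byte opcode][varint length][OpWriteBlockProto bytes]
final class WriteBlockFrameSketch {
  static void writeFrame(OutputStream rawOut, OpWriteBlockProto proto) throws IOException {
    DataOutputStream out = new DataOutputStream(rawOut);
    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // protocol version
    out.writeByte(Op.WRITE_BLOCK.code);                         // operation code
    proto.writeDelimitedTo(out);                                // varint-prefixed body
    out.flush();
  }
}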

Example 11: writeBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);
  
  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .addAllTargets(PBHelper.convert(targets, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(getCachingStrategy(cachingStrategy));
  
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 37, Source: Sender.java

Example 12: writeBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType, 
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes, 
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);
  
  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .setStorageType(PBHelper.convertStorageType(storageType))
    .addAllTargets(PBHelper.convert(targets, 1))
    .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(getCachingStrategy(cachingStrategy))
    .setAllowLazyPersist(allowLazyPersist)
    .setPinning(pinning)
    .addAllTargetPinnings(PBHelper.convert(targetPinnings, 1));
  
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
 
Developer: naver, Project: hadoop, Lines: 47, Source: Sender.java

Example 13: fromProto

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  return BlockConstructionStage.valueOf(stage.name());
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: DataTransferProtoUtil.java

Example 14: toProto

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: DataTransferProtoUtil.java
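Examples 13 and 14 work only because the protobuf enum OpWriteBlockProto.BlockConstructionStage and the Java enum BlockConstructionStage declare identically named constants, so valueOf(stage.name()) maps cleanly in both directions. The round-trip check below is a hypothetical test, not project code; the two enum types are the real ones.

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;

// Hypothetical check: Java enum -> proto enum -> Java enum, by name.
final class StageRoundTripSketch {
  public static void main(String[] args) {
    for (BlockConstructionStage stage : BlockConstructionStage.values()) {
      OpWriteBlockProto.BlockConstructionStage p =
          OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
      if (BlockConstructionStage.valueOf(p.name()) != stage) {
        throw new AssertionError("name mismatch for " + stage);
      }
    }
    System.out.println("all stages round-trip by name");
  }
}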

Example 15: writeBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; // import the required package/class
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
      .setHeader(header)
      .setStorageType(PBHelperClient.convertStorageType(storageType))
      .addAllTargets(PBHelperClient.convert(targets, 1))
      .addAllTargetStorageTypes(
          PBHelperClient.convertStorageTypes(targetStorageTypes, 1))
      .setStage(toProto(stage))
      .setPipelineSize(pipelineSize)
      .setMinBytesRcvd(minBytesRcvd)
      .setMaxBytesRcvd(maxBytesRcvd)
      .setLatestGenerationStamp(latestGenerationStamp)
      .setRequestedChecksum(checksumProto)
      .setCachingStrategy(getCachingStrategy(cachingStrategy))
      .setAllowLazyPersist(allowLazyPersist)
      .setPinning(pinning)
      .addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1));

  if (source != null) {
    proto.setSource(PBHelperClient.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 48, Source: Sender.java


Note: The org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Refer to each project's license before redistributing or using the code, and do not republish this article without permission.