当前位置: 首页>>代码示例>>Java>>正文


Java OpWriteBlockProto.parseFrom方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.parseFrom方法的典型用法代码示例。如果您正苦于以下问题:Java OpWriteBlockProto.parseFrom方法的具体用法?Java OpWriteBlockProto.parseFrom怎么用?Java OpWriteBlockProto.parseFrom使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto的用法示例。


在下文中一共展示了OpWriteBlockProto.parseFrom方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; //导入方法依赖的package包/类
/**
 * Receive an OP_WRITE_BLOCK request: decode the vint-length-prefixed
 * {@code OpWriteBlockProto} from {@code in} and dispatch it to
 * {@link #writeBlock}.
 *
 * @param in stream positioned at the start of the encoded request
 * @throws IOException if the proto cannot be read or the write fails
 */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  // The caching strategy is optional on the wire; fall back to the default.
  final CachingStrategy cachingStrategy;
  if (proto.hasCachingStrategy()) {
    cachingStrategy = getCachingStrategy(proto.getCachingStrategy());
  } else {
    cachingStrategy = CachingStrategy.newDefaultStrategy();
  }
  writeBlock(
      PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      proto.getPipelineSize(),
      proto.getMinBytesRcvd(),
      proto.getMaxBytesRcvd(),
      proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()),
      cachingStrategy);
}
 
开发者ID:chendave,项目名称:hadoop-TCP,代码行数:18,代码来源:Receiver.java

示例2: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; //导入方法依赖的package包/类
/**
 * Receive an OP_WRITE_BLOCK request: decode the proto, continue any trace
 * span carried in its header, and dispatch the decoded fields to
 * {@link #writeBlock}.
 *
 * @param in stream positioned at the start of the encoded request
 * @throws IOException if the proto cannot be read or the write fails
 */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  // Optional wire fields get safe defaults when the client omitted them.
  final CachingStrategy cachingStrategy;
  if (proto.hasCachingStrategy()) {
    cachingStrategy = getCachingStrategy(proto.getCachingStrategy());
  } else {
    cachingStrategy = CachingStrategy.newDefaultStrategy();
  }
  final boolean allowLazyPersist = proto.hasAllowLazyPersist() && proto.getAllowLazyPersist();
  final boolean pinning = proto.hasPinning() && proto.getPinning();
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(
        PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(),
        proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        cachingStrategy,
        allowLazyPersist,
        pinning,
        PBHelper.convertBooleanList(proto.getTargetPinningsList()));
  } finally {
    // Close the span even when writeBlock throws; it may be absent.
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:Receiver.java

示例3: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; //导入方法依赖的package包/类
/**
 * Receive an OP_WRITE_BLOCK request: decode the proto, continue any trace
 * span from its header, and forward all decoded fields to
 * {@link #writeBlock}.
 *
 * @param in stream positioned at the start of the encoded request
 * @throws IOException if the proto cannot be read or the write fails
 */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelperClient.convert(proto.getTargetsList());
  // Resolve optional wire fields to their documented defaults up front.
  final CachingStrategy cachingStrategy;
  if (proto.hasCachingStrategy()) {
    cachingStrategy = getCachingStrategy(proto.getCachingStrategy());
  } else {
    cachingStrategy = CachingStrategy.newDefaultStrategy();
  }
  final boolean allowLazyPersist = proto.hasAllowLazyPersist() && proto.getAllowLazyPersist();
  final boolean pinning = proto.hasPinning() && proto.getPinning();
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(
        PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelperClient.convertStorageType(proto.getStorageType()),
        PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelperClient.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(),
        proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        cachingStrategy,
        allowLazyPersist,
        pinning,
        PBHelperClient.convertBooleanList(proto.getTargetPinningsList()));
  } finally {
    // Always release the span; it can be null when tracing is disabled.
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:30,代码来源:Receiver.java

示例4: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; //导入方法依赖的package包/类
/**
 * Receive an OP_WRITE_BLOCK request: decode the proto, continue any trace
 * span carried in its header, and dispatch to {@link #writeBlock}.
 *
 * @param in stream positioned at the start of the encoded request
 * @throws IOException if the proto cannot be read or the write fails
 */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  // Optional fields default when absent from the request.
  final CachingStrategy cachingStrategy;
  if (proto.hasCachingStrategy()) {
    cachingStrategy = getCachingStrategy(proto.getCachingStrategy());
  } else {
    cachingStrategy = CachingStrategy.newDefaultStrategy();
  }
  final boolean allowLazyPersist = proto.hasAllowLazyPersist() && proto.getAllowLazyPersist();
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(
        PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(),
        proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        cachingStrategy,
        allowLazyPersist);
  } finally {
    // The span is null when tracing is off; close it otherwise.
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:28,代码来源:Receiver.java

示例5: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; //导入方法依赖的package包/类
/**
 * Receive an OP_WRITE_BLOCK request: decode the vint-length-prefixed
 * proto from {@code in} and hand its fields to {@link #writeBlock}.
 *
 * @param in stream positioned at the start of the encoded request
 * @throws IOException if the proto cannot be read or the write fails
 */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  // Pull scalar fields into locals to keep the dispatch call readable.
  final String clientName = proto.getHeader().getClientName();
  final int pipelineSize = proto.getPipelineSize();
  final long minBytesRcvd = proto.getMinBytesRcvd();
  final long maxBytesRcvd = proto.getMaxBytesRcvd();
  final long latestGenerationStamp = proto.getLatestGenerationStamp();
  writeBlock(
      PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      clientName,
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      pipelineSize,
      minBytesRcvd,
      maxBytesRcvd,
      latestGenerationStamp,
      fromProto(proto.getRequestedChecksum()));
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:15,代码来源:Receiver.java

示例6: opWriteBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; //导入方法依赖的package包/类
/**
 * Receive an OP_WRITE_BLOCK request.
 *
 * <p>Decodes the vint-length-prefixed {@code OpWriteBlockProto} and
 * dispatches the decoded fields to {@link #writeBlock}.
 *
 * @param in stream positioned at the start of the encoded request
 * @throws IOException if the proto cannot be read or the write fails
 */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto =
      OpWriteBlockProto.parseFrom(vintPrefixed(in));
  // Header sub-messages are reached through the shared base header.
  writeBlock(
      PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      proto.getPipelineSize(),
      proto.getMinBytesRcvd(),
      proto.getMaxBytesRcvd(),
      proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()));
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:16,代码来源:Receiver.java


注:本文中的org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.parseFrom方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。