

Java StorageBlockReportProto Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto. If you are wondering how the StorageBlockReportProto class is used in practice, the selected examples below should help.


The StorageBlockReportProto class belongs to the org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos package. Ten code examples of the class are shown below, sorted by popularity by default.
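
Before the collected examples, a minimal sketch of the typical builder pattern may help orient the reader. This sketch is not taken from any of the projects below: the class name StorageBlockReportProtoSketch and the helper method toProto are hypothetical, and it assumes the Hadoop 2.x PBHelper and StorageBlockReport classes used in Examples 1 and 2, where StorageBlockReport.getBlocks() returns a long[].

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

public class StorageBlockReportProtoSketch {
  /**
   * Convert one StorageBlockReport into its protobuf form, mirroring the loop
   * body of the client-side translators shown in Examples 1 and 2 below.
   */
  static StorageBlockReportProto toProto(StorageBlockReport r) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder()
        .setStorage(PBHelper.convert(r.getStorage())); // DatanodeStorage -> DatanodeStorageProto
    for (long value : r.getBlocks()) {                 // block list encoded as longs
      reportBuilder.addBlocks(value);
    }
    return reportBuilder.build();
  }
}

The server side reverses this conversion by reading getStorage() and getBlocksList() from the received StorageBlockReportProto, as Examples 8 and 9 show.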

Example 1: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports, BlockReportContext context)
      throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  
  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
    long[] blocks = r.getBlocks();
    for (int i = 0; i < blocks.length; i++) {
      reportBuilder.addBlocks(blocks[i]);
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 27, Source file: DatanodeProtocolClientSideTranslatorPB.java

Example 2: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports) throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  
  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
    long[] blocks = r.getBlocks();
    for (int i = 0; i < blocks.length; i++) {
      reportBuilder.addBlocks(blocks[i]);
    }
    builder.addReports(reportBuilder.build());
  }
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 25, Source file: DatanodeProtocolClientSideTranslatorPB.java

Example 3: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports) throws IOException {
  BlockReportRequestProto.Builder builder =
      BlockReportRequestProto.newBuilder()
          .setRegistration(PBHelper.convert(registration))
          .setBlockPoolId(poolId);

  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder =
        StorageBlockReportProto.newBuilder()
            .setStorage(PBHelper.convert(r.getStorage()))
            .setReport(PBHelper.convert(r.getReport()));
    builder.addReports(reportBuilder.build());
  }
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 24, Source file: DatanodeProtocolClientSideTranslatorPB.java

Example 4: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports, BlockReportContext context)
      throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  
  boolean useBlocksBuffer = registration.getNamespaceInfo()
      .isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);

  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
    BlockListAsLongs blocks = r.getBlocks();
    if (useBlocksBuffer) {
      reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
      reportBuilder.addAllBlocksBuffers(blocks.getBlocksBuffers());
    } else {
      for (long value : blocks.getBlockListAsLongs()) {
        reportBuilder.addBlocks(value);
      }
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source file: DatanodeProtocolClientSideTranslatorPB.java

Example 5: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public BlockReportResponseProto blockReport(RpcController controller,
    BlockReportRequestProto request) throws ServiceException {
  DatanodeCommand cmd = null;
  StorageBlockReport[] report = 
      new StorageBlockReport[request.getReportsCount()];
  
  int index = 0;
  for (StorageBlockReportProto s : request.getReportsList()) {
    final BlockListAsLongs blocks;
    if (s.hasNumberOfBlocks()) { // new style buffer based reports
      int num = (int)s.getNumberOfBlocks();
      Preconditions.checkState(s.getBlocksCount() == 0,
          "cannot send both blocks list and buffers");
      blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList());
    } else {
      blocks = BlockListAsLongs.decodeLongs(s.getBlocksList());
    }
    report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
        blocks);
  }
  try {
    cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), report,
        request.hasContext() ?
            PBHelper.convert(request.getContext()) : null);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  BlockReportResponseProto.Builder builder = 
      BlockReportResponseProto.newBuilder();
  if (cmd != null) {
    builder.setCmd(PBHelper.convert(cmd));
  }
  return builder.build();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 37, Source file: DatanodeProtocolServerSideTranslatorPB.java

Example 6: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports, BlockReportContext context)
      throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  
  boolean useBlocksBuffer = registration.getNamespaceInfo()
      .isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);

  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelperClient.convert(r.getStorage()));
    BlockListAsLongs blocks = r.getBlocks();
    if (useBlocksBuffer) {
      reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
      reportBuilder.addAllBlocksBuffers(blocks.getBlocksBuffers());
    } else {
      for (long value : blocks.getBlockListAsLongs()) {
        reportBuilder.addBlocks(value);
      }
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 35, Source file: DatanodeProtocolClientSideTranslatorPB.java

Example 7: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public BlockReportResponseProto blockReport(RpcController controller,
    BlockReportRequestProto request) throws ServiceException {
  DatanodeCommand cmd = null;
  StorageBlockReport[] report = 
      new StorageBlockReport[request.getReportsCount()];
  
  int index = 0;
  for (StorageBlockReportProto s : request.getReportsList()) {
    final BlockListAsLongs blocks;
    if (s.hasNumberOfBlocks()) { // new style buffer based reports
      int num = (int)s.getNumberOfBlocks();
      Preconditions.checkState(s.getBlocksCount() == 0,
          "cannot send both blocks list and buffers");
      blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList());
    } else {
      blocks = BlockListAsLongs.decodeLongs(s.getBlocksList());
    }
    report[index++] = new StorageBlockReport(PBHelperClient.convert(s.getStorage()),
        blocks);
  }
  try {
    cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), report,
        request.hasContext() ?
            PBHelper.convert(request.getContext()) : null);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  BlockReportResponseProto.Builder builder = 
      BlockReportResponseProto.newBuilder();
  if (cmd != null) {
    builder.setCmd(PBHelper.convert(cmd));
  }
  return builder.build();
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 37, Source file: DatanodeProtocolServerSideTranslatorPB.java

Example 8: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public BlockReportResponseProto blockReport(RpcController controller,
    BlockReportRequestProto request) throws ServiceException {
  DatanodeCommand cmd = null;
  StorageBlockReport[] report = 
      new StorageBlockReport[request.getReportsCount()];
  
  int index = 0;
  for (StorageBlockReportProto s : request.getReportsList()) {
    List<Long> blockIds = s.getBlocksList();
    long[] blocks = new long[blockIds.size()];
    for (int i = 0; i < blockIds.size(); i++) {
      blocks[i] = blockIds.get(i);
    }
    report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
        blocks);
  }
  try {
    cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), report,
        request.hasContext() ?
            PBHelper.convert(request.getContext()) : null);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  BlockReportResponseProto.Builder builder = 
      BlockReportResponseProto.newBuilder();
  if (cmd != null) {
    builder.setCmd(PBHelper.convert(cmd));
  }
  return builder.build();
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 33, Source file: DatanodeProtocolServerSideTranslatorPB.java

Example 9: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public BlockReportResponseProto blockReport(RpcController controller,
    BlockReportRequestProto request) throws ServiceException {
  DatanodeCommand cmd = null;
  StorageBlockReport[] report = 
      new StorageBlockReport[request.getReportsCount()];
  
  int index = 0;
  for (StorageBlockReportProto s : request.getReportsList()) {
    List<Long> blockIds = s.getBlocksList();
    long[] blocks = new long[blockIds.size()];
    for (int i = 0; i < blockIds.size(); i++) {
      blocks[i] = blockIds.get(i);
    }
    report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
        blocks);
  }
  try {
    cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), report);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  BlockReportResponseProto.Builder builder = 
      BlockReportResponseProto.newBuilder();
  if (cmd != null) {
    builder.setCmd(PBHelper.convert(cmd));
  }
  return builder.build();
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 31, Source file: DatanodeProtocolServerSideTranslatorPB.java

Example 10: blockReport

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; // import the required package/class
@Override
public BlockReportResponseProto blockReport(RpcController controller,
    BlockReportRequestProto request) throws ServiceException {
  DatanodeCommand cmd = null;
  StorageBlockReport[] storageBlockReports =
      new StorageBlockReport[request.getReportsCount()];
  
  int index = 0;
  for (StorageBlockReportProto s : request.getReportsList()) {
    DatanodeProtocolProtos.BlockReportProto report = s.getReport();
    storageBlockReports[index++] =
        new StorageBlockReport(PBHelper.convert(s.getStorage()),
            PBHelper.convert(report));
  }
  try {
    cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), storageBlockReports);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  BlockReportResponseProto.Builder builder =
      BlockReportResponseProto.newBuilder();
  if (cmd != null) {
    builder.setCmd(PBHelper.convert(cmd));
  }
  return builder.build();
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 28, Source file: DatanodeProtocolServerSideTranslatorPB.java


Note: The org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.