

Java BlockListAsLongs.getBlockListAsLongs Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.BlockListAsLongs.getBlockListAsLongs. If you are unsure what BlockListAsLongs.getBlockListAsLongs does, how to call it, or where to find working code, the selected examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.BlockListAsLongs.


Seven code examples of the BlockListAsLongs.getBlockListAsLongs method are shown below, sorted by popularity by default.
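For orientation before the individual examples, here is a minimal sketch of the pattern most of them share: fetch the per-storage BlockListAsLongs map from the DataNode's dataset, flatten each entry with getBlockListAsLongs(), and wrap the resulting long[] in a StorageBlockReport. The helper name buildReports and the method shell are hypothetical; the StorageBlockReport(DatanodeStorage, long[]) constructor matches the examples on this page, but newer Hadoop versions changed it to accept a BlockListAsLongs directly, so verify against your version.

import java.util.Map;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

// Hypothetical helper: build one StorageBlockReport per storage volume.
static StorageBlockReport[] buildReports(
    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists) {
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];
  int reportIndex = 0;
  for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair :
      perVolumeBlockLists.entrySet()) {
    BlockListAsLongs blockList = kvPair.getValue();
    long[] encoded = blockList.getBlockListAsLongs(); // flatten to the legacy long[] encoding
    reports[reportIndex++] =
        new StorageBlockReport(kvPair.getKey(), encoded);
  }
  return reports;
}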

Example 1: blockReport

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports, BlockReportContext context)
      throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  
  boolean useBlocksBuffer = registration.getNamespaceInfo()
      .isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);

  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
    BlockListAsLongs blocks = r.getBlocks();
    if (useBlocksBuffer) {
      reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
      reportBuilder.addAllBlocksBuffers(blocks.getBlocksBuffers());
    } else {
      for (long value : blocks.getBlockListAsLongs()) {
        reportBuilder.addBlocks(value);
      }
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source file: DatanodeProtocolClientSideTranslatorPB.java
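As a follow-up to the useBlocksBuffer branch above: the NameNode-side translator mirrors the same choice when it reconstructs a BlockListAsLongs from the protobuf message. The sketch below is based on 2.7-era DatanodeProtocolServerSideTranslatorPB sources; the decodeBuffers/decodeLongs method names and the StorageBlockReportProto variable s are assumptions to verify against your Hadoop version.

// Server-side counterpart (sketch): rebuild a BlockListAsLongs from a
// StorageBlockReportProto s. Assumed API: BlockListAsLongs.decodeBuffers(int, List<ByteString>)
// and BlockListAsLongs.decodeLongs(List<Long>).
BlockListAsLongs blocks;
if (s.hasNumberOfBlocks()) {
  // new-style report: blocks packed into ByteString buffers
  blocks = BlockListAsLongs.decodeBuffers(
      (int) s.getNumberOfBlocks(), s.getBlocksBuffersList());
} else {
  // legacy report: blocks arrive as a flat list of longs
  blocks = BlockListAsLongs.decodeLongs(s.getBlocksList());
}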

Example 2: blockReport

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports, BlockReportContext context)
      throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  
  boolean useBlocksBuffer = registration.getNamespaceInfo()
      .isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);

  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelperClient.convert(r.getStorage()));
    BlockListAsLongs blocks = r.getBlocks();
    if (useBlocksBuffer) {
      reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
      reportBuilder.addAllBlocksBuffers(blocks.getBlocksBuffers());
    } else {
      for (long value : blocks.getBlockListAsLongs()) {
        reportBuilder.addBlocks(value);
      }
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 35, Source file: DatanodeProtocolClientSideTranslatorPB.java

Example 3: getBlockReports

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
private static StorageBlockReport[] getBlockReports(
    DataNode dn, String bpid, boolean corruptOneBlockGs,
    boolean corruptOneBlockLen) {
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];
  boolean corruptedGs = false;
  boolean corruptedLen = false;

  int reportIndex = 0;
  for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
    DatanodeStorage dnStorage = kvPair.getKey();
    BlockListAsLongs blockList = kvPair.getValue();

    // Walk the list of blocks until we find one each to corrupt the
    // generation stamp and length, if so requested.
    for (int i = 0; i < blockList.getNumberOfBlocks(); ++i) {
      if (corruptOneBlockGs && !corruptedGs) {
        blockList.corruptBlockGSForTesting(i, rand);
        LOG.info("Corrupted the GS for block ID " + i);
        corruptedGs = true;
      } else if (corruptOneBlockLen && !corruptedLen) {
        blockList.corruptBlockLengthForTesting(i, rand);
        LOG.info("Corrupted the length for block ID " + i);
        corruptedLen = true;
      } else {
        break;
      }
    }

    reports[reportIndex++] =
        new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
  }

  return reports;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 40, Source file: BlockReportTestBase.java

Example 4: testVolumeFailure

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
@Test
public void testVolumeFailure() throws Exception {
  System.out.println("Data dir: is " +  dataDir.getPath());
 
  
  // Data dir structure is dataDir/data[1-4]/[current,tmp...]
  // data1,2 is for datanode 1, data2,3 - datanode2 
  String filename = "/test.txt";
  Path filePath = new Path(filename);
  
  // we use only small number of blocks to avoid creating subdirs in the data dir..
  int filesize = block_size*blocks_num;
  DFSTestUtil.createFile(fs, filePath, filesize, repl, 1L);
  DFSTestUtil.waitReplication(fs, filePath, repl);
  System.out.println("file " + filename + "(size " +
      filesize + ") is created and replicated");
 
  // fail the volume
  // delete/make non-writable one of the directories (failed volume)
  data_fail = new File(dataDir, "data3");
  failedDir = MiniDFSCluster.getFinalizedDir(dataDir, 
      cluster.getNamesystem().getBlockPoolId());
  if (failedDir.exists() &&
      //!FileUtil.fullyDelete(failedDir)
      !deteteBlocks(failedDir)
      ) {
    throw new IOException("Could not delete hdfs directory '" + failedDir + "'");
  }
  data_fail.setReadOnly();
  failedDir.setReadOnly();
  System.out.println("Deleteing " + failedDir.getPath() + "; exist=" + failedDir.exists());
  
  // access all the blocks on the "failed" DataNode, 
  // we need to make sure that the "failed" volume is being accessed - 
  // and that will cause failure, blocks removal, "emergency" block report
  triggerFailure(filename, filesize);
  
  // make sure a block report is sent 
  DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
  String bpid = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
  
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];

  int reportIndex = 0;
  for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
      DatanodeStorage dnStorage = kvPair.getKey();
      BlockListAsLongs blockList = kvPair.getValue();
      reports[reportIndex++] =
          new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
  }
  
  cluster.getNameNodeRpc().blockReport(dnR, bpid, reports, null);

  // verify number of blocks and files...
  verify(filename, filesize);
  
  // create another file (with one volume failed).
  System.out.println("creating file test1.txt");
  Path fileName1 = new Path("/test1.txt");
  DFSTestUtil.createFile(fs, fileName1, filesize, repl, 1L);
  
  // should be able to replicate to both nodes (2 DN, repl=2)
  DFSTestUtil.waitReplication(fs, fileName1, repl);
  System.out.println("file " + fileName1.getName() + 
      " is created and replicated");
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 73, Source file: TestDataNodeVolumeFailure.java

Example 5: blockReport

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
/**
 * Report the list blocks to the Namenode
 * @throws IOException
 */
DatanodeCommand blockReport() throws IOException {
  // send block report if timer has expired.
  DatanodeCommand cmd = null;
  long startTime = now();
  if (startTime - lastBlockReport > dnConf.blockReportInterval) {

    // Flush any block information that precedes the block report. Otherwise
    // we have a chance that we will miss the delHint information
    // or we will report an RBW replica after the BlockReport already reports
    // a FINALIZED one.
    reportReceivedDeletedBlocks();

    // Create block report
    long brCreateStartTime = now();
    BlockListAsLongs bReport = dn.getFSDataset().getBlockReport(
        bpos.getBlockPoolId());

    // Send block report
    long brSendStartTime = now();
    StorageBlockReport[] report = { new StorageBlockReport(
        new DatanodeStorage(bpRegistration.getStorageID()),
        bReport.getBlockListAsLongs()) };
    cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), report);

    // Log the block report processing stats from Datanode perspective
    long brSendCost = now() - brSendStartTime;
    long brCreateCost = brSendStartTime - brCreateStartTime;
    dn.getMetrics().addBlockReport(brSendCost);
    LOG.info("BlockReport of " + bReport.getNumberOfBlocks()
        + " blocks took " + brCreateCost + " msec to generate and "
        + brSendCost + " msecs for RPC and NN processing");

    // If we have sent the first block report, then wait a random
    // time before we start the periodic block reports.
    if (resetBlockReportTime) {
      lastBlockReport = startTime - DFSUtil.getRandom().nextInt((int)(dnConf.blockReportInterval));
      resetBlockReportTime = false;
    } else {
      /* say the last block report was at 8:20:14. The current report
       * should have started around 9:20:14 (default 1 hour interval).
       * If current time is :
       *   1) normal like 9:20:18, next report should be at 10:20:14
       *   2) unexpected like 11:35:43, next report should be at 12:20:14
       */
      lastBlockReport += (now() - lastBlockReport) /
      dnConf.blockReportInterval * dnConf.blockReportInterval;
    }
    LOG.info("sent block report, processed command:" + cmd);
  }
  return cmd;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 56, Source file: BPServiceActor.java
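The rescheduling arithmetic in the else branch of Example 5 is easy to misread, so here is a small standalone illustration (not from the Hadoop sources) of what the integer division achieves: the next report time snaps back onto the original schedule grid instead of drifting by however late the current report ran. The timestamps mirror the 8:20:14 scenario in the code comment; only the formula comes from the example.

public class BlockReportScheduleDemo {
  public static void main(String[] args) {
    // Demo of: lastBlockReport += (now - lastBlockReport) / interval * interval;
    // Integer division floors the elapsed time to a whole number of intervals,
    // keeping the schedule aligned with the original start time.
    long interval = 60L * 60 * 1000;        // 1 hour, the default block report interval
    long lastBlockReport = 0L;              // last report at t = 0 (8:20:14 in the comment)

    long now = interval + 4_000;            // slightly late: 9:20:18
    lastBlockReport += (now - lastBlockReport) / interval * interval;
    System.out.println(lastBlockReport);    // 3600000 -> next report due at 10:20:14

    now = 3 * interval + 929_000;           // very late: 11:35:43
    lastBlockReport += (now - lastBlockReport) / interval * interval;
    System.out.println(lastBlockReport);    // 10800000 -> next report due at 12:20:14
  }
}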

Example 6: testVolumeFailure

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
@Test
public void testVolumeFailure() throws Exception {
  System.out.println("Data dir: is " +  dataDir.getPath());
 
  
  // Data dir structure is dataDir/data[1-4]/[current,tmp...]
  // data1,2 is for datanode 1, data2,3 - datanode2 
  String filename = "/test.txt";
  Path filePath = new Path(filename);
  
  // we use only small number of blocks to avoid creating subdirs in the data dir..
  int filesize = block_size*blocks_num;
  DFSTestUtil.createFile(fs, filePath, filesize, repl, 1L);
  DFSTestUtil.waitReplication(fs, filePath, repl);
  System.out.println("file " + filename + "(size " +
      filesize + ") is created and replicated");
 
  // fail the volume
  // delete/make non-writable one of the directories (failed volume)
  data_fail = new File(dataDir, "data3");
  failedDir = MiniDFSCluster.getFinalizedDir(dataDir, 
      cluster.getNamesystem().getBlockPoolId());
  if (failedDir.exists() &&
      //!FileUtil.fullyDelete(failedDir)
      !deteteBlocks(failedDir)
      ) {
    throw new IOException("Could not delete hdfs directory '" + failedDir + "'");
  }    
  data_fail.setReadOnly();
  failedDir.setReadOnly();
  System.out.println("Deleteing " + failedDir.getPath() + "; exist=" + failedDir.exists());
  
  // access all the blocks on the "failed" DataNode, 
  // we need to make sure that the "failed" volume is being accessed - 
  // and that will cause failure, blocks removal, "emergency" block report
  triggerFailure(filename, filesize);
  
  // make sure a block report is sent 
  DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
  String bpid = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
  
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];

  int reportIndex = 0;
  for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
      DatanodeStorage dnStorage = kvPair.getKey();
      BlockListAsLongs blockList = kvPair.getValue();
      reports[reportIndex++] =
          new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
  }
  
  cluster.getNameNodeRpc().blockReport(dnR, bpid, reports);

  // verify number of blocks and files...
  verify(filename, filesize);
  
  // create another file (with one volume failed).
  System.out.println("creating file test1.txt");
  Path fileName1 = new Path("/test1.txt");
  DFSTestUtil.createFile(fs, fileName1, filesize, repl, 1L);
  
  // should be able to replicate to both nodes (2 DN, repl=2)
  DFSTestUtil.waitReplication(fs, fileName1, repl);
  System.out.println("file " + fileName1.getName() + 
      " is created and replicated");
}
 
Developer ID: yncxcw, Project: FlexMap, Lines of code: 73, Source file: TestDataNodeVolumeFailure.java

Example 7: testVolumeFailure

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
@Test
public void testVolumeFailure() throws Exception {
  FileSystem fs = cluster.getFileSystem();
  dataDir = new File(cluster.getDataDirectory());
  System.out.println("Data dir: is " +  dataDir.getPath());
 
  
  // Data dir structure is dataDir/data[1-4]/[current,tmp...]
  // data1,2 is for datanode 1, data2,3 - datanode2 
  String filename = "/test.txt";
  Path filePath = new Path(filename);
  
  // we use only small number of blocks to avoid creating subdirs in the data dir..
  int filesize = block_size*blocks_num;
  DFSTestUtil.createFile(fs, filePath, filesize, repl, 1L);
  DFSTestUtil.waitReplication(fs, filePath, repl);
  System.out.println("file " + filename + "(size " +
      filesize + ") is created and replicated");
 
  // fail the volume
  // delete/make non-writable one of the directories (failed volume)
  data_fail = new File(dataDir, "data3");
  failedDir = MiniDFSCluster.getFinalizedDir(dataDir, 
      cluster.getNamesystem().getBlockPoolId());
  if (failedDir.exists() &&
      //!FileUtil.fullyDelete(failedDir)
      !deteteBlocks(failedDir)
      ) {
    throw new IOException("Could not delete hdfs directory '" + failedDir + "'");
  }    
  data_fail.setReadOnly();
  failedDir.setReadOnly();
  System.out.println("Deleteing " + failedDir.getPath() + "; exist=" + failedDir.exists());
  
  // access all the blocks on the "failed" DataNode, 
  // we need to make sure that the "failed" volume is being accessed - 
  // and that will cause failure, blocks removal, "emergency" block report
  triggerFailure(filename, filesize);
  
  // make sure a block report is sent 
  DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
  String bpid = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
  
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];

  int reportIndex = 0;
  for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
      DatanodeStorage dnStorage = kvPair.getKey();
      BlockListAsLongs blockList = kvPair.getValue();
      reports[reportIndex++] =
          new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
  }
  
  cluster.getNameNodeRpc().blockReport(dnR, bpid, reports);

  // verify number of blocks and files...
  verify(filename, filesize);
  
  // create another file (with one volume failed).
  System.out.println("creating file test1.txt");
  Path fileName1 = new Path("/test1.txt");
  DFSTestUtil.createFile(fs, fileName1, filesize, repl, 1L);
  
  // should be able to replicate to both nodes (2 DN, repl=2)
  DFSTestUtil.waitReplication(fs, fileName1, repl);
  System.out.println("file " + fileName1.getName() + 
      " is created and replicated");
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines of code: 75, Source file: TestDataNodeVolumeFailure.java


Note: The org.apache.hadoop.hdfs.protocol.BlockListAsLongs.getBlockListAsLongs examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license; do not republish without permission.