

Java BlockListAsLongs Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.BlockListAsLongs. If you are wondering what BlockListAsLongs is for, or how to use it in practice, the curated class examples below should help.


BlockListAsLongs belongs to the org.apache.hadoop.hdfs.protocol package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
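Before the individual examples, here is a minimal sketch of the pattern most of them share: a full block report maps each DatanodeStorage to a BlockListAsLongs, which decodes into individual blocks. The sketch reuses only calls that appear in the examples below (getAllBlockReports, getNamesystem, getNumberOfBlocks); the helper class itself is a hypothetical addition, and a Hadoop HDFS test classpath is assumed.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

class BlockReportCounter { // hypothetical helper, not taken from the examples
  /** Sum the decoded block count across every storage of every datanode. */
  static long countReportedBlocks(MiniDFSCluster cluster) throws IOException {
    String poolId = cluster.getNamesystem().getBlockPoolId();
    long total = 0;
    // One Map<DatanodeStorage, BlockListAsLongs> entry per datanode.
    for (Map<DatanodeStorage, BlockListAsLongs> perNode :
        cluster.getAllBlockReports(poolId)) {
      for (BlockListAsLongs report : perNode.values()) {
        total += report.getNumberOfBlocks(); // decoded block count
      }
    }
    return total;
  }
}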

Example 1: getBlockFiles

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }        
  }
  return files;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestDFSShell.java

Example 2: register

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: NNThroughputBenchmark.java
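In this benchmark, register() impersonates a datanode: it pulls version info from the NameNode, registers a fabricated DatanodeID, and then sends a first block report containing the BlockListAsLongs.EMPTY singleton, so the NameNode marks the simulated node as live without any real blocks behind it.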

Example 3: testSafeModeIBRAfterIncremental

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestBlockManager.java

Example 4: verifyCapturedArguments

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = report.getBlocks();
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestDnRespectsBlockReportSplitThreshold.java
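A hedged sketch of how such a captor is typically wired up before verifyCapturedArguments runs; the nnSpy variable and the exact verify call are illustrative assumptions, not code from the project:

// Assumed Mockito setup (nnSpy is a hypothetical spy on the NameNode RPC proxy):
ArgumentCaptor<StorageBlockReport[]> captor =
    ArgumentCaptor.forClass(StorageBlockReport[].class);
Mockito.verify(nnSpy, Mockito.atLeastOnce()).blockReport(
    Mockito.any(DatanodeRegistration.class), Mockito.anyString(),
    captor.capture(), Mockito.any(BlockReportContext.class));
// captor.getAllValues() then holds every StorageBlockReport[] passed to blockReport.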

Example 5: getMaterializedReplicas

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
private static List<MaterializedReplica> getMaterializedReplicas(
    MiniDFSCluster cluster) throws IOException {
  List<MaterializedReplica> replicas = new ArrayList<>();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks =
      cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        replicas.add(cluster.getMaterializedReplica(i,
            new ExtendedBlock(poolId, b)));
      }
    }
  }
  return replicas;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 18, Source: TestDFSShell.java

Example 6: register

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
  dnRegistration.setNamespaceInfo(nsInfo);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  dataNodeProto.blockReport(dnRegistration, bpid, reports,
          new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: NNThroughputBenchmark.java

Example 7: testSafeModeIBRAfterIncremental

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.setAlive(true);

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: TestBlockManager.java

Example 8: register

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 25, Source: NNThroughputBenchmark.java
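Note the API shift visible between the examples: in Hadoop 2.6-era code (this CDH5 example and the hadoop-plus/FlexMap examples below), a report is constructed from two block lists (finalized and under-construction replicas) via new BlockListAsLongs(...) and shipped as the raw long[] returned by getBlockListAsLongs(), whereas in the Hadoop 2.7+ examples above, StorageBlockReport takes the BlockListAsLongs object directly and the empty report is the BlockListAsLongs.EMPTY singleton.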

Example 9: verifyCapturedArguments

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = new BlockListAsLongs(report.getBlocks());
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 19, Source: TestDnRespectsBlockReportSplitThreshold.java

Example 10: IncrementalBlockReport

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
public IncrementalBlockReport(Block[] blocks) {

    currentBlock = 0;
    currentHint = 0;

    if (blocks == null || blocks.length == 0) {
      this.delHintsMap = LightWeightBitSet.getBitSet(0);
      this.delHints = new String[0];
      this.blocks = new long[0];
      return;
    }
    this.delHintsMap = LightWeightBitSet.getBitSet(blocks.length);

    ArrayList<String> hints = new ArrayList<String>(0);
 
    for (int i = 0; i < blocks.length; i++) {
      Block b = blocks[i];
      if (b instanceof ReceivedBlockInfo) {
        ReceivedBlockInfo rbi = (ReceivedBlockInfo) b;
        hints.add(rbi.getDelHints());
        LightWeightBitSet.set(delHintsMap, i);
      }
    }
    this.delHints = hints.toArray(new String[hints.size()]);
    this.blocks = BlockListAsLongs.convertToArrayLongs(blocks);
  }
 
Developer: rhli, Project: hadoop-EAR, Lines: 27, Source: IncrementalBlockReport.java
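Here BlockListAsLongs.convertToArrayLongs packs the reported blocks into a flat long[], while a parallel LightWeightBitSet marks which positions carry a deletion hint; the hints themselves travel as a compact String[] alongside the encoded blocks instead of being interleaved with them.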

Example 11: blockReport

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + " " + blist.getNumberOfBlocks()
         + " blocks");
  }

  namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
  if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState())
    return new FinalizeCommand(poolId);
  return null;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 17, Source: NameNodeRpcServer.java

Example 12: register

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 24, Source: NNThroughputBenchmark.java

Example 13: testSafeModeIBRAfterIncremental

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  node.setStorageID("dummy-storage");
  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertTrue(node.isFirstBlockReport());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, "pool", new BlockListAsLongs(null, null));
  verify(node).receivedBlockReport();
  assertFalse(node.isFirstBlockReport());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 25, Source: TestBlockManager.java

Example 14: blockReport_06

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
/**
 * Test creates a file and closes it.
 * The second datanode is started in the cluster.
 * As soon as the replication process is completed, the test runs
 * a block report and checks that no under-replicated blocks are left.
 *
 * @throws IOException in case of an error
 */
@Test
public void blockReport_06() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] report = { new StorageBlockReport(
      new DatanodeStorage(dnR.getStorageID()),
      new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
  cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
  printStats();
  assertEquals("Wrong number of PendingReplication Blocks",
      0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 30, Source: TestBlockReport.java

Example 15: register

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
 
Developer: yncxcw, Project: FlexMap, Lines: 24, Source: NNThroughputBenchmark.java


Note: The org.apache.hadoop.hdfs.protocol.BlockListAsLongs class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.