

Java DatanodeStorage Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.DatanodeStorage. If you are wondering what the DatanodeStorage class is for, how to use it, or where to find working examples, the curated code samples below should help.


The DatanodeStorage class belongs to the org.apache.hadoop.hdfs.server.protocol package. Fifteen code examples of the class are shown below, ordered by popularity by default.
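
Before diving into the examples, here is a minimal orientation sketch of the parts of DatanodeStorage that the examples actually touch: a constructor taking just a storage ID (Example 7), a three-argument constructor taking ID, state, and storage type (Example 15), the static generateUuid() helper (Examples 4 and 6), and the getters (Examples 2 and 10). It only exercises calls that occur below; the import path of StorageType (org.apache.hadoop.fs) is an assumption that matches recent Hadoop 2.x and may differ in your version.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class DatanodeStorageSketch {
  public static void main(String[] args) {
    // Construct with an explicit storage ID only, as several tests below do.
    DatanodeStorage byId = new DatanodeStorage("STORAGE_ID");

    // Or spell out state and storage type explicitly, as Example 15 does,
    // here with a fresh GUID-based storage ID.
    DatanodeStorage explicit = new DatanodeStorage(
        DatanodeStorage.generateUuid(),
        DatanodeStorage.State.NORMAL,
        StorageType.DEFAULT);

    // The getters used throughout the examples.
    System.out.println(byId.getStorageID());
    System.out.println(explicit.getState() + " / " + explicit.getStorageType());
  }
}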

Example 1: getBlockFiles

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }        
  }
  return files;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestDFSShell.java

Example 2: updateStorage

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
DatanodeStorageInfo updateStorage(DatanodeStorage s) {
  synchronized (storageMap) {
    DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
    if (storage == null) {
      LOG.info("Adding new storage ID " + s.getStorageID() +
               " for DN " + getXferAddr());
      storage = new DatanodeStorageInfo(this, s);
      storageMap.put(s.getStorageID(), storage);
    } else if (storage.getState() != s.getState() ||
               storage.getStorageType() != s.getStorageType()) {
      // For backwards compatibility, make sure that the type and
      // state are updated. Some reports from older datanodes do
      // not include these fields so we may have assumed defaults.
      storage.updateFromStorage(s);
      storageMap.put(storage.getStorageID(), storage);
    }
    return storage;
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: DatanodeDescriptor.java

Example 3: getIncrementalBRMapForStorage

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
/**
 * @return pending incremental block report for given {@code storage}
 */
private PerStoragePendingIncrementalBR getIncrementalBRMapForStorage(
    DatanodeStorage storage) {
  PerStoragePendingIncrementalBR mapForStorage =
      pendingIncrementalBRperStorage.get(storage);

  if (mapForStorage == null) {
    // This is the first time we are adding incremental BR state for
    // this storage so create a new map. This is required once per
    // storage, per service actor.
    mapForStorage = new PerStoragePendingIncrementalBR();
    pendingIncrementalBRperStorage.put(storage, mapForStorage);
  }

  return mapForStorage;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: BPServiceActor.java

Example 4: format

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
void format(StorageDirectory sd, NamespaceInfo nsInfo,
            String datanodeUuid) throws IOException {
  sd.clearDirectory(); // create directory
  this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
  this.clusterID = nsInfo.getClusterID();
  this.namespaceID = nsInfo.getNamespaceID();
  this.cTime = 0;
  setDatanodeUuid(datanodeUuid);

  if (sd.getStorageUuid() == null) {
    // Assign a new Storage UUID.
    sd.setStorageUuid(DatanodeStorage.generateUuid());
  }

  writeProperties(sd);
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: DataStorage.java
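
A side note on the ID format: generateUuid() (used above and again in Examples 6 and 14) produces a GUID-based storage ID, and isValidStorageId() (asserted in Example 5 below) accepts exactly such IDs. A minimal check, assuming only these two static methods and assuming that a legacy non-GUID ID fails validation:

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class StorageIdCheck {
  public static void main(String[] args) {
    String id = DatanodeStorage.generateUuid();
    // A freshly generated ID is GUID-based, which is exactly what
    // Example 5's upgrade test asserts about post-upgrade storage IDs.
    System.out.println(DatanodeStorage.isValidStorageId(id));           // expected: true
    System.out.println(DatanodeStorage.isValidStorageId("STORAGE_ID")); // expected: false (legacy-style ID)
  }
}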

Example 5: runLayoutUpgradeTest

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
/**
 * Perform an upgrade using the test image corresponding to
 * testCaseName.
 *
 * @param testCaseName
 * @param expectedStorageId if null, then the upgrade generates a new
 *                          unique storage ID.
 * @throws IOException
 */
private static void runLayoutUpgradeTest(final String testCaseName,
                                         final String expectedStorageId)
    throws IOException {
  TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
  upgrade.unpackStorage(testCaseName + ".tgz", testCaseName + ".txt");
  Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
  initStorageDirs(conf, testCaseName);
  upgradeAndVerify(upgrade, conf, new ClusterVerifier() {
    @Override
    public void verifyClusterPostUpgrade(MiniDFSCluster cluster) throws IOException {
      // Verify that a GUID-based storage ID was generated.
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      StorageReport[] reports =
          cluster.getDataNodes().get(0).getFSDataset().getStorageReports(bpid);
      assertThat(reports.length, is(1));
      final String storageID = reports[0].getStorage().getStorageID();
      assertTrue(DatanodeStorage.isValidStorageId(storageID));

      if (expectedStorageId != null) {
        assertThat(storageID, is(expectedStorageId));
      }
    }
  });
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestDatanodeStartupFixesLegacyStorageIDs.java

Example 6: register

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: NNThroughputBenchmark.java

Example 7: testQueues

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
@Test
public void testQueues() {
  DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
  DatanodeStorage storage = new DatanodeStorage("STORAGE_ID");
  DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(fakeDN, storage);
  msgs.enqueueReportedBlock(storageInfo, block1Gs1, ReplicaState.FINALIZED);
  msgs.enqueueReportedBlock(storageInfo, block1Gs2, ReplicaState.FINALIZED);

  assertEquals(2, msgs.count());
  
  // Nothing queued yet for block 2
  assertNull(msgs.takeBlockQueue(block2Gs1));
  assertEquals(2, msgs.count());
  
  Queue<ReportedBlockInfo> q =
    msgs.takeBlockQueue(block1Gs2DifferentInstance);
  assertEquals(
      "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," +
      "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
      Joiner.on(",").join(q));
  assertEquals(0, msgs.count());
  
  // Should be null if we pull again
  assertNull(msgs.takeBlockQueue(block1Gs1));
  assertEquals(0, msgs.count());
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestPendingDataNodeMessages.java

Example 8: testSafeModeIBRAfterIncremental

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestBlockManager.java

Example 9: testReplaceStorage

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
@Test
public void testReplaceStorage() throws Exception {

  // Create two dummy storages.
  final DatanodeStorageInfo storage1 =
      DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(
      storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfoContiguous((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestBlockInfo.java

Example 10: updateStorage

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
DatanodeStorageInfo updateStorage(DatanodeStorage s) {
  synchronized (storageMap) {
    DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
    if (storage == null) {
      LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(),
          getXferAddr());
      storage = new DatanodeStorageInfo(this, s);
      storageMap.put(s.getStorageID(), storage);
    } else if (storage.getState() != s.getState() ||
               storage.getStorageType() != s.getStorageType()) {
      // For backwards compatibility, make sure that the type and
      // state are updated. Some reports from older datanodes do
      // not include these fields so we may have assumed defaults.
      storage.updateFromStorage(s);
      storageMap.put(storage.getStorageID(), storage);
    }
    return storage;
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 20, Source: DatanodeDescriptor.java

Example 11: add

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
synchronized void add(final DatanodeDescriptor node) {
  xceiverCount += node.getXceiverCount();
  if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
    capacityUsed += node.getDfsUsed();
    blockPoolUsed += node.getBlockPoolUsed();
    nodesInService++;
    nodesInServiceXceiverCount += node.getXceiverCount();
    capacityTotal += node.getCapacity();
    capacityRemaining += node.getRemaining();
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  } else if (!node.isDecommissioned()) {
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  }
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.addStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.addNode(storageType, node);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 27, Source: DatanodeStats.java
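
Two details of add() are easy to miss (its mirror image, subtract(), follows in Example 12): storages in the DatanodeStorage.State.FAILED state are excluded from the per-storage stats, and the HashSet deduplicates storage types so a node with several disks of the same type is counted once per type. Below is a standalone sketch of that filter-and-dedup pattern, using only DatanodeStorage calls that appear elsewhere in this article; the concrete IDs and the StorageType import path are illustrative assumptions.

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class FailedStorageFilter {
  public static void main(String[] args) {
    DatanodeStorage[] storages = {
        new DatanodeStorage("DS-1", DatanodeStorage.State.NORMAL, StorageType.DEFAULT),
        new DatanodeStorage("DS-2", DatanodeStorage.State.NORMAL, StorageType.DEFAULT),
        new DatanodeStorage("DS-3", DatanodeStorage.State.FAILED, StorageType.DEFAULT),
    };
    Set<StorageType> storageTypes = new HashSet<>();
    for (DatanodeStorage s : storages) {
      if (s.getState() != DatanodeStorage.State.FAILED) { // FAILED storages are skipped
        storageTypes.add(s.getStorageType());             // duplicates collapse in the set
      }
    }
    // Two NORMAL storages of the same type yield exactly one per-type entry.
    System.out.println(storageTypes);
  }
}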

Example 12: subtract

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
synchronized void subtract(final DatanodeDescriptor node) {
  xceiverCount -= node.getXceiverCount();
  if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
    capacityUsed -= node.getDfsUsed();
    blockPoolUsed -= node.getBlockPoolUsed();
    nodesInService--;
    nodesInServiceXceiverCount -= node.getXceiverCount();
    capacityTotal -= node.getCapacity();
    capacityRemaining -= node.getRemaining();
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  } else if (!node.isDecommissioned()) {
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  }
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.subtractStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.subtractNode(storageType, node);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 27, Source: DatanodeStats.java

Example 13: format

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
void format(StorageDirectory sd, NamespaceInfo nsInfo,
            String datanodeUuid) throws IOException {
  sd.clearDirectory(); // create directory
  this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
  this.clusterID = nsInfo.getClusterID();
  this.namespaceID = nsInfo.getNamespaceID();
  this.cTime = 0;
  setDatanodeUuid(datanodeUuid);

  if (sd.getStorageUuid() == null) {
    // Assign a new Storage UUID.
    sd.setStorageUuid(DatanodeStorage.generateUuid());
  }

  writeProperties(sd);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 17, Source: DataStorage.java

Example 14: register

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
  dnRegistration.setNamespaceInfo(nsInfo);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  dataNodeProto.blockReport(dnRegistration, bpid, reports,
          new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: NNThroughputBenchmark.java

Example 15: getDatanodeDescriptors

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; // import the dependent package/class
@Override
DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
  final String[] racks = {
      "/d1/r1",
      "/d1/r1",
      "/d1/r2",
      "/d1/r2",
      "/d2/r3",
      "/d2/r3"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  // create an extra storage for dn5.
  DatanodeStorage extraStorage = new DatanodeStorage(
      storages[5].getStorageID() + "-extra", DatanodeStorage.State.NORMAL,
      StorageType.DEFAULT);
  BlockManagerTestUtil.updateStorage(storages[5].getDatanodeDescriptor(),
      extraStorage);
  return DFSTestUtil.toDatanodeDescriptor(storages);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source: TestReplicationPolicy.java
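
A brief note on this example: attaching an extra DatanodeStorage to dn5 presumably flows through the same updateStorage() logic shown in Examples 2 and 10 (BlockManagerTestUtil.updateStorage is a test-only hook, so this routing is an assumption), leaving one datanode with two storages so the replication-policy test can cover nodes that expose multiple storages.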


Note: the org.apache.hadoop.hdfs.server.protocol.DatanodeStorage class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.