Java DatanodeRegistration Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration. If you are unsure what DatanodeRegistration is for, or how and where to use it, the curated examples below may help.


The DatanodeRegistration class belongs to the org.apache.hadoop.hdfs.server.protocol package. Fifteen code examples of the class are shown below, ordered by popularity.
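Before the numbered examples, here is a minimal, hypothetical sketch of how a DatanodeRegistration is constructed. It mirrors the constructors used in Examples 7 and 8 below; all concrete values (addresses, ports, UUID) are placeholders, and the null storage/keys arguments are only appropriate in test code:

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

// Identity of the datanode: IP, hostname, UUID, then transfer, HTTP,
// HTTPS and IPC ports. All values are placeholders.
DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
    "dn-uuid-0001", 50010, 50075, 50475, 50020);

// As in Example 8, storage info and block keys may be null in tests;
// production code passes a real DataStorage and ExportedBlockKeys
// (see Example 7) together with the software version string.
DatanodeRegistration reg = new DatanodeRegistration(dnId, null, null, "");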

Example 1: handleHeartbeat

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time passed since the last datanode 
 * heartbeat then request an immediate block report.  
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary) throws IOException {
  readLock();
  try {
    //get datanode commands
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
        xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary);
    
    //create ha status
    final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
        haContext.getState().getServiceState(),
        getFSImage().getLastAppliedOrWrittenTxId());

    return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo);
  } finally {
    readUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: FSNamesystem.java
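For context, here is a hedged sketch of the datanode side of this exchange. The sendHeartbeat signature matches the mock set up in Example 9 below; bpNamenode and bpRegistration are assumed from the surrounding examples, and the numeric arguments are placeholders:

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

// DN-side heartbeat; the NameNode routes this to handleHeartbeat() above.
HeartbeatResponse resp = bpNamenode.sendHeartbeat(
    bpRegistration,
    new StorageReport[0],  // normally one report per storage volume
    0L, 0L,                // cacheCapacity, cacheUsed
    1,                     // xceiverCount: active transceiver threads
    0,                     // xmitsInProgress
    0,                     // failedVolumes
    null);                 // volumeFailureSummary: no failed volumes
DatanodeCommand[] cmds = resp.getCommands();
if (cmds != null) {
  // execute the NameNode's commands (replication, invalidation, ...)
}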

Example 2: errorReport

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
@Override // DatanodeProtocol
public void errorReport(DatanodeRegistration nodeReg,
                        int errorCode, String msg) throws IOException { 
  checkNNStartup();
  String dnName = 
     (nodeReg == null) ? "Unknown DataNode" : nodeReg.toString();

  if (errorCode == DatanodeProtocol.NOTIFY) {
    LOG.info("Error report from " + dnName + ": " + msg);
    return;
  }
  verifyRequest(nodeReg);

  if (errorCode == DatanodeProtocol.DISK_ERROR) {
    LOG.warn("Disk error on " + dnName + ": " + msg);
  } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
    LOG.warn("Fatal disk error on " + dnName + ": " + msg);
    namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg);            
  } else {
    LOG.info("Error report from " + dnName + ": " + msg);
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: NameNodeRpcServer.java
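The datanode side of this RPC is symmetric. Below is a hedged sketch that reuses the bpNamenode and bpRegistration names from the other examples; the error message is hypothetical:

import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

// A DISK_ERROR is logged as a warning by the handler above; a
// FATAL_DISK_ERROR additionally removes the datanode from the cluster.
bpNamenode.errorReport(bpRegistration,
    DatanodeProtocol.DISK_ERROR,
    "I/O error on volume /data/1");  // hypothetical message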

Example 3: bpRegistrationSucceeded

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
/**
 * Check that the registration returned from a NameNode is consistent
 * with the information in the storage. If the storage is fresh/unformatted,
 * sets the storage ID based on this registration.
 * Also updates the block pool's state in the secret manager.
 */
synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
    String blockPoolId) throws IOException {
  // Set the ID if we haven't already
  if (null == id) {
    id = bpRegistration;
  }

  if(!storage.getDatanodeUuid().equals(bpRegistration.getDatanodeUuid())) {
    throw new IOException("Inconsistent Datanode IDs. Name-node returned "
        + bpRegistration.getDatanodeUuid()
        + ". Expecting " + storage.getDatanodeUuid());
  }
  
  registerBlockPoolWithSecretManager(bpRegistration, blockPoolId);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: DataNode.java

Example 4: reportTo

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode, 
  DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
  if (bpRegistration == null) {
    return;
  }
  DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
  String[] uuids = { storageUuid };
  StorageType[] types = { storageType };
  LocatedBlock[] locatedBlock = { new LocatedBlock(block,
      dnArr, uuids, types) };

  try {
    bpNamenode.reportBadBlocks(locatedBlock);
  } catch (RemoteException re) {
    DataNode.LOG.info("reportBadBlock encountered RemoteException for "
        + "block:  " + block , re);
  } catch (IOException e) {
    throw new BPServiceActorActionException("Failed to report bad block "
        + block + " to namenode: " + e.getMessage());
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: ReportBadBlockAction.java

Example 5: cacheReport

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
@Override
public DatanodeCommand cacheReport(DatanodeRegistration registration,
    String poolId, List<Long> blockIds) throws IOException {
  CacheReportRequestProto.Builder builder =
      CacheReportRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  for (Long blockId : blockIds) {
    builder.addBlocks(blockId);
  }
  
  CacheReportResponseProto resp;
  try {
    resp = rpcProxy.cacheReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  if (resp.hasCmd()) {
    return PBHelper.convert(resp.getCmd());
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: DatanodeProtocolClientSideTranslatorPB.java
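A hedged caller sketch for this translator method; the block pool ID and block IDs are placeholders, and bpNamenode/bpRegistration are assumed from the other examples:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;

// Report the block IDs currently cached by this datanode for one pool.
List<Long> cachedBlockIds = Arrays.asList(1001L, 1002L);
DatanodeCommand cmd = bpNamenode.cacheReport(
    bpRegistration, "BP-1-127.0.0.1-1000000000000", cachedBlockIds);
if (cmd != null) {
  // the NameNode requested cache changes (e.g. uncache a block)
}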

Example 6: blockReceivedAndDeleted

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
@Override
public void blockReceivedAndDeleted(DatanodeRegistration registration,
    String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks)
    throws IOException {
  BlockReceivedAndDeletedRequestProto.Builder builder = 
      BlockReceivedAndDeletedRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  for (StorageReceivedDeletedBlocks storageBlock : receivedAndDeletedBlocks) {
    StorageReceivedDeletedBlocksProto.Builder repBuilder = 
        StorageReceivedDeletedBlocksProto.newBuilder();
    repBuilder.setStorageUuid(storageBlock.getStorage().getStorageID());  // Set for wire compatibility.
    repBuilder.setStorage(PBHelper.convert(storageBlock.getStorage()));
    for (ReceivedDeletedBlockInfo rdBlock : storageBlock.getBlocks()) {
      repBuilder.addBlocks(PBHelper.convert(rdBlock));
    }
    builder.addBlocks(repBuilder.build());
  }
  try {
    rpcProxy.blockReceivedAndDeleted(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: DatanodeProtocolClientSideTranslatorPB.java
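A hedged sketch of building the report that this method serializes. The Block values and storage UUID are placeholders, and the ReceivedDeletedBlockInfo constructor arguments are an assumption based on the types used above:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;

// One newly received block on one storage, with no deletion hint.
ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo(
    new Block(1001L, 134217728L, 1L),  // id, numBytes, generation stamp
    ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
    null);
StorageReceivedDeletedBlocks[] report = {
    new StorageReceivedDeletedBlocks(
        new DatanodeStorage(DatanodeStorage.generateUuid()),
        new ReceivedDeletedBlockInfo[] { info })
};
bpNamenode.blockReceivedAndDeleted(bpRegistration, poolId, report);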

Example 7: register

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: NNThroughputBenchmark.java

Example 8: testSafeModeIBRAfterIncremental

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestBlockManager.java

Example 9: setupNNMock

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
/**
 * Set up a mock NN with the bare minimum for a DN to register to it.
 */
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
    throws Exception {
  DatanodeProtocolClientSideTranslatorPB mock =
      Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
      .when(mock).versionRequest();
  
  Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
    .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
  
  Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
    .when(mock).sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyLong(),
        Mockito.anyLong(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.any(VolumeFailureSummary.class));
  mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
  return mock;
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestBPOfferService.java

Example 10: waitForBlockReport

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
private void waitForBlockReport(final DatanodeProtocolClientSideTranslatorPB mockNN)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReport(
            Mockito.<DatanodeRegistration>anyObject(),  
            Mockito.eq(FAKE_BPID),
            Mockito.<StorageBlockReport[]>anyObject(),
            Mockito.<BlockReportContext>anyObject());
        return true;
      } catch (Throwable t) {
        LOG.info("waiting on block report: " + t.getMessage());
        return false;
      }
    }
  }, 500, 10000);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestBPOfferService.java

Example 11: waitForBlockReceived

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
    final ExtendedBlock fakeBlock,
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
  final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
    ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReceivedAndDeleted(
          Mockito.<DatanodeRegistration>anyObject(),
          Mockito.eq(fakeBlockPoolId),
          captor.capture());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  }, 100, 10000);
  return captor.getValue()[0].getBlocks();
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestBPOfferService.java

Example 12: testReportBlockReceived

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
/**
 * Ensure that an IBR is generated immediately for a block received by
 * the DN.
 *
 * @throws InterruptedException
 * @throws IOException
 */
@Test (timeout=60000)
public void testReportBlockReceived() throws InterruptedException, IOException {
  try {
    DatanodeProtocolClientSideTranslatorPB nnSpy = spyOnDnCallsToNn();
    injectBlockReceived();

    // Sleep for a very short time, this is necessary since the IBR is
    // generated asynchronously.
    Thread.sleep(2000);

    // Ensure that the received block was reported immediately.
    Mockito.verify(nnSpy, times(1)).blockReceivedAndDeleted(
        any(DatanodeRegistration.class),
        anyString(),
        any(StorageReceivedDeletedBlocks[].class));
  } finally {
    cluster.shutdown();
    cluster = null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TestIncrementalBlockReports.java

Example 13: blockReport_03

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
/**
 * Test writes a file and closes it.
 * A block report is generated with a bad generation stamp (GS) for a
 * single block.
 * The block report is forced and the number of corrupt blocks is checked.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_03() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  writeFile(METHOD_NAME, FILE_SIZE, filePath);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: BlockReportTestBase.java

Example 14: blockReport_06

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
/**
 * Test creates a file and closes it.
 * A second datanode is started in the cluster.
 * As soon as the replication process completes, the test runs a block
 * report and checks that no under-replicated blocks are left.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_06() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();
  assertEquals("Wrong number of PendingReplication Blocks",
    0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: BlockReportTestBase.java

Example 15: requestBlockReportLeaseId

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the required package/class
public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
  assert namesystem.hasReadLock();
  DatanodeDescriptor node = null;
  try {
    node = datanodeManager.getDatanode(nodeReg);
  } catch (UnregisteredNodeException e) {
    LOG.warn("Unregistered datanode {}", nodeReg);
    return 0;
  }
  if (node == null) {
    LOG.warn("Failed to find datanode {}", nodeReg);
    return 0;
  }
  // Request a new block report lease.  The BlockReportLeaseManager has
  // its own internal locking.
  long leaseId = blockReportLeaseManager.requestLease(node);
  BlockManagerFaultInjector.getInstance().
      requestBlockReportLease(node, leaseId);
  return leaseId;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 21, Source: BlockManager.java
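A hedged caller sketch: the assert at the top of this method requires the namesystem read lock, which callers such as handleHeartbeat (Example 1) already hold; namesystem, blockManager, and nodeReg are assumed names:

namesystem.readLock();
try {
  long leaseId = blockManager.requestBlockReportLeaseId(nodeReg);
  if (leaseId != 0) {
    // lease granted; hand it back to the DN so it may send a full
    // block report
  }
} finally {
  namesystem.readUnlock();
}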


Note: The org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers, and copyright remains with the original authors; consult each project's license before distributing or reusing the code, and do not republish without permission.