

Java HeartbeatResponse Class Code Examples

This article collects representative usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse. If you are unsure what the HeartbeatResponse class does or how to use it, the curated examples below should help.


The HeartbeatResponse class belongs to the org.apache.hadoop.hdfs.server.protocol package. Fifteen code examples of the class are presented below, sorted by popularity by default.
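
Before the examples, here is a minimal orientation sketch (not taken from any of the projects listed below): a NameNode assembles a HeartbeatResponse from an array of DatanodeCommands plus its HA status, and callers read those fields back. It relies only on APIs that appear in the examples that follow (the three-argument HeartbeatResponse constructor, NNHAStatusHeartbeat, getCommands(), getNameNodeHaState()); the class name HeartbeatResponseSketch and the transaction id 42L are hypothetical.

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;

public class HeartbeatResponseSketch {
  public static void main(String[] args) {
    // HA status the NameNode attaches to every heartbeat reply
    // (ACTIVE state plus a hypothetical last-applied transaction id).
    NNHAStatusHeartbeat haState =
        new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 42L);

    // Three-argument constructor seen in the naver/hadoop examples:
    // datanode commands, HA status, and optional rolling-upgrade info.
    HeartbeatResponse response =
        new HeartbeatResponse(new DatanodeCommand[0], haState, null);

    // A DataNode-side caller typically iterates the returned commands
    // and inspects the NameNode's HA state.
    DatanodeCommand[] cmds = response.getCommands();
    System.out.println("commands in response: "
        + (cmds == null ? 0 : cmds.length));
    System.out.println("NameNode HA state: " + response.getNameNodeHaState());
  }
}

The examples then cover both sides of this exchange: FSNamesystem#handleHeartbeat builds the response on the NameNode, BPServiceActor#sendHeartBeat requests it from the DataNode, and the protocol translator classes convert it to and from protobuf.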

Example 1: handleHeartbeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time passed since the last datanode 
 * heartbeat then request an immediate block report.  
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary) throws IOException {
  readLock();
  try {
    //get datanode commands
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
        xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary);
    
    //create ha status
    final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
        haContext.getState().getServiceState(),
        getFSImage().getLastAppliedOrWrittenTxId());

    return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo);
  } finally {
    readUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: FSNamesystem.java

Example 2: sendHeartBeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
HeartbeatResponse sendHeartBeat() throws IOException {
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
              " storage reports from service actor: " + this);
  }
  
  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      numFailedVolumes,
      volumeFailureSummary);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: BPServiceActor.java

Example 3: sendHeartBeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
HeartbeatResponse sendHeartBeat(boolean requestBlockReportLease)
    throws IOException {
  scheduler.scheduleNextHeartbeat();
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
              " storage reports from service actor: " + this);
  }
  
  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      numFailedVolumes,
      volumeFailureSummary,
      requestBlockReportLease);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 25, Source: BPServiceActor.java

Example 4: handleHeartbeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time passed since the last datanode 
 * heartbeat then request an immediate block report.  
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    long capacity, long dfsUsed, long remaining, long blockPoolUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes) 
      throws IOException {
  readLock();
  try {
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, blockPoolId, capacity, dfsUsed, remaining, blockPoolUsed,
        xceiverCount, maxTransfer, failedVolumes);
    return new HeartbeatResponse(cmds, createHaStatusHeartbeat());
  } finally {
    readUnlock();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 28, Source: FSNamesystem.java

Example 5: sendHeartBeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
HeartbeatResponse sendHeartBeat() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat from service actor: " + this);
  }
  // reports number of failed volumes
  StorageReport[] report = { new StorageReport(bpRegistration.getStorageID(),
      false,
      dn.getFSDataset().getCapacity(),
      dn.getFSDataset().getDfsUsed(),
      dn.getFSDataset().getRemaining(),
      dn.getFSDataset().getBlockPoolUsed(bpos.getBlockPoolId())) };
  return bpNamenode.sendHeartbeat(bpRegistration, report,
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      dn.getFSDataset().getNumFailedVolumes());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 17, Source: BPServiceActor.java

Example 6: sendHeartbeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
@Override
public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
    StorageReport[] reports, int xmitsInProgress, int xceiverCount,
    int failedVolumes) throws IOException {
  HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
      .setFailedVolumes(failedVolumes);
  for (StorageReport r : reports) {
    builder.addReports(PBHelper.convert(r));
  }
  
  HeartbeatResponseProto resp;
  try {
    resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  DatanodeCommand[] cmds = new DatanodeCommand[resp.getCmdsList().size()];
  int index = 0;
  for (DatanodeCommandProto p : resp.getCmdsList()) {
    cmds[index] = PBHelper.convert(p);
    index++;
  }
  return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()));
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 27, Source: DatanodeProtocolClientSideTranslatorPB.java

Example 7: handleHeartbeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time passed since the last datanode 
 * heartbeat then request an immediate block report.  
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes)
      throws IOException {
  readLock();
  try {
    //get datanode commands
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
        xceiverCount, maxTransfer, failedVolumes);
    
    //create ha status
    final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
        haContext.getState().getServiceState(),
        getFSImage().getLastAppliedOrWrittenTxId());

    return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo);
  } finally {
    readUnlock();
  }
}
 
Developer: yncxcw, Project: FlexMap, Lines: 35, Source: FSNamesystem.java

Example 8: sendHeartBeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
HeartbeatResponse sendHeartBeat() throws IOException {
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
              " storage reports from service actor: " + this);
  }

  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      dn.getFSDataset().getNumFailedVolumes());
}
 
Developer: yncxcw, Project: FlexMap, Lines: 17, Source: BPServiceActor.java

Example 9: sendHeartbeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
@Override
public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
    StorageReport[] reports, int xmitsInProgress, int xceiverCount,
    int failedVolumes) throws IOException {
  HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
      .setFailedVolumes(failedVolumes);
  for (StorageReport r : reports) {
    builder.addReports(PBHelper.convert(r));
  }

  HeartbeatResponseProto resp;
  try {
    resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  DatanodeCommand[] cmds = new DatanodeCommand[resp.getCmdsList().size()];
  int index = 0;
  for (DatanodeCommandProto p : resp.getCmdsList()) {
    cmds[index] = PBHelper.convert(p);
    index++;
  }
  return new HeartbeatResponse(cmds);
}
 
Developer: hopshadoop, Project: hops, Lines: 27, Source: DatanodeProtocolClientSideTranslatorPB.java

Example 10: sendHeartbeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
@Override // DatanodeProtocol
public HeartbeatResponse sendHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] report, long dnCacheCapacity, long dnCacheUsed,
    int xmitsInProgress, int xceiverCount,
    int failedVolumes, VolumeFailureSummary volumeFailureSummary)
    throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  return namesystem.handleHeartbeat(nodeReg, report,
      dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
      failedVolumes, volumeFailureSummary);
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: NameNodeRpcServer.java

Example 11: handleRollingUpgradeStatus

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
private void handleRollingUpgradeStatus(HeartbeatResponse resp) throws IOException {
  RollingUpgradeStatus rollingUpgradeStatus = resp.getRollingUpdateStatus();
  if (rollingUpgradeStatus != null &&
      rollingUpgradeStatus.getBlockPoolId().compareTo(bpos.getBlockPoolId()) != 0) {
    // Can this ever occur?
    LOG.error("Invalid BlockPoolId " +
        rollingUpgradeStatus.getBlockPoolId() +
        " in HeartbeatResponse. Expected " +
        bpos.getBlockPoolId());
  } else {
    bpos.signalRollingUpgrade(rollingUpgradeStatus != null);
  }
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: BPServiceActor.java

Example 12: sendHeartbeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
@Override
public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xmitsInProgress, int xceiverCount, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary) throws IOException {
  HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
      .setFailedVolumes(failedVolumes);
  builder.addAllReports(PBHelper.convertStorageReports(reports));
  if (cacheCapacity != 0) {
    builder.setCacheCapacity(cacheCapacity);
  }
  if (cacheUsed != 0) {
    builder.setCacheUsed(cacheUsed);
  }
  if (volumeFailureSummary != null) {
    builder.setVolumeFailureSummary(PBHelper.convertVolumeFailureSummary(
        volumeFailureSummary));
  }
  HeartbeatResponseProto resp;
  try {
    resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  DatanodeCommand[] cmds = new DatanodeCommand[resp.getCmdsList().size()];
  int index = 0;
  for (DatanodeCommandProto p : resp.getCmdsList()) {
    cmds[index] = PBHelper.convert(p);
    index++;
  }
  RollingUpgradeStatus rollingUpdateStatus = null;
  if (resp.hasRollingUpgradeStatus()) {
    rollingUpdateStatus = PBHelper.convert(resp.getRollingUpgradeStatus());
  }
  return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()),
      rollingUpdateStatus);
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: DatanodeProtocolClientSideTranslatorPB.java

Example 13: sendHeartbeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
@Override
public HeartbeatResponseProto sendHeartbeat(RpcController controller,
    HeartbeatRequestProto request) throws ServiceException {
  HeartbeatResponse response;
  try {
    final StorageReport[] report = PBHelper.convertStorageReports(
        request.getReportsList());
    VolumeFailureSummary volumeFailureSummary =
        request.hasVolumeFailureSummary() ? PBHelper.convertVolumeFailureSummary(
            request.getVolumeFailureSummary()) : null;
    response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
        report, request.getCacheCapacity(), request.getCacheUsed(),
        request.getXmitsInProgress(),
        request.getXceiverCount(), request.getFailedVolumes(),
        volumeFailureSummary);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  HeartbeatResponseProto.Builder builder = HeartbeatResponseProto
      .newBuilder();
  DatanodeCommand[] cmds = response.getCommands();
  if (cmds != null) {
    for (int i = 0; i < cmds.length; i++) {
      if (cmds[i] != null) {
        builder.addCmds(PBHelper.convert(cmds[i]));
      }
    }
  }
  builder.setHaStatus(PBHelper.convert(response.getNameNodeHaState()));
  RollingUpgradeStatus rollingUpdateStatus = response
      .getRollingUpdateStatus();
  if (rollingUpdateStatus != null) {
    builder.setRollingUpgradeStatus(PBHelper
        .convertRollingUpgradeStatus(rollingUpdateStatus));
  }
  return builder.build();
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: DatanodeProtocolServerSideTranslatorPB.java

Example 14: setHeartbeatResponse

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
private static void setHeartbeatResponse(DatanodeCommand[] cmds)
    throws IOException {
  NNHAStatusHeartbeat ha = new NNHAStatusHeartbeat(HAServiceState.ACTIVE,
      fsImage.getLastAppliedOrWrittenTxId());
  HeartbeatResponse response = new HeartbeatResponse(cmds, ha, null);
  doReturn(response).when(spyNN).sendHeartbeat(
      (DatanodeRegistration) any(),
      (StorageReport[]) any(), anyLong(), anyLong(),
      anyInt(), anyInt(), anyInt(), (VolumeFailureSummary) any());
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: TestFsDatasetCache.java

Example 15: handleHeartbeat

import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; // import the required package/class
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 *
 * If a substantial amount of time passed since the last datanode
 * heartbeat then request an immediate block report.
 *
 * @return an array of datanode commands
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary,
    boolean requestFullBlockReportLease) throws IOException {
  readLock();
  try {
    //get datanode commands
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, reports, getBlockPoolId(), cacheCapacity, cacheUsed,
        xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary);
    long blockReportLeaseId = 0;
    if (requestFullBlockReportLease) {
      blockReportLeaseId =  blockManager.requestBlockReportLeaseId(nodeReg);
    }
    //create ha status
    final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
        haContext.getState().getServiceState(),
        getFSImage().getLastAppliedOrWrittenTxId());

    return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo,
        blockReportLeaseId);
  } finally {
    readUnlock();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 40, Source: FSNamesystem.java


Note: The org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.